repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
koelling/amplimap | [
"cbd5b7b8c2f703982d8964a3c77bd350a47f08a6"
] | [
"amplimap/coverage.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains methods for processing and aggregating coverage files generated by ``bedtools``.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\n\nfrom .reader import read_sample_info\n\ncov_cols = ['Target', 'min_coverage', 'sum_coverage', 'basepairs', 'cov_per_bp', 'fraction_zero_coverage', 'fraction_10x_coverage', 'fraction_30x_coverage']\ncov_cols_dtypes = dict(zip(cov_cols, [str, int, int, int, float, float]))\n\ndef fraction_zero_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 0.\"\"\"\n return 1.0 * (coverage == 0).sum() / len(coverage)\n\ndef fraction_10x_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 10 or more.\"\"\"\n return 1.0 * (coverage >= 10).sum() / len(coverage)\n\ndef fraction_30x_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 30 or more.\"\"\"\n return 1.0 * (coverage >= 30).sum() / len(coverage)\n\ndef process_file(input: str, output: str):\n \"\"\"Read raw bedtools coverage file, calculate summary statistics and output them as CSV file.\n\n Args:\n input: path to a bedtools coverage file\n output: path to the summary CSV file\n \"\"\"\n\n # read bedtools output\n depth = pd.read_csv(input, sep='\\t', names = ['chr', 'start_0', 'end', 'id', 'score', 'strand', 'position', 'coverage'], low_memory=False)\n\n # summarize\n summary = depth.groupby('id').aggregate({'coverage': [np.min, np.sum, len, np.mean, fraction_zero_coverage, fraction_10x_coverage, fraction_30x_coverage]})\n\n # make id index into normal column, then reset column names\n summary.reset_index(level=0, inplace=True)\n summary.columns = cov_cols\n\n # write file\n summary.to_csv(output, index = False)\n\ndef aggregate(input, output):\n \"\"\"Read coverage summary files and create aggregate files.\n\n Args:\n input: dict containing 'csvs', the list of csvs fils to aggregate, and optionally 'sample_info', a table with additional sample annotation\n output: dict containing paths for output files: merged, min_coverage, cov_per_bp, fraction_zero_coverage\n \"\"\"\n # load sample information table\n sample_info = None\n if 'sample_info' in input and len(input['sample_info']) > 0:\n sample_info = read_sample_info(input['sample_info'][0])\n\n merged = None\n for file in input['csvs']:\n sname = os.path.basename(file)\n sname = re.sub(r'\\.coverage\\.csv$', '', sname)\n\n print('Reading', file, 'for', sname, '...')\n df = pd.read_csv(file,\n index_col = False,\n dtype = cov_cols_dtypes)\n df['Sample'] = sname\n print(sname, 'coverage data shape:', str(df.shape))\n\n if merged is None:\n merged = df\n else:\n merged = merged.append(df, ignore_index = True)\n\n assert merged is not None, \\\n '\\n\\nABORTED: Did not find any coverage data!\\n\\n'\n\n print('Merged data shape:', str(merged.shape))\n print(merged.head())\n\n print('Duplicated:')\n print(merged[merged.duplicated(['Target', 'Sample'], keep=False)])\n\n if sample_info is not None:\n merged = merged.join(sample_info, on = ['Sample', 'Target'], how = 'left')\n\n # make matrices\n for column in ['min_coverage', 'cov_per_bp', 'fraction_zero_coverage']:\n pivoted = merged.pivot(index='Target', columns='Sample', values=column)\n print('Made pivot table for', column, ' with shape', str(pivoted.shape))\n pivoted.to_csv(output[column])\n print(output[column])\n\n # output full merged data set\n merged.to_csv(output['merged'], index = False)"
] | [
[
"pandas.read_csv"
]
] |
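The `amplimap/coverage.py` cell above summarizes per-base `bedtools` coverage with a pandas groupby-aggregate. Below is a minimal, self-contained sketch of that pattern; the toy data and helper names are illustrative and not taken from the dataset row.

```python
import pandas as pd

def frac_10x(cov):
    # Fraction of bases covered at >= 10x.
    return (cov >= 10).mean()

def frac_30x(cov):
    # Fraction of bases covered at >= 30x.
    return (cov >= 30).mean()

# Toy per-base table in the shape bedtools per-base coverage produces:
# one row per target base, with the read depth at that base.
depth = pd.DataFrame({
    "id": ["ampl1"] * 4 + ["ampl2"] * 4,
    "coverage": [0, 12, 35, 40, 9, 10, 30, 31],
})

summary = depth.groupby("id")["coverage"].agg(
    min_coverage="min",
    basepairs="size",
    cov_per_bp="mean",
    fraction_10x=frac_10x,
    fraction_30x=frac_30x,
)
print(summary.reset_index())
```

Named aggregation keeps the output column names explicit, which mirrors how the file above resets `summary.columns` to a fixed list before writing the CSV.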
joey12300/Paddle | [
"59102c6dcd2def3091f5c37816354ac69d669809"
] | [
"python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport sys\nsys.path.append(\"..\")\n\nfrom test_softmax_op import stable_softmax\nfrom op_test import OpTest\nimport paddle.fluid.core as core\nimport paddle\n\nimport unittest\nimport numpy as np\n\n\ndef cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):\n if soft_label:\n return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)\n\n shape = softmax.shape\n axis %= len(shape)\n n = int(np.prod(shape[:axis]))\n axis_dim = shape[axis]\n remain = int(np.prod(shape[axis + 1:]))\n softmax_reshape = softmax.reshape((n, axis_dim, remain))\n label_reshape = label.reshape((n, 1, remain))\n result = np.zeros_like(label_reshape, dtype=softmax.dtype)\n for i in range(n):\n for j in range(remain):\n lbl = label_reshape[i, 0, j]\n if lbl != ignore_index:\n result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])\n return result.reshape(label.shape)\n\n\nclass TestSoftmaxWithCrossEntropyOp(OpTest):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = False\n self.soft_label = False\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [41, 37]\n self.use_xpu = True\n\n def setUp(self):\n self.initParams()\n\n logits = getattr(\n self, \"logits\",\n np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))\n softmax = np.apply_along_axis(stable_softmax, self.axis, logits)\n\n if self.soft_label:\n labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)\n labels /= np.sum(labels, axis=self.axis, keepdims=True)\n else:\n axis_dim = self.shape[self.axis]\n self.shape[self.axis] = 1\n labels = np.random.randint(0, axis_dim, self.shape, dtype=\"int64\")\n\n loss = cross_entropy(softmax, labels, self.soft_label, self.axis,\n self.ignore_index)\n\n self.inputs = {\"Logits\": logits, \"Label\": labels}\n self.outputs = {\n \"Softmax\": softmax.astype(self.dtype),\n \"Loss\": loss.astype(self.dtype)\n }\n self.attrs = {\n \"numeric_stable_mode\": self.numeric_stable_mode,\n \"soft_label\": self.soft_label,\n }\n if self.ignore_index >= 0:\n self.attrs['ignore_index'] = self.ignore_index\n if self.axis != -1:\n self.attrs['axis'] = self.axis\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp(TestSoftmaxWithCrossEntropyOp):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = 
-1\n self.ignore_index = -1\n self.dtype = np.float32\n self.use_xpu = True\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp2(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with soft labels.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = True\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [41, 37]\n self.use_xpu = True\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp3(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with ignore_index.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [41, 37]\n self.ignore_index = 5\n self.axis = -1\n self.dtype = np.float32\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis1(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 0\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis2(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 1\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis3(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 2\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpAxis4(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n Given axis != -1\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.dtype = np.float32\n self.axis = 3\n self.ignore_index = -1\n self.shape = [3, 5, 7, 11]\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpAxisDimEqualOne(\n 
TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n Given axis != -1\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [3, 5, 7, 1]\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis1(\n# TestXPUSoftmaxWithCrossEntropyOp):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 0\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis2(\n# TestXPUSoftmaxWithCrossEntropyOp2):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 1\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis3(\n# TestXPUSoftmaxWithCrossEntropyOp2):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 2\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis4(\n TestXPUSoftmaxWithCrossEntropyOp2):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = True\n self.shape = [3, 5, 7, 11]\n self.axis = 3\n self.ignore_index = -1\n self.dtype = np.float32\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 1\n# self.axis = 0\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 0\n# self.axis = 1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 3\n# self.axis = 2\n# self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4(\n TestXPUSoftmaxWithCrossEntropyOp3):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.ignore_index = 3\n self.axis = 3\n self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpBoundary0(\n TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test stable softmax with cross entropy operator will not product INF\n with small logits value.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n 
self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = -1\n self.ignore_index = -1\n self.dtype = np.float32\n self.logits = np.full(self.shape, -500.0).astype(self.dtype)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpBoundary1(\n TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test stable softmax with cross entropy operator will not product INF\n with small logits value.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = -1\n self.ignore_index = -1\n self.dtype = np.float32\n self.logits = np.full(self.shape, 1000.0).astype(self.dtype)\n self.logits[:, :, 0, :] = -1000.0\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.sum",
"numpy.prod",
"numpy.log",
"numpy.apply_along_axis",
"numpy.random.randint",
"numpy.full"
]
] |
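The Paddle test cell above builds its expected loss from a NumPy reference: a numerically stable softmax followed by a negative-log lookup of the true class along the last axis. Here is a standalone sketch of that reference logic; the function names are illustrative stand-ins, not Paddle's `stable_softmax`/`cross_entropy`.

```python
import numpy as np

def stable_softmax(x):
    # Subtract the row max before exponentiating to avoid overflow.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

def hard_label_cross_entropy(softmax, labels):
    # Negative log-likelihood of the true class along the last axis.
    rows = np.arange(softmax.shape[0])
    return -np.log(softmax[rows, labels])

rng = np.random.default_rng(0)
logits = rng.uniform(0.1, 1.0, size=(4, 7)).astype(np.float32)
labels = rng.integers(0, 7, size=4)

probs = stable_softmax(logits)
loss = hard_label_cross_entropy(probs, labels)
print(loss.shape, float(loss.mean()))
```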
computationalartist/tensorflow | [
"b89cf636c412abdff53b3e8f201bde671c92209d"
] | [
"tensorflow/python/kernel_tests/math_ops/argmax_op_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.argmax_op.\"\"\"\nimport functools\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass ArgMaxTest(test.TestCase):\n\n def _testArg(self,\n method,\n x,\n axis,\n expected_values,\n use_gpu=False,\n expected_err_re=None):\n with self.session(use_gpu=use_gpu):\n ans = method(x, axis=axis)\n if expected_err_re is None:\n tf_ans = self.evaluate(ans)\n # Defaults to int64 output.\n self.assertEqual(np.int64, tf_ans.dtype)\n self.assertAllEqual(tf_ans, expected_values)\n self.assertShapeEqual(expected_values, ans)\n else:\n with self.assertRaisesOpError(expected_err_re):\n self.evaluate(ans)\n\n def _testBothArg(self,\n method,\n x,\n axis,\n expected_values,\n expected_err_re=None):\n self._testArg(method, x, axis, expected_values, True, expected_err_re)\n # Compilation time is too large with XLA/CPU autojit.\n if not test_util.is_xla_enabled():\n self._testArg(method, x, axis, expected_values, False, expected_err_re)\n\n def _testBasic(self, dtype):\n x = np.arange(200, dtype=np.float32).astype(dtype)\n np.random.shuffle(x)\n\n # Check that argmin and argmax match numpy along the primary axis\n self._testBothArg(math_ops.argmax, x, 0, x.argmax())\n self._testBothArg(math_ops.argmin, x, 0, x.argmin())\n\n def _testTieBreaking(self, dtype):\n x = np.zeros(200, dtype=dtype)\n\n # Check that argmin and argmax match numpy along the primary axis for\n # breaking ties.\n self._testBothArg(math_ops.argmax, x, 0, x.argmax())\n self._testBothArg(math_ops.argmin, x, 0, x.argmin())\n\n # Check that argmin and argmax match numpy along axis=1 for\n # breaking ties.\n x = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [0, 1, 0, 1]], dtype=dtype)\n self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))\n self._testBothArg(math_ops.argmin, x, 1, x.argmin(axis=1))\n\n def _testDim(self, dtype):\n shape = (3, 2, 4, 5, 6, 3, 7)\n x = np.arange(\n functools.reduce(lambda x, y: x * y, shape),\n dtype=np.float32).astype(dtype)\n np.random.shuffle(x)\n x = x.reshape(shape)\n\n # Check that argmin and argmax match numpy along all axes\n for axis in range(-7, 7):\n self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))\n self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))\n\n def testFloat(self):\n self._testBasic(np.float32)\n self._testTieBreaking(np.float32)\n self._testDim(np.float32)\n\n def testFloatInt32Output(self):\n x = np.asarray(100 * np.random.randn(200), dtype=np.float32)\n expected_values = x.argmax()\n with self.session():\n ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)\n tf_ans = self.evaluate(ans)\n self.assertEqual(np.int32, tf_ans.dtype)\n # The 
values are equal when comparing int32 to int64 because\n # the values don't have a range that exceeds 32-bit integers.\n self.assertAllEqual(tf_ans, expected_values)\n expected_values = x.argmin()\n with self.session():\n ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)\n tf_ans = self.evaluate(ans)\n self.assertEqual(np.int32, tf_ans.dtype)\n self.assertAllEqual(tf_ans, expected_values)\n\n def testDouble(self):\n self._testBasic(np.float64)\n self._testTieBreaking(np.float64)\n self._testDim(np.float64)\n\n def testInt32(self):\n self._testBasic(np.int32)\n self._testTieBreaking(np.int32)\n self._testDim(np.int32)\n\n def testInt64(self):\n self._testBasic(np.int64)\n self._testTieBreaking(np.int64)\n self._testDim(np.int64)\n\n def testBool(self):\n self._testBasic(np.bool_)\n self._testTieBreaking(np.bool_)\n self._testDim(np.bool_)\n\n def testEmpty(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n with self.assertRaisesOpError(\n r\"Reduction axis 0 is empty in shape \\[0\\]\"):\n op([], 0).eval()\n\n @test_util.run_deprecated_v1\n def testDefaultAxis(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n ans = op([1]).eval()\n self.assertAllEqual(ans, 0)\n\n @test_util.run_deprecated_v1\n def testOutputEmpty(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()\n self.assertEqual(ret.shape, (1, 0))\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.framework.test_util.is_xla_enabled",
"numpy.random.shuffle",
"numpy.zeros",
"tensorflow.python.ops.array_ops.zeros",
"numpy.random.randn",
"numpy.arange",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.math_ops.argmax",
"numpy.array",
"tensorflow.python.ops.math_ops.argmin"
]
] |
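The TensorFlow test cell above checks `math_ops.argmax`/`argmin` against NumPy, including the tie-breaking case. NumPy's convention, which is also what the test expects, is to return the first occurrence of the extreme value; a quick NumPy-only demonstration:

```python
import numpy as np

# np.argmax / np.argmin break ties by returning the first occurrence,
# which is the behaviour the test above compares against.
x = np.array([[0, 0, 1, 1],
              [1, 1, 0, 0],
              [0, 1, 0, 1]])

print(x.argmax(axis=1))  # [2 0 1]
print(x.argmin(axis=1))  # [0 2 0]
```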
QuESt-Calculator/pyscf | [
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910"
] | [
"pyscf/grad/casci.py",
"pyscf/symm/test/test_Dmatrix.py"
] | [
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nCASCI analytical nuclear gradients\n\nRef.\nJ. Comput. Chem., 5, 589\n'''\n\nimport sys\n\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import ao2mo\nfrom pyscf.lib import logger\nfrom pyscf.grad import rhf as rhf_grad\nfrom pyscf.grad.mp2 import _shell_prange\nfrom pyscf.scf import cphf\n\nif sys.version_info < (3,):\n RANGE_TYPE = list\nelse:\n RANGE_TYPE = range\n\n\ndef grad_elec(mc_grad, mo_coeff=None, ci=None, atmlst=None, verbose=None):\n mc = mc_grad.base\n if mo_coeff is None: mo_coeff = mc._scf.mo_coeff\n if ci is None: ci = mc.ci\n\n time0 = logger.process_clock(), logger.perf_counter()\n log = logger.new_logger(mc_grad, verbose)\n mol = mc_grad.mol\n ncore = mc.ncore\n ncas = mc.ncas\n nocc = ncore + ncas\n nelecas = mc.nelecas\n nao, nmo = mo_coeff.shape\n nao_pair = nao * (nao+1) // 2\n mo_energy = mc._scf.mo_energy\n\n mo_occ = mo_coeff[:,:nocc]\n mo_core = mo_coeff[:,:ncore]\n mo_cas = mo_coeff[:,ncore:nocc]\n neleca, nelecb = mol.nelec\n assert(neleca == nelecb)\n orbo = mo_coeff[:,:neleca]\n orbv = mo_coeff[:,neleca:]\n\n casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)\n dm_core = numpy.dot(mo_core, mo_core.T) * 2\n dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))\n aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)\n aapa = aapa.reshape(ncas,ncas,nmo,ncas)\n vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))\n h1 = mc.get_hcore()\n vhf_c = vj[0] - vk[0] * .5\n vhf_a = vj[1] - vk[1] * .5\n # Imat = h1_{pi} gamma1_{iq} + h2_{pijk} gamma_{iqkj}\n Imat = numpy.zeros((nmo,nmo))\n Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2\n Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))\n Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)\n aapa = vj = vk = vhf_c = vhf_a = h1 = None\n\n ee = mo_energy[:,None] - mo_energy\n zvec = numpy.zeros_like(Imat)\n zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]\n zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]\n zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]\n zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]\n\n zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))\n vhf = mc._scf.get_veff(mol, zvec_ao) * 2\n xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))\n xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T\n def fvind(x):\n x = x.reshape(xvo.shape)\n dm = reduce(numpy.dot, (orbv, x, orbo.T))\n v = mc._scf.get_veff(mol, dm + dm.T)\n v = reduce(numpy.dot, (orbv.T, v, orbo))\n return v * 2\n dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]\n zvec[neleca:,:neleca] = dm1resp\n\n zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)\n zeta = reduce(numpy.dot, 
(mo_coeff, zeta, mo_coeff.T))\n\n zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))\n p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)\n vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))\n\n Imat[:ncore,ncore:neleca] = 0\n Imat[ncore:neleca,:ncore] = 0\n Imat[nocc:,neleca:nocc] = 0\n Imat[neleca:nocc,nocc:] = 0\n Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T\n im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))\n\n casci_dm1 = dm_core + dm_cas\n hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)\n hcore_deriv = mc_grad.hcore_generator(mol)\n s1 = mc_grad.get_ovlp(mol)\n\n diag_idx = numpy.arange(nao)\n diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx\n casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)\n dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,\n (0, nao, 0, nao)).reshape(ncas**2,nao,nao)\n dm2buf = lib.pack_tril(dm2buf)\n dm2buf[:,diag_idx] *= .5\n dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)\n casdm2 = casdm2_cc = None\n\n if atmlst is None:\n atmlst = range(mol.natm)\n aoslices = mol.aoslice_by_atom()\n de = numpy.zeros((len(atmlst),3))\n\n max_memory = mc_grad.max_memory - lib.current_memory()[0]\n blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))\n blksize = min(nao, max(2, blksize))\n\n for k, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n h1ao = hcore_deriv(ia)\n de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)\n de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)\n\n q1 = 0\n for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):\n q0, q1 = q1, q1 + nf\n dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])\n shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)\n eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',\n shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)\n de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2\n\n for i in range(3):\n eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))\n eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)\n de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2\n de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2\n de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])\n\n #:vhf1c, vhf1a = mc_grad.get_veff(mol, (dm_core, dm_cas))\n #:de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], casci_dm1[p0:p1]) * 2\n #:de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2\n de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])\n de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])\n eri1 = eri1tmp = None\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2\n\n log.timer('CASCI nuclear gradients', *time0)\n return de\n\n\ndef as_scanner(mcscf_grad, state=None):\n '''Generating a nuclear gradients scanner/solver (for geometry optimizer).\n\n The 
returned solver is a function. This function requires one argument\n \"mol\" as input and returns energy and first order nuclear derivatives.\n\n The solver will automatically use the results of last calculation as the\n initial guess of the new calculation. All parameters assigned in the\n nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are\n automatically applied in the solver.\n\n Note scanner has side effects. It may change many underlying objects\n (_scf, with_df, with_x2c, ...) during calculation.\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)\n >>> mc_grad_scanner = mcscf.CASCI(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))\n '''\n from pyscf import gto\n from pyscf.mcscf.addons import StateAverageMCSCFSolver\n if isinstance(mcscf_grad, lib.GradScanner):\n return mcscf_grad\n if (state is not None and\n isinstance(mcscf_grad.base, StateAverageMCSCFSolver)):\n raise RuntimeError('State-Average MCSCF Gradients does not support '\n 'state-specific nuclear gradients.')\n\n logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)\n\n class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):\n def __init__(self, g):\n lib.GradScanner.__init__(self, g)\n def __call__(self, mol_or_geom, state=state, **kwargs):\n if isinstance(mol_or_geom, gto.Mole):\n mol = mol_or_geom\n else:\n mol = self.mol.set_geom_(mol_or_geom, inplace=False)\n\n if state is None:\n state = self.state\n\n mc_scanner = self.base\n# TODO: Check root flip\n e_tot = mc_scanner(mol)\n ci = mc_scanner.ci\n if isinstance(mc_scanner, StateAverageMCSCFSolver):\n e_tot = mc_scanner.e_average\n elif not isinstance(e_tot, float):\n if state >= mc_scanner.fcisolver.nroots:\n raise ValueError('State ID greater than the number of CASCI roots')\n e_tot = e_tot[state]\n # target at a specific state, to avoid overwriting self.state\n # in self.kernel\n ci = ci[state]\n\n self.mol = mol\n de = self.kernel(ci=ci, state=state, **kwargs)\n return e_tot, de\n return CASCI_GradScanner(mcscf_grad)\n\n\nclass Gradients(rhf_grad.GradientsMixin):\n '''Non-relativistic restricted Hartree-Fock gradients'''\n def __init__(self, mc):\n from pyscf.mcscf.addons import StateAverageMCSCFSolver\n if isinstance(mc, StateAverageMCSCFSolver):\n self.state = None # not a specific state\n else:\n self.state = 0 # of which the gradients to be computed.\n rhf_grad.GradientsMixin.__init__(self, mc)\n\n def dump_flags(self, verbose=None):\n log = logger.new_logger(self, verbose)\n log.info('\\n')\n if not self.base.converged:\n log.warn('Ground state %s not converged', self.base.__class__)\n log.info('******** %s for %s ********',\n self.__class__, self.base.__class__)\n if self.state is None:\n weights = self.base.weights\n log.info('State-average gradients over %d states with weights %s',\n len(weights), weights)\n elif self.state != 0 and self.base.fcisolver.nroots > 1:\n log.info('State ID = %d', self.state)\n log.info('max_memory %d MB (current use %d MB)',\n self.max_memory, lib.current_memory()[0])\n return self\n\n grad_elec = grad_elec\n\n def kernel(self, mo_coeff=None, ci=None, atmlst=None,\n state=None, verbose=None):\n log = logger.new_logger(self, verbose)\n if ci is None: ci = self.base.ci\n if self.state is None: # state average MCSCF calculations\n assert(state is None)\n elif isinstance(ci, (list, tuple, 
RANGE_TYPE)):\n if state is None:\n state = self.state\n else:\n self.state = state\n ci = ci[state]\n log.info('Multiple roots are found in CASCI solver. '\n 'Nuclear gradients of root %d are computed.', state)\n\n if atmlst is None:\n atmlst = self.atmlst\n else:\n self.atmlst = atmlst\n\n if self.verbose >= logger.WARN:\n self.check_sanity()\n if self.verbose >= logger.INFO:\n self.dump_flags()\n\n de = self.grad_elec(mo_coeff, ci, atmlst, log)\n self.de = de = de + self.grad_nuc(atmlst=atmlst)\n if self.mol.symmetry:\n self.de = self.symmetrize(self.de, atmlst)\n self._finalize()\n return self.de\n\n # Initialize hcore_deriv with the underlying SCF object because some\n # extensions (e.g. x2c, QM/MM, solvent) modifies the SCF object only.\n def hcore_generator(self, mol=None):\n mf_grad = self.base._scf.nuc_grad_method()\n return mf_grad.hcore_generator(mol)\n\n # Calling the underlying SCF nuclear gradients because it may be modified\n # by external modules (e.g. QM/MM, solvent)\n def grad_nuc(self, mol=None, atmlst=None):\n mf_grad = self.base._scf.nuc_grad_method()\n return mf_grad.grad_nuc(mol, atmlst)\n\n def _finalize(self):\n if self.verbose >= logger.NOTE:\n if self.state is None:\n logger.note(self, '--------- %s gradients ----------',\n self.base.__class__.__name__)\n else:\n logger.note(self, '--------- %s gradients for state %d ----------',\n self.base.__class__.__name__, self.state)\n self._write(self.mol, self.de, self.atmlst)\n logger.note(self, '----------------------------------------------')\n\n as_scanner = as_scanner\n\nGrad = Gradients\n\nfrom pyscf import mcscf\nmcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import mcscf\n\n mol = gto.Mole()\n mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'\n mol.build()\n mf = scf.RHF(mol).run(conv_tol=1e-14)\n mc = mcscf.CASCI(mf, 4, 4).run()\n g1 = mc.Gradients().kernel()\n print(lib.finger(g1) - -0.066025991364829367)\n\n mcs = mc.as_scanner()\n mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')\n e1 = mcs(mol)\n mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')\n e2 = mcs(mol)\n print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)\n",
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import reduce\nimport unittest\nimport numpy\nfrom pyscf import gto, lib\nfrom pyscf.symm import Dmatrix, geom\n\n\nclass KnownValues(unittest.TestCase):\n def test_Dmatrix(self):\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(0, -.7, .5, .2)), 1, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(1, -.7, .5, .2)), 0.7014811805222106, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(2, -.7, .5, .2)), 1.247436140965072 , 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(3, -.7, .5, .2)), 0.9226598665854279, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(4, -.7, .5, .2)), -0.425143083298510, 12)\n\n def test_real_sph_vec(self):\n c0 = c = numpy.random.random(3)\n\n mol1 = gto.M(atom=['H1 0 0 0', ['H2', c]],\n basis = {'H1': [[0, (1, 1)]],\n 'H2': [[l, (1, 1)] for l in range(1,6)]})\n alpha = .2\n beta = .4\n gamma = -.3\n c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)\n c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)\n mol2 = gto.M(atom=['H1 0 0 0', ['H2', c1]],\n basis = {'H1': [[0, (1, 1)]],\n 'H2': [[l, (1, 1)] for l in range(1,6)]})\n\n for l in range(1, 6):\n s1 = mol1.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))\n s2 = mol2.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))\n\n # Rotating a basis is equivalent to an inversed rotation over the axes.\n # The Eular angles that rotates molecule to a new geometry (axes\n # transformation) corresponds to the inversed rotation over basis.\n #r = small_dmatrix(l, -beta, reorder_p=True)\n r = Dmatrix.Dmatrix(l, -gamma, -beta, -alpha, reorder_p=True)\n self.assertAlmostEqual(abs(numpy.dot(s1, r) - s2).max(), 0, 12)\n\n def test_euler_angles(self):\n c0 = numpy.random.random(3)\n c2 = numpy.random.random(3)\n self.assertRaises(AssertionError, Dmatrix.get_euler_angles, c0, c2)\n\n c0 /= numpy.linalg.norm(c0)\n c2 /= numpy.linalg.norm(c2)\n alpha, beta, gamma = Dmatrix.get_euler_angles(c0, c2)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)\n c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)\n self.assertAlmostEqual(abs(c2 - c1).max(), 0, 12)\n\n # transform coordinates\n numpy.random.seed(1)\n u, w, vh = numpy.linalg.svd(numpy.random.random((3,3)))\n c1 = u.dot(vh)\n u, w, vh = numpy.linalg.svd(c1+2*numpy.random.random((3,3)))\n c2 = u.dot(vh)\n alpha, beta, gamma = Dmatrix.get_euler_angles(c1, c2)\n yp = numpy.einsum('j,kj->k', c1[1], geom.rotation_mat(c1[2], alpha))\n tmp = numpy.einsum('ij,kj->ik', c1 , geom.rotation_mat(c1[2], alpha))\n tmp = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(yp , beta ))\n c2p = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(c2[2], gamma))\n self.assertAlmostEqual((c2-c2p).max(), 0, 13)\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests for Dmatrix\")\n unittest.main()\n\n"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"numpy.arange",
"numpy.einsum",
"numpy.dot"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.dot",
"numpy.linalg.norm"
]
] |
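The pyscf `test_Dmatrix.py` cell above composes axis rotations via `geom.rotation_mat` to verify Euler angles. The sketch below is a generic stand-in for that kind of rotation matrix, built from Rodrigues' formula under the usual right-handed convention; pyscf's own implementation and sign conventions may differ.

```python
import numpy as np

def rotation_mat(axis, angle):
    # Rodrigues' formula: rotation by `angle` (radians) about the unit `axis`.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    ux, uy, uz = axis
    k = np.array([[0.0, -uz,  uy],
                  [uz,  0.0, -ux],
                  [-uy,  ux, 0.0]])
    return np.eye(3) + np.sin(angle) * k + (1.0 - np.cos(angle)) * (k @ k)

# Rotating by +a about z and then by -a about z returns the vector unchanged.
v = np.array([1.0, 2.0, 3.0])
a = 0.3
back = rotation_mat((0, 0, 1), -a) @ (rotation_mat((0, 0, 1), a) @ v)
print(np.allclose(back, v))  # True
```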
tsheaff/keras | [
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b"
] | [
"keras/utils/layer_utils_test.py",
"keras/optimizers/optimizer_v2/rmsprop.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for layer_utils.\"\"\"\n\nimport keras\nimport tensorflow.compat.v2 as tf\n\nimport collections\nimport contextlib\nimport multiprocessing.dummy\nimport os\nimport pickle\nimport shutil\nimport sys\nimport time\nimport timeit\n\nimport numpy as np\nfrom keras.utils import io_utils\nfrom keras.utils import layer_utils\n\n\n_PICKLEABLE_CALL_COUNT = collections.Counter()\n\n\nclass MyPickleableObject(tf.__internal__.tracking.AutoTrackable):\n \"\"\"Needed for InterfaceTests.test_property_cache_serialization.\n\n This class must be at the top level. This is a constraint of pickle,\n unrelated to `cached_per_instance`.\n \"\"\"\n\n @property\n @layer_utils.cached_per_instance\n def my_id(self):\n _PICKLEABLE_CALL_COUNT[self] += 1\n return id(self)\n\n\nclass LayerUtilsTest(tf.test.TestCase):\n\n def test_print_summary(self):\n model = keras.Sequential()\n model.add(\n keras.layers.Conv2D(\n filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))\n model.add(keras.layers.Flatten(name='flat'))\n model.add(keras.layers.Dense(5, name='dense'))\n\n file_name = 'model_1.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(model, print_fn=print_to_file)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n self.assertEqual(len(lines), 15)\n except ImportError:\n pass\n\n def test_print_summary_without_print_fn(self):\n model = keras.Sequential([\n keras.layers.Dense(5, input_shape=(10,), name='dense')])\n io_utils.enable_interactive_logging()\n with self.captureWritesToStream(sys.stdout) as printed:\n layer_utils.print_summary(model)\n self.assertIn('dense (Dense)', printed.contents())\n\n def test_print_summary_expand_nested(self):\n shape = (None, None, 3)\n\n def make_model():\n x = inputs = keras.Input(shape)\n x = keras.layers.Conv2D(3, 1)(x)\n x = keras.layers.BatchNormalization()(x)\n return keras.Model(inputs, x)\n\n x = inner_inputs = keras.Input(shape)\n x = make_model()(x)\n inner_model = keras.Model(inner_inputs, x)\n\n inputs = keras.Input(shape)\n model = keras.Model(inputs, inner_model(inputs))\n\n file_name = 'model_2.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, print_fn=print_to_file, expand_nested=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = 
(\n 'Model: \"model_2\"\\n'\n '_________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # \\n'\n '=================================================================\\n'\n ' input_3 (InputLayer) [(None, None, None, 3)] 0 \\n'\n ' \\n'\n ' model_1 (Functional) (None, None, None, 3) 24 \\n'\n '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n'\n '| input_1 (InputLayer) [(None, None, None, 3)] 0 |\\n'\n '| |\\n'\n '| model (Functional) (None, None, None, 3) 24 |\\n'\n '||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\\n'\n '|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\\n'\n '|| ||\\n'\n '|| conv2d (Conv2D) (None, None, None, 3) 12 ||\\n'\n '|| ||\\n'\n '|| batch_normalization (BatchN (None, None, None, 3) 12 ||\\n'\n '|| ormalization) ||\\n'\n '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n'\n '¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n'\n '=================================================================\\n'\n 'Total params: 24\\n'\n 'Trainable params: 18\\n'\n 'Non-trainable params: 6\\n'\n '_________________________________________________________________\\n')\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 25)\n except ImportError:\n pass\n\n def test_summary_subclass_model_expand_nested(self):\n\n class Sequential(keras.Model):\n\n def __init__(self, *args):\n super(Sequential, self).__init__()\n self.module_list = list(args) if args else []\n\n def call(self, x):\n for module in self.module_list:\n x = module(x)\n return x\n\n class Block(keras.Model):\n\n def __init__(self):\n super(Block, self).__init__()\n self.module = Sequential(\n keras.layers.Dense(10),\n keras.layers.Dense(10),\n )\n\n def call(self, input_tensor):\n x = self.module(input_tensor)\n return x\n\n class Base(keras.Model):\n\n def __init__(self):\n super(Base, self).__init__()\n self.module = Sequential(Block(), Block())\n\n def call(self, input_tensor):\n x = self.module(input_tensor)\n y = self.module(x)\n return x, y\n\n class Network(keras.Model):\n\n def __init__(self):\n super(Network, self).__init__()\n self.child = Base()\n\n def call(self, inputs):\n return self.child(inputs)\n\n net = Network()\n inputs = keras.Input(shape=(10,))\n outputs = net(inputs)\n model = keras.models.Model(inputs=inputs, outputs=outputs)\n\n file_name = 'model_3.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, line_length=120, print_fn=print_to_file, expand_nested=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n # The output content are slightly different for the input shapes between\n # v1 and v2.\n if tf.__internal__.tf2.enabled():\n self.assertEqual(len(lines), 39)\n else:\n self.assertEqual(len(lines), 40)\n except ImportError:\n pass\n\n def test_print_summary_show_trainable(self):\n model = keras.Sequential(name='trainable')\n untrained = keras.layers.Conv2D(\n filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv')\n model.add(untrained)\n model.add(keras.layers.Flatten(name='flat'))\n model.add(keras.layers.Dense(5, name='dense'))\n\n untrained.trainable = False\n\n file_name = 
'model_4.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, print_fn=print_to_file, show_trainable=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = (\n 'Model: '\n '\"trainable\"\\n____________________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # '\n 'Trainable '\n '\\n============================================================================\\n'\n ' conv (Conv2D) (None, 2, 3, 2) 62 N'\n ' \\n'\n ' '\n '\\n flat (Flatten) (None, 12) 0 '\n 'Y \\n'\n ' '\n '\\n dense (Dense) (None, 5) 65 '\n 'Y \\n'\n ' '\n '\\n============================================================================\\nTotal'\n ' params: 127\\nTrainable params: 65\\nNon-trainable params: '\n '62\\n____________________________________________________________________________\\n'\n '____________________________________________________________________________\\n'\n )\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 15)\n except ImportError:\n pass\n\n def test_print_summary_expand_nested_show_trainable(self):\n shape = (None, None, 3)\n\n def make_model():\n x = inputs = keras.Input(shape, name='input2')\n untrainable = keras.layers.Conv2D(3, 1)\n untrainable.trainable = False\n x = untrainable(x)\n x = keras.layers.BatchNormalization()(x)\n return keras.Model(inputs, x)\n\n x = inner_inputs = keras.Input(shape, name='input1')\n x = make_model()(x)\n inner_model = keras.Model(inner_inputs, x)\n\n inputs = keras.Input(shape, name='input3')\n model = keras.Model(inputs, inner_model(inputs))\n\n file_name = 'model_6.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model,\n print_fn=print_to_file,\n expand_nested=True,\n show_trainable=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = (\n 'Model: '\n '\"model_2\"\\n____________________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # '\n 'Trainable '\n '\\n============================================================================\\n'\n ' input3 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' \\n'\n ' '\n '\\n model_1 (Functional) (None, None, None, 3) 24 '\n 'Y '\n '\\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n|'\n ' input1 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' |\\n|'\n ' '\n '|\\n| model (Functional) (None, None, None, 3) 24 '\n 'Y '\n '|\\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\\n||'\n ' input2 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' ||\\n||'\n ' '\n '||\\n|| conv2d (Conv2D) (None, None, None, 3) 12 '\n 'N ||\\n||'\n ' '\n '||\\n|| batch_normalization (BatchN (None, None, None, 3) 12 '\n 'Y ||\\n|| ormalization)'\n ' '\n 
'||\\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n============================================================================\\nTotal'\n ' params: 24\\nTrainable params: 6\\nNon-trainable params: '\n '18\\n____________________________________________________________________________\\n'\n '____________________________________________________________________________\\n'\n )\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 25)\n except ImportError:\n pass\n\n def test_property_cache(self):\n test_counter = collections.Counter()\n\n class MyObject(tf.__internal__.tracking.AutoTrackable):\n\n def __init__(self):\n super(MyObject, self).__init__()\n self._frozen = True\n\n def __setattr__(self, key, value):\n \"\"\"Enforce that cache does not set attribute on MyObject.\"\"\"\n if getattr(self, '_frozen', False):\n raise ValueError('Cannot mutate when frozen.')\n return super(MyObject, self).__setattr__(key, value)\n\n @property\n @layer_utils.cached_per_instance\n def test_property(self):\n test_counter[id(self)] += 1\n return id(self)\n\n first_object = MyObject()\n second_object = MyObject()\n\n # Make sure the objects return the correct values\n self.assertEqual(first_object.test_property, id(first_object))\n self.assertEqual(second_object.test_property, id(second_object))\n\n # Make sure the cache does not share across objects\n self.assertNotEqual(first_object.test_property, second_object.test_property)\n\n # Check again (Now the values should be cached.)\n self.assertEqual(first_object.test_property, id(first_object))\n self.assertEqual(second_object.test_property, id(second_object))\n\n # Count the function calls to make sure the cache is actually being used.\n self.assertAllEqual(tuple(test_counter.values()), (1, 1))\n\n def test_property_cache_threaded(self):\n call_count = collections.Counter()\n\n class MyObject(tf.__internal__.tracking.AutoTrackable):\n\n @property\n @layer_utils.cached_per_instance\n def test_property(self):\n # Random sleeps to ensure that the execution thread changes\n # mid-computation.\n call_count['test_property'] += 1\n time.sleep(np.random.random() + 1.)\n\n # Use a RandomState which is seeded off the instance's id (the mod is\n # because numpy limits the range of seeds) to ensure that an instance\n # returns the same value in different threads, but different instances\n # return different values.\n return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))\n\n def get_test_property(self, _):\n \"\"\"Function provided to .map for threading test.\"\"\"\n return self.test_property\n\n # Test that multiple threads return the same value. 
This requires that\n # the underlying function is repeatable, as cached_property makes no attempt\n # to prioritize the first call.\n test_obj = MyObject()\n with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:\n # Intentionally make a large pool (even when there are only a small number\n # of cpus) to ensure that the runtime switches threads.\n results = pool.map(test_obj.get_test_property, range(64))\n self.assertEqual(len(set(results)), 1)\n\n # Make sure we actually are testing threaded behavior.\n self.assertGreater(call_count['test_property'], 1)\n\n # Make sure new threads still cache hit.\n with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:\n start_time = timeit.default_timer() # Don't time pool instantiation.\n results = pool.map(test_obj.get_test_property, range(4))\n total_time = timeit.default_timer() - start_time\n\n # Note(taylorrobie): The reason that it is safe to time a unit test is that\n # a cache hit will be << 1 second, and a cache miss is\n # guaranteed to be >= 1 second. Empirically confirmed by\n # 100,000 runs with no flakes.\n self.assertLess(total_time, 0.95)\n\n def test_property_cache_serialization(self):\n # Reset call count. .keys() must be wrapped in a list, because otherwise we\n # would mutate the iterator while iterating.\n for k in list(_PICKLEABLE_CALL_COUNT.keys()):\n _PICKLEABLE_CALL_COUNT.pop(k)\n\n first_instance = MyPickleableObject()\n self.assertEqual(id(first_instance), first_instance.my_id)\n\n # Test that we can pickle and un-pickle\n second_instance = pickle.loads(pickle.dumps(first_instance))\n\n self.assertEqual(id(second_instance), second_instance.my_id)\n self.assertNotEqual(first_instance.my_id, second_instance.my_id)\n\n # Make sure de-serialized object uses the cache.\n self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)\n\n # Make sure the decorator cache is not being serialized with the object.\n expected_size = len(pickle.dumps(second_instance))\n for _ in range(5):\n # Add some more entries to the cache.\n _ = MyPickleableObject().my_id\n self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)\n size_check_instance = MyPickleableObject()\n _ = size_check_instance.my_id\n self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RMSprop optimizer implementation.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n# pylint: disable=g-classes-have-attributes\n\nimport numpy as np\nfrom keras import backend_config\nfrom keras.optimizers.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export(\"keras.optimizers.RMSprop\", \"keras.optimizers_legacy.RMSprop\")\nclass RMSprop(optimizer_v2.OptimizerV2):\n r\"\"\"Optimizer that implements the RMSprop algorithm.\n\n The gist of RMSprop is to:\n\n - Maintain a moving (discounted) average of the square of gradients\n - Divide the gradient by the root of this average\n\n This implementation of RMSprop uses plain momentum, not Nesterov momentum.\n\n The centered version additionally maintains a moving average of the\n gradients, and uses that average to estimate the variance.\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable\n that takes no arguments and returns the actual value to use. The\n learning rate. Defaults to 0.001.\n rho: Discounting factor for the history/coming gradient. Defaults to 0.9.\n momentum: A scalar or a scalar `Tensor`. Defaults to 0.0.\n epsilon: A small constant for numerical stability. This epsilon is\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to\n 1e-7.\n centered: Boolean. If `True`, gradients are normalized by the estimated\n variance of the gradient; if False, by the uncentered second moment.\n Setting this to `True` may help with training, but is slightly more\n expensive in terms of computation and memory. Defaults to `False`.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `\"RMSprop\"`.\n **kwargs: keyword arguments. Allowed arguments are `clipvalue`,\n `clipnorm`, `global_clipnorm`.\n If `clipvalue` (float) is set, the gradient of each weight\n is clipped to be no higher than this value.\n If `clipnorm` (float) is set, the gradient of each weight\n is individually clipped so that its norm is no higher than this value.\n If `global_clipnorm` (float) is set the gradient of all weights is\n clipped so that their global norm is no higher than this value.\n\n Note that in the dense implementation of this algorithm, variables and their\n corresponding accumulators (momentum, gradient moving average, square\n gradient moving average) will be updated even if the gradient is zero\n (i.e. accumulators will decay, momentum will be applied). 
The sparse\n implementation (used when the gradient is an `IndexedSlices` object,\n typically because of `tf.gather` or an embedding lookup in the forward pass)\n will not update variable slices or their accumulators unless those slices\n were used in the forward pass (nor is there an \"eventual\" correction to\n account for these omitted updates). This leads to more efficient updates for\n large embedding lookup tables (where most of the slices are not accessed in\n a particular graph execution), but differs from the published algorithm.\n\n Usage:\n\n >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)\n >>> var1 = tf.Variable(10.0)\n >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1\n >>> step_count = opt.minimize(loss, [var1]).numpy()\n >>> var1.numpy()\n 9.683772\n\n Reference:\n - [Hinton, 2012](\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self,\n learning_rate=0.001,\n rho=0.9,\n momentum=0.0,\n epsilon=1e-7,\n centered=False,\n name=\"RMSprop\",\n **kwargs):\n \"\"\"Construct a new RMSprop optimizer.\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable\n that takes no arguments and returns the actual value to use. The\n learning rate. Defaults to 0.001.\n rho: Discounting factor for the history/coming gradient. Defaults to 0.9.\n momentum: A scalar or a scalar `Tensor`. Defaults to 0.0.\n epsilon: A small constant for numerical stability. This epsilon is\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to\n 1e-7.\n centered: Boolean. If `True`, gradients are normalized by the estimated\n variance of the gradient; if False, by the uncentered second moment.\n Setting this to `True` may help with training, but is slightly more\n expensive in terms of computation and memory. Defaults to `False`.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to \"RMSprop\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n\n @compatibility(eager)\n When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and\n `epsilon` can each be a callable that takes no arguments and returns the\n actual value to use. This can be useful for changing these values across\n different invocations of optimizer functions.\n @end_compatibility\n \"\"\"\n super(RMSprop, self).__init__(name, **kwargs)\n self._set_hyper(\"learning_rate\", kwargs.get(\"lr\", learning_rate))\n self._set_hyper(\"decay\", self._initial_decay)\n self._set_hyper(\"rho\", rho)\n\n self._momentum = False\n if isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0:\n self._momentum = True\n if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):\n raise ValueError(f\"`momentum` must be between [0, 1]. 
Received: \"\n f\"momentum={momentum} (of type {type(momentum)}).\")\n self._set_hyper(\"momentum\", momentum)\n\n self.epsilon = epsilon or backend_config.epsilon()\n self.centered = centered\n\n def _create_slots(self, var_list):\n for var in var_list:\n self.add_slot(var, \"rms\")\n if self._momentum:\n for var in var_list:\n self.add_slot(var, \"momentum\")\n if self.centered:\n for var in var_list:\n self.add_slot(var, \"mg\")\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(RMSprop, self)._prepare_local(var_device, var_dtype, apply_state)\n\n rho = tf.identity(self._get_hyper(\"rho\", var_dtype))\n apply_state[(var_device, var_dtype)].update(\n dict(\n neg_lr_t=-apply_state[(var_device, var_dtype)][\"lr_t\"],\n epsilon=tf.convert_to_tensor(\n self.epsilon, var_dtype),\n rho=rho,\n momentum=tf.identity(self._get_hyper(\"momentum\", var_dtype)),\n one_minus_rho=1. - rho))\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n rms = self.get_slot(var, \"rms\")\n if self._momentum:\n mom = self.get_slot(var, \"momentum\")\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n return tf.raw_ops.ResourceApplyCenteredRMSProp(\n var=var.handle,\n mg=mg.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceApplyRMSProp(\n var=var.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n use_locking=self._use_locking)\n else:\n rms_t = (coefficients[\"rho\"] * rms +\n coefficients[\"one_minus_rho\"] * tf.square(grad))\n rms_t = tf.compat.v1.assign(rms, rms_t, use_locking=self._use_locking)\n denom_t = rms_t\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n mg_t = coefficients[\"rho\"] * mg + coefficients[\"one_minus_rho\"] * grad\n mg_t = tf.compat.v1.assign(mg, mg_t, use_locking=self._use_locking)\n denom_t = rms_t - tf.square(mg_t)\n var_t = var - coefficients[\"lr_t\"] * grad / (\n tf.sqrt(denom_t) + coefficients[\"epsilon\"])\n return tf.compat.v1.assign(var, var_t, use_locking=self._use_locking).op\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n rms = self.get_slot(var, \"rms\")\n if self._momentum:\n mom = self.get_slot(var, \"momentum\")\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n return tf.raw_ops.ResourceSparseApplyCenteredRMSProp(\n var=var.handle,\n mg=mg.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceSparseApplyRMSProp(\n var=var.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n else:\n 
rms_scaled_g_values = (grad * grad) * coefficients[\"one_minus_rho\"]\n rms_t = tf.compat.v1.assign(rms, rms * coefficients[\"rho\"],\n use_locking=self._use_locking)\n with tf.control_dependencies([rms_t]):\n rms_t = self._resource_scatter_add(rms, indices, rms_scaled_g_values)\n rms_slice = tf.gather(rms_t, indices)\n denom_slice = rms_slice\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n mg_scaled_g_values = grad * coefficients[\"one_minus_rho\"]\n mg_t = tf.compat.v1.assign(mg, mg * coefficients[\"rho\"],\n use_locking=self._use_locking)\n with tf.control_dependencies([mg_t]):\n mg_t = self._resource_scatter_add(mg, indices, mg_scaled_g_values)\n mg_slice = tf.gather(mg_t, indices)\n denom_slice = rms_slice - tf.square(mg_slice)\n var_update = self._resource_scatter_add(\n var, indices, coefficients[\"neg_lr_t\"] * grad / (\n tf.sqrt(denom_slice) + coefficients[\"epsilon\"]))\n if self.centered:\n return tf.group(*[var_update, rms_t, mg_t])\n return tf.group(*[var_update, rms_t])\n\n def set_weights(self, weights):\n params = self.weights\n # Override set_weights for backward compatibility of Keras V1 optimizer\n # since it does not include iteration at head of the weight list. Set\n # iteration to 0.\n if len(params) == len(weights) + 1:\n weights = [np.array(0)] + weights\n super(RMSprop, self).set_weights(weights)\n\n def get_config(self):\n config = super(RMSprop, self).get_config()\n config.update({\n \"learning_rate\": self._serialize_hyperparameter(\"learning_rate\"),\n \"decay\": self._initial_decay,\n \"rho\": self._serialize_hyperparameter(\"rho\"),\n \"momentum\": self._serialize_hyperparameter(\"momentum\"),\n \"epsilon\": self.epsilon,\n \"centered\": self.centered,\n })\n return config\n\n\nRMSProp = RMSprop\n"
] | [
[
"numpy.random.random",
"tensorflow.compat.v2.io.gfile.exists",
"tensorflow.compat.v2.__internal__.tf2.enabled",
"tensorflow.compat.v2.test.main"
],
[
"tensorflow.compat.v2.compat.v1.assign",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.raw_ops.ResourceApplyCenteredRMSProp",
"tensorflow.compat.v2.raw_ops.ResourceSparseApplyCenteredRMSProp",
"tensorflow.compat.v2.raw_ops.ResourceSparseApplyRMSProp",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.raw_ops.ResourceApplyRMSProp",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.group",
"numpy.array"
]
] |
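For reference, a plain-NumPy sketch of the dense, momentum-free RMSprop update path implemented in `_resource_apply_dense` above. The variable, learning rate, and iteration count here are illustrative placeholders, not values taken from the file.

```python
import numpy as np

def rmsprop_step(var, grad, rms, lr=0.001, rho=0.9, epsilon=1e-7):
    """One dense, momentum-free RMSprop update, mirroring the rms_t / var_t math above."""
    rms = rho * rms + (1.0 - rho) * np.square(grad)    # moving average of squared gradients
    var = var - lr * grad / (np.sqrt(rms) + epsilon)   # divide the step by the root of that average
    return var, rms

var, rms = 10.0, 0.0
for _ in range(3):
    grad = var                     # gradient of (var ** 2) / 2, as in the docstring usage example
    var, rms = rmsprop_step(var, grad, rms, lr=0.1)
print(var)
```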
systemquant/book-pandas-for-finance | [
"90b7eb9be1de20a12ae72b9bb5d51424a979b174"
] | [
"old/03/08.py"
] | [
"from pandas import Series\n\ndata = [1000, 2000, 3000]\nindex = [\"메로나\", \"구구콘\", \"하겐다즈\"]\ns = Series(data=data, index=index)\n\nprint(s.loc['메로나':'구구콘'])\n"
] | [
[
"pandas.Series"
]
] |
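A small sketch (with placeholder ASCII labels) of the behaviour the snippet above relies on: `Series.loc` slices by label and includes the end label, unlike positional slicing.

```python
import pandas as pd

s = pd.Series([1000, 2000, 3000], index=["a", "b", "c"])

# Label-based slicing with .loc includes both endpoints,
# whereas positional slicing with .iloc excludes the stop position.
print(s.loc["a":"b"])   # labels "a" and "b"
print(s.iloc[0:2])      # the same two rows, selected by position
```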
vidursatija/SongWCT | [
"c892c2833ff9f85cfb31788babf016699c5eec8f"
] | [
"models.py"
] | [
"import torch\nimport torch.nn as nn\ntry:\n from torch.hub import load_state_dict_from_url\nexcept ImportError:\n from torch.utils.model_zoo import load_url as load_state_dict_from_url\nfrom torchsummary import summary\nimport numpy as np\n\n\nclass X_Enc(nn.Module):\n def __init__(self, layers, num_classes=1000, init_weights=True):\n super(X_Enc, self).__init__()\n\n self.features = nn.Sequential(*layers) # layers\n print(self.features)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n all_maxpools = []\n for l in self.features:\n if isinstance(l, nn.MaxPool1d) == False:\n x = l(x)\n else:\n x, pool_indices = l(x)\n all_maxpools.append(pool_indices)\n return x, all_maxpools\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers_enc(cfg):\n layers = []\n conv_layers = []\n in_channels = cfg[0]\n cfg = cfg[1:]\n for v in cfg:\n if v == 'M':\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n conv_layers = []\n layers += [nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)]\n else:\n conv1d = nn.Conv1d(in_channels, v, kernel_size=3, padding=1)\n conv_layers += [conv1d, nn.ReLU(inplace=True)]\n in_channels = v\n if len(conv_layers) > 0:\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n return layers\n\n\nconfigs_enc = [\n [128, 128],\n [128, 128, 128, 'M', 256],\n [128, 128, 128, 'M', 256, 256, 'M', 512],\n [128, 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512]\n]\n\nconfigs_dec = [\n [128, 128],\n [256, 128, 'M', 128, 128],\n [512, 256, 'M', 256, 128, 'M', 128, 128],\n [512, 512, 'M', 512, 256, 'M', 256, 128, 'M', 128, 128]\n]\n\n\ndef encoder(x, pretrained_path=None, **kwargs):\n if pretrained_path is not None:\n kwargs['init_weights'] = False\n model = X_Enc(make_layers_enc(configs_enc[x-1]), **kwargs)\n if pretrained_path is not None:\n model.load_state_dict(torch.load(pretrained_path), strict=False)\n return model\n\n\nclass X_Dec(nn.Module):\n def __init__(self, layers, num_classes=1000, init_weights=True):\n super(X_Dec, self).__init__()\n\n self.layers = nn.Sequential(*layers)\n print(self.layers)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x, all_maxpools):\n ct = -1\n for l in self.layers:\n if isinstance(l, nn.MaxUnpool1d) == False:\n x = l(x)\n else:\n x = l(x, all_maxpools[ct])\n ct -= 1\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers_dec(cfg):\n layers = []\n conv_layers = []\n in_channels = cfg[0]\n cfg = cfg[1:]\n for i, v in enumerate(cfg):\n if v == 'M':\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n conv_layers = []\n layers += [nn.MaxUnpool1d(kernel_size=2, stride=2)]\n else:\n conv1d = nn.ConvTranspose1d(in_channels, v, kernel_size=3, padding=1)\n if i != len(cfg) - 1:\n conv_layers += [conv1d, 
nn.ReLU(inplace=True)]\n else:\n conv_layers += [conv1d]\n in_channels = v\n if len(conv_layers) > 0:\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n return layers\n\n\ndef decoder(x, pretrained_path=None, **kwargs):\n if pretrained_path is not None:\n kwargs['init_weights'] = False\n model = X_Dec(make_layers_dec(configs_dec[x-1]), **kwargs)\n if pretrained_path is not None:\n model.load_state_dict(torch.load(pretrained_path), strict=False)\n return model\n\n\nif __name__ == '__main__':\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # PyTorch v0.4.0\n encoder = vgg16_enc(x=3, pretrained=True) # .to(device)\n for k in encoder.state_dict():\n print(k)\n summary(encoder, (3, 224, 224), device=\"cpu\")\n z, all_maxpools = encoder(torch.from_numpy(np.zeros([1, 3, 224, 224])).float())\n\n decoder = vgg16_dec(x=3, pretrained=False) # .to(device)\n for k in decoder.state_dict():\n print(k)\n x_rebuild = decoder(z, all_maxpools)\n # summary(decoder, (256, 56, 56), device=\"cpu\")\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.MaxPool1d",
"torch.nn.MaxUnpool1d",
"torch.load",
"torch.nn.init.constant_",
"numpy.zeros",
"torch.nn.init.normal_",
"torch.nn.Conv1d",
"torch.cuda.is_available",
"torch.nn.Sequential",
"torch.nn.ConvTranspose1d",
"torch.nn.ReLU"
]
] |
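The encoder/decoder pair above round-trips max-pooling indices: `X_Enc` collects them from `MaxPool1d(return_indices=True)` and `X_Dec` feeds them back into `MaxUnpool1d` in reverse order. (Note that the `__main__` block refers to `vgg16_enc`/`vgg16_dec`, while the factory functions defined in the file are `encoder`/`decoder`.) A minimal sketch of the pooling round-trip, with an arbitrarily chosen input shape:

```python
import torch
import torch.nn as nn

pool = nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)

x = torch.randn(1, 128, 16)          # (batch, channels, length); 128 matches configs_enc[0]
pooled, indices = pool(x)            # the indices X_Enc collects in all_maxpools
restored = unpool(pooled, indices)   # X_Dec consumes them when unpooling
print(pooled.shape, restored.shape)  # torch.Size([1, 128, 8]) torch.Size([1, 128, 16])
```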
eembees/solar_flares | [
"9022f92c0577efaf06d7425002995e4fa4df74b4"
] | [
"reading_data.py"
] | [
"from pathlib import Path\nimport ijson\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom json import JSONDecoder, JSONDecodeError # for reading the JSON data files\nimport re # for regular expressions\nimport os # for os related operations\nfrom sklearn.preprocessing import maxabs_scale\n\n\ndef decode_obj(line, pos=0, decoder=JSONDecoder()):\n no_white_space_regex = re.compile(r'[^\\s]')\n while True:\n match = no_white_space_regex.search(line, pos)\n if not match:\n return\n pos = match.start()\n try:\n obj, pos = decoder.raw_decode(line, pos)\n except JSONDecodeError as err:\n print('Oops! something went wrong. Error: {}'.format(err))\n yield obj\n\n\ndef get_obj_with_last_n_val(line, n):\n obj = next(decode_obj(line)) # type:dict\n id = obj['id']\n class_label = obj['classNum']\n\n data = pd.DataFrame.from_dict(obj['values']) # type:pd.DataFrame\n data.set_index(data.index.astype(int), inplace=True)\n last_n_indices = np.arange(0, 60)[-n:]\n data = data.loc[last_n_indices]\n\n return {'id': id, 'classType': class_label, 'values': data}\n\n\ndef get_obj_with_all(line):\n obj = next(decode_obj(line)) # type:dict\n id = obj['id']\n try:\n class_label = obj['classNum']\n except KeyError:\n class_label = None\n\n data = pd.DataFrame.from_dict(obj['values']) # type:pd.DataFrame\n data.set_index(data.index.astype(int), inplace=True)\n # last_n_indices = np.arange(0, 60)[-n:]\n # data = data.loc[last_n_indices]\n\n return {'id': id, 'classType': class_label, 'values': data}\n\n\ndef read_json_data_to_df(file_path: Path):\n \"\"\"\n Generates a dataframe by concatenating the last values of each\n multi-variate time series. This method is designed as an example\n to show how a json object can be converted into a csv file.\n :param data_dir: the path to the data directory.\n :param file_name: name of the file to be read, with the extension.\n :return: the generated dataframe.\n \"\"\"\n\n all_df, labels, ids = [], [], []\n with open(file_path, 'r') as infile: # Open the file for reading\n for line in infile: # Each 'line' is one MVTS with its single label (0 or 1).\n obj = get_obj_with_all(line)\n all_df.append(obj['values'])\n labels.append(obj['classType'])\n ids.append(obj['id'])\n print(type(obj))\n print(obj['values'])\n print(type(obj['values']))\n # df =\n\n exit()\n\n df = pd.concat(all_df).reset_index(drop=True)\n df = df.assign(LABEL=pd.Series(labels))\n df = df.assign(ID=pd.Series(ids))\n df.set_index([pd.Index(ids)])\n # Uncomment if you want to save this as CSV\n # df.to_csv(file_name + '_last_vals.csv', index=False)\n\n return df\n\n\ndef read_json_data_to_arr(file_path: Path):\n \"\"\"\n Generates a dataframe by concatenating the last values of each\n multi-variate time series. 
This method is designed as an example\n to show how a json object can be converted into a csv file.\n :param data_dir: the path to the data directory.\n :param file_name: name of the file to be read, with the extension.\n :return: the generated dataframe.\n \"\"\"\n\n all_df, labels, ids = [], [], []\n with open(file_path, 'r') as infile: # Open the file for reading\n for line in infile: # Each 'line' is one MVTS with its single label (0 or 1).\n obj = get_obj_with_all(line)\n # if obj['id'] < 100:\n df = obj['values'].sort_index()\n # remove anything 2 std dev from the mean\n df = df.mask(df.sub(df.mean()).div(df.std()).abs().gt(2))\n # do interpolation of variables\n\n df = df.interpolate(method='linear', extrapolate=False)\n\n df = df.fillna(method='ffill').fillna(method='bfill').fillna(0.0)\n\n\n\n\n all_df.append(df.values)\n labels.append(obj['classType'])\n ids.append(obj['id'])\n\n\n all_df = np.array(all_df)\n labels = np.array(labels)\n ids = np.array(ids)\n\n return all_df, labels, ids\n\n\ndef save_DF_to_NPZ(fp: Path, out_dir):\n fo = out_dir / fp.with_suffix('.npz').name\n # fo_k = Path(str(fo).replace(('.npz', '_keys.npz')))\n df = pd.read_json(fp, lines=True)\n\n np.savez(fo, df=df, keys=df.keys, index=df.index)\n\n pass\n\n\ndef save_arr_to_npz(arr: np.ndarray, labels: np.ndarray, ids: np.ndarray, fo: Path):\n np.savez(fo, data=arr, labels=labels, index=ids)\n pass\n\n\ndef load_npz_file(path: Path, return_ids = False):\n a = np.load(path)\n\n X = a['data']\n\n if np.any(np.isnan(X)):\n X = np.nan_to_num(X)\n\n\n try:\n y = a['labels']\n except KeyError:\n y = None\n except ValueError:\n y = None\n\n if return_ids:\n try:\n ids = a['ids']\n except KeyError:\n ids = None\n except ValueError:\n ids = None\n\n return X, y, ids\n else:\n return X, y\n\n\ndef save_y_preds(y_index: np.ndarray, y_pred: np.ndarray, fo: Path):\n np.savez(fo, index=y_index, labels=y_pred)\n pass\n\n\n\ndef preprocess_data(X, scaler=maxabs_scale):\n shap = X.shape\n # print(shap[1:])\n if shap[1:] != (60, 25):\n raise ValueError('Data shape wrong')\n for i, x_i in enumerate(X):\n x_i_t = np.zeros_like(x_i.transpose())\n for j, series in enumerate(x_i.transpose()):\n series = scaler(series)\n x_i_t[j] = series\n X[i] = x_i_t.transpose()\n return X\n\n\n\nif __name__ == '__main__':\n data_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/')\n out_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/npz')\n # out_dir = Path('./input/npz')\n\n file_paths = list(data_dir.glob('test*.json'))\n print(file_paths)\n for fp in file_paths:\n fo = out_dir / fp.with_suffix('.npz').name\n all_df, labels, ids = read_json_data_to_arr(fp)\n\n save_arr_to_npz(all_df, labels, ids, fo)\n"
] | [
[
"numpy.load",
"pandas.Series",
"numpy.savez",
"pandas.Index",
"pandas.read_json",
"numpy.arange",
"pandas.concat",
"numpy.isnan",
"numpy.array",
"numpy.nan_to_num",
"pandas.DataFrame.from_dict"
]
] |
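A self-contained sketch of the cleaning steps in `read_json_data_to_arr` above, run on synthetic placeholder data instead of the JSON input: values more than two standard deviations from the column mean are masked, gaps are linearly interpolated, and any remaining holes are filled.

```python
import numpy as np
import pandas as pd

# In-memory stand-in for one multivariate time series.
df = pd.DataFrame({
    "a": [1.0, 2.0, 3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 100.0, 2.0],   # one obvious spike
    "b": [0.1, np.nan, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],  # one missing value
})
df = df.mask(df.sub(df.mean()).div(df.std()).abs().gt(2))   # drop far-from-mean points
df = df.interpolate(method="linear")                        # fill interior gaps
df = df.ffill().bfill().fillna(0.0)                         # fill anything left at the edges
print(df)
```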
geoffreynyaga/ostrich-project | [
"157cd7a3c3d9014e31ef21ca21de43f04d039997"
] | [
"CORE/engines/constraint.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n##################################################################################\r\n# File: c:\\Projects\\KENYA ONE PROJECT\\CORE\\engines\\constraint.py #\r\n# Project: c:\\Projects\\KENYA ONE PROJECT\\CORE\\engines #\r\n# Created Date: Thursday, January 9th 2020, 8:56:55 pm #\r\n# Author: Geoffrey Nyaga Kinyua ( <[email protected]> ) #\r\n# ----- #\r\n# Last Modified: Thursday January 9th 2020 8:56:55 pm #\r\n# Modified By: Geoffrey Nyaga Kinyua ( <[email protected]> ) #\r\n# ----- #\r\n# MIT License #\r\n# #\r\n# Copyright (c) 2020 KENYA ONE PROJECT #\r\n# #\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of#\r\n# this software and associated documentation files (the \"Software\"), to deal in #\r\n# the Software without restriction, including without limitation the rights to #\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #\r\n# of the Software, and to permit persons to whom the Software is furnished to do #\r\n# so, subject to the following conditions: #\r\n# #\r\n# The above copyright notice and this permission notice shall be included in all #\r\n# copies or substantial portions of the Software. #\r\n# #\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #\r\n# SOFTWARE. #\r\n# ----- #\r\n# Copyright (c) 2020 KENYA ONE PROJECT #\r\n##################################################################################\r\n\r\n\r\nimport sys\r\n\r\nsys.path.append(\"../\")\r\nfrom CORE.API.db_API import write_to_db, read_from_db\r\n\r\nimport numpy as np # type: ignore\r\nimport matplotlib.pylab as plt # type: ignore\r\n\r\na = np.arange(50)\r\n\r\nws = np.arange(10, 35, 0.01)\r\n\r\ncdmin: float = 0.025\r\nwrite_to_db(\"cdMin\", cdmin)\r\n\r\ndo = read_from_db(\"rhoSL\")\r\ndalt = read_from_db(\"altitudeDensity\") # AAAAA\r\nk = read_from_db(\"k\")\r\n\r\n# v = read_from_db('cruiseSpeed') * 1.688\r\nv: float = 140 * 1.688 # AAAAA\r\nqcruise = 0.5 * dalt * v ** 2 # dynamic pressure at cruise\r\nqtakeoff = 0.5 * do * v ** 2 # dynamic pressure at take-off\r\n\r\nturnangle = 40 # turn angle\r\nloadfactor = 1 / (np.cos(turnangle)) # loadfactor\r\ntwturn = (\r\n qcruise\r\n * ((cdmin / ws) + (k * (loadfactor / qcruise) ** 2) * ws)\r\n * (v * 5850 / (0.8 * 550 * 0.6604))\r\n)\r\n\r\n# rate of climb\r\nroc = read_from_db(\"rateOfClimb\") * 3.28 * 60 # rate of climb ft/min #AAAAAAA\r\n# Vy=sqrt((2/do)*ws * sqrt( k/(3*cdmin) ))\r\nVy = 150\r\nVv = roc / 60\r\nqclimb = 0.5 * do * (Vy ** 2)\r\ntwclimb = (\r\n (Vv / Vy) + ((qclimb / ws) * cdmin) + ((qclimb / ws) * cdmin) + ((k / qclimb) * ws)\r\n) * (Vy * 5850 / (0.6 * 550))\r\n\r\n# ground run\r\nSg: int = 1000 # ground run ft\r\nVlof: float = 70 * 1.688\r\nclto: float = 1.4670\r\nu: float = 0.04\r\ncdto = 0.03\r\nq1 = 0.5 * do * (Vlof / np.sqrt(2)) ** 2\r\ntwtakeoff = (\r\n ((Vlof ** 2) / (2 * 32.174 * Sg)) + ((q1 * cdto) / ws) + u * (1 - (q1 * clto / ws))\r\n) * (Vlof * 5850 / (0.6 * 550))\r\n\r\n# cruise altitude\r\ntwcruise = (((qcruise * cdmin) / ws) + ((k / qcruise) * ws)) * (\r\n v * 
5850 / (0.6 * 550 * 0.6604)\r\n)\r\n\r\n# service ceiling\r\ntwservceiling = (\r\n (1.668 / np.sqrt((2 * ws / dalt) * np.sqrt(k / (3 * cdmin))))\r\n + (4 * np.sqrt(k * cdmin / 3))\r\n) * ((v * 5850) / (0.7 * 550 * 0.6604))\r\n\r\nplt.plot(ws, twclimb, label=\"climb\")\r\nplt.plot(ws, twturn, label=\"turn\")\r\nplt.plot(ws, twtakeoff, label=\"Takeoff\")\r\nplt.plot(ws, twservceiling, label=\"Service Ceiling\")\r\nplt.plot(ws, twcruise, label=\"cruise\")\r\nplotWS = read_from_db(\"WS\")\r\nplt.axvline(x=plotWS) ################################\r\nplt.legend(loc=\"upper left\")\r\n\r\nif __name__ == \"__main__\":\r\n plt.show()\r\n\r\n\r\ndef find_nearest(array, value):\r\n idx = (np.abs(array - value)).argmin()\r\n return idx\r\n\r\n\r\n# print(find_nearest(ws, plotWS))\r\nmyidx = find_nearest(ws, plotWS)\r\n\r\n# cruiseidx = (twcruise[myidx])\r\n# takeoffidx = twtakeoff[myidx]\r\n# climbidx = twclimb[myidx]\r\n# turnidx = twturn[myidx]\r\n# ceilingidx = twservceiling[myidx]\r\n# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])\r\n\r\n\r\ndef point():\r\n cruiseidx = twcruise[myidx]\r\n takeoffidx = twtakeoff[myidx]\r\n climbidx = twclimb[myidx]\r\n turnidx = twturn[myidx]\r\n ceilingidx = twservceiling[myidx]\r\n # print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])\r\n # print (cruiseidx,\"cruiseidx\")\r\n\r\n x = np.array([cruiseidx, takeoffidx, climbidx, turnidx, ceilingidx])\r\n idx = x.argmax()\r\n return x[idx]\r\n\r\n\r\nfinalBHP = point()\r\n# print ( finalBHP,\"BHP\")\r\n\r\nwrite_to_db(\"finalBHP\", finalBHP)\r\n\r\nS = (read_from_db(\"finalMTOW\")) / (plotWS * 10.57)\r\nwrite_to_db(\"S\", S)\r\n"
] | [
[
"numpy.sqrt",
"matplotlib.pylab.legend",
"numpy.abs",
"numpy.cos",
"matplotlib.pylab.show",
"numpy.arange",
"matplotlib.pylab.axvline",
"numpy.array",
"matplotlib.pylab.plot"
]
] |
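One detail worth flagging in the constraint script above: `np.cos` works in radians, while `turnangle` is given in degrees, so `loadfactor = 1 / np.cos(turnangle)` evaluates the cosine of 40 radians. A minimal sketch of the level-turn load factor with the unit conversion made explicit (the 40-degree bank angle is taken from the script):

```python
import numpy as np

# Load factor in a level banked turn: n = 1 / cos(bank angle).
turn_angle_deg = 40
load_factor = 1.0 / np.cos(np.deg2rad(turn_angle_deg))
print(round(load_factor, 3))  # ~1.305
```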
nkuxx161/baseline-SR | [
"c4caf06c5a5a88d7f8e27069018316b319f0913b"
] | [
"plot.py"
] | [
"import pandas as pd\nimport os\n\ncurve_name = '5_k7'\n\ndata = pd.read_csv(os.path.join('result', curve_name+'.csv'))\ntimestamp = data['timestamp']\nvalue = data['value']\nmag = data['mag']\nisAnomaly = data['isAnomaly']\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt \n\nplt.subplot(3, 1, 1)\nplt.plot(timestamp, value)\nplt.title('value')\n\nplt.subplot(3, 1, 2)\nplt.plot(timestamp, mag)\nplt.title('mag')\n\nplt.subplot(3, 1, 3)\nplt.plot(timestamp, isAnomaly)\nplt.title('isAnomaly')\n\nplt.savefig(os.path.join('./images', 'SR_'+curve_name+'.png'))\nplt.show()\nplt.close()"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"matplotlib.use",
"matplotlib.pyplot.plot"
]
] |
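A self-contained variant of the plotting script above using synthetic placeholder series, so no result CSV is required. With the `Agg` backend selected, `plt.show()` is effectively a no-op, so `savefig` is what actually writes the image.

```python
import matplotlib
matplotlib.use("Agg")               # non-interactive backend: figures go to files
import matplotlib.pyplot as plt
import numpy as np

t = np.arange(100)
series = {
    "value": np.sin(t / 10.0),
    "mag": np.abs(np.sin(t / 10.0)),
    "isAnomaly": (t % 37 == 0).astype(int),
}

fig, axes = plt.subplots(3, 1, sharex=True)
for ax, (name, y) in zip(axes, series.items()):
    ax.plot(t, y)
    ax.set_title(name)
fig.tight_layout()
fig.savefig("SR_example.png")       # savefig, not show, produces the output under Agg
plt.close(fig)
```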
catniplab/ML-music-analysis | [
"793d54ed16166fbcd9acf4eec24998892334e064"
] | [
"models/_sources/model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py"
] | [
"\"\"\"\nThis script creates an instance of a sacred experiment and defines default configurations for training a neural network or a regression model.\n\"\"\"\n\nfrom src.neural_nets.models import get_model\nfrom src.neural_nets.load_data import get_loader\nfrom src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n\nimport src.regression.logistic_regression as reg\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchsso.optim as soptim\nimport torch.nn.functional as F\nimport random\n\nfrom torch.utils.data import DataLoader\nfrom sacred import Experiment\nfrom torch import Tensor, device\nfrom copy import deepcopy\nfrom time import sleep\nfrom tqdm import tqdm\n\nfrom typing import List\nfrom itertools import product\n\n\n# create a new sacred experiment whose name is an integer\nex = Experiment(name=str(random.randint(0, 1000000)))\n\n\n# default configurations\[email protected]\ndef cfg():\n\n # system\n cuda = torch.cuda.is_available()\n gpu = 0\n base_dir = os.getcwd()\n\n # supported datasets\n # JSB_Chorales (short)\n # Nottingham (medium)\n # Piano_midi (long)\n # MuseData (extra long)\n dataset = \"JSB_Chorales\"\n\n # training\n num_epochs = 150\n batch_size = 128\n # mask some low notes and some high notes because they never show up\n low_off_notes = 0\n high_off_notes = 88\n lr = 0.001\n decay = 1.0\n optmzr = \"SGD\"\n regularization = 0.0\n\n # hyperparameter search\n do_hpsearch = False\n learning_rates = 10**np.linspace(-2, -4, 5)\n decays = 1 - np.linspace(0, 0.1, num=5)\n regularizations = 10**np.linspace(-2, -4, num=5)\n hps_epochs = 50\n\n # Supported architectures\n # REGRESSION\n # LDS\n # TANH\n architecture = 'LDS'\n readout = 'linear'\n gradient_clipping = 1\n jit = False # not fully implemented\n # for regression\n lag = 1\n window = 1\n # for neural networks\n input_size = 88\n hidden_size = 300\n num_layers = 1\n output_size = 88\n\n # see models.py and initialization.py for details\n init = 'default'\n scale = 1.0\n parity = None # see models.py\n t_distrib = torch.distributions.Uniform(0, 0.75)\n path = 'results/77/final_state_dict.pt'\n\n # when to save state dictionaries\n save_init_model = True\n save_final_model = True\n save_every_epoch = False\n\n # detect backprop anomalies\n detect_anomaly = False\n\n\n# give all random number generators the same seed\ndef _seed_all(_seed) -> None:\n torch.manual_seed(_seed)\n np.random.seed(_seed)\n random.seed(_seed)\n\n\n# this context is used when we are running things on the cpu\nclass NullContext(object):\n def __init__(self):\n pass\n def __enter__(self):\n pass\n def __exit__(self, type, value, traceback):\n pass\n\n\n# this function simply trains regression models and logs the results\n# see regression.trainer for details\[email protected]\ndef sklearn_experiment(dataset: str,\n save_dir: str,\n num_epochs: int,\n high_off_notes: int,\n low_off_notes: int,\n lag: int,\n window: int,\n _seed,\n _log,\n _run):\n \"\"\"\n :param dataset: name of the dataset to be used\n :save_dir: temporary directory where artifacts are being stored\n :lag: how many time steps into the future the regression model is to predict\n :window: how many time steps the regression model is to take into account\n :param _seed: sacred random seed\n :param _log: sacred object used to output to the command line\n :param _run: sacred object used to monitor the runtime\n \"\"\"\n\n num_notes = high_off_notes - low_off_notes\n\n models = 
reg.train_models(dataset,\n num_epochs,\n low_off_notes,\n high_off_notes,\n _seed,\n lag=lag,\n window=window)\n\n coefs = np.zeros((num_notes, num_notes*window))\n intercepts = np.zeros(num_notes*window)\n\n for i in range(num_notes):\n\n model = models[i]\n\n # if there were no notes played for this channel, a model won't be trained\n # simply save all parameters as -1 to discourage the note from being played\n if model == None:\n coefs[i] = -1\n intercepts[i] = -1\n\n else:\n coefs[i] = model.coef_\n intercepts[i] = model.intercept_\n\n np.save(save_dir + 'coefs.npy', coefs)\n np.save(save_dir + 'intercepts.npy', intercepts)\n\n _run.add_artifact(save_dir + 'coefs.npy')\n _run.add_artifact(save_dir + 'intercepts.npy')\n\n train_loss = reg.compute_loss(models,\n dataset,\n 'traindata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n test_loss = reg.compute_loss(models,\n dataset,\n 'testdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n valid_loss = reg.compute_loss(models,\n dataset,\n 'validdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n\n _run.log_scalar('trainLoss', train_loss)\n _run.log_scalar('testLoss', test_loss)\n _run.log_scalar('validLoss', valid_loss)\n\n train_acc = reg.compute_accuracy(models,\n dataset,\n 'traindata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n test_acc = reg.compute_accuracy(models,\n dataset,\n 'testdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n valid_acc = reg.compute_accuracy(models,\n dataset,\n 'validdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n\n _run.log_scalar('trainAccuracy', train_acc)\n _run.log_scalar('testAccuracy', test_acc)\n _run.log_scalar('validAccuracy', valid_acc)\n\n\n# a single optimization step\[email protected]\ndef train_iter(device: device,\n cuda_device: torch.cuda.device,\n input_tensor: Tensor,\n target: Tensor,\n mask: Tensor,\n model: nn.Module,\n loss_fcn: nn.Module,\n optimizer: optim.Optimizer,\n save_every_epoch: bool,\n save_dir: str,\n train_loader: DataLoader,\n test_loader: DataLoader,\n valid_loader: DataLoader,\n low_off_notes: int,\n high_off_notes: int,\n _log,\n _run,\n logging=True):\n\n input_tensor = input_tensor.to(device)\n\n # number of songs in this batch\n N = input_tensor.shape[0]\n\n output, hidden_tensors = model(input_tensor)\n\n loss = loss_fcn(output, target, mask, model)/N\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # use sacred to log training loss and accuracy\n if logging:\n train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n _run.log_scalar(\"trainLoss\", loss.cpu().detach().item())\n _run.log_scalar(\"trainAccuracy\", train_acc)\n\n # save a copy of the model and make sacred remember it each epoch\n if save_every_epoch and logging:\n sd = deepcopy(model.state_dict())\n torch.save(init_sd, save_dir + 'state_dict_' + str(epoch) + '.pt')\n _run.add_artifact(save_dir + 'state_dict_' + str(epoch) + '.pt')\n\n\n# train a neural network\n# returns the final loss and accuracy on the training, testing, and validation sets\[email protected]\ndef pytorch_train_loop(cuda: bool,\n model_dict: dict,\n initializer: dict,\n train_loader: DataLoader,\n test_loader: DataLoader,\n valid_loader: DataLoader,\n low_off_notes: int,\n high_off_notes: int,\n optmzr: str,\n lr: float,\n decay: float,\n regularization: float,\n num_epochs: int,\n save_dir: str,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n 
_log,\n _run,\n logging=True):\n\n # construct and initialize the model\n model = get_model(model_dict, initializer, cuda)\n\n # save a copy of the initial model and make sacred remember it\n if save_init_model and logging:\n init_sd = deepcopy(model.state_dict())\n torch.save(init_sd, save_dir + 'initial_state_dict.pt')\n _run.add_artifact(save_dir + 'initial_state_dict.pt')\n\n # if we are on cuda we construct the device and run everything on it\n cuda_device = NullContext()\n device = torch.device('cpu')\n if cuda:\n dev_name = 'cuda:' + str(gpu)\n cuda_device = torch.cuda.device(dev_name)\n device = torch.device(dev_name)\n model = model.to(device)\n\n with cuda_device:\n\n # see metrics.py\n loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes)\n\n # compute the metrics before training and log them\n if logging:\n\n train_loss = compute_loss(loss_fcn, model, train_loader)\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n\n _run.log_scalar(\"trainLoss\", train_loss)\n _run.log_scalar(\"testLoss\", test_loss)\n _run.log_scalar(\"validLoss\", val_loss)\n\n train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n _run.log_scalar(\"trainAccuracy\", train_acc)\n _run.log_scalar(\"testAccuracy\", test_acc)\n _run.log_scalar(\"validAccuracy\", val_acc)\n\n # construct the optimizer\n optimizer = None\n if optmzr == \"SGD\":\n optimizer = optim.SGD(model.parameters(), lr=lr)\n elif optmzr == \"Adam\":\n optimizer = optim.Adam(model.parameters(), lr=lr)\n elif optmzr == \"RMSprop\":\n optimizer = optim.RMSprop(model.parameters(), lr=lr)\n else:\n raise ValueError(\"Optimizer {} not recognized.\".format(optmzr))\n\n # learning rate decay\n scheduler = None\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch)\n\n # begin training loop\n for epoch in tqdm(range(num_epochs)):\n\n for input_tensor, target, mask in train_loader:\n train_iter(device,\n cuda_device,\n input_tensor,\n target,\n mask,\n model,\n loss_fcn,\n optimizer,\n save_every_epoch,\n save_dir,\n train_loader,\n test_loader,\n valid_loader,\n low_off_notes,\n high_off_notes,\n _log,\n _run,\n logging=logging)\n\n # learning rate decay\n scheduler.step()\n\n # use sacred to log testing and validation loss and accuracy\n if logging:\n\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n _run.log_scalar(\"testLoss\", test_loss)\n _run.log_scalar(\"validLoss\", val_loss)\n _run.log_scalar(\"testAccuracy\", test_acc)\n _run.log_scalar(\"validAccuracy\", val_acc)\n\n # save a copy of the trained model and make sacred remember it\n if save_final_model and logging:\n fin_sd = deepcopy(model.state_dict())\n torch.save(fin_sd, save_dir + 'final_state_dict.pt')\n _run.add_artifact(save_dir + 'final_state_dict.pt')\n\n # recompute the metrics so that this function can return them\n train_loss = compute_loss(loss_fcn, model, train_loader)\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n\n train_acc = 
compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))\n\n\n# main function\[email protected]\ndef train_loop(cuda,\n gpu,\n base_dir,\n dataset,\n num_epochs,\n batch_size,\n low_off_notes,\n high_off_notes,\n lr,\n decay,\n optmzr,\n regularization,\n do_hpsearch,\n learning_rates,\n decays,\n regularizations,\n hps_epochs,\n architecture,\n readout,\n gradient_clipping,\n jit,\n lag,\n window,\n input_size,\n hidden_size,\n num_layers,\n output_size,\n detect_anomaly,\n init,\n scale,\n parity,\n t_distrib,\n path,\n save_init_model,\n save_final_model,\n save_every_epoch,\n _seed,\n _log,\n _run):\n\n # save artifacts to a temporary directory that gets erased when the experiment is over\n save_dir = base_dir + '/tmp_' + str(_seed)\n os.system('mkdir ' + save_dir)\n save_dir += '/'\n\n # give all random number generators the same seed\n _seed_all(_seed)\n\n sklearn_program = architecture == 'REGRESSION'\n\n # regression models and neural networks are trained very differently\n if sklearn_program:\n\n sklearn_experiment(dataset,\n save_dir,\n num_epochs,\n high_off_notes,\n low_off_notes,\n lag,\n window,\n _seed,\n _log,\n _run)\n\n # run a pytorch program\n else:\n\n model_dict = {'architecture': architecture,\n 'readout': readout,\n 'gradient_clipping': gradient_clipping,\n 'jit': jit,\n 'lag': lag,\n 'window': window,\n 'input_size': input_size,\n 'hidden_size': hidden_size,\n 'num_layers': num_layers,\n 'output_size': output_size\n }\n\n initializer = {'init': init,\n 'scale': scale,\n 'parity': parity,\n 't_distrib': t_distrib,\n 'path': path,\n 'low_off_notes': low_off_notes,\n 'high_off_notes': high_off_notes\n }\n\n # if we are debugging we may want to detect autograd anomalies\n torch.autograd.set_detect_anomaly(detect_anomaly)\n\n # construct the pytorch data loaders\n train_loader, test_loader, valid_loader = get_loader(dataset, batch_size)\n\n # standard training loop\n if not do_hpsearch:\n\n # the training loop function returns the metrics achieved at the end of training\n # they will be logged by default, no need to do anything with them here\n metrics = pytorch_train_loop(cuda,\n model_dict,\n initializer,\n train_loader,\n test_loader,\n valid_loader,\n low_off_notes,\n high_off_notes,\n optmzr,\n lr,\n decay,\n regularization,\n num_epochs,\n save_dir,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n _log,\n _run)\n\n # only goal here is to find the best hyper parameters\n else:\n\n min_test_loss = float('inf')\n best_lr = 0\n best_dcay = 0\n best_reg = 0\n\n hyperparams = product(learning_rates, decays, regularizations)\n\n for rate, dcay, reg in hyperparams:\n\n # train a model with the given hyperparameters\n # don't log anything, otherwise we will have a ridiculous amount of extraneous info\n metrics = pytorch_train_loop(cuda,\n model_dict,\n initializer,\n train_loader,\n test_loader,\n valid_loader,\n optmzr,\n rate,\n dcay,\n reg,\n hps_epochs,\n save_dir,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n _log,\n _run,\n logging=False)\n\n # loss is first index, test set is second index\n test_loss = metrics[0][1]\n\n # compare loss against other hyperparams and update if necessary\n if test_loss == test_loss and test_loss < min_test_loss:\n min_test_loss = 
test_loss\n best_lr = rate\n best_dcay = dcay\n best_reg = reg\n\n # record the best hyperparameters\n _run.log_scalar(\"learning_rate\", best_lr)\n _run.log_scalar(\"decay\", best_dcay)\n _run.log_scalar(\"regularization\", best_reg)\n\n # wait a second then remove the temporary directory used for storing artifacts\n sleep(1)\n os.system('rm -r ' + save_dir)\n"
] | [
[
"torch.distributions.Uniform",
"numpy.save",
"numpy.zeros",
"torch.optim.lr_scheduler.LambdaLR",
"torch.manual_seed",
"torch.save",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.autograd.set_detect_anomaly",
"numpy.linspace",
"torch.device",
"torch.cuda.device"
]
] |
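A minimal sketch of the per-epoch learning-rate decay used in `pytorch_train_loop` above: `LambdaLR` multiplies the base learning rate by `decay**epoch` on each `scheduler.step()`. The parameter, base rate, and epoch count here are placeholders.

```python
import torch
import torch.nn as nn
import torch.optim as optim

param = nn.Parameter(torch.zeros(1))
optimizer = optim.SGD([param], lr=0.1)
decay = 0.9
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay ** epoch)

for epoch in range(3):
    # ... one epoch of train_iter calls would go here ...
    optimizer.step()
    scheduler.step()
    print(epoch, optimizer.param_groups[0]["lr"])   # 0.09, 0.081, 0.0729
```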
dutxubo/nni | [
"c16f4e1c89b54b8b80661ef0072433d255ad2d24"
] | [
"test/ut/tools/annotation/testcase/usercode/mnist.py"
] | [
"# -*- encoding:utf8 -*-\n\n\"\"\"A deep MNIST classifier using convolutional layers.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport math\nimport tempfile\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nlogger = logging.getLogger('mnist')\n\nFLAGS = None\n\nclass MnistNetwork(object):\n def __init__(self,\n channel_1_num = 32,\n channel_2_num = 64,\n conv_size = 5,\n hidden_size = 1024,\n pool_size = 2,\n learning_rate = 0.0001,\n x_dim = 784,\n y_dim = 10):\n self.channel_1_num = channel_1_num\n self.channel_2_num = channel_2_num\n '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''\n self.conv_size = conv_size\n '''@nni.variable(nni.choice(124,512,1024),name=self.hidden_size)'''\n self.hidden_size = hidden_size\n self.pool_size = pool_size\n '''@nni.variable(nni.randint(2,3,5),name=self.learning_rate)'''\n self.learning_rate = learning_rate\n self.x_dim = x_dim\n self.y_dim = y_dim\n\n def build_network(self):\n self.x = tf.placeholder(tf.float32, [None, self.x_dim], name = 'input_x')\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name = 'input_y')\n self.keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')\n\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n try:\n input_dim = int(math.sqrt(self.x_dim))\n except:\n #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))\n logger.debug('input dim cannot be sqrt and reshape. input dim: ', str(self.x_dim))\n raise\n x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])\n b_conv1 = bias_variable([self.channel_1_num])\n \"\"\"@nni.function_choice(tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1),name=tf.nn.relu)\"\"\"\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n \"\"\"@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)\"\"\"\n h_pool1 = max_pool(h_conv1, self.pool_size)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])\n b_conv2 = bias_variable([self.channel_2_num])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n #\"\"\"@nni.dynamic(input={cnn_block:1, concat:2},function_choice={\"cnn_block\":(x,nni.choice([3,4])),\"cnn_block\":(x),\"concat\":(x,y)},limit={\"cnn_block.input\":[concat,input],\"concat.input\":[this.depth-1,this.depth-3,this.depth-5],\"graph.width\":[1]})\"\"\"\n h_pool2 = max_pool(h_conv2, self.pool_size)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n last_dim = int(input_dim / (self.pool_size * self.pool_size))\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, 
self.hidden_size])\n b_fc1 = bias_variable([self.hidden_size])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation of features.\n with tf.name_scope('dropout'):\n h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([self.hidden_size, self.y_dim])\n b_fc2 = bias_variable([self.y_dim])\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n with tf.name_scope('loss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = y_conv))\n with tf.name_scope('adam_optimizer'):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool(x, pool_size):\n \"\"\"max_pool downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1],\n strides=[1, pool_size, pool_size, 1], padding='SAME')\ndef avg_pool(x,pool_size):\n return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1],\n strides=[1, pool_size, pool_size, 1], padding='SAME')\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef main():\n # Import data\n data_dir= '/tmp/tensorflow/mnist/input_data'\n mnist = input_data.read_data_sets(data_dir, one_hot=True)\n logger.debug('Mnist download data done.')\n\n # Create the model\n # Build the graph for the deep net\n mnist_network = MnistNetwork()\n mnist_network.build_network()\n logger.debug('Mnist build network done.')\n\n # Write log\n graph_location = tempfile.mkdtemp()\n logger.debug('Saving graph to: %s', graph_location)\n # print('Saving graph to: %s' % graph_location)\n train_writer = tf.summary.FileWriter(graph_location)\n train_writer.add_graph(tf.get_default_graph())\n\n test_acc = 0.0\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n batch_num=200\n for i in range(batch_num):\n '''@nni.variable(nni.choice(50,250,500),name=batch_size)'''\n batch_size=50\n batch = mnist.train.next_batch(batch_size)\n '''@nni.variable(nni.choice(1,5),name=dropout_rate)'''\n dropout_rate=0.5\n mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate})\n\n if i % 100 == 0:\n #train_accuracy = mnist_network.accuracy.eval(feed_dict={\n # mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']})\n #print('step %d, training accuracy %g' % (i, train_accuracy))\n\n test_acc = mnist_network.accuracy.eval(feed_dict={\n mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})\n '''@nni.report_intermediate_result(test_acc)'''\n\n test_acc = 
mnist_network.accuracy.eval(feed_dict={\n mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})\n '''@nni.report_final_result(test_acc)'''\n\n\ndef generate_default_params():\n params = {'data_dir': '/tmp/tensorflow/mnist/input_data',\n 'dropout_rate': 0.5,\n 'channel_1_num': 32,\n 'channel_2_num': 64,\n 'conv_size': 5,\n 'pool_size': 2,\n 'hidden_size': 1024,\n 'batch_size': 50,\n 'batch_num': 200,\n 'learning_rate': 1e-4}\n return params\n\nif __name__ == '__main__':\n # run command: python mnist.py --init_file_path ./init.json\n\n #FLAGS, unparsed = parse_command()\n #original_params = parse_init_json(FLAGS.init_file_path, {})\n\n #pipe_interface.set_params_to_env()\n '''@nni.get_next_parameter()'''\n try:\n params = generate_default_params()\n logger.debug('params')\n logger.debug('params update')\n main()\n except:\n logger.exception('Got some exception in while loop in mnist.py')\n raise\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reshape",
"tensorflow.nn.avg_pool",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.Variable",
"tensorflow.summary.FileWriter",
"tensorflow.nn.dropout",
"tensorflow.nn.max_pool",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"tensorflow.cast",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.get_default_graph",
"tensorflow.argmax"
]
] |
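A brief sketch of the NNI annotation pattern this test case exercises: a docstring-style annotation sits directly above a plain default assignment, and the NNI annotation tooling rewrites that assignment with a tuner-sampled value; outside of NNI the defaults run unchanged. The names and choice values below are placeholders, not taken from the file.

```python
'''@nni.variable(nni.choice(16, 32, 64), name=batch_size)'''
batch_size = 32            # default used when the script runs without NNI

'''@nni.variable(nni.choice(0.001, 0.0001), name=learning_rate)'''
learning_rate = 0.001

print(batch_size, learning_rate)
```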
jessehui/occlum | [
"8a5f3033881c090340d678f2aecdca4ac6355bf4"
] | [
"demos/python/python_musl/demo.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.datasets import dump_svmlight_file\n\ndf1 = pd.read_csv(\"./dataset/input_label.csv\")\ndf2 = pd.read_csv(\"./dataset/input.csv\")\nres = pd.merge(df1, df2, how='left', left_on='id', right_on='id')\n\nX = res[np.setdiff1d(res.columns,['label','id'])]\ny = res.label\n\ndump_svmlight_file(X,y,'/host/smvlight.dat',zero_based=True,multilabel=False)\n"
] | [
[
"pandas.read_csv",
"sklearn.datasets.dump_svmlight_file",
"pandas.merge",
"numpy.setdiff1d"
]
] |
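An in-memory sketch of the same pipeline as the demo above, with small placeholder frames standing in for the two CSVs and a local placeholder output path: labels and features are joined on `id`, and every column except `label` and `id` becomes a feature before the SVMlight dump.

```python
import numpy as np
import pandas as pd
from sklearn.datasets import dump_svmlight_file

labels = pd.DataFrame({"id": [1, 2, 3], "label": [0, 1, 0]})
features = pd.DataFrame({"id": [1, 2, 3], "f1": [0.1, 0.2, 0.3], "f2": [1.0, 0.0, 1.0]})

res = pd.merge(labels, features, how="left", on="id")
X = res[np.setdiff1d(res.columns, ["label", "id"])]   # all columns except label and id
y = res.label

dump_svmlight_file(X, y, "svmlight_example.dat", zero_based=True, multilabel=False)
```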
adrenadine33/graphvite | [
"34fc203f96ff13095073c605ecfcae32213e7f6a"
] | [
"python/graphvite/application/application.py"
] | [
"# Copyright 2019 MilaGraph. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Zhaocheng Zhu\n\n\"\"\"Implementation of applications\"\"\"\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\nimport os\nimport re\nimport pickle\nimport logging\nimport multiprocessing\nfrom collections import defaultdict\n\nfrom future.builtins import str, map, range\nfrom easydict import EasyDict\nimport numpy as np\n\nfrom .. import lib, cfg, auto\nfrom .. import graph, solver\nfrom ..util import assert_in, monitor, SharedNDArray\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApplicationMixin(object):\n \"\"\"\n General interface of graph applications.\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n gpu_memory_limit (int, optional): memory limit per GPU in bytes, default is all memory\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n \"\"\"\n def __init__(self, dim, gpus=[], cpu_per_gpu=auto, gpu_memory_limit=auto,\n float_type=cfg.float_type, index_type=cfg.index_type):\n self.dim = dim\n self.gpus = gpus\n self.cpu_per_gpu = cpu_per_gpu\n self.gpu_memory_limit = gpu_memory_limit\n self.float_type = float_type\n self.index_type = index_type\n self.set_format()\n\n def get_graph(self, **kwargs):\n raise NotImplementedError\n\n def get_solver(self, **kwargs):\n raise NotImplementedError\n\n def set_format(self, delimiters=\" \\t\\r\\n\", comment=\"#\"):\n \"\"\"\n Set the format for parsing input data.\n\n Parameters:\n delimiters (str, optional): string of delimiter characters\n comment (str, optional): prefix of comment strings\n \"\"\"\n self.delimiters = delimiters\n self.comment = comment\n self.pattern = re.compile(\"[%s]\" % self.delimiters)\n\n @monitor.time\n def load(self, **kwargs):\n \"\"\"load(**kwargs)\n Load a graph from file or Python object.\n Arguments depend on the underlying graph type.\n \"\"\"\n self.graph = self.get_graph(**kwargs)\n if \"file_name\" in kwargs or \"vector_file\" in \"kwargs\":\n self.graph.load(delimiters=self.delimiters, comment=self.comment, **kwargs)\n else:\n self.graph.load(**kwargs)\n\n @monitor.time\n def build(self, **kwargs):\n \"\"\"build(**kwargs)\n Build the solver from the graph.\n Arguments depend on the underlying solver type.\n \"\"\"\n self.solver = self.get_solver(**kwargs)\n self.solver.build(self.graph, **kwargs)\n\n @monitor.time\n def train(self, **kwargs):\n \"\"\"train(**kwargs)\n Train embeddings with the solver.\n Arguments depend on the underlying solver type.\n \"\"\"\n self.solver.train(**kwargs)\n\n @monitor.time\n def evaluate(self, task, **kwargs):\n \"\"\"evaluate(task, **kwargs)\n Evaluate the learned embeddings on a downstream task.\n Arguments depend on the underlying graph type and the task.\n\n Parameters:\n task (str): name of task\n\n Returns:\n dict: 
metrics and their values\n \"\"\"\n func_name = task.replace(\" \", \"_\")\n if not hasattr(self, func_name):\n raise ValueError(\"Unknown task `%s`\" % task)\n\n logger.info(lib.io.header(task))\n result = getattr(self, func_name)(**kwargs)\n if isinstance(result, dict):\n for metric, value in sorted(result.items()):\n logger.warning(\"%s: %g\" % (metric, value))\n\n return result\n\n @monitor.time\n def load_model(self, file_name):\n \"\"\"\n Load model in pickle format.\n\n Parameters:\n file_name (str): file name:\n \"\"\"\n logger.warning(\"load model from `%s`\" % file_name)\n\n with open(file_name, \"rb\") as fin:\n model = pickle.load(fin)\n self.set_parameters(model)\n\n @monitor.time\n def save_model(self, file_name, save_hyperparameter=False):\n \"\"\"\n Save model in pickle format.\n\n Parameters:\n file_name (str): file name\n save_hyperparameter (bool, optional): save hyperparameters or not, default is false\n \"\"\"\n def is_mapping(name, attribute):\n return \"2\" in name\n\n def is_embedding(name, attribute):\n if name[0] == \"_\":\n return False\n return isinstance(attribute, np.ndarray)\n\n def is_hyperparameter(name, attribute):\n if name[0] == \"_\":\n return False\n return isinstance(attribute, int) or isinstance(attribute, float) or isinstance(attribute, str)\n\n def get_attributes(object, filter):\n attributes = EasyDict()\n for name in dir(object):\n attribute = getattr(object, name)\n if filter(name, attribute):\n attributes[name] = attribute\n return attributes\n\n logger.warning(\"save model to `%s`\" % file_name)\n\n model = EasyDict()\n model.graph = get_attributes(self.graph, is_mapping)\n model.solver = get_attributes(self.solver, is_embedding)\n if save_hyperparameter:\n model.graph.update(get_attributes(self.graph, is_hyperparameter))\n model.solver.update(get_attributes(self.solver, is_hyperparameter))\n model.solver.optimizer = get_attributes(self.solver.optimizer, is_hyperparameter)\n model.solver.optimizer.schedule = self.solver.optimizer.schedule.type\n\n with open(file_name, \"wb\") as fout:\n pickle.dump(model, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n def get_mapping(self, id2name, name2id):\n mapping = []\n for name in id2name:\n if name not in name2id:\n raise ValueError(\"Can't find the embedding for `%s`\" % name)\n mapping.append(name2id[name])\n return mapping\n\n def tokenize(self, str):\n str = str.strip(self.delimiters)\n comment_start = str.find(self.comment)\n if comment_start != -1:\n str = str[:comment_start]\n return self.pattern.split(str)\n\n def name_map(self, dicts, names):\n assert len(dicts) == len(names), \"The number of dictionaries and names must be equal\"\n\n indexes = [[] for _ in range(len(names))]\n num_param = len(names)\n num_sample = len(names[0])\n for i in range(num_sample):\n valid = True\n for j in range(num_param):\n if names[j][i] not in dicts[j]:\n valid = False\n break\n if valid:\n for j in range(num_param):\n indexes[j].append(dicts[j][names[j][i]])\n return indexes\n\n def gpu_map(self, func, settings):\n import torch\n\n gpus = self.gpus if self.gpus else range(torch.cuda.device_count())\n new_settings = []\n for i, setting in enumerate(settings):\n new_settings.append(setting + (gpus[i % len(gpus)],))\n settings = new_settings\n\n try:\n start_method = multiprocessing.get_start_method()\n # if there are other running processes, this could cause leakage of semaphores\n multiprocessing.set_start_method(\"spawn\", force=True)\n pool = multiprocessing.Pool(len(gpus))\n results = pool.map(func, settings, 
chunksize=1)\n multiprocessing.set_start_method(start_method, force=True)\n except AttributeError:\n logger.info(\"Spawn mode is not supported by multiprocessing. Switch to serial execution.\")\n results = list(map(func, settings))\n\n return results\n\n\nclass GraphApplication(ApplicationMixin):\n \"\"\"\n Node embedding application.\n\n Given a graph, it embeds each node into a continuous vector representation.\n The learned embeddings can be used for many downstream tasks.\n e.g. **node classification**, **link prediction**, **node analogy**.\n The similarity between node embeddings can be measured by cosine distance.\n\n Supported Models:\n - DeepWalk (`DeepWalk: Online Learning of Social Representations`_)\n - LINE (`LINE: Large-scale Information Network Embedding`_)\n - node2vec (`node2vec: Scalable Feature Learning for Networks`_)\n\n .. _DeepWalk\\: Online Learning of Social Representations:\n https://arxiv.org/pdf/1403.6652.pdf\n .. _LINE\\: Large-scale Information Network Embedding:\n https://arxiv.org/pdf/1503.03578.pdf\n .. _node2vec\\: Scalable Feature Learning for Networks:\n https://www.kdd.org/kdd2016/papers/files/rfp0218-groverA.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n See also:\n :class:`Graph <graphvite.graph.Graph>`,\n :class:`GraphSolver <graphvite.solver.GraphSolver>`\n \"\"\"\n\n def get_graph(self, **kwargs):\n return graph.Graph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.GraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n mapping = self.get_mapping(self.graph.id2name, model.graph.name2id)\n self.solver.vertex_embeddings[:] = model.solver.vertex_embeddings[mapping]\n self.solver.context_embeddings[:] = model.solver.context_embeddings[mapping]\n\n def node_classification(self, X=None, Y=None, file_name=None, portions=(0.02,), normalization=False, times=1,\n patience=100):\n \"\"\"\n Evaluate node embeddings on node classification task.\n\n Parameters:\n X (list of str, optional): names of nodes\n Y (list, optional): labels of nodes\n file_name (str, optional): file of nodes & labels\n portions (tuple of float, optional): how much data for training\n normalization (bool, optional): normalize the embeddings or not\n times (int, optional): number of trials\n patience (int, optional): patience on loss convergence\n\n Returns:\n dict: macro-F1 & micro-F1 averaged over all trials\n \"\"\"\n import scipy.sparse as sp\n\n self.solver.clear()\n\n if file_name:\n if not (X is None and Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n X = []\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n x, y = tokens\n X.append(x)\n Y.append(y)\n if X is None or Y is None:\n raise ValueError(\"Either evaluataion data (X, Y) or a file name should be provided\")\n\n name2id = self.graph.name2id\n class2id = {c:i for i, c in enumerate(np.unique(Y))}\n new_X, new_Y = self.name_map((name2id, class2id), (X, Y))\n logger.info(\"effective labels: %d / %d\" % 
(len(new_X), len(X)))\n X = np.asarray(new_X)\n Y = np.asarray(new_Y)\n\n labels = sp.coo_matrix((np.ones_like(X), (X, Y)), dtype=np.int32).todense()\n indexes, _ = np.where(np.sum(labels, axis=1) > 0)\n # discard non-labeled nodes\n labels = labels[indexes]\n vertex_embeddings = SharedNDArray(self.solver.vertex_embeddings[indexes])\n\n settings = []\n for portion in portions:\n settings.append((vertex_embeddings, labels, portion, normalization, times, patience))\n results = self.gpu_map(linear_classification, settings)\n\n metrics = {}\n for result in results:\n metrics.update(result)\n return metrics\n\n def link_prediction(self, H=None, T=None, Y=None, file_name=None, filter_H=None, filter_T=None, filter_file=None):\n \"\"\"\n Evaluate node embeddings on link prediction task.\n\n Parameters:\n H (list of str, optional): names of head nodes\n T (list of str, optional): names of tail nodes\n Y (list of int, optional): labels of edges\n file_name (str, optional): file of edges and labels (e.g. validation set)\n filter_H (list of str, optional): names of head nodes to filter out\n filter_T (list of str, optional): names of tail nodes to filter out\n filter_file (str, optional): file of edges to filter out (e.g. training set)\n\n Returns:\n dict: AUC of link prediction\n \"\"\"\n import torch\n\n from .network import LinkPredictor\n\n self.solver.clear()\n\n if file_name:\n if not (H is None and T is None and Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n H = []\n T = []\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n h, t, y = tokens\n H.append(h)\n T.append(t)\n Y.append(y)\n if H is None or T is None or Y is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n\n if filter_file:\n if not (filter_H is None and filter_T is None):\n raise ValueError(\"Filter data and file should not be provided at the same time\")\n filter_H = []\n filter_T = []\n with open(filter_file, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n h, t = tokens\n filter_H.append(h)\n filter_T.append(t)\n elif filter_H is None:\n filter_H = []\n filter_T = []\n\n name2id = self.graph.name2id\n Y = [int(y) for y in Y]\n new_H, new_T, new_Y = self.name_map((name2id, name2id, {0:0, 1:1}), (H, T, Y))\n logger.info(\"effective edges: %d / %d\" % (len(new_H), len(H)))\n H = new_H\n T = new_T\n Y = new_Y\n new_H, new_T = self.name_map((name2id, name2id), (filter_H, filter_T))\n logger.info(\"effective filter edges: %d / %d\" % (len(new_H), len(filter_H)))\n filters = set(zip(new_H, new_T))\n new_H = []\n new_T = []\n new_Y = []\n for h, t, y in zip(H, T, Y):\n if (h, t) not in filters:\n new_H.append(h)\n new_T.append(t)\n new_Y.append(y)\n logger.info(\"remaining edges: %d / %d\" % (len(new_H), len(H)))\n H = np.asarray(new_H)\n T = np.asarray(new_T)\n Y = np.asarray(new_Y)\n\n vertex_embeddings = self.solver.vertex_embeddings\n context_embeddings = self.solver.context_embeddings\n model = LinkPredictor(self.solver.model, vertex_embeddings, context_embeddings)\n model = model.cuda()\n\n H = torch.as_tensor(H)\n T = torch.as_tensor(T)\n Y = torch.as_tensor(Y)\n H = H.cuda()\n T = T.cuda()\n Y = Y.cuda()\n score = model(H, T)\n order = torch.argsort(score, descending=True)\n Y = Y[order]\n hit = torch.cumsum(Y, dim=0)\n all = torch.sum(Y == 0) * torch.sum(Y == 1)\n auc = torch.sum(hit[Y == 0]).item() / 
all.item()\n\n return {\n \"AUC\": auc\n }\n\n\ndef linear_classification(args):\n import torch\n from torch import optim\n from torch.nn import functional as F\n from .network import NodeClassifier\n\n def generate_one_vs_rest(indexes, labels):\n new_indexes = []\n new_labels = []\n num_class = labels.shape[1]\n for index, sample_labels in zip(indexes, labels):\n for cls in np.where(sample_labels)[0]:\n new_indexes.append(index)\n new_label = np.zeros(num_class, dtype=np.int)\n new_label[cls] = 1\n new_labels.append(new_label)\n return torch.as_tensor(new_indexes), torch.as_tensor(new_labels)\n\n embeddings, labels, portion, normalization, times, patience, gpu = args\n embeddings = np.asarray(embeddings)\n num_sample, num_class = labels.shape\n num_train = int(num_sample * portion)\n\n macro_f1s = []\n micro_f1s = []\n for t in range(times):\n samples = np.random.permutation(num_sample)\n train_samples = samples[:num_train]\n train_labels = np.asarray(labels[train_samples])\n train_samples, train_labels = generate_one_vs_rest(train_samples, train_labels)\n test_samples = torch.as_tensor(samples[num_train:])\n test_labels = torch.as_tensor(labels[test_samples])\n\n model = NodeClassifier(embeddings, num_class, normalization=normalization)\n\n train_samples = train_samples.cuda(gpu)\n train_labels = train_labels.cuda(gpu)\n test_samples = test_samples.cuda(gpu)\n test_labels = test_labels.cuda(gpu)\n model = model.cuda(gpu)\n\n # train\n optimizer = optim.SGD(model.parameters(), lr=1, weight_decay=2e-5, momentum=0.9)\n best_loss = float(\"inf\")\n best_epoch = -1\n for epoch in range(100000):\n optimizer.zero_grad()\n logits = model(train_samples)\n loss = F.binary_cross_entropy_with_logits(logits, train_labels.float())\n loss.backward()\n optimizer.step()\n\n loss = loss.item()\n if loss < best_loss:\n best_epoch = epoch\n best_loss = loss\n if epoch == best_epoch + patience:\n break\n\n # test\n logits = model(test_samples)\n num_labels = test_labels.sum(dim=1, keepdim=True)\n sorted, _ = logits.sort(dim=1, descending=True)\n thresholds = sorted.gather(dim=1, index=num_labels-1)\n predictions = (logits >= thresholds).int()\n # compute metric\n num_TP_per_class = (predictions & test_labels).sum(dim=0).float()\n num_T_per_class = test_labels.sum(dim=0).float()\n num_P_per_class = predictions.sum(dim=0).float()\n macro_f1s.append((2 * num_TP_per_class / (num_T_per_class + num_P_per_class)).mean().item())\n num_TP = (predictions & test_labels).sum().float()\n num_T = test_labels.sum().float()\n num_P = predictions.sum().float()\n micro_f1s.append((2 * num_TP / (num_T + num_P)).item())\n\n return {\n \"macro-F1@%g%%\" % (portion * 100): np.mean(macro_f1s),\n \"micro-F1@%g%%\" % (portion * 100): np.mean(micro_f1s)\n }\n\n\nclass WordGraphApplication(ApplicationMixin):\n \"\"\"\n Word node embedding application.\n\n Given a corpus, it embeds each word into a continuous vector representation.\n The learned embeddings can be used for natural language processing tasks.\n This can be viewed as a variant of the word2vec algorithm, with random walk augmentation support.\n The similarity between node embeddings can be measured by cosine distance.\n\n Supported Models:\n - LINE (`LINE: Large-scale Information Network Embedding`_)\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): 
type of graph indexes\n\n See also:\n :class:`WordGraph <graphvite.graph.WordGraph>`,\n :class:`GraphSolver <graphvite.solver.GraphSolver>`\n \"\"\"\n def get_graph(self, **kwargs):\n return graph.WordGraph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.GraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n mapping = self.get_mapping(self.graph.id2name, model.graph.name2id)\n self.solver.vertex_embeddings[:] = model.solver.vertex_embeddings[mapping]\n self.solver.context_embeddings[:] = model.solver.context_embeddings[mapping]\n\n\nclass KnowledgeGraphApplication(ApplicationMixin):\n \"\"\"\n Knowledge graph embedding application.\n\n Given a knowledge graph, it embeds each entity and relation into a continuous vector representation respectively.\n The learned embeddings can be used for analysis of knowledge graphs.\n e.g. **entity prediction**, **link prediction**.\n The likelihood of edges can be predicted by computing the score function over embeddings of triplets.\n\n Supported Models:\n - TransE (`Translating Embeddings for Modeling Multi-relational Data`_)\n - DistMult (`Embedding Entities and Relations for Learning and Inference in Knowledge Bases`_)\n - ComplEx (`Complex Embeddings for Simple Link Prediction`_)\n - SimplE (`SimplE Embedding for Link Prediction in Knowledge Graphs`_)\n - RotatE (`RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space`_)\n\n .. _Translating Embeddings for Modeling Multi-relational Data:\n http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf\n .. _Embedding Entities and Relations for Learning and Inference in Knowledge Bases:\n https://arxiv.org/pdf/1412.6575.pdf\n .. _Complex Embeddings for Simple Link Prediction:\n http://proceedings.mlr.press/v48/trouillon16.pdf\n .. _SimplE Embedding for Link Prediction in Knowledge Graphs:\n https://papers.nips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf\n .. _RotatE\\: Knowledge Graph Embedding by Relational Rotation in Complex Space:\n https://arxiv.org/pdf/1902.10197.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n Note:\n The implementation of TransE, DistMult and ComplEx, SimplE are slightly different from their original papers.\n The loss function and the regularization term generally follow `this repo`_.\n Self-adversarial negative sampling is also adopted in these models like RotatE.\n\n .. 
_this repo: https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding\n\n See also:\n :class:`KnowledgeGraph <graphvite.graph.KnowledgeGraph>`,\n :class:`KnowledgeGraphSolver <graphvite.solver.KnowledgeGraphSolver>`\n \"\"\"\n\n SAMPLE_PER_DIMENSION = 7\n MEMORY_SCALE_FACTOR = 1.5\n\n def get_graph(self, **kwargs):\n return graph.KnowledgeGraph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.KnowledgeGraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n entity_mapping = self.get_mapping(self.graph.id2entity, model.graph.entity2id)\n relation_mapping = self.get_mapping(self.graph.id2relation, model.graph.relation2id)\n self.solver.entity_embeddings[:] = model.solver.entity_embeddings[entity_mapping]\n self.solver.relation_embeddings[:] = model.solver.relation_embeddings[relation_mapping]\n\n def entity_prediction(self, H=None, R=None, T=None, file_name=None, save_file=None, target=\"tail\", k=10,\n backend=cfg.backend):\n \"\"\"\n Predict the distribution of missing entity or relation for triplets.\n\n Parameters:\n H (list of str, optional): names of head entities\n R (list of str, optional): names of relations\n T (list of str, optional): names of tail entities\n file_name (str, optional): file of triplets (e.g. validation set)\n save_file (str, optional): ``txt`` or ``pkl`` file to save predictions\n k (int, optional): top-k recalls will be returned\n target (str, optional): 'head' or 'tail'\n backend (str, optional): 'graphvite' or 'torch'\n\n Return:\n list of list of tuple: top-k recalls for each triplet, if save file is not provided\n \"\"\"\n def torch_predict():\n import torch\n\n entity_embeddings = SharedNDArray(self.solver.entity_embeddings)\n relation_embeddings = SharedNDArray(self.solver.relation_embeddings)\n\n num_gpu = len(self.gpus) if self.gpus else torch.cuda.device_count()\n work_load = (num_sample + num_gpu - 1) // num_gpu\n settings = []\n\n for i in range(num_gpu):\n work_H = H[work_load * i: work_load * (i+1)]\n work_R = R[work_load * i: work_load * (i+1)]\n work_T = T[work_load * i: work_load * (i+1)]\n settings.append((entity_embeddings, relation_embeddings, work_H, work_R, work_T,\n None, None, target, k, self.solver.model, self.solver.margin))\n\n results = self.gpu_map(triplet_prediction, settings)\n return sum(results, [])\n\n def graphvite_predict():\n num_entity = len(entity2id)\n batch_size = self.get_batch_size(num_entity)\n recalls = []\n\n for i in range(0, num_sample, batch_size):\n batch_h = H[i: i + batch_size]\n batch_r = R[i: i + batch_size]\n batch_t = T[i: i + batch_size]\n batch = self.generate_one_vs_rest(batch_h, batch_r, batch_t, num_entity, target)\n\n scores = self.solver.predict(batch)\n scores = scores.reshape(-1, num_entity)\n indexes = np.argpartition(scores, num_entity - k, axis=-1)\n for index, score in zip(indexes, scores):\n index = index[-k:]\n score = score[index]\n order = np.argsort(score)[::-1]\n recall = list(zip(index[order], score[order]))\n recalls.append(recall)\n\n return recalls\n\n assert_in([\"head\", \"tail\"], target=target)\n assert_in([\"graphvite\", \"torch\"], backend=backend)\n\n if backend == \"torch\":\n self.solver.clear()\n\n if file_name:\n if not (H is None and R is None and T is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same 
time\")\n H = []\n R = []\n T = []\n with open(file_name, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n elif len(tokens) == 2:\n if target == \"head\":\n r, t = tokens\n h = None\n else:\n h, r = tokens\n t = None\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, file_name))\n H.append(h)\n R.append(r)\n T.append(t)\n if (H is None and T is None) or R is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n if H is None:\n target = \"head\"\n if T is None:\n target = \"tail\"\n\n entity2id = self.graph.entity2id\n relation2id = self.graph.relation2id\n num_sample = len(R)\n new_H = np.zeros(num_sample, dtype=np.uint32)\n new_T = np.zeros(num_sample, dtype=np.uint32)\n if target == \"head\":\n new_R, new_T = self.name_map((relation2id, entity2id), (R, T))\n if target == \"tail\":\n new_H, new_R = self.name_map((entity2id, relation2id), (H, R))\n assert len(new_R) == len(R), \"Can't recognize some entities or relations\"\n H = np.asarray(new_H, dtype=np.uint32)\n R = np.asarray(new_R, dtype=np.uint32)\n T = np.asarray(new_T, dtype=np.uint32)\n\n if backend == \"graphvite\":\n recalls = graphvite_predict()\n else:\n recalls = torch_predict()\n\n id2entity = self.graph.id2entity\n new_recalls = []\n for recall in recalls:\n new_recall = [(id2entity[e], s) for e, s in recall]\n new_recalls.append(new_recall)\n recalls = new_recalls\n\n if save_file:\n logger.warning(\"save entity predictions to `%s`\" % save_file)\n extension = os.path.splitext(save_file)[1]\n if extension == \".txt\":\n with open(save_file, \"w\") as fout:\n for recall in recalls:\n tokens = [\"%s: %g\" % x for x in recall]\n fout.write(\"%s\\n\" % \"\\t\".join(tokens))\n elif extension == \".pkl\":\n with open(save_file, \"wb\") as fout:\n pickle.dump(recalls, fout, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n raise ValueError(\"Unknown file extension `%s`\" % extension)\n else:\n return recalls\n\n def link_prediction(self, H=None, R=None, T=None, filter_H=None, filter_R=None, filter_T=None, file_name=None,\n filter_files=None, target=\"both\", fast_mode=None, backend=cfg.backend):\n \"\"\"\n Evaluate knowledge graph embeddings on link prediction task.\n\n Parameters:\n H (list of str, optional): names of head entities\n R (list of str, optional): names of relations\n T (list of str, optional): names of tail entities\n file_name (str, optional): file of triplets (e.g. validation set)\n filter_H (list of str, optional): names of head entities to filter out\n filter_R (list of str, optional): names of relations to filter out\n filter_T (list of str, optional): names of tail entities to filter out\n filter_files (str, optional): files of triplets to filter out (e.g. 
training / validation / test set)\n target (str, optional): 'head', 'tail' or 'both'\n fast_mode (int, optional): if specified, only that number of samples will be evaluated\n backend (str, optional): 'graphvite' or 'torch'\n\n Returns:\n dict: MR, MRR, HITS\\@1, HITS\\@3 & HITS\\@10 of link prediction\n \"\"\"\n def torch_predict():\n import torch\n\n entity_embeddings = SharedNDArray(self.solver.entity_embeddings)\n relation_embeddings = SharedNDArray(self.solver.relation_embeddings)\n\n num_gpu = len(self.gpus) if self.gpus else torch.cuda.device_count()\n work_load = (fast_mode + num_gpu - 1) // num_gpu\n settings = []\n\n for i in range(num_gpu):\n work_H = H[work_load * i: work_load * (i+1)]\n work_R = R[work_load * i: work_load * (i+1)]\n work_T = T[work_load * i: work_load * (i+1)]\n settings.append((entity_embeddings, relation_embeddings, work_H, work_R, work_T,\n exclude_H, exclude_T, target, None, self.solver.model, self.solver.margin))\n\n results = self.gpu_map(triplet_prediction, settings)\n return np.concatenate(results)\n\n def graphvite_predict():\n num_entity = len(entity2id)\n if target == \"both\":\n batch_size = self.get_batch_size(num_entity * 2)\n else:\n batch_size = self.get_batch_size(num_entity)\n rankings = []\n\n for i in range(0, fast_mode, batch_size):\n batch_h = H[i: i + batch_size]\n batch_r = R[i: i + batch_size]\n batch_t = T[i: i + batch_size]\n batch = self.generate_one_vs_rest(batch_h, batch_r, batch_t, num_entity, target)\n masks = self.generate_mask(batch_h, batch_r, batch_t, exclude_H, exclude_T, num_entity, target)\n if target == \"head\":\n positives = batch_h\n if target == \"tail\":\n positives = batch_t\n if target == \"both\":\n positives = np.asarray([batch_h, batch_t]).transpose()\n positives = positives.ravel()\n\n scores = self.solver.predict(batch)\n scores = scores.reshape(-1, num_entity)\n truths = scores[range(len(positives)), positives]\n ranking = np.sum((scores >= truths[:, np.newaxis]) * masks, axis=1)\n rankings.append(ranking)\n\n return np.concatenate(rankings)\n\n assert_in([\"head\", \"tail\", \"both\"], target=target)\n assert_in([\"graphvite\", \"torch\"], backend=backend)\n\n if backend == \"torch\":\n self.solver.clear()\n\n if file_name:\n if not (H is None and R is None and T is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n H = []\n R = []\n T = []\n with open(file_name, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, file_name))\n H.append(h)\n R.append(r)\n T.append(t)\n if H is None or R is None or T is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n\n if filter_files:\n if not (filter_H is None and filter_R is None and filter_T is None):\n raise ValueError(\"Filter data and file should not be provided at the same time\")\n filter_H = []\n filter_R = []\n filter_T = []\n for filter_file in filter_files:\n with open(filter_file, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, filter_file))\n filter_H.append(h)\n filter_R.append(r)\n filter_T.append(t)\n elif filter_H is None:\n filter_H = []\n filter_R = []\n filter_T = []\n\n entity2id = 
self.graph.entity2id\n relation2id = self.graph.relation2id\n new_H, new_R, new_T = self.name_map((entity2id, relation2id, entity2id), (H, R, T))\n logger.info(\"effective triplets: %d / %d\" % (len(new_H), len(H)))\n H = np.asarray(new_H, dtype=np.uint32)\n R = np.asarray(new_R, dtype=np.uint32)\n T = np.asarray(new_T, dtype=np.uint32)\n new_H, new_R, new_T = self.name_map((entity2id, relation2id, entity2id), (filter_H, filter_R, filter_T))\n logger.info(\"effective filter triplets: %d / %d\" % (len(new_H), len(filter_H)))\n filter_H = np.asarray(new_H, dtype=np.uint32)\n filter_R = np.asarray(new_R, dtype=np.uint32)\n filter_T = np.asarray(new_T, dtype=np.uint32)\n\n exclude_H = defaultdict(set)\n exclude_T = defaultdict(set)\n for h, r, t in zip(filter_H, filter_R, filter_T):\n exclude_H[(t, r)].add(h)\n exclude_T[(h, r)].add(t)\n\n num_sample = len(H)\n fast_mode = fast_mode or num_sample\n indexes = np.random.permutation(num_sample)[:fast_mode]\n H = H[indexes]\n R = R[indexes]\n T = T[indexes]\n\n if backend == \"graphvite\":\n rankings = graphvite_predict()\n elif backend == \"torch\":\n rankings = torch_predict()\n\n return {\n \"MR\": np.mean(rankings),\n \"MRR\": np.mean(1 / rankings),\n \"HITS@1\": np.mean(rankings <= 1),\n \"HITS@3\": np.mean(rankings <= 3),\n \"HITS@10\": np.mean(rankings <= 10)\n }\n\n def get_batch_size(self, sample_size):\n import psutil\n memory = psutil.virtual_memory()\n\n batch_size = int(self.SAMPLE_PER_DIMENSION * self.dim * self.graph.num_vertex\n * self.solver.num_partition / self.solver.num_worker / sample_size)\n # 2 triplet (Python, C++ sample pool) + 1 sample index\n mem_per_sample = sample_size * (2 * 3 * np.uint32().itemsize + 1 * np.uint64().itemsize)\n max_batch_size = int(memory.available / mem_per_sample / self.MEMORY_SCALE_FACTOR)\n if max_batch_size < batch_size:\n logger.info(\"Memory is not enough for optimal prediction batch size. 
\"\n \"Use the maximal possible size instead.\")\n batch_size = max_batch_size\n return batch_size\n\n def generate_one_vs_rest(self, H, R, T, num_entity, target=\"both\"):\n one = np.ones(num_entity, dtype=np.bool)\n all = np.arange(num_entity, dtype=np.uint32)\n batches = []\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n batch = np.asarray([all, t * one, r * one]).transpose()\n batches.append(batch)\n if target == \"tail\" or target == \"both\":\n batch = np.asarray([h * one, all, r * one]).transpose()\n batches.append(batch)\n\n batches = np.concatenate(batches)\n return batches\n\n def generate_mask(self, H, R, T, exclude_H, exclude_T, num_entity, target=\"both\"):\n one = np.ones(num_entity, dtype=np.bool)\n masks = []\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n mask = one.copy()\n mask[list(exclude_H[(t, r)])] = 0\n mask[h] = 1\n masks.append(mask)\n if target == \"tail\" or target == \"both\":\n mask = one.copy()\n mask[list(exclude_T[(h, r)])] = 0\n mask[t] = 1\n masks.append(mask)\n\n masks = np.asarray(masks)\n return masks\n\n\ndef triplet_prediction(args):\n import torch\n from .network import LinkPredictor\n torch.set_grad_enabled(False)\n\n entity_embeddings, relation_embeddings, H, R, T, \\\n exclude_H, exclude_T, target, k, score_function, margin, device = args\n entity_embeddings = np.asarray(entity_embeddings)\n relation_embeddings = np.asarray(relation_embeddings)\n num_entity = len(entity_embeddings)\n score_function = LinkPredictor(score_function, entity_embeddings, relation_embeddings, entity_embeddings,\n margin=margin)\n\n if device != \"cpu\":\n try:\n score_function = score_function.to(device)\n except RuntimeError:\n logger.info(\"Model is too large for GPU evaluation with PyTorch. 
Switch to CPU evaluation.\")\n device = \"cpu\"\n if device == \"cpu\":\n del score_function\n torch.cuda.empty_cache()\n score_function = LinkPredictor(score_function, entity_embeddings, relation_embeddings, entity_embeddings,\n margin=margin)\n\n one = torch.ones(num_entity, dtype=torch.long, device=device)\n all = torch.arange(num_entity, dtype=torch.long, device=device)\n results = [] # rankings or top-k recalls\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n batch_h = all\n batch_r = r * one\n batch_t = t * one\n score = score_function(batch_h, batch_r, batch_t)\n if k: # top-k recalls\n score, index = torch.topk(score, k)\n score = score.cpu().numpy()\n index = index.cpu().numpy()\n recall = list(zip(index, score))\n results.append(recall)\n else: # ranking\n mask = torch.ones(num_entity, dtype=torch.uint8, device=device)\n index = torch.tensor(list(exclude_H[(t, r)]), dtype=torch.long, device=device)\n mask[index] = 0\n mask[h] = 1\n ranking = torch.sum((score >= score[h]) * mask).item()\n results.append(ranking)\n\n if target == \"tail\" or target == \"both\":\n batch_h = h * one\n batch_r = r * one\n batch_t = all\n score = score_function(batch_h, batch_r, batch_t)\n if k: # top-k recalls\n score, index = torch.topk(score, k)\n score = score.cpu().numpy()\n index = index.cpu().numpy()\n recall = list(zip(index, score))\n results.append(recall)\n else: # ranking\n mask = torch.ones(num_entity, dtype=torch.uint8, device=device)\n index = torch.tensor(list(exclude_T[(h, r)]), dtype=torch.long, device=device)\n mask[index] = 0\n mask[t] = 1\n ranking = torch.sum((score >= score[t]) * mask).item()\n results.append(ranking)\n\n if not k: # ranking\n results = np.asarray(results)\n return results\n\n\nclass VisualizationApplication(ApplicationMixin):\n \"\"\"\n Graph & high-dimensional data visualization.\n \n Given a graph or high-dimensional vectors, it maps each node to 2D or 3D coordinates to\n faciliate visualization. The learned coordinates preserve most local similarity information\n of the original input, and may shed some light on the structure of the graph or the\n high-dimensional space.\n\n Supported Models:\n - LargeVis (`Visualizing Large-scale and High-dimensional Data`_)\n\n .. 
_Visualizing Large-scale and High-dimensional Data: https://arxiv.org/pdf/1602.00370.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n See also:\n :class:`Graph <graphvite.graph.Graph>`,\n :class:`KNNGraph <graphvite.graph.KNNGraph>`,\n :class:`VisualizationSolver <graphvite.solver.VisualizationSolver>`\n \"\"\"\n\n OUTLIER_THRESHOLD = 5\n\n def get_graph(self, **kwargs):\n if \"file_name\" in kwargs or \"edge_list\" in kwargs:\n return graph.Graph(self.index_type)\n else:\n return graph.KNNGraph(self.index_type, self.gpus, self.cpu_per_gpu)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n\n return solver.VisualizationSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n if self.solver.coordinates.shape != model.solver.coordinates.shape:\n raise ValueError(\"Expect coordinates with shape %s, but %s is found\" %\n (self.solver.coordinates.shape, model.solver.coordinates.shape))\n self.solver.coordinates[:] = model.solver.coordinates\n\n def visualization(self, Y=None, file_name=None, save_file=None, figure_size=10, scale=2):\n \"\"\"\n Visualize learned 2D or 3D coordinates.\n\n Parameters:\n Y (list of str, optional): labels of vectors\n file_name (str, optional): file of labels\n save_file (str, optional): ``png`` or ``pdf`` file to save visualization,\n if not provided, show the figure in window\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n \"\"\"\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if not (dim == 2 or dim == 3):\n raise ValueError(\"Can't visualize %dD data\" % dim)\n\n if file_name:\n if not (Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n y, = tokens\n Y.append(y)\n elif Y is None:\n Y = [\"unknown\"] * self.graph.num_vertex\n Y = np.asarray(Y)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n Y = Y[indexes]\n classes = sorted(np.unique(Y))\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n if dim == 2:\n ax = fig.gca()\n elif dim == 3:\n from mpl_toolkits.mplot3d import Axes3D\n ax = fig.gca(projection=\"3d\")\n for cls in classes:\n indexes, = np.where(Y == cls)\n ax.scatter(*coordinates[indexes].T, s=scale)\n ax.set_xticks([])\n ax.set_yticks([])\n if dim == 3:\n ax.set_zticks([])\n if len(classes) > 1:\n ax.legend(classes, markerscale=6, loc=\"upper right\")\n if save_file:\n logger.warning(\"save visualization to `%s`\" % save_file)\n plt.savefig(save_file)\n else:\n plt.show()\n\n return {}\n\n def hierarchy(self, HY=None, file_name=None, target=None, save_file=None, figure_size=10, scale=2, duration=3):\n \"\"\"\n Visualize learned 2D coordinates 
with hierarchical labels.\n\n Parameters:\n HY (list of list of str, optional): hierarchical labels of vectors\n file_name (str, optional): file of hierarchical labels\n target (str): target class\n save_file (str): ``gif`` file to save visualization\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n duration (float, optional): duration of each frame in seconds\n \"\"\"\n import imageio\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if dim != 2:\n raise ValuerError(\"Can't visualize the hierarchy of %dD data\" % dim)\n\n if file_name:\n if not (HY is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n HY = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) > 0:\n HY.append(tokens)\n elif HY is None:\n raise ValueError(\"No label is provided for hierarchy\")\n HY = np.asarray(HY)\n min_type = \"S%d\" % len(\"else\")\n if HY.dtype < min_type:\n HY = HY.astype(min_type)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n HY = HY[indexes].T\n\n if target is None:\n raise ValueError(\"Target class is not provided\")\n for depth, Y in enumerate(HY):\n indexes, = np.where(Y == target)\n if len(indexes) > 0:\n sample = indexes[0]\n break\n else:\n raise ValueError(\"Can't find target `%s` in the hierarchy\" % target)\n\n settings = [(coordinates, None, HY[0], sample, figure_size, scale, 0)]\n for i in range(depth):\n settings.append((coordinates, HY[i], HY[i + 1], sample, figure_size, scale, i+1))\n pool = multiprocessing.Pool(self.solver.num_worker + self.solver.num_sampler)\n frames = pool.map(render_hierarchy, settings)\n logger.warning(\"save hierarchy to `%s`\" % save_file)\n imageio.mimsave(save_file, frames, fps=1 / duration, subrectangles=True)\n\n return {}\n\n def animation(self, Y=None, file_name=None, save_file=None, figure_size=5, scale=1, elevation=30, num_frame=700):\n \"\"\"\n Rotate learn 3D coordinates as an animation.\n\n Parameters:\n Y (list of str, optional): labels of vectors\n file_name (str, optional): file of labels\n save_file (str): ``gif`` file to save visualization\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n elevation (float, optional): elevation angle\n num_frame (int, optional): number of frames\n \"\"\"\n import imageio\n from matplotlib import pyplot as plt, animation\n from mpl_toolkits.mplot3d import Axes3D\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if dim != 3:\n raise ValueError(\"Can't animate %dD data\" % dim)\n\n if file_name:\n if not (Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n y, = tokens\n Y.append(y)\n elif Y is None:\n Y = [\"unknown\"] * self.graph.num_vertex\n Y = np.asarray(Y)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = 
np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n Y = Y[indexes]\n\n settings = []\n degrees = np.linspace(0, 360, num_frame, endpoint=False)\n for degree in degrees:\n settings.append((coordinates, Y, degree, figure_size, scale, elevation))\n pool = multiprocessing.Pool(self.solver.num_worker + self.solver.num_sampler)\n frames = pool.map(render_animation, settings)\n logger.warning(\"save animation to `%s`\" % save_file)\n imageio.mimsave(save_file, frames, fps=num_frame / 70, subrectangles=True) # 70 seconds\n\n return {}\n\n\ndef render_hierarchy(args):\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\")\n\n coordinates, H, Y, sample, figure_size, scale, depth = args\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n ax = fig.gca()\n if H is not None:\n for i in range(len(Y)):\n if H[i] != H[sample]:\n Y[i] = \"else\"\n classes = set(Y)\n classes.discard(Y[sample])\n classes.discard(\"else\")\n classes = [Y[sample]] + sorted(classes) + [\"else\"]\n for i, cls in enumerate(classes):\n indexes, = np.where(Y == cls)\n color = \"lightgrey\" if cls == \"else\" else None\n ax.scatter(*coordinates[indexes].T, s=2, c=color, zorder=-i)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.legend(classes, markerscale=6, loc=\"upper right\")\n fig.canvas.draw()\n frame = np.asarray(fig.canvas.renderer._renderer)\n\n return frame\n\n\ndef render_animation(args):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.switch_backend(\"agg\")\n\n coordinates, Y, degree, figure_size, scale, elevation = args\n classes = sorted(np.unique(Y))\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n ax = fig.gca(projection=\"3d\")\n for cls in classes:\n indexes, = np.where(Y == cls)\n ax.scatter(*coordinates[indexes].T, s=scale)\n ax.view_init(elev=elevation, azim=degree)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if len(classes) > 1:\n ax.legend(classes, markerscale=6)\n fig.canvas.draw()\n frame = np.asarray(fig.canvas.renderer._renderer)\n\n return frame\n\n\nclass Application(object):\n \"\"\"\n Application(type, *args, **kwargs)\n Create an application instance of any type.\n\n Parameters:\n type (str): application type,\n can be 'graph', 'word graph', 'knowledge graph' or 'visualization'\n \"\"\"\n\n application = {\n \"graph\": GraphApplication,\n \"word graph\": WordGraphApplication,\n \"knowledge graph\": KnowledgeGraphApplication,\n \"visualization\": VisualizationApplication\n }\n\n def __new__(cls, type, *args, **kwargs):\n if type in cls.application:\n return cls.application[type](*args, **kwargs)\n else:\n raise ValueError(\"Unknown application `%s`\" % type)\n\n__all__ = [\n \"Application\",\n \"GraphApplication\", \"WordGraphApplication\", \"KnowledgeGraphApplication\", \"VisualizationApplication\"\n]"
] | [
[
"numpy.ones",
"numpy.sum",
"torch.as_tensor",
"torch.argsort",
"numpy.ones_like",
"numpy.asarray",
"numpy.argsort",
"torch.cumsum",
"torch.cuda.empty_cache",
"torch.set_grad_enabled",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.argpartition",
"numpy.abs",
"torch.cuda.device_count",
"numpy.uint32",
"torch.arange",
"numpy.where",
"numpy.linspace",
"numpy.unique",
"numpy.mean",
"torch.ones",
"numpy.zeros",
"numpy.uint64",
"numpy.arange",
"numpy.all",
"numpy.std",
"torch.sum",
"numpy.random.permutation",
"matplotlib.pyplot.switch_backend",
"torch.topk",
"matplotlib.pyplot.show",
"numpy.concatenate"
]
] |
zhoudoufu/lingvo | [
"bd0f89809942fd0508ff43bd4b6bca1b598220cb",
"bd0f89809942fd0508ff43bd4b6bca1b598220cb"
] | [
"lingvo/core/test_utils_test.py",
"lingvo/tasks/asr/model_test.py"
] | [
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for test_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom lingvo.core import test_utils\n\n\nclass TestUtilsTest(test_utils.TestCase):\n\n def testReplaceGoldenSingleFloat(self):\n old_line = ' CompareToGoldenSingleFloat(self, 1.489712, vs[0])\\n'\n expected = ' CompareToGoldenSingleFloat(self, 1.000000, vs[0])\\n'\n actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0)\n self.assertEqual(expected, actual)\n\n old_line = ('test_utils.CompareToGoldenSingleFloat(self, -2.e-3, vs[0])'\n ' # pylint: disable=line-too-long\\n')\n expected = ('test_utils.CompareToGoldenSingleFloat(self, 1.000000, vs[0])'\n ' # pylint: disable=line-too-long\\n')\n actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0)\n self.assertEqual(expected, actual)\n\n def CompareToGoldenSingleFloat(self, unused_v1, v2):\n return test_utils.ReplaceGoldenStackAnalysis(v2)\n\n def testReplaceGoldenStackAnalysis(self):\n v2 = 2.0\n result = TestUtilsTest.CompareToGoldenSingleFloat(self, 1.0, v2)\n self.assertTrue(result[0].endswith('test_utils_test.py'))\n old_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat('\n 'self, 1.0, v2)\\n')\n new_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat('\n 'self, 2.000000, v2)\\n')\n self.assertEqual(old_line, result[2])\n self.assertEqual(new_line, result[3])\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Asr Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\n\nimport numpy as np\nimport six\nfrom six.moves import range\n\nimport tensorflow as tf\n\nfrom lingvo.core import base_layer\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.core import summary_utils\nfrom lingvo.core import test_helper\nfrom lingvo.core import test_utils\nfrom lingvo.tasks.asr import decoder\nfrom lingvo.tasks.asr import input_generator\nfrom lingvo.tasks.asr import model\nfrom lingvo.tasks.asr import model_test_input_generator as tig\n\n\nclass DecoderForTest(decoder.AsrDecoder):\n \"\"\"Unit test class for AsrDecoder with functional.for based unrolling.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DecoderForTest, cls).Params()\n p.use_while_loop_based_unrolling = False\n return p\n\n\nclass AsrModelTest(test_utils.TestCase):\n\n def _testParams(self):\n input_shape = [2, 16, 8, 3]\n p = model.AsrModel.Params()\n p.decoder.target_seq_len = 5\n p.encoder.input_shape = input_shape\n p.input = tig.TestInputGenerator.Params()\n p.input.target_max_length = 5\n p.input.source_shape = input_shape\n p.input.target_shape = [2, 5]\n p.name = 'test_mdl'\n return p\n\n def testMakeDecoderTheta(self):\n # Test that decoder theta returns a copy of theta.decoder without changes.\n with self.session(use_gpu=False, graph=tf.Graph()):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None)\n mdl.BProp()\n self.assertEqual(decoder_theta, mdl.theta.decoder)\n\n def testFProp(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())\n\n actual_var_names = [_.name for _ in tf.all_variables()]\n print('all vars \\n', '\\n'.join(actual_var_names))\n expected_var_names = [\n 'global_step:0', 'test_mdl/enc/conv_L0/w/var:0',\n 'test_mdl/enc/conv_L0/beta/var:0', 'test_mdl/enc/conv_L0/gamma/var:0',\n 'test_mdl/enc/conv_L0/moving_mean/var:0',\n 'test_mdl/enc/conv_L0/moving_variance/var:0',\n 'test_mdl/enc/conv_L1/w/var:0', 'test_mdl/enc/conv_L1/beta/var:0',\n 'test_mdl/enc/conv_L1/gamma/var:0',\n 'test_mdl/enc/conv_L1/moving_mean/var:0',\n 'test_mdl/enc/conv_L1/moving_variance/var:0',\n 'test_mdl/enc/f_conv_lstm_0/wm/var:0',\n 'test_mdl/enc/f_conv_lstm_0/b/var:0',\n 'test_mdl/enc/b_conv_lstm_0/wm/var:0',\n 'test_mdl/enc/b_conv_lstm_0/b/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/w/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/beta/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/gamma/var:0',\n 
'test_mdl/enc/conv_lstm_cnn_0/moving_mean/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L0/wm/var:0', 'test_mdl/enc/fwd_rnn_L0/b/var:0',\n 'test_mdl/enc/bak_rnn_L0/wm/var:0', 'test_mdl/enc/bak_rnn_L0/b/var:0',\n 'test_mdl/enc/proj_L0/w/var:0', 'test_mdl/enc/proj_L0/beta/var:0',\n 'test_mdl/enc/proj_L0/gamma/var:0',\n 'test_mdl/enc/proj_L0/moving_mean/var:0',\n 'test_mdl/enc/proj_L0/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L1/wm/var:0', 'test_mdl/enc/fwd_rnn_L1/b/var:0',\n 'test_mdl/enc/bak_rnn_L1/wm/var:0', 'test_mdl/enc/bak_rnn_L1/b/var:0',\n 'test_mdl/enc/proj_L1/w/var:0', 'test_mdl/enc/proj_L1/beta/var:0',\n 'test_mdl/enc/proj_L1/gamma/var:0',\n 'test_mdl/enc/proj_L1/moving_mean/var:0',\n 'test_mdl/enc/proj_L1/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L2/wm/var:0', 'test_mdl/enc/fwd_rnn_L2/b/var:0',\n 'test_mdl/enc/bak_rnn_L2/wm/var:0', 'test_mdl/enc/bak_rnn_L2/b/var:0',\n 'test_mdl/dec/emb/var_0/var:0', 'test_mdl/dec/rnn_cell/wm/var:0',\n 'test_mdl/dec/rnn_cell/b/var:0',\n 'test_mdl/dec/atten/source_var/var:0',\n 'test_mdl/dec/atten/query_var/var:0',\n 'test_mdl/dec/atten/hidden_var/var:0',\n 'test_mdl/dec/softmax/weight_0/var:0',\n 'test_mdl/dec/softmax/bias_0/var:0'\n ]\n self.assertEqual(sorted(expected_var_names), sorted(actual_var_names))\n\n def testDecode(self):\n with self.session(use_gpu=False) as sess:\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n input_batch = mdl.input_generator.GetPreprocessedInputBatch()\n dec_out_dict = mdl.Decode(input_batch)\n tf.global_variables_initializer().run()\n dec_out = sess.run(dec_out_dict)\n print('dec_out', dec_out)\n metrics_dict = mdl.CreateDecoderMetrics()\n key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)\n\n self.assertEqual(1.0, metrics_dict['wer'].value)\n self.assertEqual(1.0, metrics_dict['norm_wer'].value)\n self.assertEqual(1.0, metrics_dict['ter'].value)\n self.assertEqual(0, len(key_value_pairs))\n\n def testPostProcessDecodeOut(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 2\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a'],\n 'topk_decoded': [['a b c d', 'a b c d'], ['wrong', '']],\n 'topk_scores': [[1.0, 0.9], [1.0, 0.9]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]],\n 'topk_lens': [2, 4, 4, 2],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 0, 1]],\n 'norm_wer_errors': [[0, 0], [1, 1]],\n 'norm_wer_words': [[4, 4], [1, 1]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n key_value_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(4 + 1, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(4 + 1, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(4, metrics_dict['ter'].total_value)\n self.assertEqual(6, metrics_dict['ter'].total_weight)\n self.assertEqual(2, metrics_dict['num_samples_in_batch'].total_value)\n self.assertEqual(1.0, metrics_dict['num_samples_in_batch'].total_weight)\n self.assertEqual((4 / 5 * 3 / 3 * 2 / 2 * 1 / 1)**(1 / 4),\n metrics_dict['corpus_bleu'].value)\n self.assertEqual((0 + 1) / 2, metrics_dict['sacc'].value)\n self.assertEqual((0 + 1) / (4 + 1), metrics_dict['oracle_norm_wer'].value)\n self.assertEqual(0, len(key_value_pairs))\n\n def 
testPostProcessDecodeOutFiltersEpsilonTokensForWER(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a b c'],\n 'topk_decoded': [['a b<epsilon>c d'], ['<epsilon>a b<epsilon>']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[0], [1]],\n 'norm_wer_words': [[4], [3]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(7, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(7, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(0, len(kv_pairs))\n\n def testPostProcessDecodeOutFiltersNoiseTokensForWER(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a b c'],\n 'topk_decoded': [['a b <noise> c d'], ['<noise> a b <noise>']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[0], [1]],\n 'norm_wer_words': [[4], [3]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(7, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(7, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(0, len(kv_pairs))\n\n def testPostProcessDecodeOutHandlesEmptyRef(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['', 'a b c d'],\n 'topk_decoded': [['a'], ['a b c d']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[1, 1, 1, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[1], [0]],\n 'norm_wer_words': [[0], [4]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(1 + 0, metrics_dict['wer'].total_value)\n self.assertEqual(0 + 4, metrics_dict['wer'].total_weight)\n self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value)\n self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight)\n\n def testBProp(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n mdl.BProp()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())\n mdl.train_op.run()\n\n def testBPropSmoothDecay(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n p.train.lr_schedule = (\n schedule.ContinuousLearningRateSchedule.Params().Set(\n start_step=350000, half_life_steps=45000))\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n mdl.BProp()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 
4.472597, mdl.loss.eval())\n mdl.train_op.run()\n\n def testAllLayerParams(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n lps = base_layer.RecursiveFindLayerParams(mdl.params)\n l_names = sorted([p.cls.__name__ for p in lps])\n expected_layers = sorted([\n 'Adam',\n 'AdditiveAttention',\n 'AsciiTokenizer',\n 'AsrDecoder',\n 'AsrEncoder',\n 'AsrModel',\n 'BeamSearchHelper',\n 'TargetSequenceSampler',\n 'ConvLSTMCell',\n 'Conv2DLayer',\n 'Conv2DLayer',\n 'EmbeddingLayer',\n 'HighwaySkipLayer',\n 'LSTMCellSimple',\n 'LSTMCellSimple',\n 'NullContextualizer',\n 'NullFusion',\n 'NullLm',\n 'Learner',\n 'PiecewiseConstantLearningRateSchedule',\n 'ProjectionLayer',\n 'SimpleFullSoftmax',\n 'SpectrumAugmenter',\n 'StackingOverTime',\n 'TestInputGenerator',\n ])\n self.assertEqual(expected_layers, l_names)\n\n def testParamValueSumSquared(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n all_vars = tf.trainable_variables()\n py_utils.SumSquared(all_vars)\n\n def testCollectVarHistogram(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)\n summary_utils.CollectVarHistogram(var_grads)\n\n def testGradientMult(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)\n py_utils.ApplyGradMultiplier(var_grads, -1.1)\n\n def testLRDecay(self):\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n p = self._testParams()\n tp = p.train\n tp.lr_schedule.boundaries = [300000, 400000, 500000]\n tp.lr_schedule.values = [1.0, 0.1, 0.01, 0.001]\n lrs = tp.lr_schedule.Instantiate()\n steps = [299999, 300001, 399999, 400001, 499999, 500001]\n fetches = [lrs.Value(_) for _ in steps]\n values = sess.run(fetches)\n self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values)\n\n def testBatchSplit(self):\n\n def Run(num_splits):\n p = self._testParams()\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n tf.set_random_seed(93820981)\n p.is_eval = True\n p.input.cur_iter_in_seed = False\n p.input.bucket_batch_limit = [\n b * 2 / num_splits for b in p.input.bucket_batch_limit\n ]\n with cluster_factory.ForTestingWorker(gpus=num_splits):\n mdl = p.Instantiate()\n metrics = mdl.FPropDefaultTheta()[0]\n tf.global_variables_initializer().run()\n return sess.run(metrics['loss'])\n\n res1, res2 = Run(1), Run(2)\n self.assertAllClose(res1[0], res2[0])\n self.assertAllEqual(res1[1], res2[1])\n\n def testInference(self):\n\n def _CreateModelParamsForTest():\n p = model.AsrModel.Params()\n p.name = 'test_config'\n\n # Encoder params.\n ep = p.encoder\n ep.input_shape = [None, None, 80, 1]\n ep.lstm_cell_size = 16\n ep.num_lstm_layers = 2\n ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)]\n ep.conv_filter_strides = [(2, 2), (2, 2)]\n ep.num_conv_lstm_layers = 0\n # Initialize decoder params.\n dp = p.decoder\n dp.rnn_cell_dim = 16\n dp.rnn_layers = 2\n dp.source_dim = ep.lstm_cell_size * 2\n # Use functional while based unrolling.\n dp.use_while_loop_based_unrolling = False\n\n p.input = input_generator.AsrInput.Params()\n ip = p.input\n ip.frame_size = 80\n ip.append_eos_frame = True\n ip.pad_to_max_seq_length = False\n\n p.is_eval = True\n 
return p\n\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n p = _CreateModelParamsForTest()\n mdl = p.Instantiate()\n subgraphs = mdl.Inference()\n self.assertTrue('default' in subgraphs)\n\n fetches, feeds = subgraphs['default']\n self.assertTrue('wav' in feeds)\n for name in ['hypotheses', 'scores', 'src_frames', 'encoder_frames']:\n self.assertTrue(name in fetches)\n\n with open(\n test_helper.test_src_dir_path('tools/testdata/gan_or_vae.16k.wav'),\n 'rb') as f:\n wav = f.read()\n sess.run(tf.global_variables_initializer())\n fetches = sess.run(fetches, {feeds['wav']: wav})\n\n self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),\n fetches['hypotheses'].shape)\n self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),\n fetches['scores'].shape)\n self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1),\n fetches['src_frames'].shape)\n self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size),\n fetches['encoder_frames'].shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
],
[
"tensorflow.global_variables_initializer",
"tensorflow.trainable_variables",
"tensorflow.Graph",
"tensorflow.set_random_seed",
"tensorflow.all_variables",
"tensorflow.test.main"
]
] |
garaytc/reinforcement | [
"e6af258bf2ac3b45c20e0ed3d2f58ca7bc2b232f"
] | [
"tests/agents/test_agent_interface.py"
] | [
"import pytest\nimport torch\nfrom gym.spaces import Discrete, MultiDiscrete, MultiBinary, Dict, Tuple, Box\n\nfrom blobrl.agents import AgentInterface\n\n\nclass MOCKAgentInterface(AgentInterface):\n def __init__(self, observation_space, action_space, device):\n super().__init__(observation_space, action_space, device)\n\n def get_action(self, observation):\n pass\n\n def enable_exploration(self):\n pass\n\n def disable_exploration(self):\n pass\n\n def learn(self, observation, action, reward, next_observation, done) -> None:\n pass\n\n def episode_finished(self) -> None:\n pass\n\n def save(self, file_name, dire_name=\".\"):\n pass\n\n @classmethod\n def load(cls, file_name, dire_name=\".\", device=None):\n pass\n\n def __str__(self):\n return \"\"\n\n\nclass TestAgentInterface:\n __test__ = True\n\n agent = MOCKAgentInterface\n\n list_work = [\n [Discrete(3), Discrete(1)],\n [Discrete(3), Discrete(3)],\n [Discrete(10), Discrete(50)],\n [MultiDiscrete([3]), MultiDiscrete([1])],\n [MultiDiscrete([3, 3]), MultiDiscrete([3, 3])],\n [MultiDiscrete([4, 4, 4]), MultiDiscrete([50, 4, 4])],\n [MultiDiscrete([[100, 3], [3, 5]]), MultiDiscrete([[100, 3], [3, 5]])],\n [MultiDiscrete([[[100, 3], [3, 5]], [[100, 3], [3, 5]]]),\n MultiDiscrete([[[100, 3], [3, 5]], [[100, 3], [3, 5]]])],\n [MultiBinary(1), MultiBinary(1)],\n [MultiBinary(3), MultiBinary(3)],\n # [MultiBinary([3, 2]), MultiBinary([3, 2])], # Don't work yet because gym don't implemented this\n [Box(low=0, high=10, shape=[1]), Box(low=0, high=10, shape=[1])],\n [Box(low=0, high=10, shape=[2, 2]), Box(low=0, high=10, shape=[2, 2])],\n [Box(low=0, high=10, shape=[2, 2, 2]), Box(low=0, high=10, shape=[2, 2, 2])],\n\n [Tuple([Discrete(1), MultiDiscrete([1, 1])]), Tuple([Discrete(1), MultiDiscrete([1, 1])])],\n [Dict({\"first\": Discrete(1), \"second\": MultiDiscrete([1, 1])}),\n Dict({\"first\": Discrete(1), \"second\": MultiDiscrete([1, 1])})],\n\n ]\n list_fail = [\n [None, None],\n [\"dedrfe\", \"qdzq\"],\n [1215.4154, 157.48],\n [\"zdzd\", (Discrete(1))],\n [Discrete(1), \"zdzd\"],\n [\"zdzd\", (1, 4, 7)],\n [(1, 4, 7), \"zdzd\"],\n [152, 485]\n ]\n\n def test_init(self):\n for o, a in self.list_work:\n with pytest.raises(TypeError):\n self.agent(o, a, \"cpu\")\n\n for o, a in self.list_fail:\n with pytest.raises(TypeError):\n self.agent(o, a, \"cpu\")\n\n def test_device(self):\n for o, a in self.list_work:\n device = torch.device(\"cpu\")\n assert device == self.agent(o, a, device).device\n\n device = None\n assert torch.device(\"cpu\") == self.agent(o, a, device).device\n\n for device in [\"dzeqdzqd\", 1512, object(), 151.515]:\n with pytest.raises(TypeError):\n self.agent(o, a, device)\n\n if torch.cuda.is_available():\n self.agent(o, a, torch.device(\"cuda\"))\n\n def test__str__(self):\n\n pass\n"
] | [
[
"torch.cuda.is_available",
"torch.device"
]
] |
ivary43/pandas | [
"46adc5b1c2aacb312d72729af72bc0ad600917c0",
"46adc5b1c2aacb312d72729af72bc0ad600917c0"
] | [
"pandas/tests/series/test_alter_axes.py",
"pandas/tests/plotting/common.py"
] | [
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, MultiIndex, RangeIndex, Series\nimport pandas.util.testing as tm\n\n\nclass TestSeriesAlterAxes:\n\n def test_setindex(self, string_series):\n # wrong type\n msg = (r\"Index\\(\\.\\.\\.\\) must be called with a collection of some\"\n r\" kind, None was passed\")\n with pytest.raises(TypeError, match=msg):\n string_series.index = None\n\n # wrong length\n msg = (\"Length mismatch: Expected axis has 30 elements, new\"\n \" values have 29 elements\")\n with pytest.raises(ValueError, match=msg):\n string_series.index = np.arange(len(string_series) - 1)\n\n # works\n string_series.index = np.arange(len(string_series))\n assert isinstance(string_series.index, Index)\n\n # Renaming\n\n def test_rename(self, datetime_series):\n ts = datetime_series\n renamer = lambda x: x.strftime('%Y%m%d')\n renamed = ts.rename(renamer)\n assert renamed.index[0] == renamer(ts.index[0])\n\n # dict\n rename_dict = dict(zip(ts.index, renamed.index))\n renamed2 = ts.rename(rename_dict)\n tm.assert_series_equal(renamed, renamed2)\n\n # partial dict\n s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')\n renamed = s.rename({'b': 'foo', 'd': 'bar'})\n tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))\n\n # index with name\n renamer = Series(np.arange(4),\n index=Index(['a', 'b', 'c', 'd'], name='name'),\n dtype='int64')\n renamed = renamer.rename({})\n assert renamed.index.name == renamer.index.name\n\n def test_rename_by_series(self):\n s = Series(range(5), name='foo')\n renamer = Series({1: 10, 2: 20})\n result = s.rename(renamer)\n expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')\n tm.assert_series_equal(result, expected)\n\n def test_rename_set_name(self):\n s = Series(range(4), index=list('abcd'))\n for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:\n result = s.rename(name)\n assert result.name == name\n tm.assert_numpy_array_equal(result.index.values, s.index.values)\n assert s.name is None\n\n def test_rename_set_name_inplace(self):\n s = Series(range(3), index=list('abc'))\n for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:\n s.rename(name, inplace=True)\n assert s.name == name\n\n exp = np.array(['a', 'b', 'c'], dtype=np.object_)\n tm.assert_numpy_array_equal(s.index.values, exp)\n\n def test_rename_axis_supported(self):\n # Supporting axis for compatibility, detailed in GH-18589\n s = Series(range(5))\n s.rename({}, axis=0)\n s.rename({}, axis='index')\n with pytest.raises(ValueError, match='No axis named 5'):\n s.rename({}, axis=5)\n\n def test_set_name_attribute(self):\n s = Series([1, 2, 3])\n s2 = Series([1, 2, 3], name='bar')\n for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), \"\\u05D0\"]:\n s.name = name\n assert s.name == name\n s2.name = name\n assert s2.name == name\n\n def test_set_name(self):\n s = Series([1, 2, 3])\n s2 = s._set_name('foo')\n assert s2.name == 'foo'\n assert s.name is None\n assert s is not s2\n\n def test_rename_inplace(self, datetime_series):\n renamer = lambda x: x.strftime('%Y%m%d')\n expected = renamer(datetime_series.index[0])\n\n datetime_series.rename(renamer, inplace=True)\n assert datetime_series.index[0] == expected\n\n def test_set_index_makes_timeseries(self):\n idx = tm.makeDateIndex(10)\n\n s = Series(range(10))\n s.index = idx\n assert s.index.is_all_dates\n\n def test_reset_index(self):\n df = tm.makeDataFrame()[:5]\n ser = df.stack()\n ser.index.names = ['hash', 
'category']\n\n ser.name = 'value'\n df = ser.reset_index()\n assert 'value' in df\n\n df = ser.reset_index(name='value2')\n assert 'value2' in df\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n s2.reset_index(drop=True, inplace=True)\n tm.assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n rs = s.reset_index(level=1)\n assert len(rs.columns) == 2\n\n rs = s.reset_index(level=[0, 2], drop=True)\n tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))\n assert isinstance(rs, Series)\n\n def test_reset_index_name(self):\n s = Series([1, 2, 3], index=Index(range(3), name='x'))\n assert s.reset_index().index.name is None\n assert s.reset_index(drop=True).index.name is None\n\n def test_reset_index_level(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]],\n columns=['A', 'B', 'C'])\n\n for levels in ['A', 'B'], [0, 1]:\n # With MultiIndex\n s = df.set_index(['A', 'B'])['C']\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df.set_index('B'))\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df.set_index('B'))\n\n result = s.reset_index(level=levels)\n tm.assert_frame_equal(result, df)\n\n result = df.set_index(['A', 'B']).reset_index(level=levels,\n drop=True)\n tm.assert_frame_equal(result, df[['C']])\n\n with pytest.raises(KeyError, match='Level E '):\n s.reset_index(level=['A', 'E'])\n\n # With single-level Index\n s = df.set_index('A')['B']\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df[['A', 'B']])\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df[['A', 'B']])\n\n result = s.reset_index(level=levels[0], drop=True)\n tm.assert_series_equal(result, df['B'])\n\n with pytest.raises(IndexError, match='Too many levels'):\n s.reset_index(level=[0, 1, 2])\n\n # Check that .reset_index([],drop=True) doesn't fail\n result = Series(range(4)).reset_index([], drop=True)\n expected = Series(range(4))\n tm.assert_series_equal(result, expected)\n\n def test_reset_index_range(self):\n # GH 12071\n s = Series(range(2), name='A', dtype='int64')\n series_result = s.reset_index()\n assert isinstance(series_result.index, RangeIndex)\n series_expected = DataFrame([[0, 0], [1, 1]],\n columns=['index', 'A'],\n index=RangeIndex(stop=2))\n tm.assert_frame_equal(series_result, series_expected)\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n s = Series(np.arange(6), index=index)\n\n # no change, position\n result = s.reorder_levels([0, 1, 2])\n tm.assert_series_equal(s, result)\n\n # no change, labels\n result = s.reorder_levels(['L0', 'L1', 'L2'])\n tm.assert_series_equal(s, result)\n\n # rotate, position\n result = s.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = Series(np.arange(6), index=e_idx)\n tm.assert_series_equal(result, expected)\n\n def test_rename_axis_mapper(self):\n # GH 19978\n mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],\n names=['ll', 'nn'])\n s = Series([i for i in range(len(mi))], index=mi)\n\n result = s.rename_axis(index={'ll': 'foo'})\n assert 
result.index.names == ['foo', 'nn']\n\n result = s.rename_axis(index=str.upper, axis=0)\n assert result.index.names == ['LL', 'NN']\n\n result = s.rename_axis(index=['foo', 'goo'])\n assert result.index.names == ['foo', 'goo']\n\n with pytest.raises(TypeError, match='unexpected'):\n s.rename_axis(columns='wrong')\n\n def test_rename_axis_inplace(self, datetime_series):\n # GH 15704\n expected = datetime_series.rename_axis('foo')\n result = datetime_series\n no_return = result.rename_axis('foo', inplace=True)\n\n assert no_return is None\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('kwargs', [{'mapper': None}, {'index': None}, {}])\n def test_rename_axis_none(self, kwargs):\n # GH 25034\n index = Index(list('abc'), name='foo')\n df = Series([1, 2, 3], index=index)\n\n result = df.rename_axis(**kwargs)\n expected_index = index.rename(None) if kwargs else index\n expected = Series([1, 2, 3], index=expected_index)\n tm.assert_series_equal(result, expected)\n\n def test_set_axis_inplace_axes(self, axis_series):\n # GH14636\n ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = ser.copy()\n expected.index = list('abcd')\n\n # inplace=True\n # The FutureWarning comes from the fact that we would like to have\n # inplace default to False some day\n for inplace, warn in [(None, FutureWarning), (True, None)]:\n result = ser.copy()\n kwargs = {'inplace': inplace}\n with tm.assert_produces_warning(warn):\n result.set_axis(list('abcd'), axis=axis_series, **kwargs)\n tm.assert_series_equal(result, expected)\n\n def test_set_axis_inplace(self):\n # GH14636\n\n s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = s.copy()\n expected.index = list('abcd')\n\n # inplace=False\n result = s.set_axis(list('abcd'), axis=0, inplace=False)\n tm.assert_series_equal(expected, result)\n\n # omitting the \"axis\" parameter\n with tm.assert_produces_warning(None):\n result = s.set_axis(list('abcd'), inplace=False)\n tm.assert_series_equal(result, expected)\n\n # wrong values for the \"axis\" parameter\n for axis in [2, 'foo']:\n with pytest.raises(ValueError, match='No axis named'):\n s.set_axis(list('abcd'), axis=axis, inplace=False)\n\n def test_set_axis_prior_to_deprecation_signature(self):\n s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = s.copy()\n expected.index = list('abcd')\n\n for axis in [0, 'index']:\n with tm.assert_produces_warning(FutureWarning):\n result = s.set_axis(0, list('abcd'), inplace=False)\n tm.assert_series_equal(result, expected)\n\n def test_reset_index_drop_errors(self):\n # GH 20925\n\n # KeyError raised for series index when passed level name is missing\n s = Series(range(4))\n with pytest.raises(KeyError, match='must be same as name'):\n s.reset_index('wrong', drop=True)\n with pytest.raises(KeyError, match='must be same as name'):\n s.reset_index('wrong')\n\n # KeyError raised for series when level to be dropped is missing\n s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))\n with pytest.raises(KeyError, match='not found'):\n s.reset_index('wrong', drop=True)\n\n def test_droplevel(self):\n # GH20342\n ser = Series([1, 2, 3, 4])\n ser.index = MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)],\n names=['a', 'b'])\n expected = ser.reset_index('b', drop=True)\n result = ser.droplevel('b', axis='index')\n tm.assert_series_equal(result, expected)\n # test that droplevel raises ValueError on axis != 0\n with pytest.raises(ValueError):\n ser.droplevel(1, axis='columns')\n",
"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy import random\n\nfrom pandas.util._decorators import cache_readonly\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.api import is_list_like\n\nfrom pandas import DataFrame, Series\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_is_valid_plot_return_object, ensure_clean)\n\n\n\"\"\"\nThis is a common base class used for various plotting tests\n\"\"\"\n\n\[email protected]_if_no_mpl\nclass TestPlotBase:\n\n def setup_method(self, method):\n\n import matplotlib as mpl\n from pandas.plotting._matplotlib import compat\n mpl.rcdefaults()\n\n self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3()\n self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0()\n self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0()\n\n self.bp_n_objects = 7\n self.polycollection_factor = 2\n self.default_figsize = (6.4, 4.8)\n self.default_tick_position = 'left'\n\n n = 100\n with tm.RNGContext(42):\n gender = np.random.choice(['Male', 'Female'], size=n)\n classroom = np.random.choice(['A', 'B', 'C'], size=n)\n\n self.hist_df = DataFrame({'gender': gender,\n 'classroom': classroom,\n 'height': random.normal(66, 4, size=n),\n 'weight': random.normal(161, 32, size=n),\n 'category': random.randint(4, size=n)})\n\n self.tdf = tm.makeTimeDataFrame()\n self.hexbin_df = DataFrame({\"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(\n size=20)})\n\n def teardown_method(self, method):\n tm.close()\n\n @cache_readonly\n def plt(self):\n import matplotlib.pyplot as plt\n return plt\n\n @cache_readonly\n def colorconverter(self):\n import matplotlib.colors as colors\n return colors.colorConverter\n\n def _check_legend_labels(self, axes, labels=None, visible=True):\n \"\"\"\n Check each axes has expected legend labels\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n labels : list-like\n expected legend labels\n visible : bool\n expected legend visibility. 
labels are checked only when visible is\n True\n \"\"\"\n\n if visible and (labels is None):\n raise ValueError('labels must be specified when visible is True')\n axes = self._flatten_visible(axes)\n for ax in axes:\n if visible:\n assert ax.get_legend() is not None\n self._check_text_labels(ax.get_legend().get_texts(), labels)\n else:\n assert ax.get_legend() is None\n\n def _check_data(self, xp, rs):\n \"\"\"\n Check each axes has identical lines\n\n Parameters\n ----------\n xp : matplotlib Axes object\n rs : matplotlib Axes object\n \"\"\"\n xp_lines = xp.get_lines()\n rs_lines = rs.get_lines()\n\n def check_line(xpl, rsl):\n xpdata = xpl.get_xydata()\n rsdata = rsl.get_xydata()\n tm.assert_almost_equal(xpdata, rsdata)\n\n assert len(xp_lines) == len(rs_lines)\n [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]\n tm.close()\n\n def _check_visible(self, collections, visible=True):\n \"\"\"\n Check each artist is visible or not\n\n Parameters\n ----------\n collections : matplotlib Artist or its list-like\n target Artist or its list or collection\n visible : bool\n expected visibility\n \"\"\"\n from matplotlib.collections import Collection\n if not isinstance(collections,\n Collection) and not is_list_like(collections):\n collections = [collections]\n\n for patch in collections:\n assert patch.get_visible() == visible\n\n def _get_colors_mapped(self, series, colors):\n unique = series.unique()\n # unique and colors length can be differed\n # depending on slice value\n mapped = dict(zip(unique, colors))\n return [mapped[v] for v in series.values]\n\n def _check_colors(self, collections, linecolors=None, facecolors=None,\n mapping=None):\n \"\"\"\n Check each artist has expected line colors and face colors\n\n Parameters\n ----------\n collections : list-like\n list or collection of target artist\n linecolors : list-like which has the same length as collections\n list of expected line colors\n facecolors : list-like which has the same length as collections\n list of expected face colors\n mapping : Series\n Series used for color grouping key\n used for andrew_curves, parallel_coordinates, radviz test\n \"\"\"\n\n from matplotlib.lines import Line2D\n from matplotlib.collections import (\n Collection, PolyCollection, LineCollection\n )\n conv = self.colorconverter\n if linecolors is not None:\n\n if mapping is not None:\n linecolors = self._get_colors_mapped(mapping, linecolors)\n linecolors = linecolors[:len(collections)]\n\n assert len(collections) == len(linecolors)\n for patch, color in zip(collections, linecolors):\n if isinstance(patch, Line2D):\n result = patch.get_color()\n # Line2D may contains string color expression\n result = conv.to_rgba(result)\n elif isinstance(patch, (PolyCollection, LineCollection)):\n result = tuple(patch.get_edgecolor()[0])\n else:\n result = patch.get_edgecolor()\n\n expected = conv.to_rgba(color)\n assert result == expected\n\n if facecolors is not None:\n\n if mapping is not None:\n facecolors = self._get_colors_mapped(mapping, facecolors)\n facecolors = facecolors[:len(collections)]\n\n assert len(collections) == len(facecolors)\n for patch, color in zip(collections, facecolors):\n if isinstance(patch, Collection):\n # returned as list of np.array\n result = patch.get_facecolor()[0]\n else:\n result = patch.get_facecolor()\n\n if isinstance(result, np.ndarray):\n result = tuple(result)\n\n expected = conv.to_rgba(color)\n assert result == expected\n\n def _check_text_labels(self, texts, expected):\n \"\"\"\n Check each text has expected 
labels\n\n Parameters\n ----------\n texts : matplotlib Text object, or its list-like\n target text, or its list\n expected : str or list-like which has the same length as texts\n expected text label, or its list\n \"\"\"\n if not is_list_like(texts):\n assert texts.get_text() == expected\n else:\n labels = [t.get_text() for t in texts]\n assert len(labels) == len(expected)\n for label, e in zip(labels, expected):\n assert label == e\n\n def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,\n ylabelsize=None, yrot=None):\n \"\"\"\n Check each axes has expected tick properties\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xlabelsize : number\n expected xticks font size\n xrot : number\n expected xticks rotation\n ylabelsize : number\n expected yticks font size\n yrot : number\n expected yticks rotation\n \"\"\"\n from matplotlib.ticker import NullFormatter\n axes = self._flatten_visible(axes)\n for ax in axes:\n if xlabelsize or xrot:\n if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):\n # If minor ticks has NullFormatter, rot / fontsize are not\n # retained\n labels = ax.get_xticklabels()\n else:\n labels = ax.get_xticklabels() + ax.get_xticklabels(\n minor=True)\n\n for label in labels:\n if xlabelsize is not None:\n tm.assert_almost_equal(label.get_fontsize(),\n xlabelsize)\n if xrot is not None:\n tm.assert_almost_equal(label.get_rotation(), xrot)\n\n if ylabelsize or yrot:\n if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):\n labels = ax.get_yticklabels()\n else:\n labels = ax.get_yticklabels() + ax.get_yticklabels(\n minor=True)\n\n for label in labels:\n if ylabelsize is not None:\n tm.assert_almost_equal(label.get_fontsize(),\n ylabelsize)\n if yrot is not None:\n tm.assert_almost_equal(label.get_rotation(), yrot)\n\n def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):\n \"\"\"\n Check each axes has expected scales\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xaxis : {'linear', 'log'}\n expected xaxis scale\n yaxis : {'linear', 'log'}\n expected yaxis scale\n \"\"\"\n axes = self._flatten_visible(axes)\n for ax in axes:\n assert ax.xaxis.get_scale() == xaxis\n assert ax.yaxis.get_scale() == yaxis\n\n def _check_axes_shape(self, axes, axes_num=None, layout=None,\n figsize=None):\n \"\"\"\n Check expected number of axes is drawn in expected layout\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n axes_num : number\n expected number of axes. Unnecessary axes should be set to\n invisible.\n layout : tuple\n expected layout, (expected number of rows , columns)\n figsize : tuple\n expected figsize. 
default is matplotlib default\n \"\"\"\n from pandas.plotting._matplotlib.tools import _flatten\n\n if figsize is None:\n figsize = self.default_figsize\n visible_axes = self._flatten_visible(axes)\n\n if axes_num is not None:\n assert len(visible_axes) == axes_num\n for ax in visible_axes:\n # check something drawn on visible axes\n assert len(ax.get_children()) > 0\n\n if layout is not None:\n result = self._get_axes_layout(_flatten(axes))\n assert result == layout\n\n tm.assert_numpy_array_equal(\n visible_axes[0].figure.get_size_inches(),\n np.array(figsize, dtype=np.float64))\n\n def _get_axes_layout(self, axes):\n x_set = set()\n y_set = set()\n for ax in axes:\n # check axes coordinates to estimate layout\n points = ax.get_position().get_points()\n x_set.add(points[0][0])\n y_set.add(points[0][1])\n return (len(y_set), len(x_set))\n\n def _flatten_visible(self, axes):\n \"\"\"\n Flatten axes, and filter only visible\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n\n \"\"\"\n from pandas.plotting._matplotlib.tools import _flatten\n\n axes = _flatten(axes)\n axes = [ax for ax in axes if ax.get_visible()]\n return axes\n\n def _check_has_errorbars(self, axes, xerr=0, yerr=0):\n \"\"\"\n Check axes has expected number of errorbars\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xerr : number\n expected number of x errorbar\n yerr : number\n expected number of y errorbar\n \"\"\"\n axes = self._flatten_visible(axes)\n for ax in axes:\n containers = ax.containers\n xerr_count = 0\n yerr_count = 0\n for c in containers:\n has_xerr = getattr(c, 'has_xerr', False)\n has_yerr = getattr(c, 'has_yerr', False)\n if has_xerr:\n xerr_count += 1\n if has_yerr:\n yerr_count += 1\n assert xerr == xerr_count\n assert yerr == yerr_count\n\n def _check_box_return_type(self, returned, return_type, expected_keys=None,\n check_ax_title=True):\n \"\"\"\n Check box returned type is correct\n\n Parameters\n ----------\n returned : object to be tested, returned from boxplot\n return_type : str\n return_type passed to boxplot\n expected_keys : list-like, optional\n group labels in subplot case. 
If not passed,\n the function checks assuming boxplot uses single ax\n check_ax_title : bool\n Whether to check the ax.title is the same as expected_key\n Intended to be checked by calling from ``boxplot``.\n Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.\n \"\"\"\n from matplotlib.axes import Axes\n types = {'dict': dict, 'axes': Axes, 'both': tuple}\n if expected_keys is None:\n # should be fixed when the returning default is changed\n if return_type is None:\n return_type = 'dict'\n\n assert isinstance(returned, types[return_type])\n if return_type == 'both':\n assert isinstance(returned.ax, Axes)\n assert isinstance(returned.lines, dict)\n else:\n # should be fixed when the returning default is changed\n if return_type is None:\n for r in self._flatten_visible(returned):\n assert isinstance(r, Axes)\n return\n\n assert isinstance(returned, Series)\n\n assert sorted(returned.keys()) == sorted(expected_keys)\n for key, value in returned.items():\n assert isinstance(value, types[return_type])\n # check returned dict has correct mapping\n if return_type == 'axes':\n if check_ax_title:\n assert value.get_title() == key\n elif return_type == 'both':\n if check_ax_title:\n assert value.ax.get_title() == key\n assert isinstance(value.ax, Axes)\n assert isinstance(value.lines, dict)\n elif return_type == 'dict':\n line = value['medians'][0]\n axes = line.axes\n if check_ax_title:\n assert axes.get_title() == key\n else:\n raise AssertionError\n\n def _check_grid_settings(self, obj, kinds, kws={}):\n # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792\n\n import matplotlib as mpl\n\n def is_grid_on():\n xticks = self.plt.gca().xaxis.get_major_ticks()\n yticks = self.plt.gca().yaxis.get_major_ticks()\n # for mpl 2.2.2, gridOn and gridline.get_visible disagree.\n # for new MPL, they are the same.\n\n if self.mpl_ge_3_1_0:\n xoff = all(not g.gridline.get_visible() for g in xticks)\n yoff = all(not g.gridline.get_visible() for g in yticks)\n else:\n xoff = all(not g.gridOn for g in xticks)\n yoff = all(not g.gridOn for g in yticks)\n\n return not (xoff and yoff)\n\n spndx = 1\n for kind in kinds:\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=False)\n obj.plot(kind=kind, **kws)\n assert not is_grid_on()\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=True)\n obj.plot(kind=kind, grid=False, **kws)\n assert not is_grid_on()\n\n if kind != 'pie':\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=True)\n obj.plot(kind=kind, **kws)\n assert is_grid_on()\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=False)\n obj.plot(kind=kind, grid=True, **kws)\n assert is_grid_on()\n\n def _unpack_cycler(self, rcParams, field='color'):\n \"\"\"\n Auxiliary function for correctly unpacking cycler after MPL >= 1.5\n \"\"\"\n return [v[field] for v in rcParams['axes.prop_cycle']]\n\n\ndef _check_plot_works(f, filterwarnings='always', **kwargs):\n import matplotlib.pyplot as plt\n ret = None\n with warnings.catch_warnings():\n warnings.simplefilter(filterwarnings)\n try:\n try:\n fig = kwargs['figure']\n except KeyError:\n fig = plt.gcf()\n\n plt.clf()\n\n ax = kwargs.get('ax', fig.add_subplot(211)) # noqa\n ret = f(**kwargs)\n\n assert_is_valid_plot_return_object(ret)\n\n try:\n kwargs['ax'] = fig.add_subplot(212)\n ret = f(**kwargs)\n except Exception:\n pass\n else:\n assert_is_valid_plot_return_object(ret)\n\n with ensure_clean(return_filelike=True) as 
path:\n plt.savefig(path)\n finally:\n tm.close(fig)\n\n return ret\n\n\ndef curpath():\n pth, _ = os.path.split(os.path.abspath(__file__))\n return pth\n"
] | [
[
"pandas.Series",
"pandas.util.testing.makeDataFrame",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.MultiIndex.from_arrays",
"pandas.MultiIndex.from_product",
"pandas.Index",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.RangeIndex",
"pandas.MultiIndex",
"pandas.util.testing.assert_frame_equal",
"numpy.array",
"pandas.util.testing.makeDateIndex"
],
[
"pandas.util.testing.close",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.assert_is_valid_plot_return_object",
"pandas.util.testing.makeTimeDataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"numpy.random.choice",
"matplotlib.rc",
"pandas.plotting._matplotlib.compat._mpl_ge_2_2_3",
"numpy.random.uniform",
"pandas.plotting._matplotlib.compat._mpl_ge_3_1_0",
"numpy.random.normal",
"matplotlib.pyplot.clf",
"numpy.arange",
"pandas.core.dtypes.api.is_list_like",
"pandas.plotting._matplotlib.tools._flatten",
"matplotlib.rcdefaults",
"pandas.util.testing.ensure_clean",
"pandas.util.testing.RNGContext",
"pandas.plotting._matplotlib.compat._mpl_ge_3_0_0",
"numpy.array",
"numpy.random.randint"
]
] |
Hemankita/refarch-kc-container-ms | [
"c2e85eacabe8a194782835b04f3410c2d7956a9b"
] | [
"tools/generateData_sensor_malfunction.py"
] | [
"import csv\nimport json\nfrom random import gauss\nimport random\nimport datetime\nimport numpy as np\nimport sys\nimport pandas as pd\n\ndf = pd.DataFrame(columns=['Timestamp', 'ID', 'Temperature(celsius)', 'Target_Temperature(celsius)', 'Amp', 'CumulativePowerConsumption', 'ContentType', 'Humidity', 'CO2', 'Time_Door_Open', \n'Maintainence_Required', 'Defrost_Cycle'])\n\ndef buildJSON():\n \n #faulty sensor data\n id = random.randint(1001,2000)\n Today= datetime.datetime.today()\n date_list = [Today + datetime.timedelta(minutes=15*x) for x in range(0, 1000)]\n range_list=np.linspace(1,2,1000)\n index=0\n for i in range_list:\n\n timestamp = date_list[index].strftime('%Y-%m-%d T%H:%M Z')\n df.loc[i] = [timestamp, id, gauss(5.0, 2.0), 4.4, gauss(2.5,1.0), gauss(10.0,2.0), random.randint(1,5),gauss(10.5, 5.5), gauss(10.5, 5.0), gauss(8.0, 2.0), 1, 6]\n index=index+1\n\n d = [dict([\n (colname, row[i]) \n for i,colname in enumerate(df.columns)]) for row in df.values]\n return json.dumps(d)\n\n\n"
] | [
[
"pandas.DataFrame",
"numpy.linspace"
]
] |
xvinay28x/cat_dog_classifier_library | [
"4d56f90f9d3e91051dba71dcdea78930c4ac0e52"
] | [
"animal-classifier/__init__.py"
] | [
"from tensorflow import keras\n\ndef classify(path):\n model = keras.models.load_model(\"Cat_Dog_Classification.h5\")\n load_image = keras.preprocessing.image.load_image(path,target_size=(200,200))\n image_array = keras.preprocessing.image.img_to_array(load_image)\n reshape_array = image_array.reshape(1,200,200,3)\n array_normalize = reshape_array/255\n result = model.predict(array_normalize)\n if result >= 0.5:\n return 1\n else:\n return 0\n "
] | [
[
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_image",
"tensorflow.keras.models.load_model"
]
] |
ericlearning/style-transfer | [
"f387515b4ffe441c4677400a65b9e7fdb50c979f"
] | [
"FastStyleTransfer/utils.py"
] | [
"import os\nimport glob\nimport torch\nimport pandas as pd\nimport seaborn as sn\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom sklearn.metrics import confusion_matrix\nfrom PIL import Image\n\ndef set_lr(optimizer, lrs):\n\tif(len(lrs) == 1):\n\t\tfor param in optimizer.param_groups:\n\t\t\tparam['lr'] = lrs[0]\n\telse:\n\t\tfor i, param in enumerate(optimizer.param_groups):\n\t\t\tparam['lr'] = lrs[i]\n\ndef set_base_lr(optimizer, lrs):\n\tif(len(lrs) == 1):\n\t\tfor param in optimizer.param_groups:\n\t\t\tparam['initial_lr'] = lrs[0]\n\telse:\n\t\tfor i, param in enumerate(optimizer.param_groups):\n\t\t\tparam['initial_lr'] = lrs[i]\n\ndef get_lr(optimizer):\n\toptim_param_groups = optimizer.param_groups\n\tif(len(optim_param_groups) == 1):\n\t\treturn optim_param_groups[0]['lr']\n\telse:\n\t\tlrs = []\n\t\tfor param in optim_param_groups:\n\t\t\tlrs.append(param['lr'])\n\t\treturn lrs\n\ndef get_children_groups(model_children, param_places):\n\tcur_place = 0\n\tchildren_groups = []\n\n\tfor param_place in param_places:\n\t\tchildren_groups.append(model_children[cur_place:param_place])\n\t\tcur_place = param_place\n\n\treturn children_groups\n\ndef get_params(children):\n\tparams_use_grad = []\n\tfor child in children:\n\t\tfor param in child.parameters():\n\t\t\tif(param.requires_grad == True):\n\t\t\t\tparams_use_grad.append(param)\n\n\treturn params_use_grad\n\ndef get_optimizer(model, lrs, param_places):\n\tmodel_children = list(model.children())\n\n\t# only 1 learning rate\n\tif(len(lrs) == 1):\n\t\t# from the model's childrens, only get the parameters that use grad\n\t\tparam_use_grad = get_params(model_children)\n\n\t\t# set an Adam optimizer with the params that use grad, and the lr\n\t\toptimizer = optim.Adam(param_use_grad, lrs[0])\n\n\t# multiple learning rates\n\telse:\n\t\t# from the param_places, get chunks of children from model_children\n\t\t# children_groups is a list, and each item will be a list of children\n\t\tchildren_groups = get_children_groups(model_children, param_places)\n\n\t\t# from children_groups, get each of its children group's grad using params\n\t\t# param_groups_use_grad is a list, and each item will be a list of params that use grad\n\t\tparam_groups_use_grad = []\n\n\t\tfor children_group in children_groups:\n\t\t\tparam_group_use_grad = get_params(children_group)\n\t\t\tparam_groups_use_grad.append(param_group_use_grad)\n\n\t\t# zip param_groups_use_grad together with lrs\n\t\t# in order to feed in the corresponding lr to a given param_group\n\t\tparam_groups_use_grad_with_lrs = zip(param_groups_use_grad, lrs)\n\t\toptimizer = optim.Adam([{'params' : p, 'lr' : l}\n\t\t\tfor p, l in param_groups_use_grad_with_lrs])\n\n\treturn optimizer\n\ndef freeze_until(model, idx):\n\tfor i, child in enumerate(model.children()):\n\t\tif(i <= idx):\n\t\t\tfor param in child.parameters():\n\t\t\t\tparam.requires_grad = False\n\t\telse:\n\t\t\tfor param in child.parameters():\n\t\t\t\tparam.requires_grad = True\n\ndef histogram_sizes(img_dir, h_lim = None, w_lim = None):\n\ths, ws = [], []\n\tfor file in glob.iglob(os.path.join(img_dir, '**/*.*')):\n\t\ttry:\n\t\t\twith Image.open(file) as im:\n\t\t\t\th, w = im.size\n\t\t\t\ths.append(h)\n\t\t\t\tws.append(w)\n\t\texcept:\n\t\t\tprint('Not an Image file')\n\n\tif(h_lim is not None and w_lim is not None):\n\t\ths = [h for h in hs if h<h_lim]\n\t\tws = [w for w in ws if 
w<w_lim]\n\n\tplt.figure('Height')\n\tplt.hist(hs)\n\n\tplt.figure('Width')\n\tplt.hist(ws)\n\n\tplt.show()\n\n\treturn hs, ws\n\ndef plot_confusion_matrix(model, dl, names, classes_count, device, figsize):\n\ttrue_label = []\n\tpredicted_label = []\n\n\tfor batch in dl:\n\t\t(images, labels) = batch\n\t\ty_real = list(labels.data.cpu().numpy())\n\t\ty_pred = list(torch.argmax(model(images.to(device)), dim=1).data.cpu().numpy())\n\t\t\n\t\ttrue_label.extend(y_real)\n\t\tpredicted_label.extend(y_pred)\n\n\tcm = confusion_matrix(true_label, predicted_label)\n\tnames_with_cnt = [str(name) + ' : ' + str(cnt) for name, cnt in zip(names, classes_count)]\n\tdf = pd.DataFrame(cm, index = names_with_cnt, columns = names_with_cnt)\n\n\tplt.figure(figsize = figsize)\n\tax = plt.subplot(111)\n\tsn.heatmap(df, annot = True, ax = ax, fmt='g')\n\t\n\tplt.show()\n\ndef freeze_cur_bn(module):\n\tclassname = module.__class__.__name__\n\tif(classname.find('BatchNorm') != -1):\n\t\tmodule.eval()\n\ndef freeze_bn(model):\n\tmodel.apply(freeze_cur_bn)\n\nclass Normalize(nn.Module):\n\tdef __init__(self, mean, variance):\n\t\tsuper(Normalize, self).__init__()\n\t\tself.mean = mean.view(-1, 1, 1)\n\t\tself.variance = variance.view(-1, 1, 1)\n\n\tdef forward(self, x):\n\t\treturn (x - self.mean) / self.variance"
] | [
[
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"torch.optim.Adam",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
] |
chathurawidanage/cylon | [
"ac61b7a50880138fe67de21adee208016a94979a"
] | [
"cpp/src/experiments/generate_csv.py"
] | [
"##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\nimport numpy as np\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description='generate random data')\nparser.add_argument('-o', dest='output', type=str, help='output file', default='/tmp/csv.csv')\nparser.add_argument('-r', dest='rows', type=int, help='number of rows', default=10)\nparser.add_argument('-c', dest='cols', type=int, help='number of cols', default=4)\nparser.add_argument('-k', dest='idx_cols', type=int, nargs='+', help='index columns', default=[0])\nparser.add_argument('--krange', nargs=2, type=int, help='key range', default=(0, 10))\nparser.add_argument('--vrange', nargs=2, type=float, help='val range', default=(0., 1.))\nparser.add_argument('--no_header', action='store_true', help='exclude header')\n\n\ndef generate_file(output='/tmp/csv.csv', rows=10, cols=4, idx_cols=None, vrange=(0., 1.),\n krange=(0, 10), no_header=False):\n if idx_cols is None:\n idx_cols = [0]\n\n df = pd.DataFrame(np.random.rand(rows, cols) * (vrange[1] - vrange[0]) + vrange[0],\n columns=list(range(cols)))\n\n for i in idx_cols:\n assert cols > i >= 0\n df[i] = df[i].map(lambda x: int(\n krange[0] + (x - vrange[0]) * (krange[1] - krange[0]) / (vrange[1] - vrange[0])))\n\n df.to_csv(output, header=not no_header, index=False, float_format='%.3f')\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n args = vars(args)\n\n print(\"generate csv :\", args, flush=True)\n generate_file(**args)\n"
] | [
[
"numpy.random.rand"
]
] |
ZoRoronoa/Camera-Aware-Proxy | [
"352f900bbae330f18c2bfe2b3f2516fb4e31adea"
] | [
"reid/utils/evaluation_metrics/retrieval.py"
] | [
"import numpy as np\nfrom sklearn import metrics as sk_metrics\nimport torch\n\nclass PersonReIDMAP:\n '''\n Compute Rank@k and mean Average Precision (mAP) scores\n Used for Person ReID\n Test on MarKet and Duke\n '''\n\n def __init__(self, query_feature, query_cam, query_label, gallery_feature, gallery_cam, gallery_label, dist):\n '''\n :param query_feature: np.array, bs * feature_dim\n :param query_cam: np.array, 1d\n :param query_label: np.array, 1d\n :param gallery_feature: np.array, gallery_size * feature_dim\n :param gallery_cam: np.array, 1d\n :param gallery_label: np.array, 1d\n '''\n\n self.query_feature = query_feature\n self.query_cam = query_cam\n self.query_label = query_label\n self.gallery_feature = gallery_feature\n self.gallery_cam = gallery_cam\n self.gallery_label = gallery_label\n\n assert dist in ['cosine', 'euclidean']\n self.dist = dist\n\n # normalize feature for fast cosine computation\n if self.dist == 'cosine':\n self.query_feature = self.normalize(self.query_feature)\n self.gallery_feature = self.normalize(self.gallery_feature)\n\n APs = []\n CMC = []\n for i in range(len(query_label)):\n AP, cmc = self.evaluate(self.query_feature[i], self.query_cam[i], self.query_label[i],\n self.gallery_feature, self.gallery_cam, self.gallery_label)\n APs.append(AP)\n CMC.append(cmc)\n # print('{}/{}'.format(i, len(query_label)))\n\n self.APs = np.array(APs)\n self.mAP = np.mean(self.APs)\n\n min_len = 99999999\n for cmc in CMC:\n if len(cmc) < min_len:\n min_len = len(cmc)\n for i, cmc in enumerate(CMC):\n CMC[i] = cmc[0: min_len]\n self.CMC = np.mean(np.array(CMC), axis=0)\n\n def compute_AP(self, index, good_index):\n '''\n :param index: np.array, 1d\n :param good_index: np.array, 1d\n :return:\n '''\n\n num_good = len(good_index)\n hit = np.in1d(index, good_index)\n index_hit = np.argwhere(hit == True).flatten()\n\n if len(index_hit) == 0:\n AP = 0\n cmc = np.zeros([len(index)])\n else:\n precision = []\n for i in range(num_good):\n precision.append(float(i+1) / float((index_hit[i]+1)))\n AP = np.mean(np.array(precision))\n cmc = np.zeros([len(index)])\n cmc[index_hit[0]: ] = 1\n\n return AP, cmc\n\n def evaluate(self, query_feature, query_cam, query_label, gallery_feature, gallery_cam, gallery_label, rerank=False):\n '''\n :param query_feature: np.array, 1d\n :param query_cam: int\n :param query_label: int\n :param gallery_feature: np.array, 2d, gallerys_size * feature_dim\n :param gallery_cam: np.array, 1d\n :param gallery_label: np.array, 1d\n :return:\n '''\n\n # cosine score\n if self.dist is 'cosine':\n # feature has been normalize during intialization\n score = np.matmul(query_feature, gallery_feature.transpose())\n index = np.argsort(score)[::-1]\n elif self.dist is 'euclidean':\n #score = self.l2(query_feature.reshape([1, -1]), gallery_feature)\n #print('query_feature shape= {}, gallery_feature shape= {}'.format(query_feature.shape, gallery_feature.shape))\n score = self.l2(query_feature.reshape([1,-1]), gallery_feature)\n index = np.argsort(score.reshape([-1]))\n\n junk_index_1 = self.in1d(np.argwhere(query_label == gallery_label), np.argwhere(query_cam == gallery_cam))\n junk_index_2 = np.argwhere(gallery_label == -1)\n junk_index = np.append(junk_index_1, junk_index_2)\n\n good_index = self.in1d(np.argwhere(query_label == gallery_label), np.argwhere(query_cam != gallery_cam))\n index_wo_junk = self.notin1d(index, junk_index)\n\n return self.compute_AP(index_wo_junk, good_index)\n\n def in1d(self, array1, array2, invert=False):\n '''\n :param set1: np.array, 
1d\n :param set2: np.array, 1d\n :return:\n '''\n\n mask = np.in1d(array1, array2, invert=invert)\n return array1[mask]\n\n def notin1d(self, array1, array2):\n\n return self.in1d(array1, array2, invert=True)\n\n def normalize(self, x):\n norm = np.tile(np.sqrt(np.sum(np.square(x), axis=1, keepdims=True)), [1, x.shape[1]])\n return x / norm\n\n def cosine_dist(self, x, y):\n return sk_metrics.pairwise.cosine_distances(x, y)\n\n def euclidean_dist(self, x, y):\n return sk_metrics.pairwise.euclidean_distances(x, y)\n\n def l2(self, x, y):\n x = torch.from_numpy(x)\n y = torch.from_numpy(y)\n\n m, n = x.size(0), y.size(0)\n x = x.view(m, -1)\n y = y.view(n, -1)\n\n dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n dist.addmm_(1, -2, x, y.t())\n # We use clamp to keep numerical stability\n dist = torch.clamp(dist, 1e-8, np.inf)\n return dist.numpy()\n\n"
] | [
[
"numpy.append",
"numpy.argwhere",
"torch.clamp",
"torch.pow",
"sklearn.metrics.pairwise.cosine_distances",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.in1d",
"numpy.argsort",
"torch.from_numpy",
"numpy.array",
"numpy.square",
"numpy.mean"
]
] |
miketrumpis/lfp_scroller | [
"ce4dbf85bb4d31f2eacfb5d68a5049499637722c"
] | [
"fast_scroller/h5data.py"
] | [
"import numpy as np\nfrom scipy.linalg import LinAlgError\nfrom scipy.signal import lfilter, lfilter_zi, hilbert\nfrom scipy.interpolate import interp1d\nimport h5py\nfrom tqdm import tqdm\nfrom ecogdata.util import input_as_2d\nfrom ecogdata.util import nextpow2\n\n\ndef h5mean(array, axis, rowmask=(), start=0, stop=None):\n \"\"\"Compute mean of a 2D HDF5 array in blocks\"\"\"\n\n shape = array.shape\n if axis < 0:\n axis += len(shape)\n if stop is None:\n stop = shape[1]\n if axis==1:\n if len(rowmask):\n mn_size = rowmask.sum()\n else:\n mn_size = shape[0]\n else:\n mn_size = shape[1 - axis]\n mn = np.zeros(mn_size, 'd')\n # For averaging in both dimensions, still iterate chunks in time\n # If averaging over channels:\n # * fill in the chunk averages along the way\n # If averaging over time\n # * accumulate the samples (scaled by 1/N)\n itr = H5Chunks(array, axis=1, slices=True)\n for n, sl in tqdm(enumerate(itr), desc='Computing mean', leave=True, total=itr.n_blocks):\n t_sl = sl[1]\n # just pass until t_sl.start < requested start < t_sl.stop\n if start >= t_sl.stop:\n print('Continuing')\n continue\n # now modify first good slice\n elif start > t_sl.start:\n t_sl = slice(start, t_sl.stop)\n sl = (sl[0], t_sl)\n # break loops if stop < t_sl.start\n if stop < t_sl.start:\n break\n # now modify lsat good slice\n elif stop < t_sl.stop:\n t_sl = slice(t_sl.start, stop)\n sl = (sl[0], t_sl)\n x_sl = array[sl]\n if len(rowmask):\n x_sl = x_sl[rowmask]\n \n if axis == 0:\n mn[sl[1]] = x_sl.mean(0)\n else:\n mn[:] += x_sl.sum(1) / float(array.shape[1])\n return mn\n\n\ndef h5stat(array, fn, rowmask=()):\n \"\"\"Compute timeseries of a channel-wise statistic for a 2D HDF5 array in blocks\"\"\"\n\n shape = array.shape\n T = shape[1]\n series = np.zeros(T, 'd')\n itr = H5Chunks(array, axis=1, slices=True)\n for n, sl in tqdm(enumerate(itr), desc='Computing series',\n leave=True, total=itr.n_blocks):\n x_sl = array[sl]\n if len(rowmask):\n x_sl = x_sl[rowmask]\n series[sl[1]] = fn(x_sl)\n return series\n\n\nclass ReadCache(object):\n # TODO -- enable catch for full slicing\n \"\"\"\n Buffers row indexes from memmap or hdf5 file.\n\n For cases where array[0, m:n], array[1, m:n], array[2, m:n] are\n accessed sequentially, this object buffers the C x (n-m)\n submatrix before yielding individual rows.\n\n Access such as array[p:q, m:n] is handled by the underlying\n array's __getitem__ method.\n \"\"\"\n \n def __init__(self, array):\n self._array = array\n self._current_slice = None\n self._current_seg = ()\n self.dtype = array.dtype\n self.shape = array.shape\n\n def __len__(self):\n return len(self._array)\n\n @property\n def file_array(self):\n return self._array\n\n def __getitem__(self, sl):\n indx, srange = sl\n # Only access diretly if the first part of the slice is also a slice.\n # In other cases, slice all first and then use numpy indexing\n if isinstance(indx, slice):\n return self._array[sl].copy()\n if self._current_slice != srange:\n all_sl = (slice(None), srange)\n self._current_seg = self._array[all_sl]\n self._current_slice = srange\n # always return the full range after slicing with possibly\n # complex original range\n new_range = slice(None)\n new_sl = (indx, new_range)\n return self._current_seg[new_sl].copy()\n\n\nclass CommonReferenceReadCache(ReadCache):\n \"\"\"Returns common-average re-referenced blocks\"\"\"\n\n def __getitem__(self, sl):\n indx, srange = sl\n if isinstance(indx, slice):\n # This returns without CAR?\n return self._array[sl].copy()\n if self._current_slice 
!= srange:\n all_sl = (slice(None), srange)\n if self.dtype in np.sctypes['int']:\n self._current_seg = self._array[all_sl].astype('d')\n else:\n self._current_seg = self._array[all_sl].copy()\n self._current_seg -= self._current_seg.mean(0)\n self._current_slice = srange\n # always return the full range after slicing with possibly\n # complex original range\n new_range = slice(None)\n new_sl = (indx, new_range)\n return self._current_seg[new_sl].copy()\n\n\nclass FilteredReadCache(ReadCache):\n \"\"\"\n Apply row-by-row filters to a ReadCache\n \"\"\"\n\n def __init__(self, array, filters):\n if not isinstance(filters, (tuple, list)):\n f = filters\n filters = [ f ] * len(array)\n self.filters = filters\n super(FilteredReadCache, self).__init__(array)\n\n def __getitem__(self, sl):\n idx = sl[0]\n x = super(FilteredReadCache, self).__getitem__(sl)\n if isinstance(idx, int):\n return self.filters[idx]( x )\n y = np.empty_like(x)\n for x_, y_, f in zip(x[idx], y[idx], self.filters[idx]):\n y_[:] = f(x_)\n return y\n\n\ndef _make_subtract(z):\n def _f(x):\n return x - z\n return _f\n \n\nclass DCOffsetReadCache(FilteredReadCache):\n \"\"\"\n A filtered read cache with a simple offset subtraction.\n \"\"\"\n\n def __init__(self, array, offsets):\n #filters = [lambda x: x - off for off in offsets]\n filters = [_make_subtract(off) for off in offsets]\n super(DCOffsetReadCache, self).__init__(array, filters)\n self.offsets = offsets\n\n\nclass H5Chunks(object):\n \"\"\"Iterates an HDF5 over \"chunks\" with ndarray-like access\"\"\"\n\n def __init__(self, h5array, out=None, axis=1, min_chunk=None, slices=False, reverse=False):\n \"\"\"\n Efficient block iterator for HDF5 arrays (streams over chunking sizes to read whole blocks at a time).\n\n Parameters\n ----------\n h5array: h5py.Dataset\n Vector timeseries (chan x time) or (time x chan)\n out: h5py.Dataset\n Output array for write-back. May be equal to h5array for read/write arrays. 
Write-back disabled if None\n axis: int\n Axis to iterate over\n min_chunk: int\n Ensure the output blocks are greater than this size\n slices: bool\n Return array slicing rather than data\n reverse: bool\n Yield reverse-sequence data\n\n \"\"\"\n chunk = h5array.chunks\n if len(chunk) > 2:\n raise ValueError('Only iterates for 2D arrays')\n self.h5array = h5array\n while axis < 0:\n axis += len(chunk)\n if chunk[axis] < chunk[1-axis]:\n print('chunk size larger in other dimension!')\n\n self.axis = axis\n self.size = h5array.shape[axis]\n self.chunk = chunk[axis]\n if min_chunk is not None:\n while self.chunk < min_chunk:\n self.chunk += chunk[axis]\n self.n_blocks = self.size // self.chunk\n if self.n_blocks * self.chunk < self.size:\n self.n_blocks += 1\n self.__it = self.n_blocks - 1 if reverse else 0\n self.reverse = reverse\n self.slices = slices\n self._output_source = out\n\n def write_out(self, data):\n if self._output_source is None:\n print('No output defined!')\n return\n if self.reverse:\n # data is reversed\n data = data[:, ::-1] if self.axis == 1 else data[::-1, :]\n self._output_source[self._current_sl] = data\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.__it >= self.n_blocks or self.__it < 0:\n raise StopIteration()\n n = self.__it\n rng = slice(n * self.chunk, min(self.size, (n + 1) * self.chunk))\n self._current_sl = (slice(None), rng) if self.axis else (rng, slice(None))\n if self.reverse:\n self.__it -= 1\n else:\n self.__it += 1\n if self.slices:\n return self._current_sl\n arr = self.h5array[self._current_sl]\n if self.reverse:\n return arr[:, ::-1] if self.axis == 1 else arr[::-1, :]\n return arr\n\n\nclass HandOffIter:\n \"\"\"\n Iterates over several 2D HDF5 arrays with hand-off between files. Hand-off procedure includes attemping to match\n the DC offsets between signals around the end and beginning of recording edges.\n\n Presently iterates over axis=1.\n\n Also supports write-back to the currently visible buffer within an iteration.\n\n \"\"\"\n\n # TODO: support reverse iteration\n\n def __init__(self, arrays, out=None, min_chunk=None, chans=None, blank_points=10):\n \"\"\"\n Construct hand-off iterator from HDF5 files.\n\n Parameters\n ----------\n arrays: sequence\n sequence of h5py.Datasets\n out: h5py.Dataset, str\n out may be a pre-created Dataset of the correct size or the path of output file. If output_file=='same',\n then write-back to the same input files. If None, then there is no output source.\n min_chunk: int\n Ensure the output blocks are greater than this size\n chans: list\n channels to expose on iteration (all by default)\n blank_points: int\n Blank these many points when handing off between files. 
Fill in +/- blank region with linear\n interpolation between valid points.\n\n \"\"\"\n hdf_files = [array.file.filename for array in arrays]\n self.files = hdf_files\n self.arrays = arrays\n rec_lengths = [array.shape[1] for array in arrays]\n chunk_sizes = []\n num_blocks = 0\n if min_chunk is None:\n # todo: fix dumb 2000 pts hard coding\n min_chunk = blank_points + 2000\n else:\n min_chunk = max(blank_points + 2000, min_chunk)\n for array in arrays:\n size = array.chunks[1]\n if min_chunk is not None:\n while size < min_chunk:\n size += array.chunks[1]\n if size > array.shape[1]:\n raise ValueError('Minimum chunk size {} is greater than the length of >=1 arrays'.format(min_chunk))\n chunk_sizes.append(size)\n # todo: is this +1 count correct?\n num_blocks += array.shape[1] // size + 1\n n_chan = arrays[0].shape[0]\n self.n_blocks = num_blocks\n if chans is None:\n chans = slice(None)\n else:\n if not np.iterable(chans):\n chans = (chans,)\n n_chan = len(chans)\n self.total_length = np.sum(rec_lengths)\n self.rec_lengths = rec_lengths\n self.chunk_sizes = chunk_sizes\n # Output status will be checked through the value of self._output_file:\n # if None, do nothing\n # if 'same', write back to input sources\n # else write to self._output_source defined here\n if isinstance(out, str):\n self._output_file = out\n if self._output_file.lower() != 'same':\n hdf = h5py.File(self._output_file, 'w')\n array_name = arrays[0].name.strip('/')\n out = hdf.create_dataset(array_name, shape=(n_chan, self.total_length), dtype='f', chunks=True)\n hdf.create_dataset('break_points', data=np.cumsum(rec_lengths[:-1], dtype='i'))\n self._output_source = out\n self._closing_output = True\n elif out is not None:\n self._output_source = out\n self._output_file = out.file.filename\n self._closing_output = False\n else:\n self._output_file = None\n self._closing_output = False\n self.chans = chans\n self._current_source = 0\n self._current_offset = None\n self._blanking_slice = False\n self._blank_points = blank_points\n\n def __iter__(self):\n # set up initial offset as the mean(s) in the first file\n self._current_source = self.arrays[0]\n means = self._slice_source(np.s_[self._blank_points:self._blank_points + 2000], offset=False).mean(axis=1)\n if self._output_file == 'same':\n self._output_source = self.arrays[0]\n self._current_source_num = 0\n self._current_offset = means[:, None]\n self._current_step = self.chunk_sizes[0]\n self._input_point = 0\n self._output_point = 0\n # starting on a blanking slice\n self._blanking_slice = True\n self._end_of_iter = False\n return self\n\n def _slice_source(self, time_slice, offset=True):\n if isinstance(self.chans, slice):\n arr = self._current_source[self.chans, time_slice]\n else:\n arr = np.array([self._current_source[c, time_slice] for c in self.chans])\n return arr - self._current_offset if offset else arr\n\n def _hand_off(self, start):\n # Right now the current step size will run off the end of the current source.\n # So grab the remainder of this source and hand-off to the next source.\n # Also reset the offset level to the average of the last few points\n # array_name = self.array_name\n end_point = self._current_source.shape[1]\n remainder = self._slice_source(np.s_[start:])\n old_mean = remainder.mean(1)[:, None]\n # Actually... 
use more points if the remainder is short\n if self._current_source.shape[1] - start < 100:\n longer_tail = self._slice_source(np.s[-100:])\n old_mean = longer_tail.mean(1)[:, None]\n # self._current_source.file.close()\n self._current_source_num += 1\n if self._current_source_num >= len(self.files):\n # do not change source or step size, just signal that the end is nigh\n self._end_of_iter = True\n else:\n self._current_source = self.arrays[self._current_source_num]\n self._current_step = self.chunk_sizes[self._current_source_num]\n self._blanking_slice = True\n self._break_point = self._output_point + (end_point - start)\n # get the mean of the first few points in the new source\n new_mean = self._slice_source(np.s_[self._blank_points:self._blank_points + 2000], offset=False).mean(1)\n # new_mean = np.array([self._current_source[c, self._blank_points:200].mean() for c in self.chans])\n # this is the offset to move the new mean to the old mean\n self._current_offset = new_mean[:, None] - old_mean\n return remainder\n\n def write_out(self, data):\n if self._output_file is None:\n print('No output file defined!')\n return\n elif self._output_file == 'same':\n # this condition means that data came from two sources in a hand-off\n if data.shape[1] > self._input_point:\n # last part is from current source\n self._current_source[:, :self._input_point] = data[:, -self._input_point:]\n # first part is from previous source\n n_prev = data.shape[1] - self._input_point\n prev_source = self.arrays[self._current_source_num - 1]\n prev_source[:, -n_prev:] = data[:, :n_prev]\n else:\n max_n = self._current_source.shape[1]\n start_pt = self._input_point - self._current_step\n stop_pt = min(max_n, self._input_point)\n this_slice = np.s_[:, start_pt:stop_pt]\n self._current_source[this_slice] = data\n return\n # Write this data into the output array.\n # If this is a blanking slice (b/c of hand-off) then ???\n a = self._output_point\n b = a + data.shape[1]\n self._output_source[:, a:b] = data\n self._output_source.flush()\n self._output_point = b\n\n\n def __next__(self):\n if self._end_of_iter:\n if self._closing_output:\n self._output_source.file.close()\n raise StopIteration\n start = self._input_point\n stop = start + self._current_step\n if stop > self._current_source.shape[1]:\n # print('hand off slice: {}-{}, file length {}'.format(start, stop, self._current_source.shape[1]))\n remainder = self._hand_off(start)\n # if the hand-off logic has found end-of-files then simply return the last bit and raise StopIteration\n # next time around\n if self._end_of_iter:\n # advance the input array point counter so that it can be rewound as needed in write_out\n self._input_point += self._current_step\n return remainder\n next_strip = self._slice_source(np.s_[:self._current_step])\n # Need to handle blanking!\n r_weight = np.linspace(0, 1, self._blank_points)\n left_point = remainder[:, -1][:, None]\n right_point = next_strip[:, self._blank_points][:, None]\n next_strip[:, :self._blank_points] = r_weight * right_point + (1 - r_weight) * left_point\n arr_slice = np.c_[remainder, next_strip]\n # next input is 1X the current step\n self._input_point = self._current_step\n # print('new input point: {}, file length {}'.format(self._input_point, self._current_source.shape[1]))\n return arr_slice\n else:\n # easy case!\n arr_slice = self._slice_source(np.s_[start:stop])\n self._input_point += self._current_step\n if start == 0 and self._current_source_num == 0:\n # just blank the initial points to zero\n arr_slice[:, 
:self._blank_points] = 0\n return arr_slice\n\n\ndef block_itr_factory(x, **kwargs):\n if isinstance(x, (tuple, list)):\n if 'axis' in kwargs and kwargs['axis'] == 1:\n # just drop this since it works right anyway\n kwargs.pop('axis')\n args = set(kwargs.keys())\n extra_args = args - {'out', 'min_chunks', 'chans', 'blank_points'}\n if len(extra_args):\n print('Dropping arguments not (yet) supported for HandOffIter: {}'.format(extra_args))\n supported_args = args - extra_args\n kwargs = dict((k, kwargs[k]) for k in supported_args)\n return HandOffIter(x, **kwargs)\n else:\n return H5Chunks(x, **kwargs)\n\n\ndef bfilter(b, a, x, axis=-1, out=None, filtfilt=False):\n \"\"\"\n Apply linear filter inplace over array x streaming from disk.\n\n Parameters\n ----------\n b: ndarray\n polynomial coefs for transfer function denominator\n a: ndarray\n polynomial coefs for transfer function numerator\n x: h5py.Dataset, list\n Either a single or multiple datasets. If multiple, then a HandOffIter will be used to iterate. In this mode,\n if out is given as a string then the full output will be concatenated to a single HDF5 file. Otherwise output\n will be written back to each individual file.\n axis: int\n Array axis to apply filter\n out: h5py.Dataset, str\n Output array (or file name, see details above). If multiple inputs are given, a value of None will be\n converted to 'same'\n filtfilt: bool\n If True, perform zero-phase filtering with the forward-reverse technique\n\n Returns\n -------\n out: h5py.Dataset\n Output array. Not well defined if using HandOffIter in 'same' output mode\n\n \"\"\"\n try:\n zii = lfilter_zi(b, a)\n except LinAlgError:\n # the integrating filter doesn't have valid zi\n zii = np.array([0.0])\n\n zi_sl = np.s_[None, :] if axis in (-1, 1) else np.s_[:, None]\n xc_sl = np.s_[:, :1] if axis in (-1, 1) else np.s_[:1, :]\n fir_size = len(b)\n if out is None:\n if isinstance(x, (list, tuple)):\n out = 'same'\n else:\n out = x\n itr = block_itr_factory(x, axis=axis, out=out, min_chunk=fir_size)\n for n, xc in tqdm(enumerate(itr), desc='Blockwise filtering',\n leave=True, total=itr.n_blocks):\n if n == 0:\n zi = zii[zi_sl] * xc[xc_sl]\n xcf, zi = lfilter(b, a, xc, axis=axis, zi=zi)\n itr.write_out(xcf)\n\n # presently hand off iteration only goes forward so can't filt-filt\n if isinstance(itr, HandOffIter) or not filtfilt:\n out = itr._output_source\n del xc\n del xcf\n return out\n\n # Now read and write to the same out array (however it was earlier defined)\n itr = H5Chunks(out, axis=axis, min_chunk=fir_size, out=out, reverse=True)\n for n, xc in tqdm(enumerate(itr), desc='Blockwise filtering (reverse)',\n leave=True, total=itr.n_blocks):\n if n == 0:\n zi = zii[zi_sl] * xc[xc_sl]\n xcf, zi = lfilter(b, a, xc, axis=axis, zi=zi)\n itr.write_out(xcf)\n del xc\n del xcf\n return out\n\n\ndef passthrough(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Copying to output', leave=True, total=itr.n_blocks):\n itr.write_out(xc)\n\n\n@input_as_2d(in_arr=(0, 1))\ndef interpolate_blanked(x, mask, inplace=False, kind='linear'):\n if inplace:\n y = x\n else:\n y = x.copy()\n a = np.arange(x.shape[1])\n for row_x, row_y, row_m in zip(x, y, mask):\n fv = row_x[~row_m].mean()\n f = interp1d(a[~row_m], row_x[~row_m], kind=kind,\n bounds_error=False, fill_value=fv)\n #row_y[~row_m] = row_x[~row_m]\n row_y[row_m] = f( a[row_m] )\n return y\n \n\ndef block_nan_filter(x, y, kind='linear'):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='NaN 
Filtering', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n nan_mask = np.isnan(xc)\n if not nan_mask.any():\n # y[sl] = xc\n itr.write_out(xc)\n continue\n xc = interpolate_blanked(xc, nan_mask, inplace=True, kind=kind)\n # y[sl] = xc\n itr.write_out(xc)\n \n\ndef square_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Squaring', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n # y[sl] = xc ** 2\n itr.write_out(xc ** 2)\n\n\ndef abs_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Rectifying', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n # y[sl] = np.abs(xc)\n itr.write_out(np.abs(xc))\n\n\ndef hilbert_envelope_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Hilbert Transform', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n n = xc.shape[1]\n nfft = nextpow2(n)\n\n # if n is closer to the previous power of 2, then split this block into two computations\n if (nfft - n) > (n - nfft / 2):\n n1 = int(n / 2)\n nfft = int(nfft / 2)\n y1 = hilbert(xc[..., :n1], N=nfft)[..., :n1]\n y2 = hilbert(xc[..., n1:], N=nfft)[..., :n - n1]\n # y[sl] = np.hstack((np.abs(y1), np.abs(y2)))\n itr.write_out(np.hstack((np.abs(y1), np.abs(y2))))\n else:\n y1 = hilbert(xc, N=nfft)[..., :n]\n # y[sl] = np.abs(y1)\n itr.write_out(np.abs(y1))\n"
] | [
[
"numpy.sum",
"scipy.interpolate.interp1d",
"numpy.cumsum",
"numpy.zeros",
"numpy.iterable",
"numpy.abs",
"numpy.arange",
"numpy.empty_like",
"scipy.signal.lfilter",
"scipy.signal.hilbert",
"numpy.isnan",
"numpy.array",
"numpy.linspace",
"scipy.signal.lfilter_zi"
]
] |
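The module recorded in the row above streams blockwise filters over HDF5 recordings; its HandOffIter cross-fades the first `blank_points` samples after every file hand-off so the concatenated signal has no step at the boundary. As recorded, the source also appears to contain two small slips: `np.s[-100:]` is missing the underscore of `np.s_`, and `block_itr_factory` whitelists a `min_chunks` keyword while the iterators accept `min_chunk`. Below is a minimal, self-contained sketch of the seam blend itself; `blend_seam`, `prev_block` and `next_block` are illustrative names, not part of the recorded module.

```python
import numpy as np

def blend_seam(prev_block, next_block, blank_points):
    """Cross-fade the first `blank_points` columns of `next_block` from the last
    column of `prev_block` (weight 1 -> 0) to the column at index `blank_points`
    of `next_block` (weight 0 -> 1), then concatenate the two blocks."""
    w = np.linspace(0.0, 1.0, blank_points)        # ramp toward the "right" endpoint
    left = prev_block[:, -1][:, None]              # last value before the seam
    right = next_block[:, blank_points][:, None]   # first trusted value after the seam
    patched = next_block.copy()
    patched[:, :blank_points] = w * right + (1.0 - w) * left
    return np.c_[prev_block, patched]

# hypothetical 2-channel example: a flat 0-block handing off to a flat 1-block
joined = blend_seam(np.zeros((2, 50)), np.ones((2, 200)), blank_points=20)
```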
fakecoinbase/TheCyberHeadslashCyberHead | [
"b1c5d8c157ff5bb976778ff5f7901d82e41d7d3e"
] | [
"cyberhead/modules/brokers/coinbase/Coinbase.py"
] | [
"import cbpro\nimport pandas as pd\nfrom base64 import b64encode\n\nclass Coinbase:\n\tdef __init__(self, API_KEY, API_SECRET, API_PASS, ENV_URL=\"https://api-public.sandbox.pro.coinbase.com\"):\n\t\tself.API_KEY = API_KEY\n\t\tself.API_SECRET = API_SECRET\n\t\tself.API_PASS = API_PASS\n\t\tself.ENV_URL = ENV_URL\n\t\tself.client = cbpro.AuthenticatedClient(self.API_KEY, self.API_SECRET, self.API_PASS, api_url=self.ENV_URL)\n\n\tdef auth(self):\n\t\tprint('Authenticating Coinbase')\n\n\tdef place_market(self, action, ticker, amount):\n\t\torder = self.client.place_market_order(\n\t\t\t\tproduct_id=ticker,\n\t\t\t\tside=action,\n\t\t\t\tfunds=amount\n\t\t\t)\n\t\treturn place_market\n\n\tdef place_limit_order(self, action, ticker, entry_price, size):\n\t\tentry_order = self.client.place_limit_order(product_id=ticker,\n\t\t\t\t\t\t\tside=action,\n\t\t\t\t\t\t\tprice=entry_price,\n\t\t\t\t\t\t\tsize=size)\n\t\tprint(entry_order)\n\t\treturn entry_order\n\n\tdef get_accounts(self):\n\t\treturn self.client.get_accounts()\n\n\tdef orders(self):\n\t\treturn self.client.get_orders()\n\n\tdef fills(self):\n\t\treturn self.client.get_fills()\n\n\tdef historical_rates(self, ticker: str):\n\t\trates = self.client.get_product_historic_rates(ticker, granularity=86400)\n\t\tdf = pd.DataFrame(rates, columns=[\"time\",\"low\",\"high\",\"open\",\"close\",\"volume\"])\n\t\treturn df\n"
] | [
[
"pandas.DataFrame"
]
] |
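In the `Coinbase.place_market` method recorded above, the final statement `return place_market` refers to a name that is neither local nor global at that point, so the method would raise a NameError after the order has already been submitted. A method-level sketch of the fix, keeping the `cbpro` call exactly as recorded:

```python
def place_market(self, action, ticker, amount):
    # Submit a market order through the authenticated cbpro client and
    # hand back the order dict that the API call returns.
    order = self.client.place_market_order(
        product_id=ticker,
        side=action,
        funds=amount,
    )
    return order  # was `return place_market`, an unresolved name inside the method body
```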
jdlaubrie/shell-elem | [
"f87cb9ca9179533d3a645a494e7ef4d39666ddc6"
] | [
"3rd_check/surgery/penalty.py"
] | [
"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nNbrOfNodes = 35\r\nkeygnra = ' TIME: GANDRA STEP: 80.000 FRAME: 1.000'\r\nkeystent = ' TIME: STENT STEP: 1.000 FRAME: 1.000'\r\nkeygnrb = ' TIME: GANDRB STEP: 100.000 FRAME: 1.000'\r\n# File for gain parameter 01\r\n#--------------------------------------------------------------------------\r\n#--------------------------------------------------------------------------\r\nfile_g01 = open('surgery_p7.rsn', 'r')\r\ngain01 = file_g01.readlines()\r\ng01 = pd.Series(gain01)\r\ng01 = g01.replace(r'\\n','', regex=True)\r\ng01 = g01.replace(r'\\r\\n','', regex=True)\r\ng01 = g01.replace(r'\\r','', regex=True)\r\nindex_Time_g01 = g01[g01.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g01 = index_Time_g01.index.values\r\n#--------------------------------------------------------------------------\r\nG01 = {}\r\nfor idx in index_Time_g01.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g01[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G01[g01[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g01 = np.array([], dtype=np.int64)\r\nData_g01.shape = (-1, 7)\r\nfor key in sorted(G01.keys()):\r\n Data_g01 = np.append(Data_g01,[G01[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g01_gnra = np.array([], dtype=np.int64)\r\nData_g01_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_gnra = np.append(Data_g01_gnra,[G01[keygnra][node,:]], axis=0)\r\n#mesh for this particular key STENT\r\nData_g01_stent = np.array([], dtype=np.int64)\r\nData_g01_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_stent = np.append(Data_g01_stent,[G01[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g01_gnrb = np.array([], dtype=np.int64)\r\nData_g01_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_gnrb = np.append(Data_g01_gnrb,[G01[keygnrb][node,:]], axis=0)\r\n\r\nData_g01=Data_g01[np.argsort(Data_g01[:,0])]\r\n#--------------------------------------------------------------------------\r\n# File for gain parameter 02\r\n#--------------------------------------------------------------------------\r\nfile_g02 = open('surgery_ref.rsn', 'r')\r\ngain02 = file_g02.readlines()\r\ng02 = pd.Series(gain02)\r\ng02 = g02.replace(r'\\n','', regex=True)\r\ng02 = g02.replace(r'\\r\\n','', regex=True)\r\ng02 = g02.replace(r'\\r','', regex=True)\r\nindex_Time_g02 = g02[g02.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g02 = index_Time_g02.index.values\r\n#--------------------------------------------------------------------------\r\nG02 = {}\r\nfor idx in index_Time_g02.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g02[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G02[g02[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g02 = np.array([], dtype=np.int64)\r\nData_g02.shape = (-1, 7)\r\nfor key in sorted(G02.keys()):\r\n Data_g02 = np.append(Data_g02,[G02[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g02_gnra = np.array([], dtype=np.int64)\r\nData_g02_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_gnra = np.append(Data_g02_gnra,[G02[keygnra][node,:]], 
axis=0)\r\n#mesh for this particular key STENT\r\nData_g02_stent = np.array([], dtype=np.int64)\r\nData_g02_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_stent = np.append(Data_g02_stent,[G02[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g02_gnrb = np.array([], dtype=np.int64)\r\nData_g02_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_gnrb = np.append(Data_g02_gnrb,[G02[keygnrb][node,:]], axis=0)\r\n\r\nData_g02=Data_g02[np.argsort(Data_g02[:,0])]\r\n#--------------------------------------------------------------------------\r\n# File for gain parameter 03\r\n#--------------------------------------------------------------------------\r\nfile_g03 = open('surgery_p9.rsn', 'r')\r\ngain03 = file_g03.readlines()\r\ng03 = pd.Series(gain03)\r\ng03 = g03.replace(r'\\n','', regex=True)\r\ng03 = g03.replace(r'\\r\\n','', regex=True)\r\ng03 = g03.replace(r'\\r','', regex=True)\r\nindex_Time_g03 = g03[g03.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g03 = index_Time_g03.index.values\r\n#--------------------------------------------------------------------------\r\nG03 = {}\r\nfor idx in index_Time_g03.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g03[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G03[g03[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g03 = np.array([], dtype=np.int64)\r\nData_g03.shape = (-1, 7)\r\nfor key in sorted(G03.keys()):\r\n Data_g03 = np.append(Data_g03,[G03[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g03_gnra = np.array([], dtype=np.int64)\r\nData_g03_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_gnra = np.append(Data_g03_gnra,[G03[keygnra][node,:]], axis=0)\r\n#mesh for this particular key STENT\r\nData_g03_stent = np.array([], dtype=np.int64)\r\nData_g03_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_stent = np.append(Data_g03_stent,[G03[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g03_gnrb = np.array([], dtype=np.int64)\r\nData_g03_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_gnrb = np.append(Data_g03_gnrb,[G03[keygnrb][node,:]], axis=0)\r\n\r\nData_g03=Data_g03[np.argsort(Data_g03[:,0])]\r\n#--------------------------------------------------------------------------\r\n\r\nfig = plt.figure()\r\nplt.rcParams.update({'font.size': 5})\r\nplt.rc('text', usetex=False)\r\n\r\nplt.subplot(4,3,1)\r\nplt.plot(Data_g01[:,0],Data_g01[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'a',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n 
ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,180,0,150])\r\n\r\nplt.subplot(4,3,2)\r\nplt.plot(Data_g01[:,0],Data_g01[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'b',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.legend(loc='center right')\r\nplt.axis([0,180,0,350])\r\n\r\nplt.subplot(4,3,3)\r\nplt.plot(Data_g01[:,0],Data_g01[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'c',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,180,10,13])\r\n\r\nplt.subplot(4,3,4)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'd',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,5)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'e',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n 
ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,6)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'f',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nplt.subplot(4,3,7)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'g',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,8)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'h',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,9)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'i',{'color': 'k', 'fontsize': 
6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nplt.subplot(4,3,10)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'j',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,11)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'k',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,12)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'l',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nfig.tight_layout()\r\nplt.show\r\n\r\nFIGURENAME = 'penalty.eps'\r\nplt.savefig(FIGURENAME)\r\nplt.savefig(fname=FIGURENAME,\r\n dpi=None,\r\n facecolor='w',\r\n edgecolor='w',\r\n orientation='portrait',\r\n format=None,\r\n transparent=False,\r\n bbox_inches=None,\r\n pad_inches=0.1,\r\n frameon=None,\r\n metadata=None)\r\n\r\nplt.close('all')\r\n\"\"\"\r\n#--------------------------------------------------------------------------\r\n\r\nradii = (Data_g02[-1,3]*1000.0, Data_g01[-1,3]*1000.0, Data_g03[-1,3]*1000.0)\r\n\r\nfig, ax = plt.subplots()\r\n\r\nindex = np.arange(3)\r\nbar_width = 0.45\r\n\r\nopacity = 0.4\r\nerror_config = {'ecolor': '0.3'}\r\n\r\nrects1 = 
ax.bar(index, radii, bar_width,\r\n alpha=opacity, color='b',\r\n error_kw=error_config, label='Penalty')\r\n\r\nax.set_xlabel('Penalty')\r\nax.set_ylabel('Radius [mm]')\r\nax.set_xticks(index + bar_width / 2)\r\nax.set_xticklabels(('1e5', '1e7', '1e9'))\r\nplt.axis([-0.25,2.7,0,20])\r\n\r\nfig.tight_layout()\r\nplt.show\r\n\r\nFIGURENAME = 'sensitivity_penalty.eps'\r\nplt.savefig(FIGURENAME)\r\nplt.savefig(fname=FIGURENAME,\r\n dpi=None,\r\n facecolor='w',\r\n edgecolor='w',\r\n orientation='portrait',\r\n format=None,\r\n transparent=False,\r\n bbox_inches=None,\r\n pad_inches=0.1,\r\n frameon=None,\r\n metadata=None)\r\n\r\nplt.close('all')\r\n\"\"\"\r\n#--------------------------------------------------------------------------\r\n"
] | [
[
"pandas.Series",
"matplotlib.pyplot.legend",
"numpy.append",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.plot"
]
] |
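The penalty.py script recorded above references `plt.show` without calling it (a bare attribute access draws nothing), saves each figure twice, and passes `frameon=None` to `plt.savefig`, a keyword that recent Matplotlib releases no longer accept. A minimal sketch of the save-and-show tail, assuming the subplot grid has already been drawn; the one-panel figure here is only a stand-in:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()      # stand-in for the 4x3 grid built in the script
ax.plot([0, 1], [0, 1])

fig.tight_layout()
fig.savefig('penalty.eps')    # a single call is enough; drop the removed `frameon=` keyword
plt.show()                    # note the parentheses: `plt.show` alone is a no-op expression
plt.close('all')
```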
NRuf77/proset | [
"101d491e05c2423faddca31029232982f46d8831"
] | [
"scripts/wine/wine_explain.py"
] | [
"\"\"\"Explain proset classifier trained on wine classification data.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nfrom copy import deepcopy\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport shap\r\n\r\nimport proset.utility as utility\r\n\r\n\r\nprint(\"* Apply user settings\")\r\ninput_path = \"scripts/results\"\r\noutput_path = \"scripts/reports\"\r\ninput_files = [\r\n \"wine_2d_05_model.gz\",\r\n \"wine_2d_50_model.gz\",\r\n \"wine_2d_95_model.gz\",\r\n \"wine_1d_model.gz\",\r\n \"wine_fix_model.gz\",\r\n \"wine_fix_opt_model.gz\"\r\n]\r\nprint(\" Select input file:\")\r\nfor i, file_name in enumerate(input_files):\r\n print(\" {} - {}\".format(i, file_name))\r\nchoice = int(input())\r\ninput_file = input_files[choice]\r\nexport_file = input_file.replace(\".gz\", \"_explain.xlsx\")\r\nmodel_name = input_file.replace(\".gz\", \"\")\r\n\r\nprint(\"* Load model fit results\")\r\nwith gzip.open(os.path.join(input_path, input_file), mode=\"rb\") as file:\r\n result = pickle.load(file)\r\n\r\nprint(\"* Determine reference point\")\r\nscale = np.sqrt(result[\"model\"][\"transform\"].var_)\r\noffset = result[\"model\"][\"transform\"].mean_\r\ntrain_features = result[\"model\"][\"transform\"].transform(result[\"data\"][\"X_train\"])\r\ntrain_labels = result[\"data\"][\"y_train\"]\r\nreference = utility.choose_reference_point(\r\n features=train_features,\r\n model=result[\"model\"][\"model\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nutility.print_point_report(\r\n reference=reference,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n target_names=result[\"model\"].classes_\r\n)\r\n\r\nprint(\"* Show global results\")\r\ntest_features = result[\"model\"][\"transform\"].transform(result[\"data\"][\"X_test\"])\r\ntest_labels = result[\"data\"][\"y_test\"]\r\nprediction, familiarity = result[\"model\"][\"model\"].predict(X=test_features, compute_familiarity=True)\r\nmisclassified = prediction != test_labels\r\nplotter = utility.ClassifierPlots(\r\n model=result[\"model\"][\"model\"],\r\n model_name=model_name,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nx_range, y_range = plotter.plot_batch_map(\r\n batch=1,\r\n features=test_features,\r\n target=test_labels,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n reference=reference[\"features_raw\"]\r\n)\r\nplotter.plot_features(\r\n batch=1,\r\n features=test_features,\r\n target=test_labels,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n reference=reference[\"features_raw\"],\r\n show_index=False\r\n)\r\n\r\nprint(\"* Compute global SHAP values\")\r\nshrunk_model = deepcopy(result[\"model\"][\"model\"])\r\nshrunk_model.shrink()\r\nactive_features = reference[\"active_features\"]\r\nactive_feature_names = result[\"data\"][\"feature_names\"][active_features]\r\nexplainer = shap.Explainer(\r\n model=shrunk_model.predict_proba,\r\n masker=reference[\"features_raw\"][0:1, active_features],\r\n feature_names=active_feature_names\r\n)\r\nshap_values = explainer(test_features[:, active_features])\r\nfor i, label in enumerate(result[\"model\"].classes_):\r\n plt.figure()\r\n shap.plots.bar(shap_values[:, :, i])\r\n plt.title(\"Average SHAP values for class {} prediction\".format(label))\r\n\r\nprint(\"* Find single point with worst classification 
result\")\r\nproba = result[\"model\"][\"model\"].predict_proba(test_features)\r\ntruth_int = result[\"model\"][\"model\"].label_encoder_.transform(test_labels)\r\nworst_ix = np.argmin(proba[np.arange(test_labels.shape[0]), truth_int])\r\nworst_features = test_features[worst_ix:(worst_ix + 1), :]\r\nworst_label = test_labels[worst_ix]\r\nworst_label_int = truth_int[worst_ix]\r\nworst_point = {\r\n \"index\": worst_ix,\r\n \"features_raw\": worst_features,\r\n \"features_processed\": worst_features[:, active_features] * scale[active_features] + offset[active_features],\r\n \"prediction\": proba[worst_ix, :],\r\n \"num_features\": test_features.shape[1],\r\n \"active_features\": active_features\r\n} # use active_features here to ensure same order of content as reference\r\nprint(\" True class = '{}'\".format(test_labels[worst_ix]))\r\nutility.print_point_report(\r\n reference=worst_point,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n target_names=result[\"model\"].classes_\r\n)\r\n\r\nprint(\"* Generate explanation report\")\r\nexplain = result[\"model\"][\"model\"].explain(\r\n X=worst_point[\"features_raw\"],\r\n y=worst_label,\r\n familiarity=familiarity,\r\n sample_name=\"test sample {}\".format(worst_ix),\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nutility.write_report(file_path=os.path.join(output_path, export_file), report=explain)\r\n\r\nprint(\"* Show results for single point\")\r\nplotter.plot_batch_map(\r\n batch=1,\r\n features=train_features,\r\n target=train_labels,\r\n comment=\"training samples\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n x_range=x_range,\r\n y_range=y_range\r\n)\r\nplotter.plot_batch_map(\r\n batch=1,\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n x_range=x_range,\r\n y_range=y_range\r\n)\r\nplotter.plot_features(\r\n batch=1,\r\n features=train_features,\r\n target=train_labels,\r\n comment=\"training samples\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n show_index=False\r\n)\r\n\r\nprint(\"* Compute SHAP values for single point\")\r\nfor i in range(proba.shape[1]):\r\n explain = shap_values[worst_ix, :, i]\r\n shap.plots.force(\r\n base_value=explain.base_values,\r\n shap_values=explain.values,\r\n features=test_features[worst_ix:(worst_ix + 1), active_features],\r\n feature_names=active_feature_names,\r\n matplotlib=True\r\n )\r\n plt.gca().set_position([0.1, -0.25, 0.8, 0.8]) # force plot messes up the axes position within the figure\r\n plt.suptitle(\"SHAP force plot: probability for class '{}' is {:0.2f}, true class is '{}'\".format(\r\n result[\"model\"].classes_[i], proba[worst_ix, i], worst_label\r\n ))\r\n\r\nprint(\"* Show cross-sections of decision surface\")\r\nimportance = np.mean(np.abs(shap_values[:, :, worst_label_int].values), axis=0)\r\ntop_two = active_features[np.argsort(importance)[-1:-3:-1]]\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=None, # suppress sample plot, features only used to determine plot ranges\r\n baseline=worst_point[\"features_raw\"],\r\n plot_index=top_two,\r\n comment=\"globally most important features\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n familiarity=familiarity,\r\n 
quantiles=(0.01, 0.05),\r\n use_proba=True\r\n)\r\nimportance = np.abs(shap_values[worst_ix, :, worst_label_int].values)\r\ntop_two = active_features[np.argsort(importance)[-1:-3:-1]]\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=None, # suppress sample plot, features only used to determine plot ranges\r\n baseline=worst_point[\"features_raw\"],\r\n plot_index=top_two,\r\n comment=\"most important features for single point\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n familiarity=familiarity,\r\n quantiles=(0.01, 0.05),\r\n use_proba=True\r\n)\r\n\r\nprint(\"* Done\")\r\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"numpy.abs",
"numpy.argsort",
"numpy.arange",
"numpy.sqrt"
]
] |
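One reusable pattern in the wine_explain.py row above is picking the test sample the classifier handles worst: index each row of the `predict_proba` output at the integer-encoded true label and take the minimum. A tiny self-contained illustration with hypothetical numbers:

```python
import numpy as np

proba = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1],
                  [0.3, 0.3, 0.4]])      # hypothetical predict_proba output
truth_int = np.array([0, 2, 1])          # integer-encoded true labels

# probability assigned to the true class of each sample: [0.7, 0.1, 0.3]
per_sample_true_proba = proba[np.arange(len(truth_int)), truth_int]
worst_ix = np.argmin(per_sample_true_proba)   # index 1: its true class 2 only got 0.1
```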
joelnmdyer/SignatuRE | [
"085a9d727e504bd25bbebdebaa58867211a52c8d"
] | [
"signature/train_and_sample.py"
] | [
"import argparse\nimport logging\nimport numpy as np\nimport os\nimport sbi.utils as utils\nfrom sbi.inference.base import infer\nfrom sbi import analysis as analysis\nfrom sbi.inference import SMCABC, SNRE_A, simulate_for_sbi, prepare_for_sbi\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels.api as sm\nimport time\nimport torch\n\n# Custom scripts/modules/packages\nfrom signature.inference import kernel_methods\nfrom signature.utils import networks\nfrom signature.utils import io, sampling\n\n\ndef train_clf(task, method, L, K=2, n_components_raw=100, seed=0):\n\n\t\"\"\"\n\tTrains a binary classifier with method <method> to distinguish between\n\tsamples (x, theta) from the joint distribution p(x, theta) and from the\n\tproduct of the marginals p(x)p(theta) associated with <task>.\n\n\tInput:\n\t- task:\t\t\tstr, name of model to run inference on, must be recognised\n\t\t\t\t\tby function get_task above.\n\t- method:\t\tstr, name of classifier to use, either \"signature\" or\n\t\t\t\t\t\"gru-resnet\"\n\t- L:\t\t\tint, number of training examples (simulations) to generate\n\t- K:\t\t\tint, number of contrasting examples. Only used when\n\t\t\t\t\tmethod == \"signature\"\n\t- seed:\t\t\tint, seed for random number generator\n\t\"\"\"\n\n\tprior, sbi_prior, obs, simulator = io.get_task(task)\n\n\tif method in [\"signature\", \"k2\"]:\n\n\t\tclf, x0, _, inn_prods, theta_kern = kernel_methods.train_kernel_classifier(prior,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t simulator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t obs,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t L,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t K,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t n_components_raw,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t task,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t method)\n\n\telif method[:10] == \"gru-resnet\":\n\t\n\t\tIDIM = 1\n\t\tdef sbi_simulator(x):\n\t\t\treturn simulator(x)\n\t\tif task == \"GSE\":\n\t\t\tobs = obs[:, :-1]\n\t\t\tIDIM = 2\n\t\t\t# Remove time indices from GSE output\n\t\t\tdef sbi_simulator(x):\n\t\t\t\treturn simulator(x)[:,:-1]\n\t\tODIM = 3\n\t\tif method != \"gru-resnet\":\n\t\t\tODIM = eval(method[10:])\n\t\tsimulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)\n\n\t\t# Instantiate the neural density ratio estimator\n\t\tembedding_net = networks.GRU(input_dim=IDIM, hidden_dim=32, num_layers=2,\n\t\t\t\t\t\t\toutput_dim=ODIM)\n\t\tn_pars_embedding = sum(p.numel() for p in embedding_net.parameters() if p.requires_grad)\n\t\tlogging.info(\"Embedding net has {0} parameters\".format(n_pars_embedding))\n\t\tclassifier = utils.get_nn_models.classifier_nn('resnet',\n\t\t\t\t\t\t\t\t\t\t\t\t\t embedding_net_x=embedding_net)\n\n\t\t# Setup the inference procedure with the SNRE-A procedure\n\t\tinference = SNRE_A(prior=_prior, classifier=classifier)\n\n\t\t# Run the inference procedure on one round and L simulated data points\n\t\ttheta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)\n\t\tif task not in [\"GSE\"]:\n\t\t\tx = x.unsqueeze(-1)\n\t\telif task == \"GSE\":\n\t\t\t# Print this out to see that it gives you everything in the right place\n\t\t\tx = x.reshape(L, -1, 2)\n\t\tdensity_estimator = inference.append_simulations(theta, x).train()\n\t\tposterior = inference.build_posterior(density_estimator)\n\t\tposterior.set_default_x(obs.reshape(1,-1,IDIM))\n\n\t\tclf = posterior\n\t\tinn_prods = None\n\t\ttheta_kern = None\n\t\tx0 = obs\n\t\tprior = _prior\n\n\telif method in [\"hc\", \"smcabc\"]:\n\n\t\tdef 
slope_intercept(data):\n\t\t\treg = LinearRegression().fit(data[:-1].reshape(-1,1), data[1:].reshape(-1,1))\n\t\t\tslope = reg.coef_\n\t\t\tintercept = reg.intercept_\n\t\t\treturn slope, intercept\n\n\t\tif task == \"OU\":\t\t\t\n\t\t\tdef summarise(data):\t\n\t\t\t\tslope, intercept = slope_intercept(data)\n\t\t\t\tsummary = np.array([np.mean(data), slope[0,0], intercept[0]])\n\t\t\t\treturn summary\n\n\t\telif task == \"MA2\":\n\t\t\tdef summarise(data):\n\t\t\t\tvar = np.var(data)\n\t\t\t\trhos = sm.tsa.acf(data, nlags=2)[1:]\n\t\t\t\treturn np.array([var, rhos[0], rhos[1]])\n\t\n\t\telif task == \"GSE\":\n\t\t\tdef summarise(data):\n\t\t\t\tdata = data[:, :-1]\n\t\t\t\tN = data.shape[0]\n\t\t\t\tx, y = data[:,0], data[:,1]\n\t\t\t\txmean = np.mean(x)\n\t\t\t\tymean = np.mean(y)\n\t\t\t\txvar = np.var(x, ddof=1)\n\t\t\t\tyvar = np.var(y, ddof=1)\n\t\t\t\tif xvar == 0.:\n\t\t\t\t\txvar = 1e-30\n\t\t\t\tif yvar == 0.:\n\t\t\t\t\tyvar = 1e-30\n\t\t\t\tx, y = (x - xmean)/np.sqrt(xvar), (y - ymean)/np.sqrt(yvar)\n\t\t\t\tacx, acy = [], []\n\t\t\t\tfor lag in [1,2]:\n\t\t\t\t\tacx.append(np.dot(x[:-lag], x[lag:]) / (N - 1))\n\t\t\t\t\tacy.append(np.dot(y[:-lag], y[lag:]) / (N - 1))\n\t\t\t\tccxy = np.dot(x, y)/(N-1)\n\t\t\t\tsummary = np.array([xmean, ymean, np.log(xvar + 1), np.log(yvar+1), ccxy] + acx + acy)\n\t\t\t\treturn summary\n\n\t\tdef sbi_simulator(x):\n\t\t\tdata = simulator(x)\n\t\t\treturn summarise(data)\n\n\n\t\tif method == \"hc\":\n\n\t\t\tx0 = summarise(obs)\n\t\t\tsimulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)\n\t\t\t# Instantiate the neural density ratio estimator\n\t\t\tclassifier = utils.get_nn_models.classifier_nn('resnet')\n\n\t\t\t# Setup the inference procedure with the SNRE-A procedure\n\t\t\tinference = SNRE_A(prior=_prior, classifier=classifier)\n\n\t\t\t# Run the inference procedure on one round and L simulated data points\n\t\t\ttheta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)\n\t\t\tdensity_estimator = inference.append_simulations(theta, x).train()\n\t\t\tposterior = inference.build_posterior(density_estimator)\n\t\t\tposterior.set_default_x(x0)\n\n\t\t\tclf = posterior\n\n\t\telif method == \"smcabc\":\n\n\t\t\tdef _simulator(theta):\n\t\t\t\treturn simulator(theta)[:, :-1].reshape(-1)\n\n\t\t\tprint(_simulator(prior.sample()))\n\n\t\t\tsimulator_wrapper, _prior = prepare_for_sbi(_simulator, sbi_prior)\n\t\t\tinference = SMCABC(simulator_wrapper, _prior, num_workers=20)\n\t\t\tclf = inference\n\t\t\tx0 = obs[:, :-1].reshape(-1)\n\n\t\tprint(x0)\n\t\tinn_prods = None\n\t\ttheta_kern = None\n\t\tprior = _prior\n\n\treturn clf, x0, prior, inn_prods, theta_kern\n\n\ndef sample(method, clf, x0, start, sampling_method, n_samples=[50_000, 100_000], prior=None,\n\t\t inn_prods=None, theta_kern=None):\n\n\t\"\"\"\n\tUses a density ratio estimator clf to sample from the posterior for x0\n\tand prior.\n\n\tInputs:\n\t- method:\t\tstr, either \"signature\" or \"gru-resnet\" depending on which\n\t\t\t\t\tclassifier is being used\n\t- clf:\t\t\tthe density ratio estimator\n\t- x0:\t\t\tthe preprocessed observation\n\t- start:\t\tnp.array consisting of the start point for MCMC. Recommend\n\t\t\t\t\tusing true parameter value that generated x0 for this\n\t- n_samples:\tlist of length 2 consisting of ints > 0. 
Trial run of MCMC\n\t\t\t\t\tuses n_samples[0] steps to estimate covariance matrix of\n\t\t\t\t\tGaussian proposal density; proper run uses n_samples[1]\n\t- prior:\t\tprior distribution, only used if method == \"signature\",\n\t\t\t\t\totherwise ignored. Default None\n\t\"\"\"\n\n\tif method in [\"signature\", \"k2\"]:\n\n\t\tif prior is None:\n\t\t\traise ValueError(\"Must provide prior for kernel classifier\")\n\n\t\tdef create_log_ratio_estimator(clf, x):\n\t\t\t\"Create a ratio estimator from the signature-based classifier\"\n\t\t\tX_test = inn_prods(x)\n\t\t\tclf.set_xkern(X_test.reshape(-1,1))\n\n\t\t\tlr = clf.lr\n\t\t\tcoefficients = lr.coef_.T\n\t\t\tintercept = lr.intercept_\n\t\t\tvector = (clf._mapping).dot(coefficients)\n\n\t\t\tdef log_ratio_estimator(theta):\n\t\t\t\tT_test = theta_kern(theta)\n\t\t\t\treturn T_test.dot(vector) + intercept\n\t\t\t\n\t\t\treturn log_ratio_estimator\n\n\t\tcustom_log_ratio_estimator = create_log_ratio_estimator(clf, x0)\n\t\tcustom_ratio_estimator = lambda theta: np.exp(custom_log_ratio_estimator(theta))\n\n\t\tdef kernel_posterior(theta):\n\t\t\t\"\"\"\n\t\t\tFunction to evaluate estimation of posterior density for\n\t\t\tkernel-based classifier.\n\t\t\t\"\"\"\n\t\t\tprior_logpdf = prior.log_prob(theta)\n\t\t\tif prior_logpdf == -float(\"inf\"):\n\t\t\t\treturn prior_logpdf\n\t\t\telse:\n\t\t\t\tlog_weight = custom_log_ratio_estimator(theta)\n\t\t\t\treturn log_weight + prior_logpdf\n\n\t\tlog_post_prob = kernel_posterior\n\n\telif (method[:10] == \"gru-resnet\") or (method == \"hc\"):\n\n\t\tdef log_post_prob(th):\n\t\t\t# Convert th to torch.tensor\n\t\t\tth = torch.as_tensor(th).float()\n\t\t\treturn clf.log_prob(th)\n\n\t\t# For sampling importance resampling\n\t\tcustom_ratio_estimator = lambda th: float(torch.exp(clf.log_prob(th) - prior.log_prob(th)))\n\n\telif method == \"smcabc\":\n\n\t\tsamples = clf(x0, 1_000, 1_000, int(1e7), 0.8)\n\t\treturn samples\n\n\tif sampling_method == \"mh\":\n\t\t# Pilot run to estimate covariance matrix of Gaussian proposal density\n\t\tsamples = sampling.mh(log_post_prob, len(start), start, method,\n\t\t\t\t\t\t\t n_samples=n_samples[0])\n\t\tcov = np.cov(samples.T)\n\t\t# Proper run\n\t\tsamples = sampling.mh(log_post_prob, len(start), start, method,\n\t\t\t\t\t\t\t n_samples=n_samples[1], cov=cov)\n\t\tsamples = samples[::100]\n\telif sampling_method == \"sir\":\n\t\t# SIR\n\t\tsamples = sampling.sir(prior, custom_ratio_estimator, 50_000,\n\t\t\t\t\t\t\t 1_000)\n\n\treturn samples\n\n\ndef train_inference(task, method, start, L, fname, K=2, sampling_method=\"mh\",\n\t\t\t\t\tn_samples=[50_000, 100_000], seed=0, n_components_raw=100, start_time=0):\n\n\tprint(\"Training classifier...\")\n\tclf, x0, prior, s_kernel, t_kernel = train_clf(task, method, L, K=K,\n\t\t\t\t\t\t\t\t\t\t\t\t n_components_raw=n_components_raw, seed=seed)\n\tlogging.info(\"Training CPU time = {0}\".format(time.process_time() - start_time))\n\tprint(\"Sampling from posterior...\")\n\tsamples = sample(method, clf, x0, start, sampling_method, n_samples=n_samples, prior=prior,\n\t\t\t\t\t inn_prods=s_kernel, theta_kern=t_kernel)\n\tprint(\"Saving samples...\")\n\tnp.savetxt(fname, samples)\n\tprint(\"Done.\")\n\n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser(description='Ratio estimation')\n\tparser.add_argument('--task', type=str,\n\t\t\t\t\t\thelp='Name of task (simulator) to experiment with.')\n\tparser.add_argument('--method', type=str,\n\t\t\t\t\t\thelp='Name of classification pipelines to 
use.')\n\tparser.add_argument('--L', type=int, nargs='+',\n\t\t\t\t\t\thelp='Number of training simulations to use.')\n\tparser.add_argument('--K', type=int, default=1,\n\t\t\t\t\t\thelp='Number of contrasting examples per simulation.')\n\tparser.add_argument('--s', type=str, default='mh',\n\t\t\t\t\t\thelp=\"Sampling method in ['mh', 'sir'].\")\n\tparser.add_argument('--n', type=int, default=100,\n\t\t\t\t\t\thelp=\"Number of components retained in Nystrom DIVIDED BY (K+1).\")\n\tparser.add_argument('--seed', type=int, nargs='+', help='Seeds for RNG.')\n\targs = parser.parse_args()\n\n\tif args.method == \"sre\":\n\t\tmethod = \"signature\"\n\telse:\n\t\tmethod = args.method\n\n\tif args.task == \"OU\":\n\t\tstart = np.array([0.5, 1.])\n\telif args.task == \"MA2\":\n\t\tstart = np.array([0.6, 0.2])\n\telif args.task == \"GSE\":\n\t\tstart = np.array([1e-2, 1e-1])\n\n\tfor L in args.L:\n\n\t\tfor seed in args.seed:\n\n\t\t\t# Setup for saving output\n\t\t\tdirectory = \"./{0}/{1}/\".format(args.task, seed)\n\t\t\tif not os.path.exists(directory):\n\t\t\t\tos.makedirs(directory)\n\t\t\tif method in [\"signature\", \"k2\"]:\n\t\t\t\tfname = os.path.join(directory, \"{0}_{1}_{2}_{3}_samples.txt\".format(method, L, args.K, args.n))\n\t\t\t\tlogging.basicConfig(filename=os.path.join(directory,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"{0}_{1}_{2}.log\".format(method, L, args.K)),\n\t\t\t\t\t\t\t\t\tfilemode=\"w\", format=\"%(name)s - %(levelname)s - %(message)s\",\n\t\t\t\t\t\t\t\t\tlevel=logging.INFO)\n\t\t\telse:\t\n\t\t\t\tfname = os.path.join(directory, \"{0}_{1}_samples.txt\".format(method, L))\n\t\t\t\tlogging.basicConfig(filename=os.path.join(directory,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"{0}_{1}.log\".format(method, L)),\n\t\t\t\t\t\t\t\t\tfilemode=\"w\", format=\"%(name)s - %(levelname)s - %(message)s\",\n\t\t\t\t\t\t\t\t\tlevel=logging.INFO)\n\t\t\tlogging.info(args)\n\t\t\tlogging.info(\"Seed = {0}\".format(seed))\n\n\t\t\t# Run script\n\t\t\tstart_time = time.process_time()\n\t\t\ttrain_inference(args.task, method, start, L, fname, sampling_method=args.s,\n\t\t\t\t\t\t\tK=args.K, seed=seed, n_components_raw=args.n, start_time=start_time)\n\t\t\tlogging.info(\"Total CPU time = {0}\".format(time.process_time() - start_time))\n"
] | [
[
"numpy.sqrt",
"numpy.mean",
"torch.as_tensor",
"numpy.savetxt",
"numpy.var",
"sklearn.linear_model.LinearRegression",
"numpy.log",
"numpy.array",
"numpy.dot",
"numpy.cov"
]
] |
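The `sample` helper in the train_and_sample.py row above tunes its Metropolis-Hastings proposal in two stages: a short pilot chain, a covariance estimate via `np.cov(samples.T)`, then a longer run with a Gaussian proposal scaled to that covariance, followed by thinning. The sketch below mirrors that two-stage pattern with a deliberately tiny random-walk sampler and a stand-in log-posterior; `mh` and `log_post` are illustrative names, not the repo's `sampling.mh`.

```python
import numpy as np

def mh(log_prob, start, n_samples, cov=None, seed=0):
    """Tiny random-walk Metropolis-Hastings sampler (illustrative only)."""
    rng = np.random.default_rng(seed)
    d = len(start)
    if cov is None:
        cov = np.eye(d) * 0.1            # default proposal for the pilot run
    chain = np.empty((n_samples, d))
    current = np.asarray(start, dtype=float)
    current_lp = log_prob(current)
    for i in range(n_samples):
        proposal = rng.multivariate_normal(current, cov)
        proposal_lp = log_prob(proposal)
        if np.log(rng.uniform()) < proposal_lp - current_lp:
            current, current_lp = proposal, proposal_lp
        chain[i] = current
    return chain

log_post = lambda th: -0.5 * np.sum(th ** 2)        # hypothetical stand-in posterior
pilot = mh(log_post, start=np.zeros(2), n_samples=5_000)
tuned_cov = np.cov(pilot.T)                         # parameters as rows after transpose
samples = mh(log_post, start=np.zeros(2), n_samples=20_000, cov=tuned_cov)[::100]
```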
tlunet/pySDC | [
"209e0015a46f861e3658691b7f8724cb1b36c97e"
] | [
"pySDC/playgrounds/fft/AllenCahn_contracting_circle_FFT.py"
] | [
"import os\n\nimport dill\nimport matplotlib.ticker as ticker\nimport numpy as np\n\nimport pySDC.helpers.plot_helper as plt_helper\nfrom pySDC.helpers.stats_helper import filter_stats, sort_stats\nfrom pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right\nfrom pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI\nfrom pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex, allencahn2d_imex_stab\nfrom pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order\nfrom pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d\nfrom pySDC.projects.TOMS.AllenCahn_monitor import monitor\n\n\n# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf\n\n\ndef setup_parameters():\n \"\"\"\n Helper routine to fill in all relevant parameters\n\n Note that this file will be used for all versions of SDC, containing more than necessary for each individual run\n\n Returns:\n description (dict)\n controller_params (dict)\n \"\"\"\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1E-08\n level_params['dt'] = 1E-03\n level_params['nsweeps'] = [3, 1]\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['collocation_class'] = CollGaussRadau_Right\n sweeper_params['num_nodes'] = [3]\n sweeper_params['QI'] = ['LU']\n sweeper_params['QE'] = ['EE']\n sweeper_params['initial_guess'] = 'zero'\n\n # This comes as read-in for the problem class\n problem_params = dict()\n problem_params['nu'] = 2\n problem_params['L'] = 1.0\n problem_params['nvars'] = [(256, 256), (64, 64)]\n problem_params['eps'] = [0.04, 0.16]\n problem_params['radius'] = 0.25\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 20\n controller_params['hook_class'] = monitor\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = None # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = None # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh_fft2d\n\n return description, controller_params\n\n\ndef run_SDC_variant(variant=None):\n \"\"\"\n Routine to run particular SDC variant\n\n Args:\n variant (str): string describing the variant\n\n Returns:\n timing (float)\n niter (float)\n \"\"\"\n\n # load (incomplete) default parameters\n description, controller_params = setup_parameters()\n\n # add stuff based on variant\n if variant == 'semi-implicit':\n description['problem_class'] = allencahn2d_imex\n description['sweeper_class'] = imex_1st_order\n elif variant == 'semi-implicit-stab':\n description['problem_class'] = allencahn2d_imex_stab\n description['sweeper_class'] = imex_1st_order\n else:\n raise NotImplemented('Wrong variant specified, got %s' % variant)\n\n # setup parameters \"in time\"\n t0 = 0\n Tend = 0.032\n\n # instantiate controller\n controller = controller_nonMPI(num_procs=8, controller_params=controller_params, description=description)\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # 
plt_helper.plt.imshow(uinit)\n # plt_helper.plt.show()\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # plt_helper.plt.imshow(uend)\n # plt_helper.plt.show()\n\n # filter statistics by variant (number of iterations)\n filtered_stats = filter_stats(stats, type='niter')\n\n # convert filtered statistics to list of iterations count, sorted by process\n iter_counts = sort_stats(filtered_stats, sortby='time')\n\n # compute and print statistics\n niters = np.array([item[1] for item in iter_counts])\n out = ' Mean number of iterations: %4.2f' % np.mean(niters)\n print(out)\n out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)\n print(out)\n out = ' Position of max/min number of iterations: %2i -- %2i' % \\\n (int(np.argmax(niters)), int(np.argmin(niters)))\n print(out)\n out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))\n print(out)\n\n timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')\n\n print('Time to solution: %6.4f sec.' % timing[0][1])\n print()\n\n return stats\n\n\ndef show_results(fname, cwd=''):\n \"\"\"\n Plotting routine\n\n Args:\n fname (str): file name to read in and name plots\n cwd (str): current working directory\n \"\"\"\n\n file = open(cwd + fname + '.pkl', 'rb')\n results = dill.load(file)\n file.close()\n\n # plt_helper.mpl.style.use('classic')\n plt_helper.setup_mpl()\n\n # set up plot for timings\n fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)\n\n timings = {}\n niters = {}\n for key, item in results.items():\n timings[key] = sort_stats(filter_stats(item, type='timing_run'), sortby='time')[0][1]\n iter_counts = sort_stats(filter_stats(item, type='niter'), sortby='time')\n niters[key] = np.mean(np.array([item[1] for item in iter_counts]))\n\n xcoords = [i for i in range(len(timings))]\n sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])\n sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]\n heights_timings = [item[1] for item in sorted_timings]\n heights_niters = [item[1] for item in sorted_niters]\n keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\\n').replace('_v2', ' mod.') for item in sorted_timings]\n\n ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')\n ax1.set_ylabel('time (sec)')\n\n ax2 = ax1.twinx()\n ax2.bar(xcoords, heights_niters, color='r', align='edge', width=0.3, label='iterations (right axis)')\n ax2.set_ylabel('mean number of iterations')\n\n ax1.set_xticks(xcoords)\n ax1.set_xticklabels(keys, rotation=90, ha='center')\n\n # ask matplotlib for the plotted objects and their labels\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=0)\n\n # save plot, beautify\n f = fname + '_timings'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n # set up plot for radii\n fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)\n\n exact_radii = []\n for key, item in results.items():\n computed_radii = sort_stats(filter_stats(item, type='computed_radius'), sortby='time')\n\n xcoords = [item0[0] for item0 in computed_radii]\n radii = [item0[1] for 
item0 in computed_radii]\n if key[0] + ' ' + key[1] == 'semi-implicit-stab exact':\n ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))\n\n exact_radii = sort_stats(filter_stats(item, type='exact_radius'), sortby='time')\n\n # diff = np.array([abs(item0[1] - item1[1]) for item0, item1 in zip(exact_radii, computed_radii)])\n # max_pos = int(np.argmax(diff))\n # assert max(diff) < 0.07, 'ERROR: computed radius is too far away from exact radius, got %s' % max(diff)\n # assert 0.028 < computed_radii[max_pos][0] < 0.03, \\\n # 'ERROR: largest difference is at wrong time, got %s' % computed_radii[max_pos][0]\n\n xcoords = [item[0] for item in exact_radii]\n radii = [item[1] for item in exact_radii]\n ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')\n\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))\n ax.set_ylabel('radius')\n ax.set_xlabel('time')\n ax.grid()\n ax.legend(loc=3)\n\n # save plot, beautify\n f = fname + '_radii'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n # set up plot for interface width\n fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)\n\n interface_width = []\n for key, item in results.items():\n interface_width = sort_stats(filter_stats(item, type='interface_width'), sortby='time')\n xcoords = [item[0] for item in interface_width]\n width = [item[1] for item in interface_width]\n if key[0] + ' ' + key[1] == 'fully-implicit exact':\n ax.plot(xcoords, width, label=key[0] + ' ' + key[1])\n\n xcoords = [item[0] for item in interface_width]\n init_width = [interface_width[0][1]] * len(xcoords)\n ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')\n\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))\n ax.set_ylabel(r'interface width ($\\epsilon$)')\n ax.set_xlabel('time')\n ax.grid()\n ax.legend(loc=3)\n\n # save plot, beautify\n f = fname + '_interface'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n return None\n\n\ndef main(cwd=''):\n \"\"\"\n Main driver\n\n Args:\n cwd (str): current working directory (need this for testing)\n \"\"\"\n\n # Loop over variants, exact and inexact solves\n results = {}\n for variant in ['semi-implicit-stab']:\n\n results[(variant, 'exact')] = run_SDC_variant(variant=variant)\n\n # dump result\n fname = 'data/results_SDC_variants_AllenCahn_1E-03'\n file = open(cwd + fname + '.pkl', 'wb')\n dill.dump(results, file)\n file.close()\n assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'\n\n # visualize\n show_results(fname, cwd=cwd)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.var",
"numpy.argmin",
"matplotlib.ticker.FormatStrFormatter",
"numpy.argmax",
"numpy.ptp",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
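The statistics block in the file above reduces the per-step iteration counts with plain NumPy reductions (mean, ptp, argmax/argmin, std, var). A minimal, self-contained sketch of that pattern; `iter_counts` stands in for the `(time, niter)` pairs returned by `sort_stats`, with illustrative values:

```python
import numpy as np

# (time, niter) pairs as produced by the stats filtering above (illustrative values)
iter_counts = [(0.0, 4), (0.001, 5), (0.002, 6), (0.003, 5)]

niters = np.array([item[1] for item in iter_counts])
print('  Mean number of iterations: %4.2f' % np.mean(niters))
print('  Range of values for number of iterations: %2i' % np.ptp(niters))
print('  Position of max/min number of iterations: %2i -- %2i'
      % (int(np.argmax(niters)), int(np.argmin(niters))))
print('  Std and var for number of iterations: %4.2f -- %4.2f'
      % (float(np.std(niters)), float(np.var(niters))))
```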
Shuai-Xie/LP-DeepSSL | [
"9389c6cb0b83c7ca509ce284c4d86b600ca44a9b"
] | [
"mean_teacher/losses.py"
] | [
"# Copyright (c) 2018, Curious AI Ltd. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\n\"\"\"Custom loss functions\"\"\"\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport pdb\nimport numpy as np\n\ndef softmax_mse_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by num_classes\n Divide by the batch size afterwards if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_softmax = F.softmax(input_logits, dim=1)\n target_softmax = F.softmax(target_logits, dim=1)\n num_classes = input_logits.size()[1]\n return F.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes\n\n\ndef softmax_kl_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns KL divergence\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_log_softmax = F.log_softmax(input_logits, dim=1) # log(q)\n target_softmax = F.softmax(target_logits, dim=1) # p\n return F.kl_div(input_log_softmax, target_softmax, size_average=False)\n\n\ndef symmetric_mse_loss(input1, input2):\n \"\"\"Like F.mse_loss but sends gradients to both directions.\n cuz input1/input2 are tensors with grad, while target in F.mse_loss has no grad.\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to both input1 and input2.\n \"\"\"\n assert input1.size() == input2.size()\n num_classes = input1.size()[1]\n return torch.sum((input1 - input2)**2) / num_classes"
] | [
[
"torch.sum",
"torch.nn.functional.log_softmax",
"torch.nn.functional.mse_loss",
"torch.nn.functional.softmax",
"torch.nn.functional.kl_div"
]
] |
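`losses.py` above builds its consistency terms from `torch.nn.functional`; note that `size_average=False` is deprecated in current PyTorch in favour of `reduction='sum'`. A minimal sketch of the same softmax-MSE and softmax-KL pattern with the modern argument (tensor shapes are illustrative):

```python
import torch
import torch.nn.functional as F

student_logits = torch.randn(8, 10, requires_grad=True)   # (batch, num_classes)
teacher_logits = torch.randn(8, 10)                        # targets carry no gradient

# softmax MSE, summed then scaled by the number of classes (as in softmax_mse_loss)
mse = F.mse_loss(F.softmax(student_logits, dim=1),
                 F.softmax(teacher_logits, dim=1),
                 reduction='sum') / student_logits.size(1)

# softmax KL divergence (as in softmax_kl_loss)
kl = F.kl_div(F.log_softmax(student_logits, dim=1),
              F.softmax(teacher_logits, dim=1),
              reduction='sum')
```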
balrajmarimuthu/CarND-Capstone | [
"bc3e52c5e940e3da51efad219ab89fb3580fb717"
] | [
"ros/src/tl_detector/tl_detector.py"
] | [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nfrom scipy.spatial import KDTree\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.camera_image = None\n self.lights = []\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, x , y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n #TODO implement\n closest_idx = self.waypoint_tree.query([x,y], 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n# if(not self.has_image):\n# self.prev_light_loc = None\n# return False\n\n# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n# #Get classification\n# return self.light_classifier.get_classification(cv_image)\n return light.state\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #light = None\n closest_light = None\n line_wp_idx = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n #car_position = self.get_closest_waypoint(self.pose.pose)\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n \n #TODO find the closest visible traffic light (if one exists)\n diff = len(self.waypoints.waypoints)\n for i, light in enumerate(self.lights):\n #Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n #Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n if d>=0 and d<diff:\n diff = d\n closest_light = light\n line_wp_idx = temp_wp_idx\n \n if closest_light:\n state = self.get_light_state(closest_light)\n return line_wp_idx, state\n \n return -1, TrafficLight.UNKNOWN\n \n\n if light:\n state = self.get_light_state(light)\n return light_wp, state\n self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n"
] | [
[
"scipy.spatial.KDTree"
]
] |
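The only external numerical call in `tl_detector.py` above is the `scipy.spatial.KDTree` nearest-waypoint lookup. A minimal sketch of that query, with made-up 2D waypoints:

```python
from scipy.spatial import KDTree

waypoints_2d = [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [3.0, 1.0]]  # illustrative [x, y] list
tree = KDTree(waypoints_2d)

# query() returns (distance, index); the detector keeps only the index
closest_idx = tree.query([2.1, 0.4], 1)[1]
print(closest_idx)  # -> 2
```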
uunal/adapter-transformers | [
"73a95a75f803e8fd243fc3d55ff3a9d557891377"
] | [
"src/transformers/adapters/models/distilbert.py"
] | [
"from typing import Union\n\nimport torch\nfrom torch import nn\n\nfrom ..composition import AdapterCompositionBlock, parse_composition\nfrom ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin\nfrom .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin\n\n\nclass DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):\n \"\"\"Adds attention adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return self.parent.sa_layer_norm\n\n\nclass DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):\n \"\"\"Adds output adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return self.parent.output_layer_norm\n\n\nclass DistilBertTransfomerBlockAdaptersMixin:\n \"\"\"Adds adapters to the TransformerBlock module of DistilBert.\"\"\"\n\n def _init_adapter_modules(self):\n self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)\n self.output_adapters = DistilBertOutputAdaptersModule(self)\n self.attention_adapters._init_adapter_modules()\n self.output_adapters._init_adapter_modules()\n self.register_forward_pre_hook(self._adapter_block_pre_hook)\n\n def add_fusion_layer(self, adapter_names):\n self.attention_adapters.add_fusion_layer(adapter_names)\n self.output_adapters.add_fusion_layer(adapter_names)\n\n def add_adapter(self, adapter_name: str, layer_idx: int):\n self.attention_adapters.add_adapter(adapter_name, layer_idx)\n self.output_adapters.add_adapter(adapter_name, layer_idx)\n\n def delete_adapter(self, adapter_name):\n self.attention_adapters.delete_adapter(adapter_name)\n self.output_adapters.delete_adapter(adapter_name)\n\n def delete_fusion_layer(self, adapter_names):\n self.attention_adapters.delete_fusion_layer(adapter_names)\n self.output_adapters.delete_fusion_layer(adapter_names)\n\n def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):\n self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n\n # Makes sure the \"parent\" reference always points to the correct module.\n # This is especially relevant when using torch data parallelism.\n @staticmethod\n def _adapter_block_pre_hook(module, input_tensors):\n object.__setattr__(module.attention_adapters, \"parent\", module)\n object.__setattr__(module.output_adapters, \"parent\", module)\n\n\nclass DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):\n \"\"\"Adds adapters to the Transformer module of DistilBert.\"\"\"\n\n pass\n\n\nclass DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):\n \"\"\"Adds adapters to the DistilBert module.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n self.train()\n 
self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, True, False)\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def _add_adapter(self, adapter_name):\n self.transformer.add_adapter(adapter_name)\n self.add_invertible_adapter(adapter_name)\n\n def _add_fusion_layer(self, adapter_names):\n self.transformer.add_fusion_layer(adapter_names)\n\n def _delete_adapter(self, adapter_name: str):\n self.transformer.delete_adapter(adapter_name)\n self.delete_invertible_adapter(adapter_name)\n\n def _delete_fusion_layer(self, adapter_names):\n self.transformer.delete_fusion_layer(adapter_names)\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for _, v in self.transformer.layer._modules.items():\n\n for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n def get_adapter(self, name):\n return_adapters = {}\n for idx, layer in enumerate(self.transformer.layer):\n adapters = {\n \"attention\": layer.attention_adapters.adapters,\n \"output\": layer.output_adapters.adapters,\n }\n for key, adapt in adapters.items():\n if hasattr(adapt, name):\n if idx not in return_adapters:\n return_adapters[idx] = {}\n return_adapters[idx][key] = getattr(adapt, name)\n\n return return_adapters\n\n\nclass DistilBertModelHeadsMixin(BertModelHeadsMixin):\n \"\"\"Adds heads to a DistilBert model.\"\"\"\n\n pass\n"
] | [
[
"torch.zeros"
]
] |
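`get_fusion_regularization_loss` in the mixin above pulls each fusion value matrix toward the identity. A minimal sketch of that penalty for a single weight matrix (the hidden size and the weight itself are illustrative):

```python
import torch

hidden_size = 4
value_weight = torch.randn(hidden_size, hidden_size, requires_grad=True)

# identity target built the same way as in the mixin: zeros with a unit diagonal
target = torch.zeros((hidden_size, hidden_size)).fill_diagonal_(1.0)

reg_loss = 0.01 * (target - value_weight).pow(2).sum()
reg_loss.backward()
```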
gsyqax/pandas | [
"cb35d8a938c9222d903482d2f66c62fece5a7aae"
] | [
"pandas/tests/arrays/boolean/test_construction.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.arrays import BooleanArray\nfrom pandas.core.arrays.boolean import coerce_to_array\n\n\[email protected]\ndef data():\n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtype=\"boolean\",\n )\n\n\ndef test_boolean_array_constructor():\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n result = BooleanArray(values, mask)\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n with pytest.raises(TypeError, match=\"values should be boolean numpy array\"):\n BooleanArray(values.tolist(), mask)\n\n with pytest.raises(TypeError, match=\"mask should be boolean numpy array\"):\n BooleanArray(values, mask.tolist())\n\n with pytest.raises(TypeError, match=\"values should be boolean numpy array\"):\n BooleanArray(values.astype(int), mask)\n\n with pytest.raises(TypeError, match=\"mask should be boolean numpy array\"):\n BooleanArray(values, None)\n\n with pytest.raises(ValueError, match=\"values must be a 1D array\"):\n BooleanArray(values.reshape(1, -1), mask)\n\n with pytest.raises(ValueError, match=\"mask must be a 1D array\"):\n BooleanArray(values, mask.reshape(1, -1))\n\n\ndef test_boolean_array_constructor_copy():\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n result = BooleanArray(values, mask)\n assert result._data is values\n assert result._mask is mask\n\n result = BooleanArray(values, mask, copy=True)\n assert result._data is not values\n assert result._mask is not mask\n\n\ndef test_to_boolean_array():\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, False])\n )\n\n result = pd.array([True, False, True], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, True]), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, True], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, True])\n )\n\n result = pd.array([True, False, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, None], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_all_none():\n expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))\n\n result = pd.array([None, None, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([None, None, None], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\[email protected](\n \"a, b\",\n [\n ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),\n ([True, np.nan], [True, None]),\n ([True, pd.NA], [True, None]),\n ([np.nan, np.nan], [None, None]),\n (np.array([np.nan, np.nan], dtype=float), [None, None]),\n ],\n)\ndef test_to_boolean_array_missing_indicators(a, b):\n result = pd.array(a, dtype=\"boolean\")\n expected = pd.array(b, dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\[email protected](\n \"values\",\n [\n [\"foo\", 
\"bar\"],\n [\"1\", \"2\"],\n # \"foo\",\n [1, 2],\n [1.0, 2.0],\n pd.date_range(\"20130101\", periods=2),\n np.array([\"foo\"]),\n np.array([1, 2]),\n np.array([1.0, 2.0]),\n [np.nan, {\"a\": 1}],\n ],\n)\ndef test_to_boolean_array_error(values):\n # error in converting existing arrays to BooleanArray\n msg = \"Need to pass bool-like value\"\n with pytest.raises(TypeError, match=msg):\n pd.array(values, dtype=\"boolean\")\n\n\ndef test_to_boolean_array_from_integer_array():\n result = pd.array(np.array([1, 0, 1, 0]), dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1, 0, 1, None]), dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_float_array():\n result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_integer_like():\n # integers of 0's and 1's\n result = pd.array([1, 0, 1, 0], dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array([1, 0, 1, None], dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_coerce_to_array():\n # TODO this is currently not public API\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n result = BooleanArray(*coerce_to_array(values, mask=mask))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is values\n assert result._mask is mask\n result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is not values\n assert result._mask is not mask\n\n # mixed missing from values and mask\n values = [True, False, None, False]\n mask = np.array([False, False, False, True], dtype=\"bool\")\n result = BooleanArray(*coerce_to_array(values, mask=mask))\n expected = BooleanArray(\n np.array([True, False, True, True]), np.array([False, False, True, True])\n )\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))\n tm.assert_extension_array_equal(result, expected)\n\n # raise errors for wrong dimension\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n with pytest.raises(ValueError, match=\"values must be a 1D list-like\"):\n coerce_to_array(values.reshape(1, -1))\n\n with pytest.raises(ValueError, match=\"mask must be a 1D list-like\"):\n coerce_to_array(values, mask=mask.reshape(1, -1))\n\n\ndef test_coerce_to_array_from_boolean_array():\n # passing BooleanArray to 
coerce_to_array\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n arr = BooleanArray(values, mask)\n result = BooleanArray(*coerce_to_array(arr))\n tm.assert_extension_array_equal(result, arr)\n # no copy\n assert result._data is arr._data\n assert result._mask is arr._mask\n\n result = BooleanArray(*coerce_to_array(arr), copy=True)\n tm.assert_extension_array_equal(result, arr)\n assert result._data is not arr._data\n assert result._mask is not arr._mask\n\n with pytest.raises(ValueError, match=\"cannot pass mask for BooleanArray input\"):\n coerce_to_array(arr, mask=mask)\n\n\ndef test_coerce_to_numpy_array():\n # with missing values -> object dtype\n arr = pd.array([True, False, None], dtype=\"boolean\")\n result = np.array(arr)\n expected = np.array([True, False, pd.NA], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n # also with no missing values -> object dtype\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = np.array(arr)\n expected = np.array([True, False, True], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n # force bool dtype\n result = np.array(arr, dtype=\"bool\")\n expected = np.array([True, False, True], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n # with missing values will raise error\n arr = pd.array([True, False, None], dtype=\"boolean\")\n msg = (\n \"cannot convert to 'bool'-dtype NumPy array with missing values. \"\n \"Specify an appropriate 'na_value' for this dtype.\"\n )\n with pytest.raises(ValueError, match=msg):\n np.array(arr, dtype=\"bool\")\n\n\ndef test_to_boolean_array_from_strings():\n result = BooleanArray._from_sequence_of_strings(\n np.array([\"True\", \"False\", np.nan], dtype=object)\n )\n expected = BooleanArray(\n np.array([True, False, False]), np.array([False, False, True])\n )\n\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_strings_invalid_string():\n with pytest.raises(ValueError, match=\"cannot be cast\"):\n BooleanArray._from_sequence_of_strings([\"donkey\"])\n\n\[email protected](\"box\", [True, False], ids=[\"series\", \"array\"])\ndef test_to_numpy(box):\n con = pd.Series if box else pd.array\n # default (with or without missing values) -> object dtype\n arr = con([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy()\n expected = np.array([True, False, True], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy()\n expected = np.array([True, False, pd.NA], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy(dtype=\"str\")\n expected = np.array([True, False, pd.NA], dtype=\"<U5\")\n tm.assert_numpy_array_equal(result, expected)\n\n # no missing values -> can convert to bool, otherwise raises\n arr = con([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=\"bool\")\n expected = np.array([True, False, True], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n with pytest.raises(ValueError, match=\"cannot convert to 'bool'-dtype\"):\n result = arr.to_numpy(dtype=\"bool\")\n\n # specify dtype and na_value\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy(dtype=object, na_value=None)\n expected = np.array([True, False, None], 
dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=bool, na_value=False)\n expected = np.array([True, False, False], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=\"int64\", na_value=-99)\n expected = np.array([1, 0, -99], dtype=\"int64\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=\"float64\", na_value=np.nan)\n expected = np.array([1, 0, np.nan], dtype=\"float64\")\n tm.assert_numpy_array_equal(result, expected)\n\n # converting to int or float without specifying na_value raises\n with pytest.raises(ValueError, match=\"cannot convert to 'int64'-dtype\"):\n arr.to_numpy(dtype=\"int64\")\n with pytest.raises(ValueError, match=\"cannot convert to 'float64'-dtype\"):\n arr.to_numpy(dtype=\"float64\")\n\n\ndef test_to_numpy_copy():\n # to_numpy can be zero-copy if no missing values\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=bool)\n result[0] = False\n tm.assert_extension_array_equal(\n arr, pd.array([False, False, True], dtype=\"boolean\")\n )\n\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=bool, copy=True)\n result[0] = False\n tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype=\"boolean\"))\n\n\n# FIXME: don't leave commented out\n# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion\n# manually in the indexing code\n# def test_indexing_boolean_mask():\n# arr = pd.array([1, 2, 3, 4], dtype=\"Int64\")\n# mask = pd.array([True, False, True, False], dtype=\"boolean\")\n# result = arr[mask]\n# expected = pd.array([1, 3], dtype=\"Int64\")\n# tm.assert_extension_array_equal(result, expected)\n\n# # missing values -> error\n# mask = pd.array([True, False, True, None], dtype=\"boolean\")\n# with pytest.raises(IndexError):\n# result = arr[mask]\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"pandas.core.arrays.boolean.coerce_to_array",
"pandas.date_range",
"pandas.array",
"pandas._testing.assert_extension_array_equal",
"pandas.arrays.BooleanArray",
"numpy.array",
"pandas.arrays.BooleanArray._from_sequence_of_strings"
]
] |
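The test file above exercises pandas' nullable `boolean` extension dtype. A minimal sketch of the constructor and `to_numpy` behaviour it checks (values are illustrative):

```python
import numpy as np
import pandas as pd

arr = pd.array([True, False, None], dtype="boolean")   # masked BooleanArray

# with missing values, the default conversion falls back to object dtype
print(np.array(arr))                                    # [True, False, <NA>] as object

# an explicit na_value allows a plain bool (or numeric) ndarray
print(arr.to_numpy(dtype="bool", na_value=False))       # [ True False False]
print(arr.to_numpy(dtype="float64", na_value=np.nan))   # [ 1.  0. nan]
```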
VitoRazor/Lidar_RGB_detector | [
"5308ba24a90d6e8d73940be4b40d31eccb4df94b"
] | [
"second/pytorch/train.py"
] | [
"import copy\nimport json\nimport os\nfrom pathlib import Path\nimport pickle\nimport shutil\nimport time\nimport re \nimport fire\nimport numpy as np\nimport torch\nfrom google.protobuf import text_format\n\nimport second.data.kitti_common as kitti\nimport torchplus\nfrom second.builder import target_assigner_builder, voxel_builder\nfrom second.core import box_np_ops\nfrom second.data.preprocess import merge_second_batch, merge_second_batch_multigpu\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder import (box_coder_builder, input_reader_builder,\n lr_scheduler_builder, optimizer_builder,\n second_builder)\nfrom second.utils.log_tool import SimpleModelLog\nfrom second.utils.progress_bar import ProgressBar\nimport psutil\n\ndef example_convert_to_torch(example, dtype=torch.float32,\n device=None) -> dict:\n device = device or torch.device(\"cuda:0\")\n example_torch = {}\n float_names = [\n \"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\", \"importance\"\n ]\n for k, v in example.items():\n if k in float_names:\n # slow when directly provide fp32 data with dtype=torch.half\n example_torch[k] = torch.tensor(\n v, dtype=torch.float32, device=device).to(dtype)\n elif k in [\"coordinates\", \"labels\", \"num_points\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.int32, device=device)\n elif k in [\"anchors_mask\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.uint8, device=device)\n elif k == \"calib\":\n calib = {}\n for k1, v1 in v.items():\n calib[k1] = torch.tensor(\n v1, dtype=dtype, device=device).to(dtype)\n example_torch[k] = calib\n elif k == \"num_voxels\":\n example_torch[k] = torch.tensor(v)\n else:\n example_torch[k] = v\n return example_torch\n\n\ndef build_network(model_cfg, measure_time=False, KL=False):\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim\n print(KL)\n net = second_builder.build(\n model_cfg, voxel_generator, target_assigner, measure_time=measure_time, KL = KL )\n return net\n\ndef _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\ndef freeze_params(params: dict, include: str=None, exclude: str=None):\n assert isinstance(params, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n remain_params = []\n for k, p in params.items():\n if include_re is not None:\n if include_re.match(k) is not None:\n continue \n if exclude_re is not None:\n if exclude_re.match(k) is None:\n continue \n remain_params.append(p)\n return remain_params\n\ndef freeze_params_v2(params: dict, include: str=None, exclude: str=None):\n assert isinstance(params, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n for k, p in params.items():\n if include_re is not None:\n if include_re.match(k) is not None:\n p.requires_grad = False\n if exclude_re is not None:\n if exclude_re.match(k) is None:\n p.requires_grad = 
False\n\ndef filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):\n assert isinstance(state_dict, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n res_dict = {}\n for k, p in state_dict.items():\n if include_re is not None:\n if include_re.match(k) is None:\n continue\n if exclude_re is not None:\n if exclude_re.match(k) is not None:\n continue \n res_dict[k] = p\n return res_dict\n\n\ndef train(config_path,\n model_dir,\n KL = False,\n result_path=None,\n create_folder=False,\n display_step=50,\n summary_step=5,\n pretrained_path=None,\n pretrained_include=None,\n pretrained_exclude=None,\n freeze_include=None,\n freeze_exclude=None,\n multi_gpu=False,\n measure_time=False,\n resume=False):\n \"\"\"train a VoxelNet model specified by a config file.\n \"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n model_dir = str(Path(model_dir).resolve())\n if create_folder:\n if Path(model_dir).exists():\n model_dir = torchplus.train.create_folder(model_dir)\n model_dir = Path(model_dir)\n if not resume and model_dir.exists():\n raise ValueError(\"model dir exists and you don't specify resume.\")\n model_dir.mkdir(parents=True, exist_ok=True)\n if result_path is None:\n result_path = model_dir / 'results'\n config_file_bkp = \"pipeline.config\"\n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to train with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n proto_str = text_format.MessageToString(config, indent=2)\n with (model_dir / config_file_bkp).open(\"w\") as f:\n f.write(proto_str)\n\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n if model_cfg.rpn.module_class_name == \"RPN_KL\":\n KL = True\n else:\n KL = False\n print(KL)\n net = build_network(model_cfg, measure_time,KL).to(device)\n # if train_cfg.enable_mixed_precision:\n # net.half()\n # net.metrics_to_float()\n # net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n print(\"num parameters:\", len(list(net.parameters())))\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n if pretrained_path is not None:\n model_dict = net.state_dict()\n pretrained_dict = torch.load(pretrained_path)\n pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)\n new_pretrained_dict = {}\n for k, v in pretrained_dict.items():\n if k in model_dict and v.shape == model_dict[k].shape:\n new_pretrained_dict[k] = v \n print(\"Load pretrained parameters:\")\n for k, v in new_pretrained_dict.items():\n print(k, v.shape)\n model_dict.update(new_pretrained_dict) \n net.load_state_dict(model_dict)\n freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)\n net.clear_global_step()\n net.clear_metrics()\n if multi_gpu:\n net_parallel = torch.nn.DataParallel(net)\n else:\n net_parallel = net\n optimizer_cfg = train_cfg.optimizer\n loss_scale = train_cfg.loss_scale_factor\n fastai_optimizer = optimizer_builder.build(\n optimizer_cfg,\n net,\n mixed=False,\n loss_scale=loss_scale)\n if loss_scale < 0:\n loss_scale = 
\"dynamic\"\n if train_cfg.enable_mixed_precision:\n max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size\n assert max_num_voxels < 65535, \"spconv fp16 training only support this\"\n from apex import amp\n net, amp_optimizer = amp.initialize(net, fastai_optimizer,\n opt_level=\"O2\",\n keep_batchnorm_fp32=True,\n loss_scale=loss_scale\n )\n net.metrics_to_float()\n else:\n amp_optimizer = fastai_optimizer\n torchplus.train.try_restore_latest_checkpoints(model_dir,\n [fastai_optimizer])\n lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,\n train_cfg.steps)\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n if multi_gpu:\n num_gpu = torch.cuda.device_count()\n print(f\"MULTI-GPU: use {num_gpu} gpu\")\n collate_fn = merge_second_batch_multigpu\n else:\n collate_fn = merge_second_batch\n num_gpu = 1\n\n ######################\n # PREPARE INPUT\n ######################\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n multi_gpu=multi_gpu)\n eval_dataset = input_reader_builder.build(\n eval_input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size * num_gpu,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers * num_gpu,\n pin_memory=False,\n collate_fn=collate_fn,\n worker_init_fn=_worker_init_fn,\n drop_last=not multi_gpu)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=eval_input_cfg.batch_size, # only support multi-gpu train\n shuffle=False,\n num_workers=eval_input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n ######################\n # TRAINING\n ######################\n model_logging = SimpleModelLog(model_dir)\n model_logging.open()\n model_logging.log_text(proto_str + \"\\n\", 0, tag=\"config\")\n start_step = net.get_global_step()\n total_step = train_cfg.steps\n t = time.time()\n steps_per_eval = train_cfg.steps_per_eval\n clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch\n\n amp_optimizer.zero_grad()\n step_times = []\n step = start_step\n try:\n while True:\n if clear_metrics_every_epoch:\n net.clear_metrics()\n for example in dataloader:\n lr_scheduler.step(net.get_global_step())\n time_metrics = example[\"metrics\"]\n example.pop(\"metrics\")\n example_torch = example_convert_to_torch(example, float_dtype)\n batch_size = example[\"anchors\"].shape[0]\n # print(\"num_points:\",max(example_torch['num_points']))\n # print(\"num_voxels:\",example_torch['num_voxels'].shape)\n # print(\"anchors:\",example_torch['anchors'].shape)\n # print(\"voxels:\",example_torch['voxels'].shape)\n # print(example_torch['voxels'][0:3])\n # print(\"coordinates:\",example_torch['coordinates'].shape)\n # exit()\n ret_dict = net_parallel(example_torch)\n cls_preds = ret_dict[\"cls_preds\"]\n loss = ret_dict[\"loss\"].mean()\n cls_loss_reduced = ret_dict[\"cls_loss_reduced\"].mean()\n loc_loss_reduced = ret_dict[\"loc_loss_reduced\"].mean()\n cls_pos_loss = ret_dict[\"cls_pos_loss\"].mean()\n cls_neg_loss = ret_dict[\"cls_neg_loss\"].mean()\n loc_loss = ret_dict[\"loc_loss\"]\n cls_loss = ret_dict[\"cls_loss\"]\n \n cared = ret_dict[\"cared\"]\n labels = example_torch[\"labels\"]\n if train_cfg.enable_mixed_precision:\n with amp.scale_loss(loss, amp_optimizer) 
as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)\n amp_optimizer.step()\n amp_optimizer.zero_grad()\n net.update_global_step()\n net_metrics = net.update_metrics(cls_loss_reduced,\n loc_loss_reduced, cls_preds,\n labels, cared)\n\n step_time = (time.time() - t)\n step_times.append(step_time)\n t = time.time()\n metrics = {}\n num_pos = int((labels > 0)[0].float().sum().cpu().numpy())\n num_neg = int((labels == 0)[0].float().sum().cpu().numpy())\n if 'anchors_mask' not in example_torch:\n num_anchors = example_torch['anchors'].shape[1]\n else:\n num_anchors = int(example_torch['anchors_mask'][0].sum())\n global_step = net.get_global_step()\n\n if global_step % display_step == 0:\n if measure_time:\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n\n loc_loss_elem = [\n float(loc_loss[:, :, i].sum().detach().cpu().numpy() /\n batch_size) for i in range(loc_loss.shape[-1])\n ]\n metrics[\"runtime\"] = {\n \"step\": global_step,\n \"steptime\": np.mean(step_times),\n }\n metrics[\"runtime\"].update(time_metrics[0])\n step_times = []\n metrics.update(net_metrics)\n metrics[\"loss\"][\"loc_elem\"] = loc_loss_elem\n metrics[\"loss\"][\"cls_pos_rt\"] = float(\n cls_pos_loss.detach().cpu().numpy())\n metrics[\"loss\"][\"cls_neg_rt\"] = float(\n cls_neg_loss.detach().cpu().numpy())\n if model_cfg.use_direction_classifier:\n dir_loss_reduced = ret_dict[\"dir_loss_reduced\"].mean()\n metrics[\"loss\"][\"dir_rt\"] = float(\n dir_loss_reduced.detach().cpu().numpy())\n\n metrics[\"misc\"] = {\n \"num_vox\": int(example_torch[\"voxels\"].shape[0]),\n \"num_pos\": int(num_pos),\n \"num_neg\": int(num_neg),\n \"num_anchors\": int(num_anchors),\n \"lr\": float(amp_optimizer.lr),\n \"mem_usage\": psutil.virtual_memory().percent,\n }\n model_logging.log_metrics(metrics, global_step)\n\n if global_step % steps_per_eval == 0:\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n net.get_global_step())\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n model_logging.log_text(\"#################################\",\n global_step)\n model_logging.log_text(\"# EVAL\", global_step)\n model_logging.log_text(\"#################################\",\n global_step)\n model_logging.log_text(\"Generate output labels...\", global_step)\n t = time.time()\n detections = []\n prog_bar = ProgressBar()\n net.clear_timer()\n prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)\n // eval_input_cfg.batch_size)\n for example in iter(eval_dataloader):\n example = example_convert_to_torch(example, float_dtype)\n detections += net(example)\n prog_bar.print_bar()\n\n sec_per_ex = len(eval_dataset) / (time.time() - t)\n model_logging.log_text(\n f'generate label finished({sec_per_ex:.2f}/s). 
start eval:',\n global_step)\n result_dict = eval_dataset.dataset.evaluation(\n detections, str(result_path_step))\n for k, v in result_dict[\"results\"].items():\n model_logging.log_text(\"Evaluation {}\".format(k), global_step)\n model_logging.log_text(v, global_step)\n model_logging.log_metrics(result_dict[\"detail\"], global_step)\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(detections, f)\n net.train()\n step += 1\n if step >= total_step:\n break\n if step >= total_step:\n break\n except Exception as e:\n print(json.dumps(example[\"metadata\"], indent=2))\n model_logging.log_text(str(e), step)\n model_logging.log_text(json.dumps(example[\"metadata\"], indent=2), step)\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n step)\n raise e\n finally:\n model_logging.close()\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n net.get_global_step())\n\n\ndef evaluate(config_path,\n model_dir=None,\n result_path=None,\n ckpt_path=None,\n measure_time=False,\n batch_size=None,\n **kwargs):\n \"\"\"Don't support pickle_result anymore. if you want to generate kitti label file,\n please use kitti_anno_to_label_file and convert_detection_to_kitti_annos\n in second.data.kitti_dataset.\n \"\"\"\n assert len(kwargs) == 0\n model_dir = str(Path(model_dir).resolve())\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n result_name = 'eval_results'\n if result_path is None:\n model_dir = Path(model_dir)\n result_path = model_dir / result_name\n else:\n result_path = Path(result_path)\n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to eval with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n net = build_network(model_cfg, measure_time=measure_time).to(device)\n if train_cfg.enable_mixed_precision:\n net.half()\n print(\"half inference!\")\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n\n if ckpt_path is None:\n assert model_dir is not None\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n batch_size = batch_size or input_cfg.batch_size\n eval_dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n t = time.time()\n detections = []\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start((len(eval_dataset) + batch_size - 1) // batch_size)\n prep_example_times = []\n prep_times = []\n t2 = time.time()\n\n for example in iter(eval_dataloader):\n if measure_time:\n prep_times.append(time.time() - t2)\n torch.cuda.synchronize()\n t1 = time.time()\n example = 
example_convert_to_torch(example, float_dtype)\n if measure_time:\n torch.cuda.synchronize()\n prep_example_times.append(time.time() - t1)\n with torch.no_grad():\n detections += net(example)\n bar.print_bar()\n if measure_time:\n t2 = time.time()\n\n sec_per_example = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_example:.2f}/s). start eval:')\n if measure_time:\n print(\n f\"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms\"\n )\n print(f\"avg prep time: {np.mean(prep_times) * 1000:.3f} ms\")\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(detections, f)\n result_dict = eval_dataset.dataset.evaluation(detections,\n str(result_path_step))\n if result_dict is not None:\n for k, v in result_dict[\"results\"].items():\n print(\"Evaluation {}\".format(k))\n print(v)\n\ndef helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):\n \"\"\"get information of target assign to tune thresholds in anchor generator.\n \"\"\" \n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to train with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n proto_str = text_format.MessageToString(config, indent=2)\n\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n net = build_network(model_cfg, False, KL)\n # if train_cfg.enable_mixed_precision:\n # net.half()\n # net.metrics_to_float()\n # net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n multi_gpu=False)\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n num_workers=0,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=False)\n \n class_count = {}\n anchor_count = {}\n class_count_tune = {}\n anchor_count_tune = {}\n for c in target_assigner.classes:\n class_count[c] = 0\n anchor_count[c] = 0\n class_count_tune[c] = 0\n anchor_count_tune[c] = 0\n\n\n step = 0\n classes = target_assigner.classes\n if target_rate is None:\n num_tune_epoch = 0\n for epoch in range(num_tune_epoch):\n for example in dataloader:\n gt_names = example[\"gt_names\"]\n for name in gt_names:\n class_count_tune[name] += 1\n \n labels = example['labels']\n for i in range(1, len(classes) + 1):\n anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))\n if target_rate is not None:\n for name, rate in target_rate.items():\n if class_count_tune[name] > update_freq:\n # calc rate\n current_rate = anchor_count_tune[name] / class_count_tune[name]\n if current_rate > rate:\n target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta\n target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta\n else:\n target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta\n target_assigner._anchor_generators[classes.index(name)].unmatch_threshold 
-= update_delta\n anchor_count_tune[name] = 0\n class_count_tune[name] = 0\n step += 1\n for c in target_assigner.classes:\n class_count[c] = 0\n anchor_count[c] = 0\n total_voxel_gene_time = 0\n count = 0\n\n for example in dataloader:\n gt_names = example[\"gt_names\"]\n total_voxel_gene_time += example[\"metrics\"][0][\"voxel_gene_time\"]\n count += 1\n\n for name in gt_names:\n class_count[name] += 1\n \n labels = example['labels']\n for i in range(1, len(classes) + 1):\n anchor_count[classes[i - 1]] += int(np.sum(labels == i))\n print(\"avg voxel gene time\", total_voxel_gene_time / count)\n\n print(json.dumps(class_count, indent=2))\n print(json.dumps(anchor_count, indent=2))\n if target_rate is not None:\n for ag in target_assigner._anchor_generators:\n if ag.class_name in target_rate:\n print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)\n\ndef mcnms_parameters_search(config_path,\n model_dir,\n preds_path):\n pass\n\n\nif __name__ == '__main__':\n fire.Fire()\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.sum",
"torch.load",
"numpy.random.get_state",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.synchronize",
"torch.cuda.device_count",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.device",
"numpy.mean"
]
] |
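`example_convert_to_torch` in `train.py` above is essentially a per-key dtype dispatch when moving a batch of NumPy arrays to a device. A minimal sketch of that pattern; the key lists are shortened, and `cpu` is used here (the original defaults to `cuda:0`) so the sketch runs anywhere:

```python
import numpy as np
import torch

def batch_to_torch(example, float_dtype=torch.float32, device=None):
    device = device or torch.device("cpu")        # train.py defaults to "cuda:0"
    float_keys = ["voxels", "anchors"]
    int_keys = ["coordinates", "labels", "num_points"]
    out = {}
    for k, v in example.items():
        if k in float_keys:
            out[k] = torch.tensor(v, dtype=torch.float32, device=device).to(float_dtype)
        elif k in int_keys:
            out[k] = torch.tensor(v, dtype=torch.int32, device=device)
        else:
            out[k] = v                             # leave metadata untouched
    return out

batch = {"voxels": np.zeros((5, 3), np.float32), "labels": np.ones(5, np.int64), "meta": "frame_0"}
print({k: type(v) for k, v in batch_to_torch(batch).items()})
```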
KyunghoWon-GIST/PyRiemann-with-OpenViBE | [
"2a070fdadb040ce6edad81aef497d054ddd70130"
] | [
"python-Riemann-online.py"
] | [
"import pickle\r\nimport numpy as np\r\nimport pyriemann\r\nimport sklearn\r\nimport scipy\r\nimport matplotlib as mpl\r\nmpl.use('Qt5Agg') # for using pyplot (pip install pyqt5)\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import signal\r\nfrom scipy.signal import butter, filtfilt, sosfiltfilt\r\n\r\n# Pyriemann with OV Python scripting plugin --------------------------------------------------- written by Kyungho Won\r\n#\r\n# Step\r\n# 1. Loads covariance matrices estimated using calibration EEG at the beginning and fits MDM (__init__)\r\n# 2. During test scenario, python scripting module receives the segmented EEG from OpenViBE every epoch (input: signal)\r\n# 3. In Python scripting plugin, the segmented EEG is band-pass filtered and transformed to a covariance matrix\r\n# 4. The Fitted MDM predicts the current label with the covariance matrix\r\n# 5. Python scripting plugin sends stimulution (predicted labels) as an output (output: stimulation)\r\n\r\n# 6. Ohter external modules could be added\r\n\r\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order):\r\n\tnyq = fs/2\r\n\tlow = lowcut/nyq\r\n\thigh = highcut/nyq\r\n\tsos = butter(order, [low, high], btype='band', output='sos')\r\n\t# demean before filtering\r\n\tmeandat = np.mean(data, axis=1)\r\n\tdata = data - meandat[:, np.newaxis]\r\n\ty = sosfiltfilt(sos, data) # zero-phase filter # data: [ch x time]\r\n\t# specify pandlen to make the result the same as Matlab filtfilt()\r\n\treturn y\r\n\r\ndef draw_feedback(nth, nClass):\r\n\tlabels_arr = ['LEFT','RIGHT','UP','DOWN']\r\n\tmpl.rcParams['toolbar'] = 'None' # Remove tool bar (upper bar)\r\n\r\n\tplt.clf()\r\n\tplt.plot(0,0)\r\n\tax = plt.gca()\r\n\tax.set_facecolor('black')\r\n\tplt.xlim([-10, 10])\r\n\tplt.ylim([-10, 10])\r\n\tplt.axis('off')\r\n\tplt.title('%02d Predicted: %s' %(nth, labels_arr[int(nClass)-1]))\r\n\r\n\tif nClass == 1: # left\r\n\t\tplt.arrow(0,0, -4, 0, width=1)\r\n\telif nClass == 2: # right\r\n\t\tplt.arrow(0,0, 4, 0, width=1)\r\n\telif nClass == 3: # up\r\n\t\tplt.arrow(0,0, 0, 4, width=1)\r\n\telif nClass == 4: # down\r\n\t\tplt.arrow(0,0, 0, -4, width=1)\r\n\r\n\r\nclass MyOVBox(OVBox):\r\n\tdef __init__(self):\r\n\t\tOVBox.__init__(self)\r\n\t\tself.signalHeader = None\r\n\t\tself.nth_trial = 0\r\n\r\n\tdef initialize(self):\r\n\t\t# Append to the box output a stimulation header. 
\r\n\t\tself.output[0].append(OVStimulationHeader(0., 0.))\r\n\r\n\t\t# Load covariance matrices estimated from the calibrated EEG\r\n\t\tload_file = open(self.setting['Trained model path'], 'rb')\r\n\t\ttrained = pickle.load(load_file)\r\n\t\tself.mdm = pyriemann.classification.MDM()\r\n\t\tself.mdm.metric = 'Riemann'\r\n\t\tself.mdm.fit(trained['COV'], trained['Labels'])\t\r\n\t\tprint('Training accuracy is', np.sum(self.mdm.predict(trained['COV'])==trained['Labels'])/len(trained['Labels']))\r\n\t\tprint('== Trained COV:', trained['COV'].shape)\r\n\t\tprint('==', self.mdm)\r\n\t\tprint('\\n\\n')\r\n\r\n\t\t# User defined parameters\r\n\t\tself.lowbp = int(self.setting['low bp'])\r\n\t\tself.highbp = int(self.setting['high bp'])\r\n\t\tself.filterorder = int(self.setting['filter order'])\r\n\t\tself.sampling = int(self.setting['sampling rate'])\r\n\t\tself.isfeedback = self.setting['Feedback']\r\n\t\tself.ans_mi = [769, 770, 780, 774] # left right up down\r\n\r\n\t\tplt.ion()\r\n\r\n\tdef process(self):\r\n\t\tfor chunkIdx in range( len(self.input[0]) ):\r\n\t\t\t# borrowed from python-signal-average.py\r\n\t\t\tif(type(self.input[0][chunkIdx]) == OVSignalHeader): # called only once\r\n\t\t\t\tself.signalHeader = self.input[0].pop()\r\n\r\n\t\t\telif(type(self.input[0][chunkIdx]) == OVSignalBuffer): # called every epoch\r\n\t\t\t\tchunk = self.input[0].pop()\r\n\t\t\t\tnumpyBuffer = np.array(chunk, dtype=np.float64).reshape(tuple(self.signalHeader.dimensionSizes))\r\n\t\t\t\t# numpyBuffer has [ch x time]\r\n\t\t\t\tnumpyBuffer = butter_bandpass_filter(numpyBuffer, self.lowbp, self.highbp, self.sampling, self.filterorder)\r\n\r\n\t\t\t\t# Pyriemann only accpets 3D inputs with [nMatrices, nCh, nTime]\r\n\t\t\t\tcur_input = np.expand_dims(numpyBuffer, axis=0) # now (1, nCh, nTime)\r\n\t\t\t\tCOV_cur = pyriemann.estimation.Covariances().fit_transform(cur_input)\r\n\t\t\t\tpredict_class = self.mdm.predict(COV_cur) # among [1, 2, 3, 4]\r\n\t\t\t\tprint(predict_class)\r\n\r\n\t\t\t\t# send stimulation (classified results)\r\n\t\t\t\tstimSet = OVStimulationSet(self.getCurrentTime(), self.getCurrentTime()+1./self.getClock())\r\n\t\t\t\tstimSet.append(OVStimulation(self.ans_mi[int(predict_class)-1], self.getCurrentTime(), 0.))\r\n\t\t\t\tself.output[0].append(stimSet)\r\n\t\t\t\tself.nth_trial = self.nth_trial + 1\r\n\r\n\t\t\t\tif self.isfeedback == 'True':\r\n\t\t\t\t\tdraw_feedback(self.nth_trial, predict_class)\r\n\t\t\t\t\t\t\t\t\r\n\tdef uninitialize(self):\r\n\t\tend = self.getCurrentTime()\r\n\t\tself.output[0].append(OVStimulationEnd(end,end))\r\n\t\tprint('uninitialize')\r\n\t\tplt.ioff()\r\n\t\tplt.close()\r\n\r\nbox = MyOVBox()\t# When it ends (the last call)\r\n"
] | [
[
"matplotlib.pyplot.ion",
"scipy.signal.sosfiltfilt",
"scipy.signal.butter",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.arrow",
"numpy.expand_dims",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.mean"
]
] |
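The online box above band-pass filters each incoming epoch and classifies its covariance matrix with a fitted MDM. A minimal offline sketch of that chain, with random data standing in for EEG (channel count, epoch length, labels and filter band are all illustrative):

```python
import numpy as np
from scipy.signal import butter, sosfiltfilt
import pyriemann

fs, n_ch, n_t = 250, 8, 500
rng = np.random.default_rng(0)
epochs = rng.standard_normal((20, n_ch, n_t))          # [trials x channels x time]
labels = np.repeat([1, 2], 10)

# zero-phase band-pass, as in butter_bandpass_filter (demeaning omitted here)
sos = butter(4, [8 / (fs / 2), 30 / (fs / 2)], btype='band', output='sos')
filtered = sosfiltfilt(sos, epochs, axis=-1)

# covariance estimation + minimum-distance-to-mean classification, as in the box
covs = pyriemann.estimation.Covariances().fit_transform(filtered)
mdm = pyriemann.classification.MDM(metric='riemann')
mdm.fit(covs, labels)
print(mdm.predict(covs[:2]))
```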
yage99/tensorflow | [
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd"
] | [
"tensorflow/python/data/experimental/service/server_lib_test.py",
"tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py",
"tensorflow/python/kernel_tests/signal/spectral_ops_test.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.data service server lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.experimental.service import server_lib\n\nfrom tensorflow.python.platform import test\n\n\nclass ServerLibTest(test.TestCase):\n\n def testStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0, start=False)\n dispatcher.start()\n\n def testMultipleStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0, start=True)\n dispatcher.start()\n\n def testStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address, start=False)\n worker.start()\n\n def testMultipleStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address, start=True)\n worker.start()\n\n def testStopDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n dispatcher._stop()\n\n def testStopWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n worker._stop()\n\n def testStopStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n with self.assertRaisesRegex(\n RuntimeError, \"Server cannot be started after it has been stopped\"):\n dispatcher.start()\n\n def testStopStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n with self.assertRaisesRegex(\n RuntimeError, \"Server cannot be started after it has been stopped\"):\n worker.start()\n\n def testJoinDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n dispatcher.join()\n\n def testJoinWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n worker.join()\n\n def testDispatcherNumWorkers(self):\n dispatcher = server_lib.DispatchServer(0)\n self.assertEqual(0, dispatcher._num_workers())\n worker1 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable\n self.assertEqual(1, dispatcher._num_workers())\n worker2 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable\n self.assertEqual(2, dispatcher._num_workers())\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: (! %p/exported_python_args 2>&1) | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long,dangerous-default-value\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common\n\n\nclass TestModule(tf.Module):\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])\n def some_function(self, x):\n return self.callee(x)\n\n # CHECK: While importing SavedModel function 'callee': in input signature:\n # CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo\n @tf.function\n def callee(self, x, n={'foo': 42}):\n return x\n\n\nif __name__ == '__main__':\n common.do_test(TestModule)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for spectral_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.signal import spectral_ops\nfrom tensorflow.python.ops.signal import window_ops\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SpectralOpsTest(test.TestCase, parameterized.TestCase):\n\n @staticmethod\n def _np_hann_periodic_window(length):\n if length == 1:\n return np.ones(1)\n odd = length % 2\n if not odd:\n length += 1\n window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))\n if not odd:\n window = window[:-1]\n return window\n\n @staticmethod\n def _np_frame(data, window_length, hop_length):\n num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))\n shape = (num_frames, window_length)\n strides = (data.strides[0] * hop_length, data.strides[0])\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n\n @staticmethod\n def _np_stft(data, fft_length, hop_length, window_length):\n frames = SpectralOpsTest._np_frame(data, window_length, hop_length)\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return np.fft.rfft(frames * window, fft_length)\n\n @staticmethod\n def _np_inverse_stft(stft, fft_length, hop_length, window_length):\n frames = np.fft.irfft(stft, fft_length)\n # Pad or truncate frames's inner dimension to window_length.\n frames = frames[..., :window_length]\n frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +\n [[0, max(0, window_length - frames.shape[-1])]], \"constant\")\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return SpectralOpsTest._np_overlap_add(frames * window, hop_length)\n\n @staticmethod\n def _np_overlap_add(stft, hop_length):\n num_frames, window_length = np.shape(stft)\n # Output length will be one complete window, plus another hop_length's\n # worth of points for each additional window.\n output_length = window_length + (num_frames - 1) * hop_length\n output = np.zeros(output_length)\n for i in range(num_frames):\n output[i * hop_length:i * hop_length + window_length] += stft[i,]\n return output\n\n def _compare(self, signal, frame_length, frame_step, fft_length, tol):\n actual_stft = spectral_ops.stft(\n signal, frame_length, frame_step, fft_length, pad_end=False)\n signal_ph = 
array_ops.placeholder_with_default(signal, shape=signal.shape)\n actual_stft_from_ph = spectral_ops.stft(\n signal_ph, frame_length, frame_step, fft_length, pad_end=False)\n\n actual_inverse_stft = spectral_ops.inverse_stft(\n actual_stft, frame_length, frame_step, fft_length)\n\n actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(\n [actual_stft, actual_stft_from_ph, actual_inverse_stft])\n\n actual_stft_ph = array_ops.placeholder_with_default(\n actual_stft, shape=actual_stft.shape)\n actual_inverse_stft_from_ph = self.evaluate(\n spectral_ops.inverse_stft(\n actual_stft_ph, frame_length, frame_step, fft_length))\n\n # Confirm that there is no difference in output when shape/rank is fully\n # unknown or known.\n self.assertAllClose(actual_stft, actual_stft_from_ph)\n self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)\n\n expected_stft = SpectralOpsTest._np_stft(\n signal, fft_length, frame_step, frame_length)\n self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)\n\n expected_inverse_stft = SpectralOpsTest._np_inverse_stft(\n expected_stft, fft_length, frame_step, frame_length)\n self.assertAllClose(\n expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)\n\n def test_shapes(self):\n signal = np.zeros((512,)).astype(np.float32)\n\n # If fft_length is not provided, the smallest enclosing power of 2 of\n # frame_length (8) is used.\n stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n fft_length=16, pad_end=True)\n self.assertAllEqual([64, 9], stft.shape.as_list())\n self.assertAllEqual([64, 9], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,\n fft_length=8, pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = np.zeros((32, 9)).astype(np.complex64)\n\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,\n fft_length=16, frame_step=8)\n expected_length = (stft.shape[0] - 1) * 8 + 8\n self.assertAllEqual([256], inverse_stft.shape.as_list())\n self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)\n\n @parameterized.parameters(\n (512, 64, 32, 64, np.float32, 1e-4),\n (512, 64, 32, 64, np.float64, 1e-8),\n (512, 64, 64, 64, np.float32, 1e-4),\n (512, 64, 64, 64, np.float64, 1e-8),\n (512, 72, 64, 64, np.float32, 1e-4),\n (512, 72, 64, 64, np.float64, 1e-8),\n (512, 64, 25, 64, np.float32, 1e-4),\n (512, 64, 25, 64, np.float64, 1e-8),\n (512, 25, 15, 36, np.float32, 1e-4),\n (512, 25, 15, 36, np.float64, 1e-8),\n (123, 23, 5, 42, np.float32, 1e-4),\n (123, 23, 5, 42, np.float64, 1e-8))\n def test_stft_and_inverse_stft(self, signal_length, frame_length,\n frame_step, fft_length, np_rtype, tol):\n \"\"\"Test that spectral_ops.stft/inverse_stft match a NumPy implementation.\"\"\"\n signal = np.random.random(signal_length).astype(np_rtype)\n self._compare(signal, frame_length, frame_step, fft_length, tol)\n\n @parameterized.parameters(\n # 87.5% overlap.\n (4096, 256, 32, 256, np.float32, 1e-5, 1e-6),\n (4096, 256, 32, 256, np.float64, 1e-8, 1e-8),\n # 75% overlap.\n (4096, 256, 64, 
256, np.float32, 1e-5, 1e-6),\n (4096, 256, 64, 256, np.float64, 1e-8, 1e-8),\n # Odd frame hop.\n (4096, 128, 25, 128, np.float32, 1e-3, 1e-6),\n (4096, 128, 25, 128, np.float64, 5e-4, 1e-8),\n # Odd frame length.\n (4096, 127, 32, 128, np.float32, 1e-3, 1e-6),\n (4096, 127, 32, 128, np.float64, 1e-3, 1e-8),\n # 50% overlap.\n (4096, 128, 64, 128, np.float32, 0.4, 1e-6),\n (4096, 128, 64, 128, np.float64, 0.4, 1e-8))\n def test_stft_round_trip(self, signal_length, frame_length, frame_step,\n fft_length, np_rtype, threshold,\n corrected_threshold):\n # Generate a random white Gaussian signal.\n signal = np.random.normal(size=signal_length).astype(np_rtype)\n\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,\n pad_end=False)\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,\n fft_length)\n inverse_stft_corrected = spectral_ops.inverse_stft(\n stft, frame_length, frame_step, fft_length,\n window_fn=spectral_ops.inverse_stft_window_fn(frame_step))\n inverse_stft, inverse_stft_corrected = self.evaluate(\n [inverse_stft, inverse_stft_corrected])\n\n # Truncate signal to the size of inverse stft.\n signal = signal[:inverse_stft.shape[0]]\n\n # Ignore the frame_length samples at either edge.\n signal = signal[frame_length:-frame_length]\n inverse_stft = inverse_stft[frame_length:-frame_length]\n inverse_stft_corrected = inverse_stft_corrected[\n frame_length:-frame_length]\n\n # Check that the inverse and original signal are close up to a scale\n # factor.\n inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))\n signal_scaled = signal / np.mean(np.abs(signal))\n self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)\n\n # Check that the inverse with correction and original signal are close.\n self.assertLess(np.std(inverse_stft_corrected - signal),\n corrected_threshold)\n\n @parameterized.parameters(\n (256, 32),\n (256, 64),\n (128, 25),\n (127, 32),\n (128, 64))\n def test_inverse_stft_window_fn(self, frame_length, frame_step):\n \"\"\"Test that inverse_stft_window_fn has unit gain at each window phase.\"\"\"\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n hann_window, inverse_window = self.evaluate([hann_window, inverse_window])\n\n # Expect unit gain at each phase of the window.\n product_window = hann_window * inverse_window\n for i in range(frame_step):\n self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))\n\n @parameterized.parameters((256, 64), (128, 32))\n def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):\n \"\"\"Test inverse_stft_window_fn in special overlap = 3/4 case.\"\"\"\n # Cases in which frame_length is an integer multiple of 4 * frame_step are\n # special because they allow exact reproduction of the waveform with a\n # squared Hann window (Hann window in both forward and reverse transforms).\n # In the case where frame_length = 4 * frame_step, that combination\n # produces a constant gain of 1.5, and so the corrected window will be the\n # Hann window / 1.5.\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n self.assertAllClose(hann_window, inverse_window * 1.5)\n\n @staticmethod\n def _compute_stft_gradient(signal, 
frame_length=32, frame_step=16,\n fft_length=32):\n \"\"\"Computes the gradient of the STFT with respect to `signal`.\"\"\"\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)\n magnitude_stft = math_ops.abs(stft)\n loss = math_ops.reduce_sum(magnitude_stft)\n return gradients_impl.gradients([loss], [signal])[0]\n\n def test_gradients(self):\n \"\"\"Test that spectral_ops.stft has a working gradient.\"\"\"\n # TODO(rjryan): Update gradient tests for Eager.\n if context.executing_eagerly():\n return\n with self.session(use_gpu=True) as sess:\n signal_length = 512\n\n # An all-zero signal has all zero gradients with respect to the sum of the\n # magnitude STFT.\n empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)\n empty_signal_gradient = sess.run(\n self._compute_stft_gradient(empty_signal))\n self.assertTrue((empty_signal_gradient == 0.0).all())\n\n # A sinusoid will have non-zero components of its gradient with respect to\n # the sum of the magnitude STFT.\n sinusoid = math_ops.sin(\n 2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))\n sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))\n self.assertFalse((sinusoid_gradient == 0.0).all())\n\n @parameterized.parameters(\n (64, 16, 8, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 8, 16, np.float64, 1e-8, 1e-8),\n (64, 16, 16, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 16, 16, np.float64, 1e-8, 1e-8),\n (64, 16, 7, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 7, 16, np.float64, 1e-8, 1e-8),\n (64, 7, 4, 9, np.float32, 2e-3, 5e-4),\n (64, 7, 4, 9, np.float64, 1e-8, 1e-8),\n (29, 5, 1, 10, np.float32, 2e-3, 5e-4),\n (29, 5, 1, 10, np.float64, 1e-8, 1e-8))\n def test_gradients_numerical(self, signal_length, frame_length, frame_step,\n fft_length, np_rtype, forward_tol, backward_tol):\n # TODO(rjryan): Investigate why STFT gradient error is so high.\n signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1\n\n def forward(signal):\n return spectral_ops.stft(\n signal, frame_length, frame_step, fft_length, pad_end=False)\n ((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(\n forward, [signal])\n self.assertAllClose(f_jacob_t, f_jacob_n,\n rtol=forward_tol, atol=forward_tol)\n\n def backward(stft):\n return spectral_ops.inverse_stft(\n stft, frame_length, frame_step, fft_length)\n\n stft = forward(signal)\n ((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(\n backward, [stft])\n self.assertAllClose(b_jacob_t, b_jacob_n,\n rtol=backward_tol, atol=backward_tol)\n\n @parameterized.parameters(\n itertools.product(\n (4000,),\n (256,),\n (np.float32, np.float64),\n (\"ortho\", None),\n (\"vorbis\", \"kaiser_bessel_derived\", None),\n (False, True)))\n def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,\n norm, window_type, pad_end):\n if np_rtype == np.float32:\n tol = 1e-5\n else:\n if window_type == \"kaiser_bessel_derived\":\n tol = 1e-6\n else:\n tol = 1e-8\n # Generate a random white Gaussian signal.\n signal = np.random.normal(size=signal_length).astype(np_rtype)\n if window_type == \"vorbis\":\n window_fn = window_ops.vorbis_window\n elif window_type == \"kaiser_bessel_derived\":\n window_fn = window_ops.kaiser_bessel_derived_window\n elif window_type is None:\n window_fn = None\n mdct = spectral_ops.mdct(signal, frame_length, norm=norm,\n window_fn=window_fn, pad_end=pad_end)\n inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,\n window_fn=window_fn)\n inverse_mdct = self.evaluate(inverse_mdct)\n\n # Truncate signal and 
inverse_mdct to their minimum length.\n min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])\n # Ignore the half_len samples at either edge.\n half_len = frame_length // 2\n signal = signal[half_len:min_length-half_len]\n inverse_mdct = inverse_mdct[half_len:min_length-half_len]\n\n # Check that the inverse and original signal are close.\n self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.data.experimental.service.server_lib.DispatchServer",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.experimental.service.server_lib.WorkerServer"
],
[
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model.common.do_test"
],
[
"numpy.ones",
"numpy.sum",
"tensorflow.python.ops.signal.window_ops.hann_window",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"numpy.fft.irfft",
"tensorflow.python.ops.signal.spectral_ops.inverse_stft_window_fn",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"numpy.abs",
"numpy.random.rand",
"tensorflow.python.ops.math_ops.linspace",
"numpy.fft.rfft",
"numpy.minimum",
"numpy.zeros",
"tensorflow.python.ops.signal.spectral_ops.stft",
"tensorflow.python.ops.signal.spectral_ops.inverse_mdct",
"tensorflow.python.ops.signal.spectral_ops.inverse_stft",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.signal.spectral_ops.mdct",
"numpy.lib.stride_tricks.as_strided",
"numpy.arange",
"numpy.std",
"tensorflow.python.ops.math_ops.abs",
"numpy.random.random",
"tensorflow.python.platform.test.main",
"numpy.shape",
"numpy.random.normal",
"tensorflow.python.eager.context.executing_eagerly"
]
] |
evanloshin/CarND-Behavioral-Cloning-P3 | [
"22ec89cdea5257a10512f07b07fc4c074bc7c649"
] | [
"drive.py"
] | [
"import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\n\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\n\nfrom keras.models import load_model\nimport h5py\nfrom keras import __version__ as keras_version\nfrom keras import Model\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\n\nclass SimplePIController:\n def __init__(self, Kp, Ki):\n self.Kp = Kp\n self.Ki = Ki\n self.set_point = 0.\n self.error = 0.\n self.integral = 0.\n\n def set_desired(self, desired):\n self.set_point = desired\n\n def update(self, measurement):\n # proportional error\n self.error = self.set_point - measurement\n\n # integral error\n self.integral += self.error\n\n return self.Kp * self.error + self.Ki * self.integral\n\n\ncontroller = SimplePIController(0.1, 0.002)\nset_speed = 9\ncontroller.set_desired(set_speed)\n\n\[email protected]('telemetry')\ndef telemetry(sid, data):\n if data:\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))\n\n # # Extract intermediate layer output\n # layer_name = 'first_convolution'\n # intermediate_layer_model = Model(inputs=model.input,\n # outputs=model.get_layer(layer_name).output)\n # intermediate_output = intermediate_layer_model.predict(image_array[None, :, :, :], batch_size=1)\n # intermediate_output = np.squeeze(intermediate_output)\n # intermediate_output = (255.0 / intermediate_output.max() * (intermediate_output - intermediate_output.min())).astype(np.uint8)\n # intermediate_output_img = Image.fromarray(intermediate_output[12])\n #\n # # save intermediate output layer\n # timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n # image_filename = os.path.join('/Users/evanloshin/Documents/Udacity/SDC/behavioral-cloning-data/Intermediate-Layer/', timestamp)\n # intermediate_output_img.save('{}.jpg'.format(image_filename))\n\n throttle = controller.update(float(speed))\n\n print(\"Predicted Steering Angle: {} Throttle: {}\".format(round(steering_angle, 5), round(throttle, 5)))\n send_control(steering_angle, throttle)\n\n # save frame\n if args.image_folder != '':\n timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n image_filename = os.path.join(args.image_folder, timestamp)\n image.save('{}.jpg'.format(image_filename))\n else:\n # NOTE: DON'T EDIT THIS.\n sio.emit('manual', data={}, skip_sid=True)\n\n\[email protected]('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n parser.add_argument(\n 'image_folder',\n type=str,\n nargs='?',\n default='',\n help='Path to image folder. 
This is where the images from the run will be saved.'\n )\n args = parser.parse_args()\n\n # check that model Keras version is same as local Keras version\n f = h5py.File(args.model, mode='r')\n model_version = f.attrs.get('keras_version')\n keras_version = str(keras_version).encode('utf8')\n\n if model_version != keras_version:\n print('You are using Keras version ', keras_version,\n ', but the model was built using ', model_version)\n\n model = load_model(args.model)\n\n if args.image_folder != '':\n print(\"Creating image folder at {}\".format(args.image_folder))\n if not os.path.exists(args.image_folder):\n os.makedirs(args.image_folder)\n else:\n shutil.rmtree(args.image_folder)\n os.makedirs(args.image_folder)\n print(\"RECORDING THIS RUN ...\")\n else:\n print(\"NOT RECORDING THIS RUN ...\")\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n"
] | [
[
"numpy.asarray"
]
] |
frederikschubert/rltime | [
"d1722ffd4cf7b4599655b8d9c64abc243919afc9"
] | [
"rltime/eval.py"
] | [
"\"\"\" Entry point for evaluating/rendering a trained policy. \"\"\"\n\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport time\nimport datetime\n\nfrom rltime.general.config import load_config\nfrom rltime.general.utils import deep_dictionary_update\nfrom rltime.general.type_registry import get_registered_type\nfrom rltime.env_wrappers.common import make_env_creator, EpisodeRecorder\nfrom rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env\nfrom rltime.general.loggers import DirectoryLogger\n\n\ndef create_policy_from_config(config, action_space, observation_space):\n \"\"\"Creates a policy from the given config and spaces\n\n This does not load the weights just creates the policy\n \"\"\"\n if not isinstance(config, dict):\n config = load_config(config)\n\n train_cls = get_registered_type(\n \"trainers\", config['training'].get(\"type\", None))\n assert(hasattr(train_cls, \"create_policy\")), \\\n f\"Config training class {type(train_cls)} does not have a \" \\\n \"'create_policy' method\"\n\n model_config = config.get(\"model\")\n\n return train_cls.create_policy(\n model_config=model_config, action_space=action_space,\n observation_space=observation_space, **config.get(\"policy_args\", {}))\n\n\ndef eval_policy(path, num_envs, episode_count, record=False, record_fps=60,\n render=False, render_fps=None, eps=0.001, conf_update=None):\n \"\"\"Evaluates training result at 'path', loading the last checkpoint\n\n The result is logged to a new line in file 'eval.json' in <path>\n\n Args:\n path: The path containing the training result output to evaluate\n num_envs: Amount of vectorized (sub-process) ENVs to evaluate in\n parallel\n episode_count: The amount of episodes to evaluate total\n record: Whether to record episodes to MP4 (under 'recordings'\n sub-directory in <path>)\n record_fps: If <record>, the FPS to record at (These are raw ENV frames\n before any frame-skipping, so atari would usually be 60)\n render: Whether to render the ENVs in a window in real-time (Tiled if\n num_envs>1)\n render_fps: Frames-Per-Second to sync the rendering to (Valid only for\n render=True), the default (None) renders at max policy speed. These\n are acting steps, so after frame-skipping if active\n eps: Epsilon to use for random action selection\n\n Note: We count the first 'episode_count' episodes that started and not\n ended, as 'ended' is unfair to longer episodes in case of vectorized\n evaluation. 
For Example: Take a policy that achieves 100 reward in 100\n seconds 50% of the time and 0 reward in <1 second 50% of the time.\n So we'd expect if we evaluate 20 episodes to get around ~50 average\n reward (which we would if running 20 episodes serially on a single ENV)\n But if we run 16 ENVs in parallel we will likely get near-0 mean reward\n if we count the first 20 episodes that finished (Since half of the 16\n ENVs immediately end with reward 0 then restart, then half of those\n immediately end with 0 and so on, so we quickly get ~(8+4+2+1) 0-reward\n runs and don't count the ones which are running long and going to reach\n 100 reward), while if we take the first 20 episodes that started (and\n ignore any that started after) we will get the expected result\n \"\"\"\n print(\"Evaluating:\", path)\n assert(num_envs <= episode_count), \\\n \"num_envs can't be higher than the requested episode_count\"\n\n logger = DirectoryLogger(path, use_logging=False, tensorboard=False)\n\n # Load the config from the result path\n config = logger.get_config()\n \n if conf_update:\n config = dict(config) # Avoid changing the passed config\n deep_dictionary_update(config, conf_update)\n\n # Make the env-creaton function based on the config settings\n env_args = config.get(\"env_args\", {})\n if record:\n # If requested, add also an episode-recorder to the ENV stack\n recorder = {\n \"type\": EpisodeRecorder,\n \"args\": {\n \"path\": os.path.join(path, \"recordings\"),\n \"fps\": record_fps\n }\n }\n env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])\n\n env_creator = make_env_creator(config.get(\"env\"), **env_args)\n\n # Create a vectorized ENV\n env = make_sub_proc_vec_env(env_creator, num_envs)\n\n # Create the policy based on the config\n policy = create_policy_from_config(\n config, env.action_space, env.observation_space)\n\n # Load the last checkpoint\n training_step, cp_data = logger.get_checkpoint()\n # Load the weights from the checkpoint to the policy\n policy.load_state(cp_data['policy_state'])\n print(\"Loaded checkpoint from step:\", training_step)\n\n # The initial policy input state\n state = policy.make_input_state(env.reset(), np.array([True] * num_envs))\n\n episodes_started = num_envs\n rewards = []\n lengths = []\n # This signifies the ENV started the episode in time and should be counted\n masks = [True] * num_envs\n # TODO(frederik): Mention mode and difficulty\n print(f\"Running '{config.get('env')}' for {episode_count} episodes\"\n f\" on {num_envs} ENVs\")\n while len(rewards) < episode_count:\n step_start = time.time()\n # Select the next action for each env\n preds = policy.actor_predict(state, timesteps=1)\n actions = preds['actions']\n if eps:\n # Remap to random actions with eps probability\n for i in range(num_envs):\n if np.random.rand() < eps:\n actions[i] = env.action_space.sample()\n # Send the action and get the transition data\n obs, _, dones, info = env.step(actions)\n\n # Check any env if finished\n for i, env_info in enumerate(info):\n # We use the 'real' done/reward from the EpisodeTracker wrapper\n if env_info['episode_info']['done']:\n if masks[i]:\n # Only count the first 'episode_count' that started\n reward = env_info['episode_info']['reward']\n length = env_info['episode_info']['length']\n rewards.append(reward)\n lengths.append(length)\n print(f\"Episode {len(rewards)}/{episode_count} \"\n f\"finished with reward: {reward}\")\n\n episodes_started += 1\n if episodes_started > episode_count:\n masks[i] = False\n\n # Render to screen if 
requested\n if render:\n if render_fps:\n diff = 1./render_fps - (time.time() - step_start)\n if diff > 0:\n time.sleep(diff)\n env.render()\n # Generate the next policy input state\n state = policy.make_input_state(obs, dones)\n\n env.close()\n\n # Log the result\n result = {\n \"step\": training_step,\n \"date\": datetime.datetime.now(),\n \"episodes\": episode_count,\n \"envs\": num_envs,\n **{\n key: {\n \"mean\": np.mean(vals),\n \"min\": np.min(vals),\n \"max\": np.max(vals),\n \"median\": np.median(vals),\n \"std\": np.std(vals),\n } for key, vals in [(\"reward\", rewards), (\"length\", lengths)]\n }\n }\n print(\"Result:\")\n logger.log_result(\"eval\", result, None)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n 'path', type=str,\n help=\"The path to the training directory result to evaluate\")\n parser.add_argument(\n '--num-envs', type=int, default=1,\n help=\"Amount of ENVs to run in parallel\")\n parser.add_argument(\n '--episodes', type=int, default=5,\n help=\"Amount of episodes to run\")\n parser.add_argument(\n '--record', action='store_true',\n help=\"Whether to record episode to MP4 (To a sub-directory in the \"\n \"result path). Warning: If used with --num-envs>1 the last \"\n \"videos will be truncated\")\n parser.add_argument(\n '--record-fps', type=int, default=60,\n help=\"FPS to record at if --record (Typically 60FPS for atari)\")\n parser.add_argument(\n '--render', action='store_true',\n help=\"Whether to render the episodes in real-time\")\n parser.add_argument(\n '--render-fps', type=int, default=0,\n help=\"FPS to sync to if using --render (Set to 0 for full speed), \"\n \"note this is after ENV frame-skipping so if you want 60FPS with \"\n \"frame-skip of 4 use 15 here\")\n parser.add_argument(\n '--eps', type=float, default=0.001,\n help=\"Epsilon value to use for random action selection during \"\n \"evaluation\")\n parser.add_argument(\n '--conf-update', type=str,\n help=\"Optional JSON dictionary string to deep-update the config with\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n conf_update = None if not args.conf_update \\\n else json.loads(args.conf_update)\n\n eval_policy(\n args.path, num_envs=args.num_envs, episode_count=args.episodes,\n record=args.record, record_fps=args.record_fps,\n render=args.render, render_fps=args.render_fps, eps=args.eps, conf_update=conf_update)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.median",
"numpy.max",
"numpy.min",
"numpy.random.rand",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
apexrl/EBIL-torch | [
"8d257d5efa36f7c608085e34a7cdd3e996962d3f"
] | [
"rlkit/core/base_algorithm.py"
] | [
"import abc\nimport pickle\nimport time\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport gtimer as gt\nimport numpy as np\n\nfrom rlkit.core import logger, eval_util\nfrom rlkit.data_management.env_replay_buffer import EnvReplayBuffer\nfrom rlkit.data_management.path_builder import PathBuilder\nfrom rlkit.policies.base import ExplorationPolicy\nfrom rlkit.torch.sac.policies import MakeDeterministic\nfrom rlkit.samplers import PathSampler\nfrom rlkit.envs.wrapped_absorbing_env import WrappedAbsorbingEnv\n\nfrom gym.spaces import Dict\n\n\nclass BaseAlgorithm(metaclass=abc.ABCMeta):\n \"\"\"\n base algorithm for single task setting\n can be used for RL or Learning from Demonstrations\n \"\"\"\n def __init__(\n self,\n env,\n exploration_policy: ExplorationPolicy,\n training_env=None,\n eval_policy=None,\n eval_sampler=None,\n\n num_epochs=100,\n num_steps_per_epoch=10000,\n num_steps_between_train_calls=1000,\n num_steps_per_eval=1000,\n max_path_length=1000,\n min_steps_before_training=0,\n\n replay_buffer=None,\n replay_buffer_size=10000,\n\n freq_saving=1,\n save_replay_buffer=False,\n save_environment=False,\n save_algorithm=False,\n\n save_best=False,\n save_best_starting_from_epoch=0,\n best_key='AverageReturn', # higher is better\n \n no_terminal=False,\n wrap_absorbing=False,\n\n render=False,\n render_kwargs={},\n\n freq_log_visuals=1,\n\n eval_deterministic=False\n ):\n self.env = env\n self.training_env = training_env or pickle.loads(pickle.dumps(env))\n self.exploration_policy = exploration_policy\n\n self.num_epochs = num_epochs\n self.num_env_steps_per_epoch = num_steps_per_epoch\n self.num_steps_between_train_calls = num_steps_between_train_calls\n self.num_steps_per_eval = num_steps_per_eval\n self.max_path_length = max_path_length\n self.min_steps_before_training = min_steps_before_training\n\n self.render = render\n\n self.save_replay_buffer = save_replay_buffer\n self.save_algorithm = save_algorithm\n self.save_environment = save_environment\n self.save_best = save_best\n self.save_best_starting_from_epoch = save_best_starting_from_epoch\n self.best_key = best_key\n self.best_statistic_so_far = float('-Inf')\n \n if eval_sampler is None:\n if eval_policy is None:\n eval_policy = exploration_policy\n eval_policy = MakeDeterministic(eval_policy)\n eval_sampler = PathSampler(\n env,\n eval_policy,\n num_steps_per_eval,\n max_path_length,\n no_terminal=no_terminal,\n render=render,\n render_kwargs=render_kwargs\n )\n self.eval_policy = eval_policy\n self.eval_sampler = eval_sampler\n\n self.action_space = env.action_space\n self.obs_space = env.observation_space\n self.replay_buffer_size = replay_buffer_size\n if replay_buffer is None:\n assert max_path_length < replay_buffer_size\n replay_buffer = EnvReplayBuffer(\n self.replay_buffer_size,\n self.env,\n random_seed=np.random.randint(10000)\n )\n else:\n assert max_path_length < replay_buffer._max_replay_buffer_size\n self.replay_buffer = replay_buffer\n\n self._n_env_steps_total = 0\n self._n_train_steps_total = 0\n self._n_rollouts_total = 0\n self._do_train_time = 0\n self._epoch_start_time = None\n self._algo_start_time = None\n self._old_table_keys = None\n self._current_path_builder = PathBuilder()\n self._exploration_paths = []\n\n if wrap_absorbing:\n # needs to be properly handled both here and in replay buffer\n raise NotImplementedError()\n self.wrap_absorbing = wrap_absorbing\n self.freq_saving = freq_saving\n self.no_terminal = no_terminal\n\n self.eval_statistics = None\n 
self.freq_log_visuals = freq_log_visuals\n\n\n def train(self, start_epoch=0):\n self.pretrain()\n if start_epoch == 0:\n params = self.get_epoch_snapshot(-1)\n logger.save_itr_params(-1, params)\n self.training_mode(False)\n self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch\n gt.reset()\n gt.set_def_unique(False)\n self.start_training(start_epoch=start_epoch)\n\n\n def pretrain(self):\n \"\"\"\n Do anything before the main training phase.\n \"\"\"\n pass\n\n def start_training(self, start_epoch=0):\n self._current_path_builder = PathBuilder()\n observation = self._start_new_rollout()\n\n for epoch in gt.timed_for(\n range(start_epoch, self.num_epochs),\n save_itrs=True,\n ):\n self._start_epoch(epoch)\n for steps_this_epoch in range(self.num_env_steps_per_epoch):\n action, agent_info = self._get_action_and_info(observation)\n if self.render: self.training_env.render()\n\n next_ob, raw_reward, terminal, env_info = (\n self.training_env.step(action)\n )\n if self.no_terminal: terminal = False\n self._n_env_steps_total += 1\n\n reward = np.array([raw_reward])\n terminal = np.array([terminal])\n self._handle_step(\n observation,\n action,\n reward,\n next_ob,\n np.array([False]) if self.no_terminal else terminal,\n absorbing=np.array([0., 0.]),\n agent_info=agent_info,\n env_info=env_info,\n )\n if terminal[0]:\n if self.wrap_absorbing:\n raise NotImplementedError()\n '''\n If we wrap absorbing states, two additional\n transitions must be added: (s_T, s_abs) and\n (s_abs, s_abs). In Disc Actor Critic paper\n they make s_abs be a vector of 0s with last\n dim set to 1. Here we are going to add the following:\n ([next_ob,0], random_action, [next_ob, 1]) and\n ([next_ob,1], random_action, [next_ob, 1])\n This way we can handle varying types of terminal states.\n '''\n # next_ob is the absorbing state\n # for now just taking the previous action\n self._handle_step(\n next_ob,\n action,\n # env.action_space.sample(),\n # the reward doesn't matter\n reward,\n next_ob,\n np.array([False]),\n absorbing=np.array([0.0, 1.0]),\n agent_info=agent_info,\n env_info=env_info\n )\n self._handle_step(\n next_ob,\n action,\n # env.action_space.sample(),\n # the reward doesn't matter\n reward,\n next_ob,\n np.array([False]),\n absorbing=np.array([1.0, 1.0]),\n agent_info=agent_info,\n env_info=env_info\n )\n self._handle_rollout_ending()\n observation = self._start_new_rollout()\n elif len(self._current_path_builder) >= self.max_path_length:\n self._handle_rollout_ending()\n observation = self._start_new_rollout()\n else:\n observation = next_ob\n\n if self._n_env_steps_total % self.num_steps_between_train_calls == 0:\n gt.stamp('sample')\n self._try_to_train(epoch)\n gt.stamp('train')\n\n gt.stamp('sample')\n self._try_to_eval(epoch)\n gt.stamp('eval')\n self._end_epoch()\n\n def _try_to_train(self, epoch):\n if self._can_train():\n self.training_mode(True)\n self._do_training(epoch)\n self._n_train_steps_total += 1\n self.training_mode(False)\n\n def _try_to_eval(self, epoch):\n\n if self._can_evaluate():\n # save if it's time to save\n if (epoch % self.freq_saving == 0) or (epoch + 1 >= self.num_epochs):\n # if epoch + 1 >= self.num_epochs:\n # epoch = 'final'\n logger.save_extra_data(self.get_extra_data_to_save(epoch))\n params = self.get_epoch_snapshot(epoch)\n logger.save_itr_params(epoch, params)\n\n self.evaluate(epoch)\n\n logger.record_tabular(\n \"Number of train calls total\",\n self._n_train_steps_total,\n )\n logger.record_tabular(\n \"Number of env steps total\",\n 
self._n_env_steps_total,\n )\n logger.record_tabular(\n \"Number of rollouts total\",\n self._n_rollouts_total,\n )\n\n times_itrs = gt.get_times().stamps.itrs\n train_time = times_itrs['train'][-1]\n sample_time = times_itrs['sample'][-1]\n eval_time = times_itrs['eval'][-1] if epoch > 0 else 0\n epoch_time = train_time + sample_time + eval_time\n total_time = gt.get_times().total\n\n logger.record_tabular('Train Time (s)', train_time)\n logger.record_tabular('(Previous) Eval Time (s)', eval_time)\n logger.record_tabular('Sample Time (s)', sample_time)\n logger.record_tabular('Epoch Time (s)', epoch_time)\n logger.record_tabular('Total Train Time (s)', total_time)\n\n logger.record_tabular(\"Epoch\", epoch)\n logger.dump_tabular(with_prefix=False, with_timestamp=False)\n else:\n logger.log(\"Skipping eval for now.\")\n\n def _can_evaluate(self):\n \"\"\"\n One annoying thing about the logger table is that the keys at each\n iteration need to be the exact same. So unless you can compute\n everything, skip evaluation.\n\n A common example for why you might want to skip evaluation is that at\n the beginning of training, you may not have enough data for a\n validation and training set.\n\n :return:\n \"\"\"\n return (\n len(self._exploration_paths) > 0\n and self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training\n )\n\n def _can_train(self):\n return self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training\n\n def _get_action_and_info(self, observation):\n \"\"\"\n Get an action to take in the environment.\n :param observation:\n :return:\n \"\"\"\n self.exploration_policy.set_num_steps_total(self._n_env_steps_total)\n return self.exploration_policy.get_action(\n observation,\n )\n\n def _start_epoch(self, epoch):\n self._epoch_start_time = time.time()\n self._exploration_paths = []\n self._do_train_time = 0\n logger.push_prefix('Iteration #%d | ' % epoch)\n\n def _end_epoch(self):\n self.eval_statistics = None\n logger.log(\"Epoch Duration: {0}\".format(\n time.time() - self._epoch_start_time\n ))\n logger.log(\"Started Training: {0}\".format(self._can_train()))\n logger.pop_prefix()\n\n def _start_new_rollout(self):\n self.exploration_policy.reset()\n return self.training_env.reset()\n\n def _handle_path(self, path):\n \"\"\"\n Naive implementation: just loop through each transition.\n :param path:\n :return:\n \"\"\"\n for (\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info,\n env_info\n ) in zip(\n path[\"observations\"],\n path[\"actions\"],\n path[\"rewards\"],\n path[\"next_observations\"],\n path[\"terminals\"],\n path[\"agent_infos\"],\n path[\"env_infos\"],\n ):\n self._handle_step(\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n self._handle_rollout_ending()\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n absorbing,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n self._current_path_builder.add_all(\n observations=observation,\n actions=action,\n rewards=reward,\n next_observations=next_observation,\n terminals=terminal,\n absorbing=absorbing,\n agent_infos=agent_info,\n env_infos=env_info,\n )\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n absorbing=absorbing,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _handle_rollout_ending(self):\n 
\"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n self.replay_buffer.terminate_episode()\n self._n_rollouts_total += 1\n if len(self._current_path_builder) > 0:\n self._exploration_paths.append(\n self._current_path_builder\n )\n self._current_path_builder = PathBuilder()\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Probably will be overridden by each algorithm\n \"\"\"\n data_to_save = dict(\n epoch=epoch,\n exploration_policy=self.exploration_policy,\n )\n if self.save_environment:\n data_to_save['env'] = self.training_env\n return data_to_save\n \n # @abc.abstractmethod\n # def load_snapshot(self, snapshot):\n # \"\"\"\n # Should be implemented on a per algorithm basis\n # taking into consideration the particular\n # get_epoch_snapshot implementation for the algorithm\n # \"\"\"\n # pass\n\n def get_extra_data_to_save(self, epoch):\n \"\"\"\n Save things that shouldn't be saved every snapshot but rather\n overwritten every time.\n :param epoch:\n :return:\n \"\"\"\n if self.render:\n self.training_env.render(close=True)\n data_to_save = dict(\n epoch=epoch,\n )\n if self.save_environment:\n data_to_save['env'] = self.training_env\n if self.save_replay_buffer:\n data_to_save['replay_buffer'] = self.replay_buffer\n if self.save_algorithm:\n data_to_save['algorithm'] = self\n return data_to_save\n\n @abc.abstractmethod\n def training_mode(self, mode):\n \"\"\"\n Set training mode to `mode`.\n :param mode: If True, training will happen (e.g. set the dropout\n probabilities to not all ones).\n \"\"\"\n pass\n\n\n @abc.abstractmethod\n def _do_training(self):\n \"\"\"\n Perform some update, e.g. perform one gradient step.\n :return:\n \"\"\"\n pass\n\n\n def evaluate(self, epoch):\n \"\"\"\n Evaluate the policy, e.g. save/print progress.\n :param epoch:\n :return:\n \"\"\"\n statistics = OrderedDict()\n try:\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n except:\n print('No Stats to Eval')\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Test\",\n ))\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n\n if hasattr(self.env, \"log_diagnostics\"):\n self.env.log_diagnostics(test_paths)\n if hasattr(self.env, \"log_statistics\"):\n statistics.update(self.env.log_statistics(test_paths))\n if epoch % self.freq_log_visuals == 0:\n if hasattr(self.env, \"log_visuals\"):\n self.env.log_visuals(test_paths, epoch, logger.get_snapshot_dir())\n \n average_returns = eval_util.get_average_returns(test_paths)\n statistics['AverageReturn'] = average_returns\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n \n best_statistic = statistics[self.best_key]\n if best_statistic > self.best_statistic_so_far:\n self.best_statistic_so_far = best_statistic\n if self.save_best and epoch >= self.save_best_starting_from_epoch:\n data_to_save = {\n 'epoch': epoch,\n 'statistics': statistics\n }\n data_to_save.update(self.get_epoch_snapshot(epoch))\n logger.save_extra_data(data_to_save, 'best.pkl')\n print('\\n\\nSAVED BEST\\n\\n')\n"
] | [
[
"numpy.array",
"numpy.random.randint"
]
] |
VolkerH/gputools | [
"b8732c3cf82b96c6960497e6d82ce6b2bac463aa"
] | [
"gputools/convolve/minmax_filter.py"
] | [
"from __future__ import print_function, unicode_literals, absolute_import, division\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport os\nimport numpy as np\nfrom gputools import OCLArray, OCLProgram, get_device\n\nfrom gputools.core.ocltypes import assert_bufs_type\nfrom gputools.utils.tile_iterator import tile_iterator\nfrom ._abspath import abspath\n\n\ndef _filter_max_2_gpu(data_g, size=10, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"max_2_x\", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))\n prog.run_kernel(\"max_2_y\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))\n\n return res_g\n\n\ndef _filter_max_3_gpu(data_g, size=10, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"max_3_x\", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))\n prog.run_kernel(\"max_3_y\", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))\n prog.run_kernel(\"max_3_z\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))\n\n return res_g\n\n\n\n\ndef _max_filter_gpu(data_g, size=5, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n assert (len(data_g.shape) == len(size))\n\n if len(data_g.shape) == 2:\n return _filter_max_2_gpu(data_g, size=size, res_g=res_g)\n elif len(data_g.shape) == 3:\n return _filter_max_3_gpu(data_g, size=size, res_g=res_g)\n else:\n raise NotImplementedError(\"only 2 or 3d arrays are supported for now\")\n\n\ndef _max_filter_numpy(data, size=5):\n data_g = OCLArray.from_array(data.astype(np.float32))\n return _max_filter_gpu(data_g, size=size).get()\n\n\ndef max_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):\n \"\"\"\n maximum filter of given size\n\n Parameters\n ----------\n data: 2 or 3 dimensional ndarray or OCLArray of type float32\n input data\n size: scalar, tuple\n the size of the patch to consider\n res_g: OCLArray\n store result in buffer if given\n sub_blocks:\n perform over subblock tiling (only if data is ndarray)\n\n Returns\n -------\n filtered image or None (if OCLArray)\n \"\"\"\n\n if np.isscalar(size):\n size = (size,)*len(data.shape)\n\n if isinstance(data, np.ndarray):\n data = np.ascontiguousarray(data)\n if set(sub_blocks) == {1} or sub_blocks is None:\n return _max_filter_numpy(data, size)\n else:\n # cut the image into tile and operate on every of them\n N_sub = [int(np.ceil(1. 
* n / s)) for n, s in zip(data.shape, sub_blocks)]\n Npads = tuple(map(lambda x: x//2, size))\n res = np.empty(data.shape, np.float32)\n for i, (data_tile, data_s_src, data_s_dest) \\\n in enumerate(tile_iterator(data, blocksize=N_sub,\n padsize=Npads,\n mode=\"constant\")):\n res_tile = _max_filter_numpy(data_tile.copy(),\n size)\n res[data_s_src] = res_tile[data_s_dest]\n return res\n\n\n elif isinstance(data, OCLArray):\n return _max_filter_gpu(data, size=size, res_g=res_g)\n else:\n raise TypeError(\"array argument (1) has bad type: %s\" % type(data))\n\n\n\ndef _filter_min_2_gpu(data_g, size=(10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"min_2_x\", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))\n prog.run_kernel(\"min_2_y\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))\n\n return res_g\n\n\ndef _filter_min_3_gpu(data_g, size=(10,10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"min_3_x\", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))\n prog.run_kernel(\"min_3_y\", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))\n prog.run_kernel(\"min_3_z\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))\n\n return res_g\n\n\n\n\ndef _min_filter_gpu(data_g, size=(10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n assert (len(data_g.shape)==len(size))\n\n if len(data_g.shape) == 2:\n return _filter_min_2_gpu(data_g, size=size, res_g=res_g)\n elif len(data_g.shape) == 3:\n return _filter_min_3_gpu(data_g, size=size, res_g=res_g)\n else:\n raise NotImplementedError(\"only 2 or 3d arrays are supported for now\")\n\n\ndef _min_filter_numpy(data, size=(10,10)):\n data_g = OCLArray.from_array(data.astype(np.float32))\n return _min_filter_gpu(data_g, size=size).get()\n\n\ndef min_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):\n \"\"\"\n minimum filter of given size\n\n Parameters\n ----------\n data: 2 or 3 dimensional ndarray or OCLArray of type float32\n input data\n size: scalar, tuple\n the size of the patch to consider\n res_g: OCLArray\n store result in buffer if given\n sub_blocks:\n perform over subblock tiling (only if data is ndarray)\n\n Returns\n -------\n filtered image or None (if OCLArray)\n \"\"\"\n\n if np.isscalar(size):\n size = (size,)*len(data.shape)\n\n if isinstance(data, np.ndarray):\n if set(sub_blocks) == {1} or sub_blocks is None:\n return _min_filter_numpy(data, size)\n else:\n # cut the image into tile and operate on every of them\n N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]\n Npads = tuple(map(lambda x: x//2, size))\n res = np.empty(data.shape, np.float32)\n for i, (data_tile, data_s_src, data_s_dest) \\\n in enumerate(tile_iterator(data, blocksize=N_sub,\n padsize=Npads,\n mode=\"constant\")):\n res_tile = _min_filter_numpy(data_tile.copy(),\n size)\n res[data_s_src] = res_tile[data_s_dest]\n return res\n\n\n elif isinstance(data, OCLArray):\n return _min_filter_gpu(data, size=size, res_g=res_g)\n else:\n raise TypeError(\"array argument (1) has bad type: %s\" % type(data))\n"
] | [
[
"numpy.ceil",
"numpy.empty",
"numpy.int32",
"numpy.ascontiguousarray",
"numpy.isscalar"
]
] |
domingoesteban/robolearn | [
"0d20125425c352b80ef2eeed1c0b11ab6497b11a"
] | [
"robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py"
] | [
"import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\n# LOG_SIG_MAX = 2\n# LOG_SIG_MIN = -3.0\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n# SIG_MAX = 7.38905609893065\n# SIG_MIN = 0.049787068367863944\n\n# LOG_MIX_COEFF_MIN = -10\n# LOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5\n# LOG_MIX_COEFF_MIN = -1\n# LOG_MIX_COEFF_MAX = 1 #-4.5e-5\n\n# EPS = 1e-12\nEPS = 1e-8\n\n\nclass TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianPrompMultiPolicy(...)\n action, policy_dict = policy(obs)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_w_init='xavier_normal',\n output_b_init_val=0,\n pol_output_activation='linear',\n mix_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n softmax_weights=False,\n **kwargs\n ):\n self.save_init_params(locals())\n PyTorchModule.__init__(self)\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. 
Mixing Layers\n # self.mfc_last = None # Below is instantiated\n\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n mixture_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # Multi-Policy Log-Stds Last Layers\n self.stds = stds\n self.log_std = list()\n if stds is None:\n self._pfc_log_std_lasts = list()\n for pol_idx in range(self._n_subpolicies):\n last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc_log_std,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std)\n self._pfc_log_std_lasts.append(last_pfc_log_std)\n self.add_policies_module(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std, idx=pol_idx)\n\n else:\n for std in stds:\n self.log_std.append(torch.log(stds))\n assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX\n\n # 
############# #\n # Mixing Layers #\n # ############# #\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n self.mfc_sigmoid = nn.Sigmoid()\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n\n def get_action(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool): True for using mean. 
False, sample from dist.\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._shared_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n # Last Mean Layers\n means = torch.cat(\n [(\n self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # Last Log-Std Layers\n if self.stds is None:\n log_stds = torch.cat(\n [(\n self._pol_output_activation(\n self._pfc_log_std_lasts[pp](hs[pp])\n )\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # # log_std option 1:\n # log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n # log_std option 2:\n log_stds = torch.tanh(log_stds)\n log_stds = \\\n LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)\n\n stds = torch.exp(log_stds)\n variances = stds**2\n\n else:\n log_stds = self.log_std\n stds = self.stds\n variances = stds**2\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = h.clone()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mixture_coeff = \\\n self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)\n\n mixture_coeff = self.mfc_sigmoid(mixture_coeff)\n\n # if torch.isnan(mixture_coeff).any():\n # raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %\n # mixture_coeff)\n #\n # if torch.isnan(means).any():\n # raise ValueError('Some means are NAN: %s' %\n # means)\n #\n # if torch.isnan(stds).any():\n # raise ValueError('Some stds are NAN: %s' %\n # stds)\n\n if pol_idx is None:\n # Calculate weighted means and stds (and log_stds)\n if optimize_policies:\n sig_invs = mixture_coeff/variances\n else:\n sig_invs = mixture_coeff/variances.detach()\n\n variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)\n\n if optimize_policies:\n mean = variance*torch.sum(\n means*sig_invs,\n dim=1,\n keepdim=False\n )\n else:\n mean = variance*torch.sum(\n means.detach()*sig_invs,\n dim=1,\n keepdim=False\n )\n\n # log_std option 1:\n std = torch.sqrt(variance)\n std = torch.clamp(std,\n min=math.exp(LOG_SIG_MIN),\n max=math.exp(LOG_SIG_MAX))\n log_std = torch.log(std)\n # # log_std option 2:\n # variance = torch.tanh(variance)\n # variance = (\n # math.exp(LOG_SIG_MIN)**2 +\n # 0.5*(math.exp(LOG_SIG_MAX)**2 - math.exp(LOG_SIG_MIN)**2) *\n # (variance + 1)\n # )\n # std = 
torch.sqrt(variance)\n # log_std = torch.log(std)\n\n # TODO: Remove the following?\n # log_std = torch.logsumexp(\n # log_stds + log_mixture_coeff.reshape(-1,\n # self.action_dim,\n # self._n_subpolicies),\n # dim=-1,\n # keepdim=False\n # ) - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)\n\n # log_std = torch.log(std)\n\n else:\n index = self._pols_idxs[pol_idx]\n mean = \\\n torch.index_select(means, dim=1, index=index).squeeze(1)\n std = \\\n torch.index_select(stds, dim=1, index=index).squeeze(1)\n log_std = \\\n torch.index_select(log_stds, dim=1, index=index).squeeze(1)\n variance = \\\n torch.index_select(variances, dim=1, index=index).squeeze(1)\n\n pre_tanh_value = None\n log_prob = None\n pre_tanh_values = None\n log_probs = None\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n # # Using this distribution instead of TanhMultivariateNormal\n # # because it has Diagonal Covariance.\n # # Then, a collection of n independent Gaussian r.v.\n # tanh_normal = TanhNormal(mean, std)\n #\n # # # It is the Lower-triangular factor of covariance because it is\n # # # Diagonal Covariance\n # # scale_trils = torch.stack([torch.diag(m) for m in std])\n # # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)\n #\n # if return_log_prob:\n # log_prob = tanh_normal.log_prob(\n # action,\n # pre_tanh_value=pre_tanh_value\n # )\n # log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n noise = self._normal_dist.sample((nbatch,))\n\n pre_tanh_value = std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # Log probability: Main Policy\n log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \\\n - log_std - math.log(math.sqrt(2*math.pi))\n log_prob -= torch.log(\n # torch.clamp(1. - action**2, 0, 1)\n clip_but_pass_gradient(1. - action**2, 0, 1)\n + 1.e-6\n )\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n # Log probability: Sub-Policies\n log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\\\n - log_stds - math.log(math.sqrt(2*math.pi))\n log_probs -= torch.log(\n # torch.clamp(1. - actions**2, 0, 1)\n clip_but_pass_gradient(1. 
- actions**2, 0, 1)\n + 1.e-6\n )\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n # if torch.isnan(action).any():\n # raise ValueError('ACTION NAN')\n #\n # if torch.isnan(actions).any():\n # raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n std=std,\n log_std=log_std,\n log_prob=log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n\n\ndef clip_but_pass_gradient(x, l=-1., u=1.):\n clip_up = (x > u).to(ptu.device, dtype=torch.float32)\n clip_low = (x < l).to(ptu.device, dtype=torch.float32)\n return x + ((u - x)*clip_up + (l - x)*clip_low).detach()\n"
] | [
[
"torch.sum",
"torch.nn.Linear",
"torch.nn.modules.normalization.LayerNorm",
"torch.nn.BatchNorm1d",
"torch.sqrt",
"torch.exp",
"torch.batch_norm",
"torch.log",
"torch.tanh",
"torch.index_select",
"torch.nn.Sigmoid"
]
] |
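The forward pass in the entry above composes the sub-policy Gaussians by precision weighting: each sub-policy contributes a precision `mixture_coeff / variance`, and the combined mean and variance follow the product-of-Gaussians rule before `tanh` squashing. Below is a minimal NumPy sketch of just that composition step; all means, variances and weights are made-up illustrative values, not taken from the entry.

```python
import numpy as np

# Toy version of the mixing step in forward(): each sub-policy p contributes a
# Gaussian N(mu_p, var_p) and a sigmoid mixing weight w_p; the composed Gaussian
# uses the precision sig_inv = w / var (product-of-Gaussians style weighting).
means = np.array([[0.2, -0.1], [0.5, 0.3]])        # Npols x dA (illustrative)
variances = np.array([[0.04, 0.09], [0.16, 0.01]])
weights = np.array([[0.7, 0.2], [0.3, 0.8]])       # sigmoid outputs in (0, 1)

sig_invs = weights / variances                     # per-policy precisions
variance = 1.0 / sig_invs.sum(axis=0)              # composed variance per action dim
mean = variance * (means * sig_invs).sum(axis=0)   # composed mean per action dim

action = np.tanh(mean)                             # deterministic action = tanh(mean)
print(mean, variance, action)
```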
kcexn/singular-value-decomposition | [
"63e2a23f9f0db9aa361e338b8065d59b80f7649e"
] | [
"coded_distributed_computing.py"
] | [
"''' coded_distributed_computing\nThis module contains functions related to a study of the coded distributed computing model.\n\n'''\nimport numpy as np\n\ndef encode_matrix(A: np.matrix, G: np.matrix) -> np.matrix:\n ''' encode_matrix\n Parameters:\n ---\n A: np.matrix, input matrix to code.\n G: np.matrix, generator matrix to encode A with.\n ---\n Returns:\n ---\n A*G: np.matrix, output encoded matrix.\n ---\n Description:\n ---\n Following van Lint's text \"Introduction to Coding Theory\", \n I am constructing linear block codes using a generator matrix G \n and an input matrix A. \n\n Actually typically the codes would be constructed using a \n generator matrix G and an input vector k which would create an \n output message, a vector, m.\n\n Following from my conversation with Jingge last week though. \n I'm convinced that encoding a matrix to preserve the \n matrix vector multiplication Ax is exactly the same as encoding\n multiple messages across time simultaneously. i.e. If I were to \n accumulate n messages (column vectors) of size k and concatenated them \n I would end up with a matrix of size k x n (rows and columns). Encoding \n it with the generator matrix G would give me a matrix of size m x n. Where\n each column in the matrix A*G can be considered one message to be delivered \n over time. The matrix vector multiplication Ax is simply the rows of multiple\n messages concatenated together multiplied with the vector x.\n\n This is not a super great analogue, because obviously matrices in a matrix vector \n multiplication are shared with everyone all at once not one column at a time. \n But I think it's a useful way to reason about the coding properties of \n the matrix A*G. And I believe opens up the possibilities of \n matrix encodings to ALL codes that can be represented as linear block codes \n (which I believe are simply, ALL linear codes).\n\n '''\n return np.matmul(A,G)\n\n\n\n"
] | [
[
"numpy.matmul"
]
] |
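A short illustration of what `encode_matrix` buys in the coded-computing setting: with a systematic generator matrix (identity plus a parity column), the extra column of `A @ G` is redundant, so an erased column of the encoded result can be rebuilt from the surviving ones. The particular `G` below is a hypothetical example, not one taken from the entry.

```python
import numpy as np

# Systematic generator matrix G = [I | p]: the parity column of A @ G is the
# row-sum of A, so any single lost column of the encoding is recoverable.
A = np.matrix([[1., 2.],
               [3., 4.],
               [5., 6.]])
G = np.matrix([[1., 0., 1.],
               [0., 1., 1.]])                       # identity plus one parity column

C = np.matmul(A, G)                                 # same operation as encode_matrix(A, G)
assert np.allclose(C[:, 2], C[:, 0] + C[:, 1])      # parity column = sum of data columns

# If column 0 of C is lost, it can be rebuilt from the other two columns.
recovered = C[:, 2] - C[:, 1]
assert np.allclose(recovered, A[:, 0])
```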
sanzgiri/MaatPy | [
"381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965"
] | [
"maatpy/dataset.py"
] | [
"import warnings\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.utils import check_X_y\nfrom sklearn.utils import Bunch\nfrom sklearn.preprocessing import LabelEncoder\nfrom imblearn.under_sampling.prototype_selection import RandomUnderSampler\nfrom imblearn.over_sampling import RandomOverSampler\n\n\nclass Dataset(Bunch):\n\n def __init__(self, data=None, target=None, feature_names=None, target_names=None):\n \"\"\"\n\n :param data:\n :param target:\n :param feature_names:\n :param target_names:\n \"\"\"\n self.data = data\n self.target = target\n self.feature_names = feature_names\n self.target_names = target_names\n\n def make_imbalance(self, ratio=None, random_state=None):\n \"\"\"\n Built on the imblearn.make_imbalance function\n :param ratio: dict or list\n Ratio to use for resampling the data set.\n - When 'dict', the keys correspond to the targeted classes. The values correspond to the desired number\n of samples for each targeted class.\n - When 'list', the values correspond to the proportions of samples (float) assigned to each class. In\n this case the number of samples is maintained but the samples per class are adjusted to the given\n proportions.\n :param random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n :return:\n \"\"\"\n x, y = check_X_y(self.data, self.target)\n original_dataset_size = len(y)\n n_classes = len(self.target_names)\n\n if isinstance(ratio, dict):\n ratio_ = ratio\n\n elif isinstance(ratio, list):\n weights = ratio\n if len(weights) != n_classes:\n raise ValueError(\"{} classes available but only {} values provided\".format(n_classes, len(weights)))\n ratio_ = {}\n for i in range(n_classes):\n ratio_[i] = int(round(weights[i] * original_dataset_size, 0))\n\n else:\n raise TypeError(\"Expected dict or list; {} provided\".format(type(ratio)))\n\n if sum(ratio_.values()) < original_dataset_size:\n rus = RandomUnderSampler(ratio=ratio_, random_state=random_state)\n self.data, self.target = rus.fit_sample(x, y)\n\n elif sum(ratio_.values()) == original_dataset_size:\n original_distribution = Counter(y)\n interim_ratio = {}\n for key in ratio_:\n if ratio_[key] >= original_distribution[key]:\n interim_ratio[key] = original_distribution[key]\n else:\n interim_ratio[key] = ratio_[key]\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n rus = RandomUnderSampler(ratio=interim_ratio, random_state=random_state)\n x_int, y_int = rus.fit_sample(x, y)\n with warnings.catch_warnings():\n # Silencing RandomOverSampler UserWarning: After over-sampling, the number of samples in class A will\n # be larger than the number of samples in the majority class\n warnings.simplefilter(\"ignore\")\n ros = RandomOverSampler(ratio=ratio_, random_state=random_state)\n self.data, self.target = ros.fit_sample(x_int, y_int)\n\n else:\n raise ValueError(\"The requested dataset cannot be larger than the original dataset\")\n\n def load_from_csv(self, filename, sep=',', output_column=None, ignore=None):\n \"\"\"\n\n :param filename: path to filename containing the data to load\n :param sep: field separator; default ','\n :param output_column: column containing the outcome\n :param ignore: column to remove from data; str or list\n 
:return:\n \"\"\"\n df = pd.read_csv(filename, sep=sep)\n if output_column:\n le = LabelEncoder()\n le.fit(list(df[output_column]))\n self.target_names = le.classes_\n self.target = le.transform(list(df[output_column]))\n df.drop(output_column, axis=1, inplace=True)\n else:\n raise ValueError('Please define an output_column; column containing the class defined for each observation '\n '(row)')\n if ignore is not None:\n df.drop(ignore, axis=1, inplace=True)\n self.feature_names = df.columns\n self.data = df.values\n\n\ndef simulate_dataset(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1,\n weights=None, flip_y=0.01, class_sep=1.0, random_state=None):\n \"\"\"\n Using sklearn.make_classification function to return a Dataset object\n :param n_samples: int, optional (default=100).\n The number of samples.\n :param n_features: int, optional (default=2)\n The total number of features. These comprise 'n_informative' informative features and 'n_redundant'\n redundant features.\n :param n_informative: int, optional (default=2)\n The number of informative features. Each class is composed of a number of gaussian clusters each located\n around the vertices of a hypercube in a subspace of dimension 'n_informative'. For each cluster,\n informative features are drawn independently from N(0, 1) and then randomly linearly combined within\n each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube.\n :param n_redundant: int, optional (default=0)\n The number of redundant features. These features are generated a random linear combinations of the\n informative features.\n :param n_classes: int, optional (default=2)\n The number of classes (or labels) of the classification problem.\n :param n_clusters_per_class: int, optional (default=1)\n The number of clusters per class.\n :param weights: list of floats or None (default=None)\n The proportions of samples assigned to each class. If None, then classes are balanced. Note that if\n 'len(weights) == n_classes - 1' then the last class weight is automatically inferred. More than\n 'n_samples' samples may be returned if the sum of `weights` exceeds 1.\n :param flip_y: float, optional (default=0.01)\n The fraction of samples whose class are randomly exchanged. Larger values introduce noise in the labels\n and make the classification task harder.\n :param class_sep: float, optional (default=1.0)\n The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the\n classification task easier.\n :param random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n :return: Dataset object\n \"\"\"\n\n data, target = make_classification(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, n_redundant=n_redundant,\n n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,\n weights=weights, flip_y=flip_y, class_sep=class_sep,\n random_state=random_state)\n feature_names = ['feature#{}'.format(i) for i in range(data.shape[1])]\n target_names = ['class#{}'.format(i) for i in np.unique(target)]\n\n return Dataset(data, target, feature_names, target_names)\n"
] | [
[
"pandas.read_csv",
"sklearn.datasets.make_classification",
"sklearn.preprocessing.LabelEncoder",
"sklearn.utils.check_X_y",
"numpy.unique"
]
] |
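Assuming `maatpy.dataset` is importable and its imblearn dependency still accepts the `ratio=` keyword used above, a hypothetical round trip through the two helpers in this entry looks like the following sketch; the 90/10 split and sample counts are illustrative.

```python
from collections import Counter
from maatpy.dataset import simulate_dataset

# Simulate a balanced two-class problem, then skew it to a 90/10 split.
# The list form of `ratio` keeps the total sample count and resamples classes.
ds = simulate_dataset(n_samples=1000, n_classes=2, weights=[0.5, 0.5], random_state=0)
ds.make_imbalance(ratio=[0.9, 0.1], random_state=0)

print(Counter(ds.target))   # roughly Counter({0: 900, 1: 100})
```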
beesk135/ReID-Survey | [
"d1467c0ce5d3ca78640196360a05df9ff9f9f42a"
] | [
"evaluate/__init__.py"
] | [
"import torch \n\nfrom .eval_reid import eval_func\n\ndef euclidean_dist(x, y):\n m, n = x.size(0), y.size(0)\n xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, x, y.t())\n dist = dist.clamp(min=1e-12).sqrt()\n return dist\n"
] | [
[
"torch.pow"
]
] |
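The pairwise distance in this entry relies on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x·y. Below is a small sanity check of the same computation against `torch.cdist`; it uses the keyword form of `addmm_`, since the positional beta/alpha form in the snippet above is deprecated in recent PyTorch releases.

```python
import torch

# Check the squared-norm expansion against torch.cdist on random inputs.
x = torch.randn(4, 8)
y = torch.randn(5, 8)

xx = torch.pow(x, 2).sum(1, keepdim=True).expand(4, 5)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(5, 4).t()
dist = (xx + yy).addmm_(x, y.t(), beta=1, alpha=-2).clamp(min=1e-12).sqrt()

assert torch.allclose(dist, torch.cdist(x, y), atol=1e-5)
```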
xujin1184104394/coco-analyze | [
"fefe16025554dbf831e71d32d6601dd8f00286a8"
] | [
"analysisAPI/scoringErrors.py"
] | [
"## imports\nimport os, time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# package imports \nfrom . import utilities\n\ndef scoringErrors( coco_analyze, oks, imgs_info, saveDir ):\n loc_dir = saveDir + '/scoring_errors'\n if not os.path.exists(loc_dir):\n os.makedirs(loc_dir)\n f = open('%s/std_out.txt'%loc_dir, 'w')\n f.write(\"Running Analysis: [Scoring Errors]\\n\\n\")\n tic = time.time()\n paths = {}\n\n # set parameters for the scoring errors analysis\n coco_analyze.params.areaRng = [[32 ** 2, 1e5 ** 2]]\n coco_analyze.params.areaRngLbl = ['all']\n coco_analyze.params.oksThrs = [oks]\n coco_analyze.cocoEval.params.useGtIgnore = 0\n coco_analyze.cocoEval.params.gtIgnoreIds = []\n coco_analyze.analyze(check_kpts=False, check_scores=True, check_bckgd=False)\n coco_analyze.summarize(makeplots=True, savedir=loc_dir, team_name='scoring')\n paths['opt_score_prc'] = \\\n '%s/error_prc_[scoring][%d][%s][%d].pdf'%(loc_dir, int(oks*100),\n coco_analyze.params.areaRngLbl[0],\n coco_analyze.params.maxDets[0])\n corrected_dts = coco_analyze.corrected_dts['all']\n\n # dictionary of all corrected detections grouped by image id\n all_dts = {}\n for d in coco_analyze.corrected_dts['all']:\n if d['image_id'] not in all_dts:\n all_dts[d['image_id']] = {}\n all_dts[d['image_id']]['dts'] = [d]\n else:\n all_dts[d['image_id']]['dts'].append(d)\n\n subopt_order_images = []\n all_gts = {}; all_dtgt_oks = {}\n for imgId in imgs_info:\n if imgId in all_dts:\n dts = all_dts[imgId]['dts']\n all_dts[imgId]['score'] = np.argsort([-d['score'] for d in dts], kind='mergesort')\n all_dts[imgId]['opt_score'] = np.argsort([-d['opt_score'] for d in dts], kind='mergesort')\n\n if list(all_dts[imgId]['score']) != list(all_dts[imgId]['opt_score']):\n subopt_order_images.append(imgId)\n else:\n dts = []\n\n gts = coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds(imgIds=imgId))\n not_ignore_gts = []\n for g in gts:\n # gt ignores are discarded\n if g['ignore'] or (g['area']<coco_analyze.params.areaRng[0][0] or g['area']>coco_analyze.params.areaRng[0][1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n not_ignore_gts.append(g)\n\n # compute the oks matrix between the dts and gts of each image\n image_oks_mat = utilities.compute_oks(dts, not_ignore_gts)\n if len(image_oks_mat) == 0:\n all_gts[imgId] = not_ignore_gts\n all_dtgt_oks[imgId] = []\n\n else:\n # sort the ground truths by their max oks value with any detection\n maxoksvals = [-max(image_oks_mat[:,j]) for j in range(len(not_ignore_gts))]\n gtind = np.argsort(maxoksvals, kind='mergesort')\n all_gts[imgId] = [not_ignore_gts[j] for j in gtind]\n all_dtgt_oks[imgId] = image_oks_mat[:,gtind]\n\n ## check how many images have optimal score and original score with same order\n perc = 100*len(subopt_order_images)/float(len(all_dts))\n f.write(\"Num. 
of imgs with sub-optimal detections order: [%d]/[%d] (%.2f%%).\\n\\n\"%(len(subopt_order_images), len(all_dts), perc))\n\n ## find scoring errors before and after rescoring\n min_match_oks = .5\n scoring_errors = {'score':[],'opt_score':[]}\n for score_type in scoring_errors.keys():\n for ind, imgId in enumerate(all_dts.keys()):\n dind = all_dts[imgId][score_type]\n sorted_dts = [all_dts[imgId]['dts'][i] for i in dind]\n gtIds = [g['id'] for g in all_gts[imgId]]\n if len(sorted_dts) * len(gtIds) == 0: continue\n\n used_dts = []\n for gind, gt in enumerate(all_gts[imgId]):\n assert(gt['_ignore']==0)\n\n oks = all_dtgt_oks[imgId][dind,gind]\n dts_with_oks = np.where(oks >= min_match_oks)[0]\n # remove the matched dts\n dts_available = [(i,sorted_dts[i]['id'],oks[i],sorted_dts[i][score_type]) \\\n for i in dts_with_oks if sorted_dts[i]['id'] not in used_dts]\n if len(dts_available) == 0: break\n\n max_oks_dt = np.argmax([d[2] for d in dts_available])\n used_dts.append(dts_available[max_oks_dt][1])\n\n if len( dts_available ) > 1:\n # check for scoring error\n max_score_dt = np.argmax([d[3] for d in dts_available])\n if max_score_dt!=max_oks_dt:\n # this is a scoring error\n error = {}\n error['gt'] = gt\n error['imgId'] = imgId\n error['matched_dt'] = sorted_dts[dts_available[max_score_dt][0]]\n error['top_match_dt'] = sorted_dts[dts_available[max_oks_dt][0]]\n error['high_oks'] = dts_available[max_oks_dt][2]\n error['low_oks'] = dts_available[max_score_dt][2]\n scoring_errors[score_type].append(error)\n\n f.write(\"Num. of scoring errors:\\n\")\n f.write(\" - Original Score: %d\\n\"%len(scoring_errors['score']))\n f.write(\" - Optimal Score: %d\\n\"%len(scoring_errors['opt_score']))\n\n f.write(\"\\nMost relevant scoring errors:\\n\")\n ## print the top scoring errors of the algorithm\n ori_scoring_errors = scoring_errors['score']\n ori_scoring_errors.sort(key=lambda k: -np.sqrt((k['matched_dt']['score']-k['top_match_dt']['score'])*(k['high_oks']-k['low_oks'])))\n for ind, err in enumerate(ori_scoring_errors[0:12]):\n relevance = np.sqrt((err['matched_dt']['score']-err['top_match_dt']['score'])*(err['high_oks']-err['low_oks']))\n f.write(\"================================================\\n\")\n f.write( \"- gt id: [%d]\\n\"%err['gt']['id'] )\n f.write( \"- dt id, high score, low oks: [%d][%.3f][%.3f]\\n\"%(err['matched_dt']['id'], err['matched_dt']['score'], err['low_oks']) )\n f.write( \"- dt id, low score, high oks: [%d][%.3f][%.3f]\\n\"%(err['top_match_dt']['id'], err['top_match_dt']['score'], err['high_oks']) )\n f.write( \"- Relevance: [%.3f]\\n\\n\"%relevance )\n\n name = 'score_err_%d_high_score'%ind\n paths[name] = '%s/%s.pdf'%(loc_dir,name)\n utilities.show_dets([err['matched_dt']],\n [err['gt']],\n imgs_info[err['imgId']],save_path=paths[name])\n\n name = 'score_err_%d_high_oks'%ind\n paths[name] = '%s/%s.pdf'%(loc_dir,name)\n utilities.show_dets([err['top_match_dt']],\n [err['gt']],\n imgs_info[err['imgId']],save_path=paths[name])\n\n # for all the images with dts and gts compute the following quantities\n # - number of dts with oks > min_match_oks for each gt\n # - histogram of oks for the detection with highest oks\n # - histogram of oks for all the other detections\n # - histogram of original/optimal scores for the detection with highest oks\n # - histogram of original/optimal scores for all the other detections\n num_dts_high_oks = []\n high_oks_dt_oks_hist = []; other_dt_oks_hist = []\n high_oks_dt_ori_score_hist = []; other_dt_ori_score_hist = []\n 
high_oks_dt_opt_score_hist = []; other_dt_opt_score_hist = []\n\n for ind, imgId in enumerate(all_dts.keys()):\n dts = [(d['id'],d['score'],d['opt_score']) for d in all_dts[imgId]['dts']]\n gtIds = [g['id'] for g in all_gts[imgId]]\n if len(dts) * len(gtIds) == 0: continue\n\n for gind, gt in enumerate(all_gts[imgId]):\n assert(gt['_ignore']==0)\n\n dts_oks = all_dtgt_oks[imgId][:,gind]\n dts_high_oks_i = np.where(dts_oks > .1)[0]\n num_dts_high_oks.append(len(dts_high_oks_i))\n\n if len(dts_high_oks_i) >= 2:\n # study the case where multiple detections have high oks\n\n # add the oks of the detections to the histogram of oks\n oks_vals = sorted([(dts_oks[i],dts[i]) for i in dts_high_oks_i], key=lambda k: -k[0])\n high_oks_dt_oks_hist.append(oks_vals[0][0])\n other_dt_oks_hist.extend([k[0] for k in oks_vals[1:]])\n\n high_oks_dt_ori_score_hist.append(oks_vals[0][1][1])\n other_dt_ori_score_hist.extend([k[1][1] for k in oks_vals[1:]])\n\n high_oks_dt_opt_score_hist.append(oks_vals[0][1][2])\n other_dt_opt_score_hist.extend([k[1][2] for k in oks_vals[1:]])\n\n fig, ax = plt.subplots(figsize=(10,10))\n ax.set_facecolor('lightgray')\n plt.hist(num_dts_high_oks,bins=[i-.5 for i in range(max(num_dts_high_oks)+1)],color='green')\n plt.grid()\n plt.xticks([i for i in range(max(num_dts_high_oks))])\n plt.title('Histogram of Detection Redundancy',fontsize=20)\n plt.xlabel('Number of Detections with OKS > .1',fontsize=20)\n plt.ylabel('Number of Ground Truth Instances',fontsize=20)\n path = '%s/num_dts_high_oks.pdf'%loc_dir\n paths['num_dts_high_oks'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n fig, ax = plt.subplots(figsize=(10,10))\n y1,binEdges=np.histogram(high_oks_dt_ori_score_hist,bins=19)\n bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')\n min_val1 = min(bincenters1)\n max_val1 = max(bincenters1)\n\n y2,binEdges=np.histogram(other_dt_ori_score_hist,bins=19)\n bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')\n min_val2 = min(bincenters2)\n max_val2 = max(bincenters2)\n\n min_val = min(min_val1,min_val2)\n max_val = max(max_val1,max_val2)\n\n overlapbins = [min(x,y) for x,y in zip(y1,y2)]\n width = (max_val-min_val)/20.\n ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')\n plt.grid()\n plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])\n\n plt.grid()\n plt.legend(loc='upper center',fontsize=20)\n plt.title('Histogram of Original Detection Scores',fontsize=20)\n plt.xlabel('Original Confidence Score',fontsize=20)\n plt.ylabel('Number of Detections',fontsize=20)\n path = '%s/dts_ori_score_hist.pdf'%loc_dir\n paths['dts_ori_score_hist'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n fig, ax = plt.subplots(figsize=(10,10))\n y1,binEdges=np.histogram(high_oks_dt_opt_score_hist,bins=19)\n bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')\n min_val1 = min(bincenters1)\n max_val1 = max(bincenters1)\n\n y2,binEdges=np.histogram(other_dt_opt_score_hist,bins=19)\n bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')\n min_val2 = min(bincenters2)\n max_val2 = max(bincenters2)\n\n min_val = min(min_val1,min_val2)\n max_val = max(max_val1,max_val2)\n\n overlapbins = [min(x,y) for x,y in zip(y1,y2)]\n width 
= (max_val-min_val)/20.\n ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')\n plt.grid()\n plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])\n\n plt.grid()\n plt.legend(loc='upper center',fontsize=20)\n plt.title('Histogram of Optimal Detection Scores',fontsize=20)\n plt.xlabel('Optimal Confidence Score',fontsize=20)\n plt.ylabel('Number of Detections',fontsize=20)\n path = '%s/dts_opt_score_hist.pdf'%loc_dir\n paths['dts_opt_score_hist'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n f.write(\"\\nDone, (t=%.2fs).\"%(time.time()-tic))\n f.close()\n\n return paths\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.histogram",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"numpy.argmax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"numpy.sqrt",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
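The "sub-optimal detections order" flag computed in this entry only compares two argsorts per image: one by the original score, one by the optimal (OKS-based) score. A toy, self-contained version of that check with hypothetical detection scores:

```python
import numpy as np

# An image is flagged when ranking by 'score' and by 'opt_score' disagree.
dts = [{'score': 0.9, 'opt_score': 0.4},
       {'score': 0.7, 'opt_score': 0.8},
       {'score': 0.2, 'opt_score': 0.1}]

by_score = np.argsort([-d['score'] for d in dts], kind='mergesort')
by_opt_score = np.argsort([-d['opt_score'] for d in dts], kind='mergesort')

suboptimal = list(by_score) != list(by_opt_score)
print(suboptimal)   # True: detection 1 should outrank detection 0
```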
sirjamesmeddel-gitty/intuition | [
"cd517e6b3b315a743eb4d0d0dc294e264ab913ce"
] | [
"tests/core/test_configuration.py"
] | [
"'''\nTests for intuition.core.configuration\n'''\n\nimport unittest\nfrom nose.tools import raises\nimport dna.test_utils as test_utils\nimport pandas as pd\nimport intuition.core.configuration as configuration\nfrom dna.errors import DynamicImportFailed\nfrom intuition.errors import InvalidConfiguration\n\n\nclass ConfigurationUtilsTestCase(unittest.TestCase):\n\n def test_logfile(self):\n logfile = configuration.logfile('fake_id')\n if 'tmp' in logfile:\n self.assertEqual('/tmp/logs/fake_id.log', logfile)\n else:\n self.assertIn('.intuition/logs/fake_id.log', logfile)\n\n\nclass ContextLoadTestCase(unittest.TestCase):\n\n def setUp(self):\n test_utils.setup_logger(self)\n self.good_driver = \\\n 'intuition.test_utils.FakeContext://localhost/path?valid=true'\n self.bad_driver = \\\n 'no.file.FileContext://localhost/path?valid=true'\n self.bad_config = \\\n 'intuition.test_utils.FakeContext://localhost/path?valid=false'\n self.bad_formatted_config = \\\n 'intuition.test_utils.FakeContext://localhost/path?format=false'\n\n def tearDown(self):\n test_utils.teardown_logger(self)\n\n def test_load_context(self):\n with configuration.Context(self.good_driver) as context:\n self.assertIsInstance(context, dict)\n self.assertIsInstance(context['strategy'], dict)\n self.assertIsInstance(context['config'], dict)\n\n @raises(InvalidConfiguration)\n def test_validate_bad_config(self):\n bad_config = {}\n ctx = configuration.Context(self.bad_driver)\n ctx._validate(bad_config)\n\n def test_validate_good_config(self):\n good_config = {\n 'universe': 'nasdaq,4',\n 'index': pd.date_range('2014/2/3', periods=30),\n 'modules': {\n 'algorithm': 'dualma'\n }\n }\n ctx = configuration.Context(self.bad_driver)\n self.assertIsNone(ctx._validate(good_config))\n\n @raises(InvalidConfiguration)\n def test_load_bad_configuration(self):\n ctx = configuration.Context(self.bad_formatted_config)\n ctx.__enter__()\n\n def test_loaded_configuration(self):\n with configuration.Context(self.good_driver) as context:\n for field in ['manager', 'algorithm', 'data']:\n self.assertIn(field, context['strategy'])\n for field in ['index', 'live']:\n self.assertIn(field, context['config'])\n\n @raises(DynamicImportFailed)\n def test_absent_driver_context_load(self):\n ctx = configuration.Context(self.bad_driver)\n ctx.__enter__()\n"
] | [
[
"pandas.date_range"
]
] |
rkripa/PS-FCN | [
"eb8ddbd60964830c06432a734a2cf6dce34f70f0"
] | [
"models/PS_FCN_run.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn.init import kaiming_normal_\nfrom models import model_utils\n\nclass FeatExtractor(nn.Module):\n def __init__(self, batchNorm=False, c_in=3, other={}):\n super(FeatExtractor, self).__init__()\n self.other = other\n self.conv1 = model_utils.conv(batchNorm, c_in, 64, k=3, stride=1, pad=1)\n self.conv2 = model_utils.conv(batchNorm, 64, 128, k=3, stride=2, pad=1)\n self.conv3 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.conv4 = model_utils.conv(batchNorm, 128, 256, k=3, stride=2, pad=1)\n self.conv5 = model_utils.conv(batchNorm, 256, 256, k=3, stride=1, pad=1)\n self.conv6 = model_utils.deconv(256, 128)\n self.conv7 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out = self.conv4(out)\n out = self.conv5(out)\n out = self.conv6(out)\n out_feat = self.conv7(out)\n n, c, h, w = out_feat.data.shape\n out_feat = out_feat.view(-1)\n return out_feat, [n, c, h, w]\n\nclass Regressor(nn.Module):\n def __init__(self, batchNorm=False, other={}): \n super(Regressor, self).__init__()\n self.other = other\n self.deconv1 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.deconv2 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.deconv3 = model_utils.deconv(128, 64)\n self.est_normal= self._make_output(64, 3, k=3, stride=1, pad=1)\n self.other = other\n\n def _make_output(self, cin, cout, k=3, stride=1, pad=1):\n return nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size=k, stride=stride, padding=pad, bias=False))\n\n def forward(self, x, shape):\n x = x.view(shape[0], shape[1], shape[2], shape[3])\n out = self.deconv1(x)\n out = self.deconv2(out)\n out = self.deconv3(out)\n normal = self.est_normal(out)\n normal = torch.nn.functional.normalize(normal, 2, 1)\n return normal\n\nclass PS_FCN(nn.Module):\n def __init__(self, fuse_type='max', batchNorm=False, c_in=3, other={}):\n super(PS_FCN, self).__init__()\n self.extractor = FeatExtractor(batchNorm, c_in, other)\n self.regressor = Regressor(batchNorm, other)\n self.c_in = c_in\n self.fuse_type = fuse_type\n self.other = other\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n kaiming_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n img = x[0]\n img_split = torch.split(img, 3, 1)\n if len(x) > 1: # Have lighting\n light = x[1]\n light_split = torch.split(light, 3, 1)\n\n feats = torch.Tensor()\n for i in range(len(img_split)):\n net_in = img_split[i] if len(x) == 1 else torch.cat([img_split[i], light_split[i]], 1)\n feat, shape = self.extractor(net_in)\n if i == 0:\n feats = feat\n else:\n if self.fuse_type == 'mean':\n feats = torch.stack([feats, feat], 1).sum(1)\n elif self.fuse_type == 'max':\n feats, _ = torch.stack([feats, feat], 1).max(1)\n if self.fuse_type == 'mean':\n feats = feats / len(img_split)\n feat_fused = feats\n normal = self.regressor(feat_fused, shape)\n return normal\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.stack",
"torch.split",
"torch.nn.functional.normalize",
"torch.nn.Conv2d",
"torch.cat",
"torch.Tensor"
]
] |
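The fusion step in `PS_FCN.forward` combines one feature tensor per input image either by element-wise max or by averaging; the incremental pairwise update in the code is equivalent to the global reduction sketched below (the feature shapes here are placeholders, not the ones produced by `FeatExtractor`).

```python
import torch

# Four per-image feature tensors for a batch of 2 (illustrative shapes only).
feats_per_image = [torch.randn(2, 128) for _ in range(4)]

stacked = torch.stack(feats_per_image, dim=1)      # 2 x 4 x 128
fused_max, _ = stacked.max(dim=1)                  # fuse_type == 'max'
fused_mean = stacked.mean(dim=1)                   # fuse_type == 'mean'

print(fused_max.shape, fused_mean.shape)           # torch.Size([2, 128]) twice
```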
ComputationalMechanics/SurfaceTopography | [
"6751be427c89d526ef4857300409596c79119029",
"6751be427c89d526ef4857300409596c79119029"
] | [
"SurfaceTopography/Uniform/Filtering.py",
"test/test_reliability_cutoff.py"
] | [
"#\n# Copyright 2020-2021 Lars Pastewka\n# 2020-2021 Antoine Sanner\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport numpy as np\nfrom scipy.signal import get_window\n\nfrom ..FFTTricks import get_window_2D\nfrom ..HeightContainer import UniformTopographyInterface\nfrom ..UniformLineScanAndTopography import DecoratedUniformTopography\n\n\nclass WindowedUniformTopography(DecoratedUniformTopography):\n \"\"\"\n Construct a topography with a window function applied to it.\n \"\"\"\n\n name = 'windowed_topography'\n\n def __init__(self, topography, window=None, direction=None, info={}):\n \"\"\"\n window : str, optional\n Window for eliminating edge effect. See scipy.signal.get_window.\n (Default: no window for periodic Topographies, \"hann\" window for\n nonperiodic Topographies)\n direction : str, optional\n Direction in which the window is applied. Possible options are\n 'x', 'y' and 'radial'. If set to None, it chooses 'x' for line\n scans and 'radial' for topographies. 
(Default: None)\n \"\"\"\n super().__init__(topography, info=info)\n\n self._window_name = window\n self._direction = direction\n\n self._window_data = None\n\n def _make_window(self):\n self._window_data = None\n\n n = self.parent_topography.nb_grid_pts\n\n try:\n nx, ny = n\n except ValueError:\n nx, = n\n\n window_name = self._window_name\n if not self.parent_topography.is_periodic and window_name is None:\n window_name = \"hann\"\n\n direction = self._direction\n if direction is None:\n direction = 'x' if self.parent_topography.dim == 1 else 'radial'\n\n # Construct window\n if window_name is not None and window_name != 'None':\n if direction == 'x':\n # Get window from scipy.signal\n win = get_window(window_name, nx)\n # Normalize window\n win *= np.sqrt(nx / (win ** 2).sum())\n elif direction == 'y':\n if self.parent_topography.dim == 1:\n raise ValueError(\"Direction 'y' does not make sense for line scans.\")\n # Get window from scipy.signal\n win = get_window(window_name, ny)\n # Normalize window\n win *= np.sqrt(ny / (win ** 2).sum())\n elif direction == 'radial':\n if self.parent_topography.dim == 1:\n raise ValueError(\"Direction 'radial' does not make sense for line scans.\")\n win = get_window_2D(window_name, nx, ny,\n self.parent_topography.physical_sizes)\n # Normalize window\n win *= np.sqrt(nx * ny / (win ** 2).sum())\n else:\n raise ValueError(f\"Unknown direction '{self._direction}'.\")\n\n self._window_data = win\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), \\\n self._window_name, self._direction\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._window_name, self._direction = state\n super().__setstate__(superstate)\n\n @property\n def window_data(self):\n if self._window_data is None:\n self._make_window()\n return self._window_data\n\n def heights(self):\n \"\"\" Computes the windowed topography.\n \"\"\"\n if self.window_data is None:\n return self.parent_topography.heights()\n else:\n direction = self._direction\n if direction is None:\n direction = 'x' if self.parent_topography.dim == 1 else 'radial'\n if direction == 'x':\n return (self.window_data * self.parent_topography.heights().T).T\n elif direction == 'y' or direction == 'radial':\n return self.window_data * self.parent_topography.heights()\n else:\n raise ValueError(f\"Unknown direction '{self._direction}'.\")\n\n\nclass FilteredUniformTopography(DecoratedUniformTopography):\n name = 'filtered_topography'\n\n def __init__(self, topography,\n filter_function=lambda qx, qy: (np.abs(qx) <= 1) * np.abs(qy) <= 1,\n isotropic=True,\n info={}):\n\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n super().__init__(topography, info=info)\n\n self._filter_function = filter_function\n self._is_filter_isotropic = isotropic\n # TODO: should be deductible from the filter function signature\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), \\\n self._filter_function, self._is_filter_isotropic\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, 
self._is_filter_isotropic = state\n super().__setstate__(superstate)\n\n @property\n def is_filter_isotropic(self):\n return self._is_filter_isotropic\n\n def filter_function(self, *args):\n \"\"\"\n\n Parameters\n ----------\n if dim = 2 and filter is not isotropic\n qx, qy\n if dim = 1\n q\n \"\"\"\n\n if self.dim == 2 and not self.is_filter_isotropic \\\n and len(args) != 2:\n raise (\"ValueError: qx, qy expected\")\n elif self.dim == 1 and len(args) != 1:\n raise (\"ValueError: q expected\")\n\n return self._filter_function(*args)\n\n def heights(self):\n if self.dim == 2:\n nx, ny = self.parent_topography.nb_grid_pts\n sx, sy = self.parent_topography.physical_sizes\n\n qx = np.arange(0, nx, dtype=np.float64).reshape(-1, 1)\n qx = np.where(qx <= nx // 2, qx / sx, (qx - nx) / sx)\n qx *= 2 * np.pi\n\n qy = np.arange(0, ny // 2 + 1, dtype=np.float64).reshape(1, -1)\n qy *= 2 * np.pi / sy\n\n if self.is_filter_isotropic:\n h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *\n self.filter_function(np.sqrt(qx ** 2 + qy ** 2)))\n else:\n h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *\n self.filter_function(qx, qy))\n\n return h_qs\n elif self.dim == 1:\n s, = self.parent_topography.physical_sizes\n n, = self.parent_topography.nb_grid_pts\n q = abs(2 * np.pi * np.fft.rfftfreq(n, s / n))\n\n h = self.parent_topography.heights()\n h_q = np.fft.rfft(h)\n h_q_filtered = np.fft.irfft(h_q * self.filter_function(q))\n\n # Max_imaginary = np.max(np.imag(shifted_pot))\n # assert Max_imaginary < 1e-14 *np.max(np.real(shifted_pot)) ,\n # f\"{Max_imaginary}\"\n\n return np.real(h_q_filtered)\n\n\nclass ShortCutTopography(FilteredUniformTopography):\n name = 'shortcut_filtered_topography'\n\n def __init__(self, topography,\n cutoff_wavevector=None, cutoff_wavelength=None,\n kind=\"circular step\",\n info={}):\n r\"\"\"Applies a short wavelength cut filter to the topography using fft.\n\n for `kind==\"circular step\"` (default), parts of the spectrum with\n `|q| > cutoff_wavevector` are set to zero\n\n for `kind==\"square step\"`, parts of the spectrum with\n `q_x > cutoff_wavevector or q_y > cutoff_wavevector ` are set to zero\n\n either `cutoff_wavelength` or\n `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`\n have to be provided.\n\n Parameters\n ----------\n topography: Topography\n cutoff_wavevector: float\n highest wavevector\n cutoff_wavelength: float\n shortest wavelength\n kind: {\"circular step\", \"square step\"}\n\n Returns\n -------\n Topography with filtered heights\n\n Examples\n --------\n >>> topography.shortcut(cutoff_wavevector=2 * np.pi / l)\n >>> topography.shortcut(cutoff_wavelength=l) # equivalent\n\n \"\"\"\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n\n if cutoff_wavelength is None:\n if cutoff_wavevector is not None:\n cutoff_wavelength = 2 * np.pi / cutoff_wavevector\n else:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n elif cutoff_wavevector is not None:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n\n self._cutoff_wavelength = cutoff_wavelength\n self._kind = kind\n\n def circular_step(q):\n return q <= self.cutoff_wavevector\n\n def square_step(qx, qy):\n return (np.abs(qx) <= self.cutoff_wavevector) * (\n np.abs(qy) <= self.cutoff_wavevector)\n\n if self._kind == \"circular step\":\n super().__init__(topography, info=info,\n filter_function=circular_step)\n elif self._kind == \"square 
step\":\n super().__init__(topography, info=info,\n filter_function=square_step, isotropic=False)\n else:\n raise ValueError(\"Invalid kind\")\n\n @property\n def cutoff_wavevector(self):\n return 2 * np.pi / self._cutoff_wavelength\n\n @property\n def cutoff_wavelength(self):\n return self._cutoff_wavelength\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), self._filter_function, \\\n self._kind, self._cutoff_wavelength\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, self._kind, \\\n self._cutoff_wavelength = state\n super().__setstate__(superstate)\n\n\nclass LongCutTopography(FilteredUniformTopography):\n name = 'longcut_filtered_topography'\n\n def __init__(self, topography,\n cutoff_wavevector=None, cutoff_wavelength=None,\n kind=\"circular step\",\n info={}):\n r\"\"\"Applies a long wavelength cut filter to the topography using fft.\n\n for `kind==\"circular step\"` (default), parts of the spectrum with\n `|q| < cutoff_wavevector` are set to zero\n\n for `kind==\"square step\"`, parts of the spectrum with\n `q_x < cutoff_wavevector or q_y < cutoff_wavevector ` are set to zero\n\n either `cutoff_wavelength` or\n `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`\n have to be provided.\n\n Parameters\n ----------\n topography: Topography\n cutoff_wavevector: float\n highest wavevector\n cutoff_wavelength: float\n shortest wavelength\n kind: {\"circular step\", \"square step\"}\n\n Returns\n -------\n Topography with filtered heights\n\n Examples\n --------\n >>> topography.longcut(cutoff_wavevector=2 * np.pi / l)\n >>> topography.longcut(cutoff_wavelength=l) # equivalent\n\n \"\"\"\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n\n if cutoff_wavelength is None:\n if cutoff_wavevector is not None:\n cutoff_wavelength = 2 * np.pi / cutoff_wavevector\n else:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n elif cutoff_wavevector is not None:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n\n self._cutoff_wavelength = cutoff_wavelength\n self._kind = kind\n\n def circular_step(q):\n return q >= self.cutoff_wavevector\n\n def square_step(qx, qy):\n return (np.abs(qx) >= self.cutoff_wavevector) * (\n np.abs(qy) >= self.cutoff_wavevector)\n\n if self._kind == \"circular step\":\n super().__init__(topography, info=info,\n filter_function=circular_step)\n elif self._kind == \"square step\":\n super().__init__(topography, info=info,\n filter_function=square_step, isotropic=False)\n else:\n raise ValueError(\"Invalid kind\")\n\n @property\n def cutoff_wavevector(self):\n return 2 * np.pi / self._cutoff_wavelength\n\n @property\n def cutoff_wavelength(self):\n return self._cutoff_wavelength\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), self._filter_function, \\\n self._kind, self._cutoff_wavelength\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, self._kind, \\\n self._cutoff_wavelength = state\n 
super().__setstate__(superstate)\n\n\nUniformTopographyInterface.register_function(\"window\", WindowedUniformTopography)\nUniformTopographyInterface.register_function(\"filter\", FilteredUniformTopography)\nUniformTopographyInterface.register_function(\"shortcut\", ShortCutTopography)\nUniformTopographyInterface.register_function(\"longcut\", LongCutTopography)\n",
"#\n# Copyright 2021 Lars Pastewka\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\"\"\"\nTests reliability cutoff and its use to restrict the range of data in the\nanalysis pipeline functions.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom SurfaceTopography import (read_container, read_topography, SurfaceContainer, NonuniformLineScan, UniformLineScan,\n Topography)\nfrom SurfaceTopography.Exceptions import NoReliableDataError\n\n\ndef test_scanning_probe_reliability_cutoff(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'))\n np.testing.assert_allclose(surf.scanning_probe_reliability_cutoff(40), 90.700854)\n\n # Should be None because there is no tip radius information\n assert surf.short_reliability_cutoff() is None\n\n cut = surf.short_reliability_cutoff(0.2)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_almost_equal(cut, 0.2)\n\n\ndef test_tip_radius_reliability_cutoff_from_instrument_metadata(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'), info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 40,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_allclose(cut, 90.700854)\n\n # Make sure PSD returns only reliable portion\n q, _ = surf.power_spectrum_from_profile()\n assert q[-1] < 2 * np.pi / cut\n\n q, _ = surf.power_spectrum_from_area()\n assert q[-1] < 2 * np.pi / cut\n\n # Make sure ACF returns only reliable portion\n r, A = surf.autocorrelation_from_profile()\n assert r[0] >= cut / 2\n\n r, A = surf.autocorrelation_from_area()\n assert r[0] >= cut / 2\n\n # Make sure SDRP returns only reliable portion\n r, s = surf.scale_dependent_statistical_property(lambda x, y=None: np.mean(x * x))\n assert r[0] >= cut / 2\n\n\ndef test_resolution_reliability_cutoff_from_instrument_metadata(file_format_examples):\n resolution = 70\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'), info={\n 'instrument': {\n 'parameters': {\n 'resolution': {\n 'value': resolution,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_almost_equal(cut, resolution)\n\n # Make sure PSD returns only reliable portion\n q, _ = surf.power_spectrum_from_profile()\n assert q[-1] < 2 * np.pi / cut\n\n q, _ = surf.power_spectrum_from_area()\n assert q[-1] < 2 * np.pi / cut\n\n # Make sure ACF returns only 
reliable portion\n r, A = surf.autocorrelation_from_profile()\n assert r[0] >= cut / 2\n\n r, A = surf.autocorrelation_from_area()\n assert r[0] >= cut / 2\n\n # Make sure SDRP returns only reliable portion\n r, s = surf.scale_dependent_statistical_property(lambda x, y=None: np.mean(x * x))\n assert r[0] >= cut / 2\n\n\ndef test_reliability_cutoff_line_scan(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'example7.txt'), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 40,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_allclose(cut, 0.126504, atol=1e-6)\n\n cut = surf.to_nonuniform().short_reliability_cutoff()\n # This differs from the above because the derivatives are computed at slightly different locations\n np.testing.assert_allclose(cut, 0.126505, atol=1e-6)\n\n cut = surf.to_nonuniform().short_reliability_cutoff(0.2)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_allclose(cut, 0.2)\n\n cut = surf.to_nonuniform().short_reliability_cutoff(0.1)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_allclose(cut, 0.126505, atol=1e-6)\n\n\ndef test_problem1(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di6.di'), info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 26,\n 'unit': 'nm',\n }\n }\n }\n })\n assert surf.short_reliability_cutoff() is None\n\n\ndef test_no_reliable_data_uniform():\n t = UniformLineScan([-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667], 6,\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1)\n\n c = SurfaceContainer([t])\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.autocorrelation(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.variable_bandwidth(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1, unit='um')\n\n\ndef test_no_reliable_data_topography():\n t = Topography(\n np.array([[-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667]] * 6),\n (6, 6),\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x, y: np.mean(x * x + y * y), n=1)\n\n\ndef test_no_reliable_data_nonuniform():\n t = NonuniformLineScan([0., 
1., 2., 3.5, 4., 5., 6.],\n [-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667],\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1)\n\n c = SurfaceContainer([t])\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.autocorrelation(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.variable_bandwidth(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1, unit='um')\n\n\ndef test_linear_2d_small_tip():\n t = Topography(np.array([[9, 9, 9, 9, 9],\n [7, 7, 7, 7, 7],\n [5, 5, 5, 5, 5],\n [3, 3, 3, 3, 3],\n [1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1],\n [-3, -3, -3, -3, -3],\n [-5, -5, -5, -5, -5],\n [-7, -7, -7, -7, -7],\n [-9, -9, -9, -9, -9]]).T,\n (1, 2), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 26,\n 'unit': 'nm',\n }\n }\n }}).detrend('center')\n\n # This has zero curvature, so everything should be reliable\n assert t.short_reliability_cutoff() is None\n\n q, C = t.power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.transpose().power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.power_spectrum_from_area()\n assert np.isfinite(C).sum() > 0\n\n\ndef test_linear_2d_large_tip():\n t = Topography(np.array([[9, 9, 9, 9, 9],\n [7, 7, 7, 7, 7],\n [5, 5, 5, 5, 5],\n [3, 3, 3, 3, 3],\n [1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1],\n [-3, -3, -3, -3, -3],\n [-5, -5, -5, -5, -5],\n [-7, -7, -7, -7, -7],\n [-9, -9, -9, -9, -9]]).T,\n (1, 2), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 10,\n 'unit': 'mm',\n }\n }\n }}).detrend('center')\n\n # This has zero curvature, so everything should be reliable\n assert t.short_reliability_cutoff() is None\n\n q, C = t.power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.transpose().power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.power_spectrum_from_area()\n assert np.isfinite(C).sum() > 0\n\n\ndef test_partially_reliable_data_container(file_format_examples):\n c, = read_container(f'{file_format_examples}/container1.zip')\n\n # Patch info dictionary\n c[0]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n c[1]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n c[2]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n\n # Check that we raise NoReliableDataError for one of the topographies\n c[0].power_spectrum_from_profile()\n c[1].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[2].power_spectrum_from_profile()\n\n # This should raise no error\n c.power_spectrum(unit='um')\n\n # Patch info dictionary such that all data is unreliable\n 
c[0]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n c[1]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n c[2]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n\n # Check that we raise NoReliableDataError for one of the topographies\n with pytest.raises(NoReliableDataError):\n c[0].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[1].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[2].power_spectrum_from_profile()\n\n # This should now raise a NoReliableDataError\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n"
] | [
[
"scipy.signal.get_window",
"numpy.abs",
"numpy.arange",
"numpy.fft.rfftfreq",
"numpy.sqrt",
"numpy.where",
"numpy.fft.rfft",
"numpy.real"
],
[
"numpy.testing.assert_almost_equal",
"numpy.mean",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.isfinite"
]
] |
georgetown-analytics/DC-Bikeshare | [
"42676654d103cdaddfb76db76d1eece533251261",
"42676654d103cdaddfb76db76d1eece533251261"
] | [
"final_plots/read_aws.py",
"report_queries/dockless_trips_by_operator.py"
] | [
"import psycopg2\nimport psycopg2.extras\nimport pandas as pd\nimport os\nimport time\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n\ndef read_only_connect_aws():\n env_path = 'env_readonly.env'\n load_dotenv(dotenv_path=env_path)\n host = \"bikeshare-restored.cs9te7lm3pt2.us-east-1.rds.amazonaws.com\"\n port = 5432\n database = \"bikeshare\"\n\n user = os.environ.get(\"AWS_READONLY_USER\")\n password = os.environ.get(\"AWS_READONLY_PASS\")\n\n # Connect to aws postgres D\n conn = psycopg2.connect(\n host=host, user=user, port=port, password=password,\n database=database)\n return conn\n\n# Function to load cabi data from AWS. Leaving room to add different load\n# types. Right now only allowing a load of all the database\n\n\nclass QueryTool:\n\n def __init__(self, connection, table=None):\n self.connection = connection\n self.table = table\n\n def basic(self):\n query = (\n 'SELECT * from ') + self.table\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def missing_check(self):\n query = (\"\"\"\n SELECT\n COUNT(*) as total_count,\n dt.operator as operator\n FROM dockless_trips as dt\n GROUP BY\n operator;\"\"\")\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def geo_metric(self, cut):\n self.cut = cut\n query = (\"\"\"\n SELECT\n stations.end_region_code,\n stations.start_region_code,\n extract({0} from subq_trip.start_date) as {0},\n COUNT(*) as total_trips\n FROM\n (SELECT * FROM {1} LIMIT 25) as subq_trip\n LEFT JOIN cabi_stations_geo_temp AS stations\n ON subq_trip.start_station = stations.start_short_name\n AND subq_trip.end_station = stations.end_short_name\n GROUP BY\n stations.end_region_code,\n stations.start_region_code,\n extract({0} from subq_trip.start_date);\"\"\").format(cut, table)\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def annual(self, year):\n self.year = year\n start_string = (\n 'SELECT * from cabi_trips '\n 'WHERE EXTRACT(YEAR FROM start_date)=')\n query = start_string + str(self.year)\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def describe_data(self):\n cur = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cur.execute(\"\"\"select *\n from information_schema.columns\n where table_schema NOT IN (\n 'information_schema', 'pg_catalog')\n order by table_schema, table_name\"\"\")\n for row in cur:\n print(\"schema: {schema}, table: {table}, column: {col}, \\\n type: {type}\".format(\n schema=row['table_schema'], table=row['table_name'],\n col=row['column_name'], type=row['data_type']))\n\n\nif __name__ == '__main__':\n print('Running')\n conn = read_only_connect_aws()\n CABI_TRIPS = QueryTool(conn, 'cabi_trips')\n CABI_TRIPS.describe_data()\n",
"import pandas as pd\nimport util_functions as uf\n\nif __name__ == \"__main__\":\n # Connect to AWS\n uf.set_env_path()\n conn, cur = uf.aws_connect()\n\n # Trips by Date and Operator\n df = pd.read_sql(\"\"\"select distinct\n OperatorClean,\n count(*) as trips\n from dockless_trips\n group by OperatorClean\n order by OperatorClean\n \"\"\", con=conn)\n print(df)\n"
] | [
[
"pandas.read_sql"
],
[
"pandas.read_sql"
]
] |
daroari/pygmt | [
"e022851d62814a9255ed2bb63ae092b666b832b9"
] | [
"pygmt/tests/test_datasets_earth_relief.py"
] | [
"\"\"\"\nTest basic functionality for loading Earth relief datasets.\n\"\"\"\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom pygmt.datasets import load_earth_relief\nfrom pygmt.exceptions import GMTInvalidInput\n\n\ndef test_earth_relief_fails():\n \"\"\"\n Make sure earth relief fails for invalid resolutions.\n \"\"\"\n resolutions = \"1m 1d bla 60d 001m 03\".split()\n resolutions.append(60)\n for resolution in resolutions:\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(resolution=resolution)\n\n\n# Only test 01d and 30m to avoid downloading large datasets in CI\ndef test_earth_relief_01d():\n \"\"\"\n Test some properties of the earth relief 01d data.\n \"\"\"\n data = load_earth_relief(resolution=\"01d\", registration=\"gridline\")\n assert data.shape == (181, 361)\n npt.assert_allclose(data.lat, np.arange(-90, 91, 1))\n npt.assert_allclose(data.lon, np.arange(-180, 181, 1))\n npt.assert_allclose(data.min(), -8592.5)\n npt.assert_allclose(data.max(), 5559.0)\n\n\ndef test_earth_relief_01d_with_region():\n \"\"\"\n Test loading low-resolution earth relief with 'region'.\n \"\"\"\n data = load_earth_relief(\n resolution=\"01d\", region=[-10, 10, -5, 5], registration=\"gridline\"\n )\n assert data.shape == (11, 21)\n npt.assert_allclose(data.lat, np.arange(-5, 6, 1))\n npt.assert_allclose(data.lon, np.arange(-10, 11, 1))\n npt.assert_allclose(data.min(), -5145)\n npt.assert_allclose(data.max(), 805.5)\n\n\ndef test_earth_relief_30m():\n \"\"\"\n Test some properties of the earth relief 30m data.\n \"\"\"\n data = load_earth_relief(resolution=\"30m\", registration=\"gridline\")\n assert data.shape == (361, 721)\n npt.assert_allclose(data.lat, np.arange(-90, 90.5, 0.5))\n npt.assert_allclose(data.lon, np.arange(-180, 180.5, 0.5))\n npt.assert_allclose(data.min(), -9460.5)\n npt.assert_allclose(data.max(), 5887.5)\n\n\ndef test_earth_relief_05m_with_region():\n \"\"\"\n Test loading a subregion of high-resolution earth relief grid.\n \"\"\"\n data = load_earth_relief(\n resolution=\"05m\", region=[120, 160, 30, 60], registration=\"gridline\"\n )\n assert data.coords[\"lat\"].data.min() == 30.0\n assert data.coords[\"lat\"].data.max() == 60.0\n assert data.coords[\"lon\"].data.min() == 120.0\n assert data.coords[\"lon\"].data.max() == 160.0\n assert data.data.min() == -9633.0\n assert data.data.max() == 2532.0\n assert data.sizes[\"lat\"] == 361\n assert data.sizes[\"lon\"] == 481\n\n\ndef test_earth_relief_05m_without_region():\n \"\"\"\n Test loading high-resolution earth relief without passing 'region'.\n \"\"\"\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(\"05m\")\n\n\ndef test_earth_relief_03s_landonly_srtm():\n \"\"\"\n Test loading original 3 arc-second land-only SRTM tiles.\n \"\"\"\n data = load_earth_relief(\n \"03s\", region=[135, 136, 35, 36], registration=\"gridline\", use_srtm=True\n )\n\n assert data.coords[\"lat\"].data.min() == 35.0\n assert data.coords[\"lat\"].data.max() == 36.0\n assert data.coords[\"lon\"].data.min() == 135.0\n assert data.coords[\"lon\"].data.max() == 136.0\n # data.data.min() == -305.51846 if use_srtm is False.\n assert data.data.min() == -6.0\n assert data.data.max() == 1191.0\n assert data.sizes[\"lat\"] == 1201\n assert data.sizes[\"lon\"] == 1201\n\n\ndef test_earth_relief_incorrect_registration():\n \"\"\"\n Test loading earth relief with incorrect registration type.\n \"\"\"\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(registration=\"improper_type\")\n\n\ndef 
test_earth_relief_invalid_resolution_registration_combination():\n \"\"\"\n Test loading earth relief with invalid combination of resolution and\n registration.\n \"\"\"\n for resolution, registration in [\n (\"15s\", \"gridline\"),\n (\"03s\", \"pixel\"),\n (\"01s\", \"pixel\"),\n ]:\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(resolution=resolution, registration=registration)\n"
] | [
[
"numpy.arange"
]
] |
minhmanho/rrdncnn | [
"f09ef7d92e31bfd43a548bb476970cfe38d32508"
] | [
"pytorch_ssim.py"
] | [
"import torch\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nfrom math import exp\r\n\r\ndef gaussian(window_size, sigma):\r\n gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\r\n return gauss/gauss.sum()\r\n\r\ndef create_window(window_size, channel):\r\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\r\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\r\n window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())\r\n return window\r\n\r\ndef _ssim(img1, img2, window, window_size, channel, size_average = True):\r\n mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)\r\n mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)\r\n\r\n mu1_sq = mu1.pow(2)\r\n mu2_sq = mu2.pow(2)\r\n mu1_mu2 = mu1*mu2\r\n\r\n sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq\r\n sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq\r\n sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2\r\n\r\n C1 = 0.01**2\r\n C2 = 0.03**2\r\n\r\n ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\r\n\r\n if size_average:\r\n return ssim_map.mean()\r\n else:\r\n return ssim_map.mean(1).mean(1).mean(1)\r\n\r\nclass SSIM(torch.nn.Module):\r\n def __init__(self, window_size = 11, size_average = True):\r\n super(SSIM, self).__init__()\r\n self.window_size = window_size\r\n self.size_average = size_average\r\n self.channel = 1\r\n self.window = create_window(window_size, self.channel)\r\n\r\n def forward(self, img1, img2):\r\n (_, channel, _, _) = img1.size()\r\n\r\n if channel == self.channel and self.window.data.type() == img1.data.type():\r\n window = self.window\r\n else:\r\n window = create_window(self.window_size, channel)\r\n \r\n if img1.is_cuda:\r\n window = window.cuda(img1.get_device())\r\n window = window.type_as(img1)\r\n \r\n self.window = window\r\n self.channel = channel\r\n\r\n\r\n return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\r\n\r\ndef ssim(img1, img2, window_size = 11, size_average = True):\r\n (_, channel, _, _) = img1.size()\r\n window = create_window(window_size, channel)\r\n \r\n if img1.is_cuda:\r\n window = window.cuda(img1.get_device())\r\n window = window.type_as(img1)\r\n \r\n return _ssim(img1, img2, window, window_size, channel, size_average)"
] | [
[
"torch.nn.functional.conv2d"
]
] |
kay-wong/DiscoBERT | [
"814c741e2a049de3afc489835e0df3ccf9fb4fe9"
] | [
"model/archival_gnns.py"
] | [
"# Graph Conv and Relational Graph Conv\nimport itertools\nimport torch\nfrom typing import List, Union\n\nimport dgl\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom allennlp.common import FromParams\nfrom allennlp.common import Registrable\nfrom allennlp.modules.encoder_base import _EncoderBase\nfrom allennlp.modules.feedforward import FeedForward\nfrom allennlp.modules.layer_norm import LayerNorm\nfrom allennlp.modules.masked_layer_norm import MaskedLayerNorm\nfrom overrides import overrides\n\n\nclass GraphEncoder(_EncoderBase, Registrable):\n def get_input_dim(self) -> int:\n raise NotImplementedError\n\n def get_output_dim(self) -> int:\n raise NotImplementedError\n\n def is_bidirectional(self):\n raise NotImplementedError\n #\n def convert_sent_tensors_to_graphs(self, sent, sent_mask, meta_field, key):\n batch_size, max_sent_num, hdim = sent.shape\n effective_length = torch.sum(sent_mask, dim=1).long().tolist()\n graph_bag = []\n for b in range(batch_size):\n this_sent = sent[b] # max_sent, hdim\n this_len = effective_length[b]\n graph_seed = meta_field[b][key] # List of tuples\n G = dgl.DGLGraph()\n G.add_nodes(max_sent_num)\n # fc_src = [i for i in range(this_len)] * this_len\n # fc_tgt = [[i] * this_len for i in range(this_len)]\n # fc_tgt = list(itertools.chain.from_iterable(fc_tgt))\n fc_src = [x[0] for x in graph_seed]\n fc_tgt = [x[1] for x in graph_seed]\n G.add_edges(fc_src, fc_tgt)\n G.ndata['h'] = this_sent # every node has the parameter\n graph_bag.append(G)\n return graph_bag\n\n\[email protected](\"easy_graph_encoder\")\nclass EasyGraph(GraphEncoder, torch.nn.Module, FromParams):\n def __init__(self,\n input_dim: int,\n num_layers: int,\n hidden_dims: Union[int, List[int]],\n dropout=0.1):\n super().__init__()\n\n if not isinstance(hidden_dims, list):\n hidden_dims = [hidden_dims] * num_layers\n if not isinstance(dropout, list):\n dropout = [dropout] * num_layers # type: ignore\n\n self._activations = [torch.nn.functional.relu] * num_layers\n input_dims = [input_dim] + hidden_dims[:-1]\n linear_layers = []\n for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):\n linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))\n self._linear_layers = torch.nn.ModuleList(linear_layers)\n dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]\n self._dropout = torch.nn.ModuleList(dropout_layers)\n self._output_dim = hidden_dims[-1]\n\n self.lin = torch.nn.Linear(self._output_dim, self._output_dim)\n self.ln = MaskedLayerNorm(size=hidden_dims[0])\n\n def transform_sent_rep(self, sent_rep, sent_mask, graphs):\n # LayerNorm(x + Sublayer(x))\n output = sent_rep\n\n for layer, activation, dropout in zip(self._linear_layers, self._activations, self._dropout):\n mid = layer(output) # output: batch, seq, feat\n mid = mid.permute(0, 2, 1) # mid: batch, feat, seq\n\n nex = torch.bmm(mid, graphs)\n output = dropout(activation(nex))\n output = output.permute(0, 2, 1) # mid: batch, seq, feat\n middle = sent_rep + self.lin(output)\n output = self.ln.forward(middle, sent_mask)\n return output\n\n\[email protected](\"old_gcn\")\nclass GCN_layers(GraphEncoder, torch.nn.Module, FromParams):\n\n def __init__(self, input_dims: List[int],\n num_layers: int,\n hidden_dims: Union[int, List[int]],\n activations='relu'):\n super(GCN_layers, self).__init__()\n if not isinstance(hidden_dims, list):\n hidden_dims = [hidden_dims] * num_layers\n # TODO remove hard code relu\n activations = [torch.nn.functional.tanh] * num_layers\n assert 
len(input_dims) == len(hidden_dims) == len(activations) == num_layers\n gcn_layers = []\n for layer_input_dim, layer_output_dim, activate in zip(input_dims, hidden_dims, activations):\n gcn_layers.append(GCN(layer_input_dim, layer_output_dim, activate))\n self.layers = nn.ModuleList(gcn_layers)\n self._output_dim = hidden_dims[-1]\n self.input_dim = input_dims[0]\n self.ln = LayerNorm(hidden_dims[0])\n self._mlp = FeedForward(hidden_dims[0], 1, hidden_dims[0], torch.nn.functional.sigmoid)\n\n def transform_sent_rep(self, sent_rep, sent_mask, sent_graph):\n init_graphs = self.convert_sent_tensors_to_graphs(sent_rep, sent_mask)\n unpadated_graphs = []\n for g in init_graphs:\n updated_graph = self.forward(g)\n unpadated_graphs.append(updated_graph)\n recovered_sent = torch.stack(unpadated_graphs, dim=0)\n assert recovered_sent.shape == sent_rep.shape\n return recovered_sent\n\n def convert_sent_tensors_to_graphs(self, sent, sent_mask):\n batch_size, max_sent_num, hdim = sent.shape\n effective_length = torch.sum(sent_mask, dim=1).long().tolist()\n graph_bag = []\n for b in range(batch_size):\n this_sent = sent[b] # max_sent, hdim\n # this_mask = sent_mask[b]\n this_len = effective_length[b]\n\n G = dgl.DGLGraph()\n G.add_nodes(max_sent_num)\n fc_src = [i for i in range(this_len)] * this_len\n fc_tgt = [[i] * this_len for i in range(this_len)]\n fc_tgt = list(itertools.chain.from_iterable(fc_tgt))\n\n G.add_edges(fc_src, fc_tgt)\n G.ndata['h'] = this_sent # every node has the parameter\n graph_bag.append(G)\n return graph_bag\n\n @overrides\n def forward(self, g):\n # h = g.in_degrees().view(-1, 1).float()\n h = g.ndata['h']\n output = h\n for conv in self.layers:\n output = conv(g, output)\n print(output)\n norm_output = self.ln(h + output)\n # print(norm_output)\n # m = self._mlp(norm_output)\n # h = self.ln(norm_output + m)\n h = norm_output\n g.ndata['h'] = h\n hg = dgl.mean_nodes(g, 'h')\n # return g, g.ndata['h'], hg # g is the raw graph, h is the node rep, and hg is the mean of all h\n return g.ndata['h']\n\n def get_input_dim(self) -> int:\n return self.input_dim\n\n def get_output_dim(self) -> int:\n return self._output_dim\n\n @overrides\n def is_bidirectional(self):\n return False\n\n\ndef discourse_oracle(disco_txt, ):\n # oracle labels\n docs = [disc.get_readable_words_as_list() for disc in disco_bag]\n\n # rewrite the docs to accomodate the dependency\n modified_docs_w_deps = []\n oracle_inclusion = []\n for idx, disco in enumerate(disco_bag):\n # tmp_txt, tmp_oracle_inclusion = copy.deepcopy(docs[idx]),[idx]\n tmp_txt, tmp_oracle_inclusion = [], []\n if disco.dep != []:\n for _d in disco.dep:\n if _d < len(docs):\n tmp_txt += docs[_d]\n tmp_oracle_inclusion.append(_d)\n tmp_txt += copy.deepcopy(docs[idx])\n tmp_oracle_inclusion.append(idx)\n modified_docs_w_deps.append(\" \".join(tmp_txt))\n oracle_inclusion.append(tmp_oracle_inclusion)\n else:\n modified_docs_w_deps.append(\n \" \".join(docs[idx])\n )\n oracle_inclusion.append([idx])\n\n yangliu_label = original_greedy_selection([x.split(\" \") for x in modified_docs_w_deps], summary, 5)\n # oracle_ids = greedy_selection(modified_docs_w_deps, summary, oracle_size)\n return yangliu_labelf\n"
] | [
[
"torch.sum",
"torch.stack",
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.bmm",
"torch.nn.Dropout"
]
] |
paigeco/VirtualGoniometer | [
"536e7e77fbb036ad8d777b42e751a0f3e80b8242"
] | [
"src/AngleMeasurement/RP1DClustering.py"
] | [
"import numpy as np\nfrom .PCASmallestEig import pca_smallest_eig, pca_smallest_eig_powermethod\nfrom .Withness import withness\nfrom .CalculateAngle import get_angle\n\n#RP1D clustering from\n#Han, Sangchun, and Mireille Boutin. \"The hidden structure of image datasets.\" 2015 IEEE International Conference on Image Processing (ICIP). IEEE, 2015.\n############################################\ndef ClusteringMeanRP1D(P,N,T,A=0,UsePCA=True,UsePower=False):\n n = N.shape[0]\n d = N.shape[1]\n v = np.random.rand(T,d)\n \n #u = np.mean(N,axis=0)\n \n if UsePower:\n N1 = pca_smallest_eig_powermethod(N, center=False)\n N1 = np.reshape(N1,(3,))\n else:\n N1 = pca_smallest_eig(N, center=False)\n \n N2 = np.sum(N,axis=0)\n v = np.cross(N1,N2)\n v = v/np.linalg.norm(v)\n \n m = np.mean(P,axis=0)\n dist = np.sqrt(np.sum((P - m)**2,axis=1))\n i = np.argmin(dist)\n radius = np.max(dist)\n D = (P - P[i,:])/radius\n\n #The A=2 is just hand tuned. Larger A encourages the clustering to split the patch in half\n #A=0 is the previous version of the virtual goniometer\n x = np.sum(v*N,axis=1) + A*np.sum(v*D,axis=1)\n\n #Clustering\n _, m = withness(x)\n\n C = np.zeros(n,)\n C[x>m] = 1\n C[x<=m] = 2\n \n P1 = P[C==1,:]\n P2 = P[C==2,:]\n N1 = N[C==1,:]\n N2 = N[C==2,:]\n \n theta, n1, n2 = get_angle(P1,P2,N1,N2,UsePCA = UsePCA, UsePower = UsePower)\n \n \n return C,n1,n2,theta\n\ndef ClusteringRandomRP1D(X,T):\n n = X.shape[0]\n d = X.shape[1]\n v = np.random.rand(T,d)\n u = np.mean(X,axis=0)\n wmin = float(\"inf\")\n imin = 0\n \n #w_list = []\n #m_list = []\n \n for i in range(T):\n x = np.sum((v[i,:]-(np.dot(v[i,:],u)/np.dot(v[i,:],v[i,:]))*u)*X,axis=1)\n w,m = withness(x) \n if w < wmin:\n wmin = w\n imin = i\n \n x = np.sum((v[imin,:]-(np.dot(v[imin,:],u)/np.dot(v[imin,:],v[imin,:]))*u)*X,axis=1)\n \n _,m = withness(x)\n\n C = np.zeros(n,)\n C[x>m] = 1\n C[x<=m] = 2\n \n return C"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.argmin",
"numpy.reshape",
"numpy.cross",
"numpy.max",
"numpy.random.rand",
"numpy.dot",
"numpy.linalg.norm",
"numpy.mean"
]
] |
nipreps/mriqc | [
"e021008da0a2ef1c48e882baf932139a673349f9"
] | [
"mriqc/interfaces/anatomical.py"
] | [
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n#\n# Copyright 2021 The NiPreps Developers <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# We support and encourage derived works from this project, please read\n# about our expectations at\n#\n# https://www.nipreps.org/community/licensing/\n#\n\"\"\"Nipype interfaces to support anatomical workflow.\"\"\"\nimport os.path as op\nfrom builtins import zip\n\nimport nibabel as nb\nimport numpy as np\nimport scipy.ndimage as nd\nfrom mriqc.qc.anatomical import (\n art_qi1,\n art_qi2,\n cjv,\n cnr,\n efc,\n fber,\n rpve,\n snr,\n snr_dietrich,\n summary_stats,\n volume_fraction,\n wm2max,\n)\nfrom mriqc.utils.misc import _flatten_dict\nfrom nipype.interfaces.base import (\n BaseInterfaceInputSpec,\n File,\n InputMultiPath,\n SimpleInterface,\n TraitedSpec,\n isdefined,\n traits,\n)\nfrom nipype.utils.filemanip import fname_presuffix\n\n\nclass StructuralQCInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"file to be plotted\")\n in_noinu = File(exists=True, mandatory=True, desc=\"image after INU correction\")\n in_segm = File(exists=True, mandatory=True, desc=\"segmentation file from FSL FAST\")\n in_bias = File(exists=True, mandatory=True, desc=\"bias file\")\n head_msk = File(exists=True, mandatory=True, desc=\"head mask\")\n air_msk = File(exists=True, mandatory=True, desc=\"air mask\")\n rot_msk = File(exists=True, mandatory=True, desc=\"rotation mask\")\n artifact_msk = File(exists=True, mandatory=True, desc=\"air mask\")\n in_pvms = InputMultiPath(\n File(exists=True),\n mandatory=True,\n desc=\"partial volume maps from FSL FAST\",\n )\n in_tpms = InputMultiPath(File(), desc=\"tissue probability maps from FSL FAST\")\n mni_tpms = InputMultiPath(File(), desc=\"tissue probability maps from FSL FAST\")\n in_fwhm = traits.List(\n traits.Float, mandatory=True, desc=\"smoothness estimated with AFNI\"\n )\n human = traits.Bool(True, usedefault=True, desc=\"human workflow\")\n\n\nclass StructuralQCOutputSpec(TraitedSpec):\n summary = traits.Dict(desc=\"summary statistics per tissue\")\n icvs = traits.Dict(desc=\"intracranial volume (ICV) fractions\")\n rpve = traits.Dict(desc=\"partial volume fractions\")\n size = traits.Dict(desc=\"image sizes\")\n spacing = traits.Dict(desc=\"image sizes\")\n fwhm = traits.Dict(desc=\"full width half-maximum measure\")\n inu = traits.Dict(desc=\"summary statistics of the bias field\")\n snr = traits.Dict\n snrd = traits.Dict\n cnr = traits.Float\n fber = traits.Float\n efc = traits.Float\n qi_1 = traits.Float\n wm2max = traits.Float\n cjv = traits.Float\n out_qc = traits.Dict(desc=\"output flattened dictionary with all measures\")\n out_noisefit = File(exists=True, desc=\"plot of background noise and chi fitting\")\n tpm_overlap = traits.Dict\n\n\nclass StructuralQC(SimpleInterface):\n \"\"\"\n Computes anatomical :abbr:`QC (Quality Control)` measures on the\n structural image given as 
input\n\n \"\"\"\n\n input_spec = StructuralQCInputSpec\n output_spec = StructuralQCOutputSpec\n\n def _run_interface(self, runtime): # pylint: disable=R0914,E1101\n imnii = nb.load(self.inputs.in_noinu)\n erode = (\n np.all(np.array(imnii.header.get_zooms()[:3], dtype=np.float32) < 1.9)\n if self.inputs.human\n else False\n )\n\n # Load image corrected for INU\n inudata = np.nan_to_num(imnii.get_fdata())\n inudata[inudata < 0] = 0\n\n if np.all(inudata < 1e-5):\n raise RuntimeError(\n \"Input inhomogeneity-corrected data seem empty. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Load binary segmentation from FSL FAST\n segnii = nb.load(self.inputs.in_segm)\n segdata = np.asanyarray(segnii.dataobj).astype(np.uint8)\n\n if np.sum(segdata > 0) < 1e3:\n raise RuntimeError(\n \"Input segmentation data is likely corrupt. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Load air, artifacts and head masks\n airdata = np.asanyarray(nb.load(self.inputs.air_msk).dataobj).astype(np.uint8)\n artdata = np.asanyarray(nb.load(self.inputs.artifact_msk).dataobj).astype(\n np.uint8\n )\n\n headdata = np.asanyarray(nb.load(self.inputs.head_msk).dataobj).astype(np.uint8)\n if np.sum(headdata > 0) < 100:\n raise RuntimeError(\n \"Detected less than 100 voxels belonging to the head mask. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n rotdata = np.asanyarray(nb.load(self.inputs.rot_msk).dataobj).astype(np.uint8)\n\n # Load Partial Volume Maps (pvms) from FSL FAST\n pvmdata = []\n for fname in self.inputs.in_pvms:\n pvmdata.append(nb.load(fname).get_fdata(dtype=\"float32\"))\n if np.sum(pvmdata[-1] > 1e-4) < 10:\n raise RuntimeError(\n \"Detected less than 10 voxels belonging to one tissue prob. map. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Summary stats\n stats = summary_stats(inudata, pvmdata, airdata, erode=erode)\n self._results[\"summary\"] = stats\n\n # SNR\n snrvals = []\n self._results[\"snr\"] = {}\n for tlabel in [\"csf\", \"wm\", \"gm\"]:\n snrvals.append(\n snr(\n stats[tlabel][\"median\"],\n stats[tlabel][\"stdv\"],\n stats[tlabel][\"n\"],\n )\n )\n self._results[\"snr\"][tlabel] = snrvals[-1]\n self._results[\"snr\"][\"total\"] = float(np.mean(snrvals))\n\n snrvals = []\n self._results[\"snrd\"] = {\n tlabel: snr_dietrich(\n stats[tlabel][\"median\"],\n mad_air=stats[\"bg\"][\"mad\"],\n sigma_air=stats[\"bg\"][\"stdv\"],\n )\n for tlabel in [\"csf\", \"wm\", \"gm\"]\n }\n self._results[\"snrd\"][\"total\"] = float(\n np.mean([val for _, val in list(self._results[\"snrd\"].items())])\n )\n\n # CNR\n self._results[\"cnr\"] = cnr(\n stats[\"wm\"][\"median\"],\n stats[\"gm\"][\"median\"],\n stats[\"bg\"][\"stdv\"],\n stats[\"wm\"][\"stdv\"],\n stats[\"gm\"][\"stdv\"],\n )\n\n # FBER\n self._results[\"fber\"] = fber(inudata, headdata, rotdata)\n\n # EFC\n self._results[\"efc\"] = efc(inudata, rotdata)\n\n # M2WM\n self._results[\"wm2max\"] = wm2max(inudata, stats[\"wm\"][\"median\"])\n\n # Artifacts\n self._results[\"qi_1\"] = art_qi1(airdata, artdata)\n\n # CJV\n self._results[\"cjv\"] = cjv(\n # mu_wm, mu_gm, sigma_wm, sigma_gm\n stats[\"wm\"][\"median\"],\n stats[\"gm\"][\"median\"],\n stats[\"wm\"][\"mad\"],\n stats[\"gm\"][\"mad\"],\n )\n\n # FWHM\n fwhm = np.array(self.inputs.in_fwhm[:3]) / np.array(\n imnii.header.get_zooms()[:3]\n )\n self._results[\"fwhm\"] = {\n \"x\": float(fwhm[0]),\n \"y\": float(fwhm[1]),\n \"z\": float(fwhm[2]),\n \"avg\": float(np.average(fwhm)),\n }\n\n # ICVs\n self._results[\"icvs\"] = volume_fraction(pvmdata)\n\n # RPVE\n 
self._results[\"rpve\"] = rpve(pvmdata, segdata)\n\n # Image specs\n self._results[\"size\"] = {\n \"x\": int(inudata.shape[0]),\n \"y\": int(inudata.shape[1]),\n \"z\": int(inudata.shape[2]),\n }\n self._results[\"spacing\"] = {\n i: float(v) for i, v in zip([\"x\", \"y\", \"z\"], imnii.header.get_zooms()[:3])\n }\n\n try:\n self._results[\"size\"][\"t\"] = int(inudata.shape[3])\n except IndexError:\n pass\n\n try:\n self._results[\"spacing\"][\"tr\"] = float(imnii.header.get_zooms()[3])\n except IndexError:\n pass\n\n # Bias\n bias = nb.load(self.inputs.in_bias).get_fdata()[segdata > 0]\n self._results[\"inu\"] = {\n \"range\": float(\n np.abs(np.percentile(bias, 95.0) - np.percentile(bias, 5.0))\n ),\n \"med\": float(np.median(bias)),\n } # pylint: disable=E1101\n\n mni_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.mni_tpms]\n in_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.in_pvms]\n overlap = fuzzy_jaccard(in_tpms, mni_tpms)\n self._results[\"tpm_overlap\"] = {\n \"csf\": overlap[0],\n \"gm\": overlap[1],\n \"wm\": overlap[2],\n }\n\n # Flatten the dictionary\n self._results[\"out_qc\"] = _flatten_dict(self._results)\n return runtime\n\n\nclass ArtifactMaskInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"File to be plotted\")\n head_mask = File(exists=True, mandatory=True, desc=\"head mask\")\n rot_mask = File(exists=True, desc=\"a rotation mask\")\n nasion_post_mask = File(\n exists=True,\n mandatory=True,\n desc=\"nasion to posterior of cerebellum mask\",\n )\n\n\nclass ArtifactMaskOutputSpec(TraitedSpec):\n out_hat_msk = File(exists=True, desc='output \"hat\" mask')\n out_art_msk = File(exists=True, desc=\"output artifacts mask\")\n out_air_msk = File(exists=True, desc='output \"hat\" mask, without artifacts')\n\n\nclass ArtifactMask(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = ArtifactMaskInputSpec\n output_spec = ArtifactMaskOutputSpec\n\n def _run_interface(self, runtime):\n imnii = nb.load(self.inputs.in_file)\n imdata = np.nan_to_num(imnii.get_fdata().astype(np.float32))\n\n # Remove negative values\n imdata[imdata < 0] = 0\n\n hmdata = np.asanyarray(nb.load(self.inputs.head_mask).dataobj)\n npdata = np.asanyarray(nb.load(self.inputs.nasion_post_mask).dataobj)\n\n # Invert head mask\n airdata = np.ones_like(hmdata, dtype=np.uint8)\n airdata[hmdata == 1] = 0\n\n # Calculate distance to border\n dist = nd.morphology.distance_transform_edt(airdata)\n\n # Apply nasion-to-posterior mask\n airdata[npdata == 1] = 0\n dist[npdata == 1] = 0\n dist /= dist.max()\n\n # Apply rotation mask (if supplied)\n if isdefined(self.inputs.rot_mask):\n rotmskdata = np.asanyarray(nb.load(self.inputs.rot_mask).dataobj)\n airdata[rotmskdata == 1] = 0\n\n # Run the artifact detection\n qi1_img = artifact_mask(imdata, airdata, dist)\n\n fname, ext = op.splitext(op.basename(self.inputs.in_file))\n if ext == \".gz\":\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n\n self._results[\"out_hat_msk\"] = op.abspath(\"{}_hat{}\".format(fname, ext))\n self._results[\"out_art_msk\"] = op.abspath(\"{}_art{}\".format(fname, ext))\n self._results[\"out_air_msk\"] = op.abspath(\"{}_air{}\".format(fname, ext))\n\n hdr = imnii.header.copy()\n hdr.set_data_dtype(np.uint8)\n nb.Nifti1Image(qi1_img, imnii.affine, hdr).to_filename(\n self._results[\"out_art_msk\"]\n )\n\n nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(\n self._results[\"out_hat_msk\"]\n )\n\n 
airdata[qi1_img > 0] = 0\n nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(\n self._results[\"out_air_msk\"]\n )\n return runtime\n\n\nclass ComputeQI2InputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"File to be plotted\")\n air_msk = File(exists=True, mandatory=True, desc=\"air (without artifacts) mask\")\n\n\nclass ComputeQI2OutputSpec(TraitedSpec):\n qi2 = traits.Float(desc=\"computed QI2 value\")\n out_file = File(desc=\"output plot: noise fit\")\n\n\nclass ComputeQI2(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = ComputeQI2InputSpec\n output_spec = ComputeQI2OutputSpec\n\n def _run_interface(self, runtime):\n imdata = nb.load(self.inputs.in_file).get_fdata()\n airdata = nb.load(self.inputs.air_msk).get_fdata()\n qi2, out_file = art_qi2(imdata, airdata)\n self._results[\"qi2\"] = qi2\n self._results[\"out_file\"] = out_file\n return runtime\n\n\nclass HarmonizeInputSpec(BaseInterfaceInputSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"input data (after bias correction)\"\n )\n wm_mask = File(exists=True, mandatory=True, desc=\"white-matter mask\")\n erodemsk = traits.Bool(True, usedefault=True, desc=\"erode mask\")\n thresh = traits.Float(0.9, usedefault=True, desc=\"WM probability threshold\")\n\n\nclass HarmonizeOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"input data (after intensity harmonization)\")\n\n\nclass Harmonize(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = HarmonizeInputSpec\n output_spec = HarmonizeOutputSpec\n\n def _run_interface(self, runtime):\n\n in_file = nb.load(self.inputs.in_file)\n wm_mask = nb.load(self.inputs.wm_mask).get_fdata()\n wm_mask[wm_mask < 0.9] = 0\n wm_mask[wm_mask > 0] = 1\n wm_mask = wm_mask.astype(np.uint8)\n\n if self.inputs.erodemsk:\n # Create a structural element to be used in an opening operation.\n struc = nd.generate_binary_structure(3, 2)\n # Perform an opening operation on the background data.\n wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)\n\n data = in_file.get_fdata()\n data *= 1000.0 / np.median(data[wm_mask > 0])\n\n out_file = fname_presuffix(\n self.inputs.in_file, suffix=\"_harmonized\", newpath=\".\"\n )\n in_file.__class__(data, in_file.affine, in_file.header).to_filename(out_file)\n\n self._results[\"out_file\"] = out_file\n\n return runtime\n\n\nclass RotationMaskInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"input data\")\n\n\nclass RotationMaskOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"rotation mask (if any)\")\n\n\nclass RotationMask(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = RotationMaskInputSpec\n output_spec = RotationMaskOutputSpec\n\n def _run_interface(self, runtime):\n in_file = nb.load(self.inputs.in_file)\n data = in_file.get_fdata()\n mask = data <= 0\n\n # Pad one pixel to control behavior on borders of binary_opening\n mask = np.pad(mask, pad_width=(1,), mode=\"constant\", constant_values=1)\n\n # Remove noise\n struc = nd.generate_binary_structure(3, 2)\n mask = nd.binary_opening(mask, structure=struc).astype(np.uint8)\n\n # Remove small objects\n label_im, nb_labels = nd.label(mask)\n if nb_labels > 2:\n sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))\n ordered = 
list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))\n for _, label in ordered[2:]:\n mask[label_im == label] = 0\n\n # Un-pad\n mask = mask[1:-1, 1:-1, 1:-1]\n\n # If mask is small, clean-up\n if mask.sum() < 500:\n mask = np.zeros_like(mask, dtype=np.uint8)\n\n out_img = in_file.__class__(mask, in_file.affine, in_file.header)\n out_img.header.set_data_dtype(np.uint8)\n\n out_file = fname_presuffix(self.inputs.in_file, suffix=\"_rotmask\", newpath=\".\")\n out_img.to_filename(out_file)\n self._results[\"out_file\"] = out_file\n return runtime\n\n\ndef artifact_mask(imdata, airdata, distance, zscore=10.0):\n \"\"\"Computes a mask of artifacts found in the air region\"\"\"\n from statsmodels.robust.scale import mad\n\n if not np.issubdtype(airdata.dtype, np.integer):\n airdata[airdata < 0.95] = 0\n airdata[airdata > 0.0] = 1\n\n bg_img = imdata * airdata\n if np.sum((bg_img > 0).astype(np.uint8)) < 100:\n return np.zeros_like(airdata)\n\n # Find the background threshold (the most frequently occurring value\n # excluding 0)\n bg_location = np.median(bg_img[bg_img > 0])\n bg_spread = mad(bg_img[bg_img > 0])\n bg_img[bg_img > 0] -= bg_location\n bg_img[bg_img > 0] /= bg_spread\n\n # Apply this threshold to the background voxels to identify voxels\n # contributing artifacts.\n qi1_img = np.zeros_like(bg_img)\n qi1_img[bg_img > zscore] = 1\n qi1_img[distance < 0.10] = 0\n\n # Create a structural element to be used in an opening operation.\n struc = nd.generate_binary_structure(3, 1)\n qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)\n qi1_img[airdata <= 0] = 0\n\n return qi1_img\n\n\ndef fuzzy_jaccard(in_tpms, in_mni_tpms):\n overlaps = []\n for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):\n tpm = tpm.reshape(-1)\n mni_tpm = mni_tpm.reshape(-1)\n\n num = np.min([tpm, mni_tpm], axis=0).sum()\n den = np.max([tpm, mni_tpm], axis=0).sum()\n overlaps.append(float(num / den))\n return overlaps\n"
] | [
[
"numpy.sum",
"numpy.issubdtype",
"numpy.ones_like",
"scipy.ndimage.morphology.distance_transform_edt",
"numpy.average",
"numpy.mean",
"scipy.ndimage.generate_binary_structure",
"scipy.ndimage.label",
"scipy.ndimage.binary_opening",
"numpy.median",
"numpy.asanyarray",
"scipy.ndimage.binary_erosion",
"numpy.all",
"numpy.max",
"numpy.min",
"numpy.pad",
"numpy.percentile",
"numpy.zeros_like",
"numpy.array"
]
] |
GrumpyMeow/ownphotos-backend | [
"98d8e9136e9188009afe08657f943dba3df80ccb"
] | [
"api/util.py"
] | [
"import base64\nimport pickle\nimport itertools\n\nfrom scipy import linalg\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom sklearn import cluster\nfrom sklearn import mixture\nfrom scipy.spatial import distance\nfrom sklearn.preprocessing import StandardScaler\n\n\nimport requests\n\nfrom config import mapzen_api_key, mapbox_api_key\n\nimport logging\nimport logging.handlers\n\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')\n\nlogger = logging.getLogger('ownphotos')\nfomatter = logging.Formatter(\n '%(asctime)s : %(filename)s : %(funcName)s : %(lineno)s : %(levelname)s : %(message)s')\nfileMaxByte = 256 * 1024 * 200 # 100MB\nfileHandler = logging.handlers.RotatingFileHandler(\n './logs/ownphotos.log', maxBytes=fileMaxByte, backupCount=10)\nfileHandler.setFormatter(fomatter)\nlogger.addHandler(fileHandler)\nlogger.setLevel(logging.INFO)\n\n\n\ndef convert_to_degrees(values):\n \"\"\"\n Helper function to convert the GPS coordinates stored in the EXIF to degress in float format\n :param value:\n :type value: exifread.utils.Ratio\n :rtype: float\n \"\"\"\n d = float(values[0].num) / float(values[0].den)\n m = float(values[1].num) / float(values[1].den)\n s = float(values[2].num) / float(values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)\n\nweekdays = {1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday',7:'Sunday'}\n\n\n\ndef compute_bic(kmeans,X):\n \"\"\"\n Computes the BIC metric for a given clusters\n\n Parameters:\n -----------------------------------------\n kmeans: List of clustering object from scikit learn\n\n X : multidimension np array of data points\n\n Returns:\n -----------------------------------------\n BIC value\n \"\"\"\n # assign centers and labels\n centers = [kmeans.cluster_centers_]\n labels = kmeans.labels_\n #number of clusters\n m = kmeans.n_clusters\n # size of the clusters\n n = np.bincount(labels)\n #size of data set\n N, d = X.shape\n\n #compute variance for all clusters beforehand\n cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], \n 'euclidean')**2) for i in range(m)])\n\n const_term = 0.5 * m * np.log(N) * (d+1)\n\n BIC = np.sum([n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]) - const_term\n\n return(BIC)\n\n\ndef mapzen_reverse_geocode(lat,lon):\n url = \"https://search.mapzen.com/v1/reverse?point.lat=%f&point.lon=%f&size=1&lang=en&api_key=%s\"%(lat,lon,mapzen_api_key)\n resp = requests.get(url)\n if resp.status_code == 200:\n resp_json = resp.json()\n search_text = []\n if len(resp_json['features']) > 0:\n if 'country' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['country'])\n if 'county' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['county'])\n if 'macrocounty' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['macrocounty'])\n if 'locality' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['locality'])\n if 'region' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['region'])\n if 'neighbourhood' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['neighbourhood'])\n if 'name' in 
resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['name'])\n if 'label' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['label'])\n search_text = ' '.join(search_text)\n search_text = search_text.replace(',',' ')\n search_text_tokens = list(set(search_text.split()))\n search_text = ' '.join(search_text_tokens)\n resp_json['search_text'] = search_text\n return resp_json\n else:\n return {}\n\n\ndef mapbox_reverse_geocode(lat,lon):\n url = \"https://api.mapbox.com/geocoding/v5/mapbox.places/%f,%f.json?access_token=%s\"%(lon,lat,mapbox_api_key)\n resp = requests.get(url)\n print(resp)\n if resp.status_code == 200:\n resp_json = resp.json()\n search_terms = []\n\n if 'features' in resp_json.keys():\n for feature in resp_json['features']:\n search_terms.append(feature['text'])\n\n logger.info('location search terms: %s'%(' '.join(search_terms)))\n resp_json['search_text'] = ' '.join(search_terms)\n return resp_json\n else:\n logger.info('mapbox returned non 200 response.')\n return {}\n"
] | [
[
"numpy.where",
"numpy.log",
"numpy.bincount"
]
] |
matham/Ceed | [
"b81a14a6b8211e5f4582418ddea34c951ab2667e"
] | [
"ceed/tests/test_app/test_stage.py"
] | [
"import os\nimport sys\nimport math\nfrom contextlib import contextmanager\nfrom math import isclose\nimport numpy as np\nimport pytest\n\nimport ceed\nfrom .examples.stages import create_test_stages, make_stage, StageWrapper, \\\n stage_classes, assert_stages_same\nfrom typing import Type, List, Union\nfrom ceed.tests.ceed_app import CeedTestApp\nfrom ceed.tests.test_app import replace_text, touch_widget, escape, \\\n run_plugin_experiment\nfrom ceed.stage import CeedStage, CeedStageRef, last_experiment_stage_name\nfrom ceed.function import CeedFuncRef, FuncBase, FuncGroup\nfrom ceed.shape import CeedShape, CeedShapeGroup\nfrom .examples.shapes import assert_add_three_groups, CircleShapeP1\nfrom .examples.funcs import create_funcs, GroupFunction\nfrom .examples.stages import fake_plugin_stage, SerialAllStage\nfrom .examples.experiment import wait_stage_experiment_started, \\\n wait_experiment_done, measure_fps, wait_experiment_stopped\nfrom .test_func import assert_func_params_in_gui, \\\n replace_last_ref_with_original_func, assert_funcs_same\n\npytestmark = pytest.mark.ceed_app\n\n\nasync def assert_set_params_in_gui(\n stage_app: CeedTestApp, stage: StageWrapper, settings=None,\n check_name=False):\n opened_settings = settings is None\n if opened_settings:\n settings = await open_stage_settings(stage_app, stage.stage)\n\n if check_name:\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n assert name.text != stage.name\n assert name.text == stage.stage.name\n await replace_text(stage_app, name, stage.name)\n assert name.text == stage.name\n assert name.text == stage.stage.name\n\n # verify colors\n for color in ('r', 'g', 'b'):\n widget = stage_app.resolve_widget(settings).down(\n test_name='stage color {}'.format(color))()\n prop = 'color_{}'.format(color)\n # the stage values should always match the GUI values\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n # if the wrapper need to change the value, do it\n if getattr(stage, prop) != getattr(stage.stage, prop):\n await touch_widget(stage_app, widget)\n\n # make sure it was changed\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n assert getattr(stage, prop) == getattr(stage.stage, prop)\n\n # parallel vs serial\n serial = stage_app.resolve_widget(settings).down(\n test_name='stage serial')()\n parallel = stage_app.resolve_widget(settings).down(\n test_name='stage parallel')()\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n\n # set the GUI to the correct value\n if stage.order == 'parallel' and parallel.state != 'down':\n await touch_widget(stage_app, parallel)\n elif stage.order == 'serial' and serial.state != 'down':\n await touch_widget(stage_app, serial)\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n assert (stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.order == 'parallel') == (parallel.state == 'down')\n\n # complete_on all vs any\n all_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish all')()\n any_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish any')()\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n\n # set the GUI to the correct value\n if stage.complete_on == 'all' and all_w.state != 'down':\n await 
touch_widget(stage_app, all_w)\n elif stage.complete_on == 'any' and any_w.state != 'down':\n await touch_widget(stage_app, any_w)\n\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n assert (stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.complete_on == 'any') == (any_w.state == 'down')\n\n if opened_settings:\n await escape(stage_app)\n return settings\n\n\nasync def assert_stage_params_in_gui(\n stage_app: CeedTestApp, stage: StageWrapper, settings=None,\n check_name=False):\n opened_settings = settings is None\n if opened_settings:\n settings = await open_stage_settings(stage_app, stage.stage)\n\n if check_name:\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n name_label = stage_app.resolve_widget(stage.stage.display).down(\n test_name='stage label')()\n assert name.text == stage.name\n assert name_label.text == stage.name\n assert name.text == stage.stage.name\n\n # verify colors\n for color in ('r', 'g', 'b'):\n widget = stage_app.resolve_widget(settings).down(\n test_name='stage color {}'.format(color))()\n prop = 'color_{}'.format(color)\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n assert getattr(stage, prop) == getattr(stage.stage, prop)\n\n # parallel vs serial\n serial = stage_app.resolve_widget(settings).down(\n test_name='stage serial')()\n parallel = stage_app.resolve_widget(settings).down(\n test_name='stage parallel')()\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n\n # complete_on all vs any\n all_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish all')()\n any_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish any')()\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n\n if opened_settings:\n await escape(stage_app)\n return settings\n\n\nasync def replace_last_ref_with_original_stage(\n stage_app: CeedTestApp,\n stages: List[Union[CeedStageRef, CeedStage]], name: str):\n start_stages = stages[:]\n ref_stage = stages[-1]\n # it should be a ref to start with\n assert isinstance(ref_stage, CeedStageRef)\n # make sure the class name matches - we added the right class\n assert ref_stage.stage.name == name\n\n # the label of the new sub-stage\n sub_stage_widget = ref_stage.display\n name_w = stage_app.resolve_widget(sub_stage_widget).down(\n test_name='stage label')()\n assert name_w.text == name\n # replace the ref with a copy of the stage\n ref_btn = stage_app.resolve_widget(sub_stage_widget).down(\n test_name='stage settings open')()\n await touch_widget(stage_app, ref_btn)\n\n # should now have replaced the ref with a copy of the original\n assert ref_stage not in stages\n assert len(stages) == len(start_stages)\n\n new_stage = stages[-1]\n assert ref_stage is not new_stage\n assert stages[:-1] == start_stages[:-1]\n # it should not be a ref anymore\n assert not isinstance(new_stage, CeedStageRef)\n\n assert_stages_same(ref_stage.stage, new_stage)\n\n return new_stage\n\n\nasync def open_stage_settings(app: CeedTestApp, stage: CeedStage):\n settings_btn = app.resolve_widget(stage.display).down(\n test_name='stage settings open')()\n await touch_widget(app, settings_btn)\n\n return app.resolve_widget().down(test_name='stage settings')()\n\n\nasync def 
test_stage_find_shape_in_all_stages(stage_app: CeedTestApp):\n (s1, s2, s3), (group, shape1, shape2, shape3) = create_test_stages(\n stage_app=stage_app, show_in_gui=True)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape1, shape2, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape2.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape1, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n for shape in (shape2, ):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape1.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape3, ):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n for shape in (shape2, shape1):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape3.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape2, shape1, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n\nasync def test_add_empty_stage(stage_app: CeedTestApp):\n stage_factory = stage_app.app.stage_factory\n assert not stage_factory.stages\n n = len(stage_factory.stage_names)\n\n # add first empty stage\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n await touch_widget(stage_app, add_stage)\n\n assert stage_factory.stages\n stage = stage_factory.stages[0]\n assert stage in list(stage_factory.stage_names.values())\n assert len(stage_factory.stage_names) == n + 1\n assert stage.display.show_more\n\n # select the stage and add stage to it\n name_label = stage_app.resolve_widget(stage.display).down(\n test_name='stage label')()\n assert not stage.display.selected\n\n await touch_widget(stage_app, name_label)\n assert stage.display.selected\n await touch_widget(stage_app, add_stage)\n assert stage_factory.stages == [stage]\n\n # deselect the stage and add stage globally\n assert stage.display.selected\n await touch_widget(stage_app, name_label)\n await touch_widget(stage_app, add_stage)\n\n assert len(stage_factory.stages) == 2\n assert stage_factory.stages[0] is stage\n\n\nasync def test_gui_add_stages(stage_app: CeedTestApp):\n stages = []\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n for i, stage_cls in enumerate(stage_classes):\n stage = stage_cls(app=stage_app, show_in_gui=False)\n stages.append(stage)\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = stages.pop(0)\n assert oldest_stage.stage in stage_app.app.stage_factory.stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.stage.display).down(test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage.stage not in stage_app.app.stage_factory.stages\n\n # add the stage\n await touch_widget(stage_app, add_stage)\n assert len(stage_app.app.stage_factory.stages) == min(2, i + 1)\n stage.stage = stage_app.app.stage_factory.stages[-1]\n\n # show the settings for the stage\n widget = stage.stage.display\n settings = await 
open_stage_settings(stage_app, stage.stage)\n\n # check default name\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n assert not name.disabled, \"root stages can be renamed\"\n name_label = stage_app.resolve_widget(widget).down(\n test_name='stage label')()\n original_name = name.text\n assert stage.name != original_name\n assert original_name == name_label.text\n assert original_name in stage_app.app.stage_factory.stage_names\n assert stage.name not in stage_app.app.stage_factory.stage_names\n\n # change the stage name\n await replace_text(stage_app, name, stage.name)\n assert name.text == stage.name\n assert name_label.text == stage.name\n assert original_name not in stage_app.app.stage_factory.stage_names\n assert stage.name in stage_app.app.stage_factory.stage_names\n\n await assert_set_params_in_gui(stage_app, stage, settings)\n\n # close the settings widget\n await escape(stage_app)\n\n\nasync def test_gui_add_sub_stages(stage_app: CeedTestApp):\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n await touch_widget(stage_app, add_stage)\n\n base_stage: CeedStage = stage_app.app.stage_factory.stages[0]\n name_label = stage_app.resolve_widget(base_stage.display).down(\n test_name='stage label')()\n await touch_widget(stage_app, name_label)\n assert base_stage.display.selected\n assert not base_stage.stages\n\n stages = []\n for i, stage_cls in enumerate(stage_classes[:4]):\n stage = stage_cls(app=stage_app, show_in_gui=False)\n stages.append(stage)\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = stages.pop(0)\n assert oldest_stage.stage in base_stage.stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.stage.display).down(test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage.stage not in base_stage.stages\n\n if not base_stage.display.selected:\n await touch_widget(stage_app, name_label)\n assert base_stage.display.selected\n\n # add the stage\n await touch_widget(stage_app, add_stage)\n assert len(base_stage.stages) == min(2, i + 1)\n stage.stage = base_stage.stages[-1]\n\n # replace the ref stage\n settings_btn = stage_app.resolve_widget(stage.stage.display).down(\n test_name='stage settings open')()\n await touch_widget(stage_app, settings_btn)\n stage.stage = base_stage.stages[-1]\n\n await assert_set_params_in_gui(stage_app, stage, check_name=False)\n\n\nasync def test_gui_drag_shape_to_stage(stage_app: CeedTestApp):\n (group, group2, group3), (shape1, shape2, shape3) = \\\n assert_add_three_groups(\n shape_factory=stage_app.app.shape_factory, app=stage_app,\n show_in_gui=True)\n await stage_app.wait_clock_frames(2)\n\n (s1, s2, s3), _ = create_test_stages(\n stage_app=stage_app, add_func=False, add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # multiple stages\n for stage in (s2, s3):\n container = stage.stage.display.shape_widget\n shapes = stage.stage.shapes\n assert not shapes\n\n # drag each shape to the stage\n added_shapes = []\n for i, shape in enumerate((shape1, group2, shape3, shape2)):\n if isinstance(shape, CeedShapeGroup):\n src = stage_app.resolve_widget(shape.widget).down(\n test_name='group drag button')()\n else:\n shape = shape.shape\n src = stage_app.resolve_widget(shape.widget).down(\n test_name='shape drag')()\n\n offset = (0, 5) if container.height else (0, 0)\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, target_widget=container,\n target_widget_loc=('center_x', 'y'),\n 
target_widget_offset=offset, drag_n=15):\n pass\n\n # check that shape was added\n assert len(shapes) == min(3, i + 1)\n assert shape is shapes[-1].shape\n\n # make sure label matches\n name_label = stage_app.resolve_widget(shapes[-1].display).down(\n test_name='stage shape name')()\n assert name_label.text == shape.name\n\n added_shapes.append(shapes[-1])\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_shape = added_shapes.pop(0)\n assert oldest_shape in shapes\n remove_btn = stage_app.resolve_widget(\n oldest_shape.display).down(\n test_name='stage shape del')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_shape not in shapes\n await stage_app.wait_clock_frames(2)\n\n\nasync def test_gui_drag_func_to_stage(stage_app: CeedTestApp):\n global_funcs = create_funcs(func_app=stage_app, show_in_gui=True)\n group_func: GroupFunction = global_funcs[-1]\n ff1 = group_func.wrapper_funcs[0]\n ff2 = group_func.wrapper_funcs[1]\n global_funcs = [\n (ff1, True)] + [(f, False) for f in global_funcs] + [(ff2, True)]\n await stage_app.wait_clock_frames(2)\n\n (s1, s2, s3), _ = create_test_stages(\n stage_app=stage_app, add_func=False, add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # multiple funcs\n for stage in (s2, s3):\n container = stage.stage.display.func_widget\n functions = stage.stage.functions\n assert not functions\n\n # drag each func to the stage\n added_funcs = []\n for i, (func, is_sub_func) in enumerate(global_funcs):\n src = stage_app.resolve_widget(func.func.display).down(\n test_name='func drag btn')()\n\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, target_widget=container,\n target_widget_loc=('center_x', 'y'),\n target_widget_offset=(0, 5)):\n pass\n\n # check that shape was added\n assert len(functions) == min(3, i + 1)\n assert functions[-1] is not func.func\n if is_sub_func:\n assert isinstance(functions[-1], (FuncBase, FuncGroup))\n assert_funcs_same(functions[-1], func.func)\n else:\n assert isinstance(functions[-1], CeedFuncRef)\n assert func.func is functions[-1].func\n await replace_last_ref_with_original_func(\n stage_app, functions, func.func.name)\n\n added_funcs.append(functions[-1])\n\n # don't keep more than two funcs so the list is not too long\n if i >= 2:\n oldest_func = added_funcs.pop(0)\n assert oldest_func in functions\n remove_btn = stage_app.resolve_widget(\n oldest_func.display).down(\n test_name='del_btn_func')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_func not in functions\n\n await stage_app.wait_clock_frames(2)\n\n\nasync def test_gui_drag_stage_to_stage(stage_app: CeedTestApp):\n (s1, s2, s21), _ = create_test_stages(\n stage_app=stage_app, show_in_gui=True, add_func=False,\n add_shapes=False)\n (s3, s4, s41), _ = create_test_stages(\n stage_app=stage_app, show_in_gui=True, add_func=False,\n add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # collapse stages to not take up space\n for stage in (s1, s21, s3):\n stage.stage.display.show_more = False\n await stage_app.wait_clock_frames(2)\n\n # multiple funcs\n for stage in (s4, s41):\n container = stage.stage.display.stage_widget\n stages = stage.stage.stages\n n_start = 0 if stage is s41 else 1\n assert len(stages) == n_start\n\n # drag each func to the stage\n added_stages = []\n for i, src_stage in enumerate((s1, s2, s21, s3)):\n src = stage_app.resolve_widget(src_stage.stage.display).down(\n test_name='stage drag btn')()\n\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, 
target_widget=container,\n target_widget_loc=('center_x', 'y'),\n target_widget_offset=(0, 5)):\n pass\n\n # check that shape was added\n assert len(stages) == min(3, i + 1) + n_start\n\n assert stages[-1] is not src_stage.stage\n if src_stage is s21:\n assert isinstance(stages[-1], CeedStage)\n assert_stages_same(stages[-1], src_stage.stage)\n else:\n assert isinstance(stages[-1], CeedStageRef)\n assert src_stage.stage is stages[-1].stage\n await replace_last_ref_with_original_stage(\n stage_app, stages, src_stage.stage.name)\n\n added_stages.append(stages[-1])\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = added_stages.pop(0)\n assert oldest_stage in stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.display).down(\n test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage not in stages\n\n await stage_app.wait_clock_frames(2)\n\n\ndef verify_color(\n stage_app, shape_color, shape2_color, frame, centers, flip, video_mode):\n (cx1, cy1), (cx2, cy2) = centers\n if flip:\n cx1 = 1920 - cx1\n cx2 = 1920 - cx2\n\n centers = [[(cx1, cy1), (cx2, cy2)]]\n if 'QUAD' in video_mode:\n cx1, cx2, cy1, cy2 = cx1 // 2, cx2 // 2, cy1 // 2, cy2 // 2\n corners = ((0, 540), (960, 540), (0, 0), (960, 0))\n centers = [\n [(cx + x, cy + y) for cx, cy in [(cx1, cy1), (cx2, cy2)]]\n for x, y in corners]\n\n if video_mode == 'QUAD12X':\n # first get all 4 centers values, one for each quadrant\n rgb_values = []\n for i in range(4):\n rgb = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers[i])\n rgb = [[c / 255 for c in p] for p in rgb]\n rgb_values.append(rgb)\n\n # r, g, b\n for plane in [0, 1, 2]:\n # 4 quads\n for color1, color2 in rgb_values:\n assert isclose(\n color1[plane], shape_color[frame][3], abs_tol=2 / 255)\n assert isclose(\n color2[plane], shape2_color[frame][3], abs_tol=2 / 255)\n frame += 1\n else:\n n_sub_frames = 1\n if video_mode == 'QUAD4X':\n n_sub_frames = 4\n\n for i in range(n_sub_frames):\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers[i])\n points = [[c / 255 for c in p] for p in points]\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n\n val = shape_color[frame]\n assert isclose(r1, val[3], abs_tol=2 / 255) if val[0] else r1 == 0\n assert isclose(g1, val[3], abs_tol=2 / 255) if val[1] else g1 == 0\n assert isclose(b1, val[3], abs_tol=2 / 255) if val[2] else b1 == 0\n val = shape2_color[frame]\n assert isclose(r2, val[3], abs_tol=2 / 255) if val[0] else r2 == 0\n assert isclose(g2, val[3], abs_tol=2 / 255) if val[1] else g2 == 0\n assert isclose(b2, val[3], abs_tol=2 / 255) if val[2] else b2 == 0\n frame += 1\n\n return frame\n\n\[email protected]('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])\[email protected](\n 'flip,skip', [(True, False), (False, True), (False, False)])\nasync def test_recursive_play_stage_intensity(\n stage_app: CeedTestApp, tmp_path, flip, skip, video_mode):\n \"\"\"Checks that proper frame rendering happens in all these modes.\n In skip mode, some frames are skipped if GPU/CPU is too slow.\n \"\"\"\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP2\n from kivy.clock import Clock\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n\n from ceed.function.plugin import LinearFunc\n for i, stage in enumerate((s1, s2, s3, s4, s5, s6)):\n 
stage.stage.add_func(LinearFunc(\n function_factory=stage_app.app.function_factory, b=0, m=.5,\n duration=(i % 2 + 1) * 1))\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n shape2 = CircleShapeP2(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n s1.stage.add_shape(shape.shape)\n s4.stage.add_shape(shape.shape)\n s5.stage.add_shape(shape.shape)\n s2.stage.add_shape(shape2.shape)\n s3.stage.add_shape(shape2.shape)\n s6.stage.add_shape(shape2.shape)\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n frame = 0\n event = None\n # make GPU too slow to force skipping frames, when enabled\n fps = await measure_fps(stage_app) + 10\n rate = stage_app.app.view_controller.frame_rate = fps\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = flip\n stage_app.app.view_controller.skip_estimated_missed_frames = skip\n stage_app.app.view_controller.video_mode = video_mode\n stage_app.app.view_controller.pad_to_stage_handshake = False\n\n n_sub_frames = 1\n if video_mode == 'QUAD4X':\n n_sub_frames = 4\n elif video_mode == 'QUAD12X':\n n_sub_frames = 12\n\n centers = shape.center, shape2.center\n num_frames = rate * n_sub_frames * (2 + 1 + 2 + 1)\n shape_color = [(False, False, False, 0.), ] * num_frames\n shape2_color = [(False, False, False, 0.), ] * num_frames\n skipped_frame_indices = set()\n n_missed_frames = 0\n\n for s, start, e in [(s1, 0, 1), (s4, 3, 5), (s5, 5, 6)]:\n for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):\n val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5\n shape_color[i] = s.color_r, s.color_g, s.color_b, val\n\n for s, start, e in [(s2, 0, 2), (s3, 2, 3), (s6, 5, 6)]:\n for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):\n val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5\n shape2_color[i] = s.color_r, s.color_g, s.color_b, val\n\n def verify_intensity(*largs):\n nonlocal frame, n_missed_frames\n # total frames is a multiple of n_sub_frames\n if not stage_app.app.view_controller.stage_active:\n assert stage_app.app.view_controller.count - 1 == num_frames\n if skip:\n # last frame could be passed actual frames\n assert frame - n_missed_frames * n_sub_frames <= num_frames\n else:\n assert frame == num_frames\n event.cancel()\n return\n # not yet started\n if not stage_app.app.view_controller.count:\n return\n\n # some frame may have been skipped, but num_frames is max frames\n # This callback happens after frame callback and after the frame flip.\n # This also means we record even the last skipped frames (if skipped)\n assert frame < num_frames\n\n frame = verify_color(\n stage_app, shape_color, shape2_color, frame, centers, flip,\n video_mode)\n assert stage_app.app.view_controller.count == frame\n\n if skip:\n # some frames may have been dropped for next frame\n n_missed_frames = stage_app.app.view_controller._n_missed_frames\n for k in range(n_missed_frames * n_sub_frames):\n # frame is next frame index, next frame is skipped\n skipped_frame_indices.add(frame)\n frame += 1\n else:\n assert not stage_app.app.view_controller._n_missed_frames\n\n event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)\n event()\n stage_app.app.view_controller.request_stage_start(root.name)\n\n await wait_experiment_done(stage_app, timeout=num_frames / rate * 50)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 
'recursive_play_stage_intensity.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0']\n assert not f.num_images_in_file\n f.load_experiment(0)\n\n shape_data = f.shapes_intensity[shape.name]\n shape_data_rendered = f.shapes_intensity_rendered[shape.name]\n shape2_data = f.shapes_intensity[shape2.name]\n shape2_data_rendered = f.shapes_intensity_rendered[shape2.name]\n recorded_rendered_frames = f.rendered_frames\n\n # even when skipping, skipped frames are still logged but they are removed\n # in xxx_rendered arrays\n if skip:\n # because frame rate is high, we'll definitely drop frames\n assert skipped_frame_indices\n else:\n assert not skipped_frame_indices\n\n assert shape_data.shape[0] == num_frames\n assert shape2_data.shape[0] == num_frames\n\n n_skipped = len(skipped_frame_indices)\n if skip:\n # last frame may be recorded as skipped, but if stage is done frame is\n # not real. n_missed_frames is the n_missed_frames from last frame\n assert num_frames - n_skipped <= shape_data_rendered.shape[0] \\\n <= num_frames - n_skipped + n_sub_frames * n_missed_frames\n assert num_frames - n_skipped <= shape2_data_rendered.shape[0] \\\n <= num_frames - n_skipped + n_sub_frames * n_missed_frames\n else:\n assert shape_data_rendered.shape[0] == num_frames\n assert shape2_data_rendered.shape[0] == num_frames\n\n # in QUAD12X mode, all 3 channels have same value in the data (because we\n # show gray). But the projector outputs different values for each channel,\n # for each sub-frame\n gray = video_mode == 'QUAD12X'\n i = 0\n k = 0\n for (r, g, b, val), (r1, g1, b1, _) in zip(shape_color, shape_data):\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n\n if skip:\n assert recorded_rendered_frames[k] \\\n == (k not in skipped_frame_indices)\n else:\n assert recorded_rendered_frames[k]\n\n if k not in skipped_frame_indices:\n r1, g1, b1, _ = shape_data_rendered[i, :]\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n i += 1\n k += 1\n\n i = 0\n k = 0\n for (r, g, b, val), (r1, g1, b1, _) in zip(shape2_color, shape2_data):\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n\n if skip:\n assert recorded_rendered_frames[k] \\\n == (k not in skipped_frame_indices)\n else:\n assert recorded_rendered_frames[k]\n\n if k not in skipped_frame_indices:\n r1, g1, b1, _ = shape2_data_rendered[i, :]\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n i += 1\n k += 1\n\n f.close_h5()\n\n\nasync def test_moat_stage_shapes(stage_app: CeedTestApp, tmp_path):\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP1Internal\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n # internal shape\n s1.stage.color_r = 
False\n s1.stage.color_g = False\n s1.stage.color_b = True\n # surrounding shape\n s2.stage.color_r = True\n s2.stage.color_g = False\n s2.stage.color_b = True\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n internal_shape = CircleShapeP1Internal(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n s1.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, a=1, duration=5))\n s1.stage.add_shape(internal_shape.shape)\n\n s2.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, a=1, duration=5))\n s2.stage.add_shape(shape.shape)\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = False\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 0\n assert g1 == 0\n assert b1 == 255\n\n assert r2 == 255\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n assert not stage_app.app.view_controller.stage_active\n\n # now hide internal shape behind larger circle\n stage_app.app.shape_factory.move_shape_upwards(shape.shape)\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 255\n assert g1 == 0\n assert b1 == 255\n\n assert r2 == 255\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'moat_stage_shapes.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', '1']\n assert not f.num_images_in_file\n\n f.load_experiment(0)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)\n\n f.load_experiment(1)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)\n\n f.close_h5()\n\n\nasync def test_moat_single_stage_shapes(stage_app: CeedTestApp, tmp_path):\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP1Internal\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n s1.stage.color_r = False\n s1.stage.color_g = False\n s1.stage.color_b = True\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n internal_shape = CircleShapeP1Internal(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n s1.stage.add_func(ConstFunc(\n 
function_factory=stage_app.app.function_factory, a=1, duration=5))\n stage_shape = s1.stage.add_shape(internal_shape.shape)\n s1.stage.add_shape(shape.shape)\n stage_shape.keep_dark = True\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = False\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 0\n assert g1 == 0\n assert b1 == 0\n\n assert r2 == 0\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n assert not stage_app.app.view_controller.stage_active\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'moat_single_stage_shapes.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', ]\n assert not f.num_images_in_file\n\n f.load_experiment(0)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0]) == (0, 0, 1, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0]) == (0, 0, 0, 1)\n f.close_h5()\n\n\[email protected]('func', [True, False])\nasync def test_event_data_empty(stage_app: CeedTestApp, tmp_path, func):\n from ..test_stages import create_2_shape_stage\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, s1, s2, shape1, shape2 = create_2_shape_stage(\n stage_app.app.stage_factory, show_in_gui=True, app=stage_app)\n s1.stage.name = 'test stage'\n\n if func:\n s1.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, duration=0))\n\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.skip_estimated_missed_frames = False\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app, timeout=180)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'event_data_empty.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n # order in which the stage/func id start/finish\n if func:\n order = (0, 1, 3, 2), (2, 1, 3, 0)\n else:\n order = (0, 1, 2), (1, 2, 0)\n loops = [\n [0, i, 'start' + s, [0, ] * 2] for i in order[0] for s in ('', '_loop')\n ]\n loops += [\n [0, i, 'end' + s, [0, ] * 2] for i in order[1] for s in ('_loop', '')\n ]\n\n with CeedDataReader(filename) as f:\n f.load_experiment(0)\n events = [d[:-1] + [d[-1][:-1], ] for d in f.event_data]\n assert loops == events\n\n s = f.experiment_stage.stages[0]\n\n for kw in [{'ceed_id': s.ceed_id}, {'ceed_name': s1.stage.name},\n {'ceed_obj': s}]:\n items = f.format_event_data(event='start_loop', **kw)\n assert len(items) == 1\n assert items[0][:5] == [0, s, 'start_loop', 0, 0]\n\n items = f.format_event_data(**kw)\n assert len(items) == 4\n for item, val in zip(\n items, ['start', 'start_loop', 'end_loop', 'end']):\n assert item[:5] == [0, s, val, 0, 0]\n\n\[email protected](\n 'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])\[email protected]('skip', [False, True])\nasync def test_pad_stage_ticks(\n stage_app: CeedTestApp, tmp_path, quad, 
sub_frames, skip):\n from ceed.analysis import CeedDataReader\n\n root = SerialAllStage(\n stage_factory=stage_app.app.stage_factory, show_in_gui=False,\n app=stage_app, create_add_to_parent=True)\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n root.stage.add_shape(shape.shape)\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n # use a larger frame rate so we have to drop frames\n stage_app.app.view_controller.frame_rate = await measure_fps(stage_app) + 10\n stage_app.app.view_controller.skip_estimated_missed_frames = skip\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.video_mode = quad\n\n stage_app.app.view_controller.pad_to_stage_handshake = False\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app)\n\n stage_app.app.view_controller.pad_to_stage_handshake = True\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app, 300)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'pad_stage_ticks.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', '1']\n assert not f.num_images_in_file\n\n f.load_experiment('0')\n assert f.shapes_intensity[shape.name].shape == (0, 4)\n\n f.load_experiment('1')\n # sub_frames scales up the handshake since IO is same for each sub-frame\n # Even skipped frames are logged so size matches\n assert f.shapes_intensity[shape.name].shape == (\n stage_app.app.data_serializer.num_ticks_handshake(16, sub_frames), 4)\n assert f.shapes_intensity[shape.name].shape == (\n stage_app.app.data_serializer.num_ticks_handshake(16, 1) * sub_frames,\n 4)\n\n frame_time_counter = np.asarray(f._block.data_arrays['frame_time_counter'])\n frame_time = np.asarray(f._block.data_arrays['frame_time'])\n rendered_frames_bool = f.rendered_frames\n assert len(frame_time_counter) == len(frame_time)\n assert np.sum(rendered_frames_bool) == len(frame_time_counter) * sub_frames\n\n frame_counter = np.asarray(f._block.data_arrays['frame_counter'])\n n = f.shapes_intensity[shape.name].shape[0]\n # some frames will have been skipped because of higher frame rate than GPU\n if skip:\n assert sub_frames * len(frame_time_counter) < n\n else:\n assert sub_frames * len(frame_time_counter) == n\n\n # we didn't stop early so all frames are rendered\n rendered_indices = np.arange(0, n, sub_frames)\n if skip:\n assert len(frame_time_counter) < len(frame_counter) // sub_frames\n assert len(rendered_indices) > len(frame_time_counter)\n else:\n assert len(frame_time_counter) == len(frame_counter) // sub_frames\n assert len(rendered_indices) == len(frame_time_counter)\n\n assert np.all(np.arange(1, n + 1) == frame_counter)\n # count recorded is last sub-frame\n if skip:\n assert np.all(\n np.isin(frame_time_counter, rendered_indices + sub_frames))\n assert np.all(frame_time_counter[1:] - frame_time_counter[:-1] > 0)\n\n assert np.all(np.isin(\n frame_time_counter,\n frame_counter[rendered_indices + sub_frames - 1]))\n else:\n assert np.all(frame_time_counter == rendered_indices + sub_frames)\n assert np.all(\n frame_counter[rendered_indices + sub_frames - 1]\n == frame_time_counter)\n\n f.close_h5()\n\n\n@contextmanager\ndef add_to_path(tmp_path, *args):\n sys.path.append(str(tmp_path))\n mod = tmp_path / 'my_gui_stage_plugin' / '__init__.py'\n try:\n mod.parent.mkdir()\n 
mod.write_text(fake_plugin_stage)\n yield None\n finally:\n sys.path.remove(str(tmp_path))\n if 'my_gui_stage_plugin' in sys.modules:\n del sys.modules['my_gui_stage_plugin']\n\n\[email protected](\n \"ceed_app\",\n [{'yaml_config': {\n 'external_stage_plugin_package': 'my_gui_stage_plugin',\n 'view': {'teensy_frame_estimation': {'use_teensy': False}}},\n 'app_context': add_to_path}, ],\n indirect=True\n)\[email protected]('external', [False, True])\nasync def test_external_plugin_named_package(\n stage_app: CeedTestApp, tmp_path, external):\n stage_factory = stage_app.app.stage_factory\n\n assert 'FakeStage' in stage_factory.stages_cls\n\n stage = SerialAllStage(\n stage_factory=stage_factory, show_in_gui=True, app=stage_app,\n create_add_to_parent=False, stage_cls=stage_factory.get('FakeStage'))\n stage.stage.val = 13\n await run_plugin_experiment(stage_app, tmp_path, external, stage=stage)\n\n assert stage_factory.stage_names[last_experiment_stage_name].val == 13\n\n\[email protected](\n 'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])\[email protected]('main_frames', [1, 1.5, 2])\nasync def test_short_stage(\n stage_app: CeedTestApp, tmp_path, quad, sub_frames, main_frames):\n from ceed.analysis import CeedDataReader\n from ceed.function.plugin import LinearFunc\n from kivy.clock import Clock\n\n num_frames = int(math.ceil(main_frames * sub_frames))\n rate = main_frames\n\n root = SerialAllStage(\n stage_factory=stage_app.app.stage_factory, show_in_gui=False,\n app=stage_app, create_add_to_parent=True)\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n root.stage.add_shape(shape.shape)\n root.stage.add_func(LinearFunc(\n function_factory=stage_app.app.function_factory, b=0, m=1,\n duration=1))\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n # use a larger frame rate so we have to drop frames\n stage_app.app.view_controller.frame_rate = rate\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.video_mode = quad\n stage_app.app.view_controller.pad_to_stage_handshake = False\n stage_app.app.view_controller.flip_projector = False\n\n frame = 0\n event = None\n cx, cy = shape.shape.centroid\n if sub_frames == 1:\n centers = [(cx, cy)]\n else:\n cx1, cy1 = cx // 2, cy // 2\n corners = ((0, 540), (960, 540), (0, 0), (960, 0))\n centers = [(cx1 + x, cy1 + y) for x, y in corners]\n intensity = []\n total_rounded_frames = math.ceil(main_frames) * sub_frames\n\n def verify_intensity(*largs):\n nonlocal frame\n if not stage_app.app.view_controller.stage_active:\n event.cancel()\n return\n # not yet started\n if not stage_app.app.view_controller.count:\n return\n\n assert frame < num_frames\n\n rgb = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers)\n rgb = [[c / 255 for c in p] for p in rgb]\n if sub_frames == 12:\n for plane in range(3):\n for point in rgb:\n value = point[plane]\n intensity.append((value, value, value, 1))\n else:\n intensity.extend(rgb)\n frame += sub_frames\n\n assert frame in (\n stage_app.app.view_controller.count, total_rounded_frames)\n assert not stage_app.app.view_controller._n_missed_frames\n\n event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)\n event()\n stage_app.app.view_controller.request_stage_start(root.name)\n\n await wait_experiment_done(stage_app, timeout=50)\n await wait_experiment_stopped(stage_app)\n\n assert stage_app.app.view_controller.count == num_frames + 1\n # only counts whole frames\n 
assert frame == total_rounded_frames\n # have data for blank frames at end\n assert len(intensity) == total_rounded_frames\n assert total_rounded_frames >= num_frames\n\n filename = str(tmp_path / 'short_stage.h5')\n stage_app.app.ceed_data.save(filename=filename)\n with CeedDataReader(filename) as f:\n f.load_experiment(0)\n\n shape_data = f.shapes_intensity[shape.name]\n shape_data_rendered = f.shapes_intensity_rendered[shape.name]\n recorded_rendered_frames = f.rendered_frames\n\n assert shape_data.shape[0] == num_frames\n assert shape_data_rendered.shape[0] == num_frames\n assert len(recorded_rendered_frames) == num_frames\n\n # for each sub-frame\n gray = quad == 'QUAD12X'\n r, g, b = root.color_r, root.color_g, root.color_b\n for i, ((v1, v2, v3, _), (r1, g1, b1, _)) in enumerate(\n zip(intensity[:num_frames], shape_data)):\n # we saw the intensity we expect\n val = i / (main_frames * sub_frames)\n assert isclose(val, v1, abs_tol=2 / 255) if r or gray else v1 == 0\n assert isclose(val, v2, abs_tol=2 / 255) if g or gray else v2 == 0\n assert isclose(val, v3, abs_tol=2 / 255) if b or gray else v3 == 0\n\n # what we saw is what is recorded\n assert isclose(v1, r1, abs_tol=2 / 255)\n assert isclose(v2, g1, abs_tol=2 / 255)\n assert isclose(v3, b1, abs_tol=2 / 255)\n\n assert recorded_rendered_frames[i]\n assert shape_data_rendered[i, 0] == r1\n assert shape_data_rendered[i, 1] == g1\n assert shape_data_rendered[i, 2] == b1\n\n # remaining frames are blank in quad mode\n for r, g, b, _ in intensity[num_frames:]:\n assert not r\n assert not g\n assert not b\n"
] | [
[
"numpy.sum",
"numpy.asarray",
"numpy.arange",
"numpy.isin",
"numpy.all",
"numpy.array"
]
] |
igorlucci/koalas | [
"8803344d620261981003175bd1edc3c4120b84e2"
] | [
"databricks/koalas/base.py"
] | [
"#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nBase and utility classes for Koalas objects.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nimport datetime\nfrom functools import wraps, partial\nfrom typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING\nimport warnings\n\nimport numpy as np\nimport pandas as pd # noqa: F401\nfrom pandas.api.types import is_list_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Window, Column\nfrom pyspark.sql.types import (\n BooleanType,\n DateType,\n DoubleType,\n FloatType,\n IntegralType,\n LongType,\n StringType,\n TimestampType,\n)\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas import numpy_compat\nfrom databricks.koalas.config import get_option, option_context\nfrom databricks.koalas.internal import (\n InternalFrame,\n DEFAULT_SERIES_NAME,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n)\nfrom databricks.koalas.spark import functions as SF\nfrom databricks.koalas.spark.accessors import SparkIndexOpsMethods\nfrom databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype\nfrom databricks.koalas.utils import (\n combine_frames,\n same_anchor,\n scol_for,\n validate_axis,\n ERROR_MESSAGE_CANNOT_COMBINE,\n)\nfrom databricks.koalas.frame import DataFrame\n\nif TYPE_CHECKING:\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series\n\n\ndef should_alignment_for_column_op(self: \"IndexOpsMixin\", other: \"IndexOpsMixin\") -> bool:\n from databricks.koalas.series import Series\n\n if isinstance(self, Series) and isinstance(other, Series):\n return not same_anchor(self, other)\n else:\n return self._internal.spark_frame is not other._internal.spark_frame\n\n\ndef align_diff_index_ops(func, this_index_ops: \"IndexOpsMixin\", *args) -> \"IndexOpsMixin\":\n \"\"\"\n Align the `IndexOpsMixin` objects and apply the function.\n\n Parameters\n ----------\n func : The function to apply\n this_index_ops : IndexOpsMixin\n A base `IndexOpsMixin` object\n args : list of other arguments including other `IndexOpsMixin` objects\n\n Returns\n -------\n `Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`\n \"\"\"\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series, first_series\n\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):\n combined = combine_frames(this_index_ops.to_frame(), *cols, how=\"full\")\n\n return column_op(func)(\n combined[\"this\"]._kser_for(combined[\"this\"]._internal.column_labels[0]),\n *[\n combined[\"that\"]._kser_for(label)\n for label in combined[\"that\"]._internal.column_labels\n ]\n )\n else:\n # This could cause as many counts, reset_index calls, joins for combining\n # as the number of `Index`s in `args`. 
So far it's fine since we can assume the ops\n # only work between at most two `Index`s. We might need to fix it in the future.\n\n self_len = len(this_index_ops)\n if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):\n raise ValueError(\"operands could not be broadcast together with shapes\")\n\n with option_context(\"compute.default_index_type\", \"distributed-sequence\"):\n if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):\n return (\n cast(\n Series,\n column_op(func)(\n this_index_ops.to_series().reset_index(drop=True),\n *[\n arg.to_series().reset_index(drop=True)\n if isinstance(arg, Index)\n else arg\n for arg in args\n ]\n ),\n )\n .sort_index()\n .to_frame(DEFAULT_SERIES_NAME)\n .set_index(DEFAULT_SERIES_NAME)\n .index.rename(this_index_ops.name)\n )\n elif isinstance(this_index_ops, Series):\n this = this_index_ops.reset_index()\n that = [\n cast(Series, col.to_series() if isinstance(col, Index) else col).reset_index(\n drop=True\n )\n for col in cols\n ]\n\n combined = combine_frames(this, *that, how=\"full\").sort_index()\n combined = combined.set_index(\n combined._internal.column_labels[: this_index_ops._internal.index_level]\n )\n combined.index.names = this_index_ops._internal.index_names\n\n return column_op(func)(\n first_series(combined[\"this\"]),\n *[\n combined[\"that\"]._kser_for(label)\n for label in combined[\"that\"]._internal.column_labels\n ]\n )\n else:\n this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)\n\n that_series = next(col for col in cols if isinstance(col, Series))\n that_frame = that_series._kdf[\n [col.to_series() if isinstance(col, Index) else col for col in cols]\n ]\n\n combined = combine_frames(this, that_frame.reset_index()).sort_index()\n\n self_index = (\n combined[\"this\"].set_index(combined[\"this\"]._internal.column_labels).index\n )\n\n other = combined[\"that\"].set_index(\n combined[\"that\"]._internal.column_labels[: that_series._internal.index_level]\n )\n other.index.names = that_series._internal.index_names\n\n return column_op(func)(\n self_index, *[other._kser_for(label) for label in other._internal.column_labels]\n )\n\n\ndef booleanize_null(left_scol, scol, f) -> Column:\n \"\"\"\n Booleanize Null in Spark Column\n \"\"\"\n comp_ops = [\n getattr(Column, \"__{}__\".format(comp_op))\n for comp_op in [\"eq\", \"ne\", \"lt\", \"le\", \"ge\", \"gt\"]\n ]\n\n if f in comp_ops:\n # if `f` is \"!=\", fill null with True otherwise False\n filler = f == Column.__ne__\n scol = F.when(scol.isNull(), filler).otherwise(scol)\n\n elif f == Column.__or__:\n scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)\n\n elif f == Column.__and__:\n scol = F.when(scol.isNull(), False).otherwise(scol)\n\n return scol\n\n\ndef column_op(f):\n \"\"\"\n A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be\n supported too. If this decorator is used for the `f` function that takes Spark Column and\n returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas\n Series.\n\n :param f: a function that takes Spark Column and returns Spark Column.\n :param self: Koalas Series\n :param args: arguments that the function `f` takes.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, *args):\n from databricks.koalas.series import Series\n\n # It is possible for the function `f` takes other arguments than Spark Column.\n # To cover this case, explicitly check if the argument is Koalas Series and\n # extract Spark Column. 
For other arguments, they are used as are.\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if all(not should_alignment_for_column_op(self, col) for col in cols):\n # Same DataFrame anchors\n args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]\n scol = f(self.spark.column, *args)\n scol = booleanize_null(self.spark.column, scol, f)\n\n if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):\n index_ops = self._with_new_scol(scol)\n else:\n kser = next(col for col in cols if isinstance(col, Series))\n index_ops = kser._with_new_scol(scol)\n elif get_option(\"compute.ops_on_diff_frames\"):\n index_ops = align_diff_index_ops(f, self, *args)\n else:\n raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)\n\n if not all(self.name == col.name for col in cols):\n index_ops = index_ops.rename(None)\n\n return index_ops\n\n return wrapper\n\n\ndef numpy_column_op(f):\n @wraps(f)\n def wrapper(self, *args):\n # PySpark does not support NumPy type out of the box. For now, we convert NumPy types\n # into some primitive types understandable in PySpark.\n new_args = []\n for arg in args:\n # TODO: This is a quick hack to support NumPy type. We should revisit this.\n if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):\n new_args.append(float(arg / np.timedelta64(1, \"s\")))\n else:\n new_args.append(arg)\n return column_op(f)(self, *new_args)\n\n return wrapper\n\n\nclass IndexOpsMixin(object, metaclass=ABCMeta):\n \"\"\"common ops mixin to support a unified interface / docs for Series / Index\n\n Assuming there are following attributes or properties and function.\n \"\"\"\n\n @property\n @abstractmethod\n def _internal(self) -> InternalFrame:\n pass\n\n @property\n @abstractmethod\n def _kdf(self) -> DataFrame:\n pass\n\n @abstractmethod\n def _with_new_scol(self, scol: spark.Column):\n pass\n\n @property\n @abstractmethod\n def _column_label(self) -> Tuple:\n pass\n\n @property\n @abstractmethod\n def spark(self) -> SparkIndexOpsMethods:\n pass\n\n @property\n def spark_column(self) -> Column:\n warnings.warn(\n \"Series.spark_column is deprecated as of Series.spark.column. 
\"\n \"Please use the API instead.\",\n FutureWarning,\n )\n return self.spark.column\n\n spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__\n\n # arithmetic operators\n __neg__ = column_op(Column.__neg__)\n\n def __add__(self, other) -> Union[\"Series\", \"Index\"]:\n if not isinstance(self.spark.data_type, StringType) and (\n (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n if isinstance(self.spark.data_type, StringType):\n # Concatenate string columns\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):\n return column_op(F.concat)(self, other)\n # Handle df['col'] + 'literal'\n elif isinstance(other, str):\n return column_op(F.concat)(self, F.lit(other))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__add__)(self, other)\n\n def __sub__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(\n other.spark.data_type, TimestampType\n ):\n warnings.warn(msg, UserWarning)\n return self.astype(\"long\") - other.astype(\"long\")\n elif isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return self.astype(\"long\") - F.lit(other).cast(as_spark_type(\"long\"))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, other).astype(\"long\")\n elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, F.lit(other)).astype(\"long\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__sub__)(self, other)\n\n def __mul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if (\n isinstance(self.spark.data_type, IntegralType)\n and isinstance(other, IndexOpsMixin)\n and isinstance(other.spark.data_type, StringType)\n ):\n return column_op(SF.repeat)(other, self)\n\n if isinstance(self.spark.data_type, StringType):\n if (\n isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)\n ) or isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__mul__)(self, other)\n\n def __truediv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __truediv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. 
When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def truediv(left, right):\n return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n\n return numpy_column_op(truediv)(self, other)\n\n def __mod__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def mod(left, right):\n return ((left % right) + right) % right\n\n return column_op(mod)(self, other)\n\n def __radd__(self, other) -> Union[\"Series\", \"Index\"]:\n # Handle 'literal' + df['col']\n if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, str):\n return self._with_new_scol(F.concat(F.lit(other), self.spark.column))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__radd__)(self, other)\n\n def __rsub__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -(self.astype(\"long\") - F.lit(other).cast(as_spark_type(\"long\")))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -column_op(F.datediff)(self, F.lit(other)).astype(\"long\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__rsub__)(self, other)\n\n def __rmul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__rmul__)(self, other)\n\n def __rtruediv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rtruediv(left, right):\n return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(\n F.lit(right).__truediv__(left)\n )\n\n return numpy_column_op(rtruediv)(self, other)\n\n def __floordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __floordiv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def floordiv(left, right):\n return F.when(F.lit(right is np.nan), np.nan).otherwise(\n F.when(\n F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))\n ).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n )\n\n return numpy_column_op(floordiv)(self, other)\n\n def __rfloordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rfloordiv(left, right):\n return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(\n F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))\n )\n\n return numpy_column_op(rfloordiv)(self, other)\n\n def __rmod__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def rmod(left, right):\n return ((right % left) + left) % left\n\n return column_op(rmod)(self, other)\n\n __pow__ = 
column_op(Column.__pow__)\n __rpow__ = column_op(Column.__rpow__)\n __abs__ = column_op(F.abs)\n\n # comparison operators\n __eq__ = column_op(Column.__eq__)\n __ne__ = column_op(Column.__ne__)\n __lt__ = column_op(Column.__lt__)\n __le__ = column_op(Column.__le__)\n __ge__ = column_op(Column.__ge__)\n __gt__ = column_op(Column.__gt__)\n\n # `and`, `or`, `not` cannot be overloaded in Python,\n # so use bitwise operators as boolean operators\n __and__ = column_op(Column.__and__)\n __or__ = column_op(Column.__or__)\n __invert__ = column_op(Column.__invert__)\n __rand__ = column_op(Column.__rand__)\n __ror__ = column_op(Column.__ror__)\n\n def __len__(self):\n return len(self._kdf)\n\n # NDArray Compat\n def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):\n # Try dunder methods first.\n result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n\n # After that, we try with PySpark APIs.\n if result is NotImplemented:\n result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(\n self, ufunc, method, *inputs, **kwargs\n )\n\n if result is not NotImplemented:\n return result\n else:\n # TODO: support more APIs?\n raise NotImplementedError(\"Koalas objects currently do not support %s.\" % ufunc)\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Return the dtype object of the underlying data.\n\n Examples\n --------\n >>> s = ks.Series([1, 2, 3])\n >>> s.dtype\n dtype('int64')\n\n >>> s = ks.Series(list('abc'))\n >>> s.dtype\n dtype('O')\n\n >>> s = ks.Series(pd.date_range('20130101', periods=3))\n >>> s.dtype\n dtype('<M8[ns]')\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.dtype\n dtype('<M8[ns]')\n \"\"\"\n return spark_type_to_pandas_dtype(self.spark.data_type)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current object is empty. Otherwise, returns false.\n\n >>> ks.range(10).id.empty\n False\n\n >>> ks.range(0).id.empty\n True\n\n >>> ks.DataFrame({}, index=list('abc')).index.empty\n False\n \"\"\"\n return self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n\n @property\n def hasnans(self) -> bool:\n \"\"\"\n Return True if it has any missing values. Otherwise, it returns False.\n\n >>> ks.DataFrame({}, index=list('abc')).index.hasnans\n False\n\n >>> ks.Series(['a', None]).hasnans\n True\n\n >>> ks.Series([1.0, 2.0, np.nan]).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).hasnans\n False\n\n >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).rename(\"a\").to_frame().set_index(\"a\").index.hasnans\n False\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n\n if isinstance(self.spark.data_type, (DoubleType, FloatType)):\n return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]\n else:\n return sdf.select(F.max(scol.isNull())).collect()[0][0]\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically increasing.\n\n .. note:: the current implementation of is_monotonic requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. 
In case of multi-index, all data are\n transferred to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])\n >>> ser.is_monotonic\n True\n\n >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})\n >>> df.dates.is_monotonic\n False\n\n >>> df.index.is_monotonic\n True\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic\n False\n\n >>> ser.index.is_monotonic\n True\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic\n True\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic\n False\n \"\"\"\n return self._is_monotonic(\"increasing\")\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically decreasing.\n\n .. note:: the current implementation of is_monotonic_decreasing requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. In case of multi-index, all data are transferred\n to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})\n >>> df.dates.is_monotonic_decreasing\n False\n\n >>> df.index.is_monotonic_decreasing\n False\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.index.is_monotonic_decreasing\n False\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n False\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... 
[('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n True\n \"\"\"\n return self._is_monotonic(\"decreasing\")\n\n def _is_locally_monotonic_spark_column(self, order):\n window = (\n Window.partitionBy(F.col(\"__partition_id\"))\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-1, -1)\n )\n\n if order == \"increasing\":\n return (F.col(\"__origin\") >= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n else:\n return (F.col(\"__origin\") <= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n\n def _is_monotonic(self, order):\n assert order in (\"increasing\", \"decreasing\")\n\n sdf = self._internal.spark_frame\n\n sdf = (\n sdf.select(\n F.spark_partition_id().alias(\n \"__partition_id\"\n ), # Make sure we use the same partition id in the whole job.\n F.col(NATURAL_ORDER_COLUMN_NAME),\n self.spark.column.alias(\"__origin\"),\n )\n .select(\n F.col(\"__partition_id\"),\n F.col(\"__origin\"),\n self._is_locally_monotonic_spark_column(order).alias(\n \"__comparison_within_partition\"\n ),\n )\n .groupby(F.col(\"__partition_id\"))\n .agg(\n F.min(F.col(\"__origin\")).alias(\"__partition_min\"),\n F.max(F.col(\"__origin\")).alias(\"__partition_max\"),\n F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True))).alias(\n \"__comparison_within_partition\"\n ),\n )\n )\n\n # Now we're windowing the aggregation results without partition specification.\n # The number of rows here will be as the same of partitions, which is expected\n # to be small.\n window = Window.orderBy(F.col(\"__partition_id\")).rowsBetween(-1, -1)\n if order == \"increasing\":\n comparison_col = F.col(\"__partition_min\") >= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n else:\n comparison_col = F.col(\"__partition_min\") <= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n\n sdf = sdf.select(\n comparison_col.alias(\"__comparison_between_partitions\"),\n F.col(\"__comparison_within_partition\"),\n )\n\n ret = sdf.select(\n F.min(F.coalesce(F.col(\"__comparison_between_partitions\"), F.lit(True)))\n & F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True)))\n ).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n Return 1 for Series / Index / MultiIndex.\n\n Examples\n --------\n\n For Series\n\n >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])\n >>> s.ndim\n 1\n\n For Index\n\n >>> s.index.ndim\n 1\n\n For MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... 
[1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index.ndim\n 1\n \"\"\"\n return 1\n\n def astype(self, dtype) -> Union[\"Index\", \"Series\"]:\n \"\"\"\n Cast a Koalas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> ser = ks.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.astype('int64')\n Int64Index([1, 2], dtype='int64', name='a')\n \"\"\"\n spark_type = as_spark_type(dtype)\n if not spark_type:\n raise ValueError(\"Type {} not understood\".format(dtype))\n if isinstance(spark_type, BooleanType):\n if isinstance(self.spark.data_type, StringType):\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n F.length(self.spark.column) > 0\n )\n elif isinstance(self.spark.data_type, (FloatType, DoubleType)):\n scol = F.when(\n self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)\n ).otherwise(self.spark.column.cast(spark_type))\n else:\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n self.spark.column.cast(spark_type)\n )\n elif isinstance(spark_type, StringType):\n scol = F.when(self.spark.column.isNull(), str(None)).otherwise(\n self.spark.column.cast(spark_type)\n )\n else:\n scol = self.spark.column.cast(spark_type)\n return self._with_new_scol(scol)\n\n def isin(self, values) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Check whether `values` are contained in Series or Index.\n\n Return a boolean Series or Index showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : list or set\n The sequence of values to test.\n\n Returns\n -------\n isin : Series (bool dtype) or Index (bool dtype)\n\n Examples\n --------\n >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.isin(['lama'])\n Index([True, False, True, False, True, False], dtype='object', name='a')\n \"\"\"\n if not is_list_like(values):\n raise TypeError(\n \"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a [{values_type}]\".format(values_type=type(values).__name__)\n )\n\n return self._with_new_scol(self.spark.column.isin(list(values)))\n\n def isnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values. 
Characters such as empty strings '' or\n numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser.isna() # doctest: +NORMALIZE_WHITESPACE\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.isna()\n Index([False, False, True], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n if isinstance(self.spark.data_type, (FloatType, DoubleType)):\n return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))\n else:\n return self._with_new_scol(self.spark.column.isNull())\n\n isna = isnull\n\n def notnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True.\n Characters such as empty strings '' or numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n NA values, such as None or numpy.NaN, get mapped to False values.\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n Show which entries in a Series are not NA.\n\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.notna()\n Index([True, True, False], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"notna is not defined for MultiIndex\")\n return (~self.isnull()).rename(\n self.name # type: ignore\n )\n\n notna = notnull\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def all(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there at least one element within a series that is\n False or equivalent (e.g. 
zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([True, True]).all()\n True\n\n >>> ks.Series([True, False]).all()\n False\n\n >>> ks.Series([0, 1]).all()\n False\n\n >>> ks.Series([1, 2, 3]).all()\n True\n\n >>> ks.Series([True, True, None]).all()\n True\n\n >>> ks.Series([True, False, None]).all()\n False\n\n >>> ks.Series([]).all()\n True\n\n >>> ks.Series([np.nan]).all()\n True\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.all()\n False\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"every(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use min as its alternative:\n ret = sdf.select(F.min(F.coalesce(col.cast(\"boolean\"), F.lit(True)))).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def any(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([False, False]).any()\n False\n\n >>> ks.Series([True, False]).any()\n True\n\n >>> ks.Series([0, 0]).any()\n False\n\n >>> ks.Series([0, 1, 2]).any()\n True\n\n >>> ks.Series([False, False, None]).any()\n False\n\n >>> ks.Series([True, False, None]).any()\n True\n\n >>> ks.Series([]).any()\n False\n\n >>> ks.Series([np.nan]).any()\n False\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.any()\n True\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"any(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use max as its alternative:\n ret = sdf.select(F.max(F.coalesce(col.cast(\"boolean\"), F.lit(False)))).collect()[0][0]\n if ret is None:\n return False\n else:\n return ret\n\n # TODO: add frep and axis parameter\n def shift(self, periods=1, fill_value=None) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Shift Series/Index by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. 
Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input Series/Index, shifted.\n\n Examples\n --------\n >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.Col1.shift(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 10.0\n 4 20.0\n Name: Col1, dtype: float64\n\n >>> df.Col2.shift(periods=3, fill_value=0)\n 0 0\n 1 0\n 2 0\n 3 13\n 4 23\n Name: Col2, dtype: int64\n\n >>> df.index.shift(periods=3, fill_value=0)\n Int64Index([0, 0, 0, 0, 1], dtype='int64')\n \"\"\"\n return self._shift(periods, fill_value)\n\n def _shift(self, periods, fill_value, part_cols=()):\n if not isinstance(periods, int):\n raise ValueError(\"periods should be an int; however, got [%s]\" % type(periods).__name__)\n\n col = self.spark.column\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-periods, -periods)\n )\n lag_col = F.lag(col, periods).over(window)\n col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)\n return self._with_new_scol(col)\n\n # TODO: Update Documentation for Bins Parameter when its supported\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ) -> \"Series\":\n \"\"\"\n Return a Series containing counts of unique values.\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by values.\n ascending : boolean, default False\n Sort in ascending order.\n bins : Not Yet Supported\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n\n Examples\n --------\n For Series\n\n >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})\n >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n Name: x, dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE\n 1.0 0.6\n 0.0 0.4\n Name: x, dtype: float64\n\n **dropna**\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n NaN 1\n Name: x, dtype: int64\n\n For Index\n\n >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan])\n >>> idx\n Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')\n\n >>> idx.value_counts().sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **sort**\n\n With `sort` set to `False`, the result wouldn't be sorted by number of count.\n\n >>> idx.value_counts(sort=True).sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **normalize**\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> idx.value_counts(normalize=True).sort_index()\n 1.0 0.2\n 2.0 0.2\n 3.0 0.4\n 4.0 0.2\n dtype: float64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index 
values.\n\n >>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n NaN 1\n dtype: int64\n\n For MultiIndex.\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index # doctest: +SKIP\n MultiIndex([( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'weight'),\n ('falcon', 'length'),\n ('falcon', 'length')],\n )\n\n >>> s.index.value_counts().sort_index()\n (cow, length) 1\n (cow, weight) 2\n (falcon, length) 2\n (falcon, weight) 1\n (lama, weight) 3\n dtype: int64\n\n >>> s.index.value_counts(normalize=True).sort_index()\n (cow, length) 0.111111\n (cow, weight) 0.222222\n (falcon, length) 0.222222\n (falcon, weight) 0.111111\n (lama, weight) 0.333333\n dtype: float64\n\n If Index has name, keep the name up.\n\n >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas')\n >>> idx.value_counts().sort_index()\n 0 3\n 1 2\n 2 1\n 3 1\n Name: koalas, dtype: int64\n \"\"\"\n from databricks.koalas.series import first_series\n\n if bins is not None:\n raise NotImplementedError(\"value_counts currently does not support bins\")\n\n if dropna:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()\n else:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column)\n index_name = SPARK_DEFAULT_INDEX_NAME\n column_name = self._internal.data_spark_column_names[0]\n sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()\n if sort:\n if ascending:\n sdf = sdf.orderBy(F.col(\"count\"))\n else:\n sdf = sdf.orderBy(F.col(\"count\").desc())\n\n if normalize:\n sum = sdf_dropna.count()\n sdf = sdf.withColumn(\"count\", F.col(\"count\") / F.lit(sum))\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, index_name)],\n column_labels=self._internal.column_labels,\n data_spark_columns=[scol_for(sdf, \"count\")],\n column_label_names=self._internal.column_label_names,\n )\n\n return first_series(DataFrame(internal))\n\n def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:\n \"\"\"\n Return number of unique elements in the object.\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to Koalas and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to Koalas.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> ks.Series([1, 2, 3, np.nan]).nunique()\n 3\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)\n 4\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)\n 3\n\n >>> idx = ks.Index([1, 1, 2, None])\n >>> idx\n 
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')\n\n >>> idx.nunique()\n 2\n\n >>> idx.nunique(dropna=False)\n 3\n \"\"\"\n res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])\n return res.collect()[0][0]\n\n def _nunique(self, dropna=True, approx=False, rsd=0.05):\n colname = self._internal.data_spark_column_names[0]\n count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct\n if dropna:\n return count_fn(self.spark.column).alias(colname)\n else:\n return (\n count_fn(self.spark.column)\n + F.when(\n F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1\n ).otherwise(0)\n ).alias(colname)\n\n def take(self, indices) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n\n Series\n\n >>> kser = ks.Series([100, 200, 300, 400, 500])\n >>> kser\n 0 100\n 1 200\n 2 300\n 3 400\n 4 500\n dtype: int64\n\n >>> kser.take([0, 2, 4]).sort_index()\n 0 100\n 2 300\n 4 500\n dtype: int64\n\n Index\n\n >>> kidx = ks.Index([100, 200, 300, 400, 500])\n >>> kidx\n Int64Index([100, 200, 300, 400, 500], dtype='int64')\n\n >>> kidx.take([0, 2, 4]).sort_values()\n Int64Index([100, 300, 500], dtype='int64')\n\n MultiIndex\n\n >>> kmidx = ks.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\")])\n >>> kmidx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('x', 'c')],\n )\n\n >>> kmidx.take([0, 2]) # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'c')],\n )\n \"\"\"\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise ValueError(\"`indices` must be a list-like except dict or set\")\n if isinstance(self, ks.Series):\n return cast(ks.Series, self.iloc[indices])\n else:\n return self._kdf.iloc[indices].index\n"
] | [
[
"pandas.api.types.is_list_like",
"numpy.timedelta64"
]
] |
abhi526691/Covid-Guard | [
"9c050ef44201c01f512169ffb146ad0da5278ec1"
] | [
"main.py"
] | [
"# import the necessary packages\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\r\nfrom tensorflow.keras.models import load_model\r\nfrom imutils.video import VideoStream,FileVideoStream\r\nimport imutils\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport cv2\r\nimport math\r\n\r\n\r\ndef mainc():\r\n\r\n\tscale_percent = 20 # percentage of original size\r\n\twidth = 0\r\n\theight = 0\r\n\r\n\tlabelsPath = \"Model/coco.names\" #path for model\r\n\tLABELS = open(labelsPath).read().strip().split(\"\\n\")\r\n\r\n\tnp.random.seed(42)\r\n\tCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\r\n\t\tdtype=\"uint8\")\r\n\r\n\tweightsPath = \"Model/yolov3.weights\" #path for yolov3 weights\r\n\tconfigPath = \"Model/yolov3.cfg\" #path for yolov3 configuration file\r\n\r\n\tnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\r\n\r\n\tcap = cv2.VideoCapture(0)\r\n\tif not cap.isOpened():\r\n\t\tprint(\"Could not open webcam\")\r\n\t\texit()\r\n\telse: #get dimension info\r\n\t\twidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n\t\theight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\t\tdim = (width, height)\r\n\t\tprint('Original Dimensions : ',dim)\r\n\t\twidth = int(width * scale_percent / 100)\r\n\t\theight = int(height * scale_percent / 100)\r\n\t\tdim = (width, height)\r\n\t\tprint('Resized Dimensions : ', dim)\r\n\r\n\r\n\tdef detect_and_predict_mask(frame, faceNet, maskNet):\r\n\t\t# grab the dimensions of the frame and then construct a blob from it\r\n\t\t(h, w) = frame.shape[:2]\r\n\t\tblob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),\r\n\t\t\t(104.0, 177.0, 123.0))\r\n\t\t# pass the blob through the network and obtain the face detections\r\n\t\tfaceNet.setInput(blob)\r\n\t\tdetections = faceNet.forward()\r\n\t\t# initialize our list of faces, their corresponding locations,\r\n\t\t# and the list of predictions from our face mask network\r\n\t\tfaces = []\r\n\t\tlocs = []\r\n\t\tpreds = []\r\n\r\n\r\n\t\t# loop over the detections\r\n\t\tfor i in range(0, detections.shape[2]):\r\n\t\t\t# extract the confidence (i.e., probability) associated with\r\n\t\t\t# the detection\r\n\t\t\tconfidence = detections[0, 0, i, 2]\r\n\t\t\t# filter out weak detections by ensuring the confidence is\r\n\t\t\t# greater than the minimum confidence\r\n\t\t\tif confidence > 0.5:\r\n\t\t\t\t# compute the (x, y)-coordinates of the bounding box for\r\n\t\t\t\t# the object\r\n\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\r\n\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\r\n\t\t\t\t# ensure the bounding boxes fall within the dimensions of\r\n\t\t\t\t# the frame\r\n\t\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\r\n\t\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\r\n\r\n\r\n\t\t\t\t# extract the face ROI, convert it from BGR to RGB channel\r\n\t\t\t\t# ordering, resize it to 224x224, and preprocess it\r\n\t\t\t\tface = frame[startY:endY, startX:endX]\r\n\t\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\r\n\t\t\t\tface = cv2.resize(face, (224, 224))\r\n\t\t\t\tface = img_to_array(face)\r\n\t\t\t\tface = preprocess_input(face)\r\n\t\t\t\t# add the face and bounding boxes to their respective\r\n\t\t\t\t# lists\r\n\t\t\t\tfaces.append(face)\r\n\t\t\t\tlocs.append((startX, startY, endX, endY))\r\n\r\n\r\n\t\t# only make a predictions if at least one face was detected\r\n\t\tif len(faces) > 0:\r\n\t\t\t# for faster inference we'll make batch predictions on 
*all*\r\n\t\t\t# faces at the same time rather than one-by-one predictions\r\n\t\t\t# in the above `for` loop\r\n\t\t\tfaces = np.array(faces, dtype=\"float32\")\r\n\t\t\tpreds = maskNet.predict(faces, batch_size=32)\r\n\t\t# return a 2-tuple of the face locations and their corresponding\r\n\t\t# locations\r\n\t\treturn (locs, preds)\r\n\r\n\r\n\r\n\tbase_dir=os.getcwd()\r\n\tbase_dir=base_dir.replace('\\\\','/')\r\n\r\n\tprint(base_dir)\r\n\tdataset_path=base_dir+'/dataset'\r\n\taccuracy_plot_dir=base_dir+'/Model'\r\n\tmodel_store_dir=base_dir+'/Model/mask_detector.model'\r\n\texample=base_dir+'/Image/1.jpg'\r\n\r\n\tconfidence=0.4\r\n\r\n\r\n\tface_detector_caffe=base_dir+'/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'\r\n\r\n\r\n\r\n\t# load our serialized face detector model from disk\r\n\tprint(\"[INFO] loading face detector model...\")\r\n\tprototxtPath = base_dir+'/Face Detector/deploy.prototxt'\r\n\tweightsPath = face_detector_caffe\r\n\tfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\r\n\t# load the face mask detector model from disk\r\n\tprint(\"[INFO] loading face mask detector model...\")\r\n\tmaskNet = load_model(model_store_dir)\r\n\t# initialize the video stream and allow the camera sensor to warm up\r\n\tprint(\"[INFO] starting video stream...\")\r\n\tvs = VideoStream(src=0).start()\r\n\t#time.sleep(2.0)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t# loop over the frames from the video stream\r\n\titer=0\r\n\twhile True:\r\n\r\n\r\n\r\n\t\t# grab the frame from the threaded video stream and resize it\r\n\t\t# to have a maximum width of 400 pixels\r\n\t\tframe = vs.read()\r\n\t\tframe = imutils.resize(frame, width=1200)\r\n\r\n\t\tresized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\r\n\r\n\t\t(H, W) = frame.shape[:2]\r\n\t\tln = net.getLayerNames()\r\n\t\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\t\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)\r\n\t\tnet.setInput(blob)\r\n\t\tstart = time.time()\r\n\t\tlayerOutputs = net.forward(ln)\r\n\t\tend = time.time()\r\n\t\t# print(\"Frame Prediction Time : {:.6f} seconds\".format(end - start))\r\n\t\tboxes = []\r\n\t\tconfidences = []\r\n\t\tclassIDs = []\r\n\r\n\t\tfor output in layerOutputs:\r\n\t\t\tfor detection in output:\r\n\t\t\t\tscores = detection[5:]\r\n\t\t\t\tclassID = np.argmax(scores)\r\n\t\t\t\tconfidence = scores[classID]\r\n\t\t\t\tif confidence > 0.1 and classID == 0:\r\n\t\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\r\n\t\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\r\n\t\t\t\t\tx = int(centerX - (width / 2))\r\n\t\t\t\t\ty = int(centerY - (height / 2))\r\n\t\t\t\t\tboxes.append([x, y, int(width), int(height)])\r\n\t\t\t\t\tconfidences.append(float(confidence))\r\n\t\t\t\t\tclassIDs.append(classID)\r\n\r\n\t\tif iter % 3 == 0:\r\n\r\n\t\t\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)\r\n\t\t\tind = []\r\n\t\t\tfor i in range(0, len(classIDs)):\r\n\t\t\t\tif (classIDs[i] == 0):\r\n\t\t\t\t\tind.append(i)\r\n\t\t\ta = []\r\n\t\t\tb = []\r\n\r\n\t\t\tif len(idxs) > 0:\r\n\t\t\t\tfor i in idxs.flatten():\r\n\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\t\ta.append(x)\r\n\t\t\t\t\tb.append(y)\r\n\r\n\t\t\tdistance = []\r\n\t\t\tnsd = []\r\n\t\t\tfor i in range(0, len(a) - 1):\r\n\t\t\t\tfor k in range(1, len(a)):\r\n\t\t\t\t\tif (k == i):\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tx_dist = (a[k] - a[i])\r\n\t\t\t\t\t\ty_dist = (b[k] - 
b[i])\r\n\t\t\t\t\t\td = math.sqrt(x_dist * x_dist + y_dist * y_dist)\r\n\t\t\t\t\t\tdistance.append(d)\r\n\t\t\t\t\t\tif (d <= 6912):\r\n\t\t\t\t\t\t\tnsd.append(i)\r\n\t\t\t\t\t\t\tnsd.append(k)\r\n\t\t\t\t\t\tnsd = list(dict.fromkeys(nsd))\r\n\t\t\t\t\t# print(nsd)\r\n\r\n\t\t\tcolor = (0, 0, 255)\r\n\t\t\tfor i in nsd:\r\n\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\r\n\t\t\t\ttext = \"Alert\"\r\n\t\t\t\tcv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\r\n\t\t\tcolor = (0, 255, 0)\r\n\t\t\tif len(idxs) > 0:\r\n\t\t\t\tfor i in idxs.flatten():\r\n\t\t\t\t\tif (i in nsd):\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\r\n\t\t\t\t\t\ttext = 'OK'\r\n\t\t\t\t\t\tcv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\r\n\r\n\t\ttext = \"Social Distancing Violators: {}\".format(len(nsd))\r\n\t\tcv2.putText(frame, text, (660, frame.shape[0] - 45),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)\r\n\r\n\t\tcv2.putText(frame, \"Covid Guard: Team TrojanWave\", (140, 45),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n\t\tcv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)\r\n\t\tcv2.putText(frame, \"COLOR CODE: RISK ANALYSIS\", (30, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)\r\n\t\tcv2.putText(frame, \"--- GREEN : SAFE\", (500, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\t\tcv2.putText(frame, \"--- RED: UNSAFE\", (1000, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\r\n\r\n\r\n\t\ttot_str = \"TOTAL: \" + str(len(idxs))\r\n\t\thigh_str = \"HIGH RISK: \" + str(len(nsd))\r\n\t\tlow_str = \"LOW RISK: \" + str(0)\r\n\t\tsafe_str = \"SAFE: \" + str(len(idxs)-len(nsd))\r\n\r\n\t\tsub_img = frame[H - 270: H , 0:240]\r\n\t\tblack_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0\r\n\r\n\t\tres = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)\r\n\r\n\t\tframe[H - 270:H, 0:240] = res\r\n\r\n\t\tcv2.putText(frame, tot_str, (10, H - 235),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)\r\n\t\tcv2.putText(frame, safe_str, (10, H - 200),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\r\n\t\tcv2.putText(frame, low_str, (10, H - 165),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 120, 255), 2)\r\n\t\tcv2.putText(frame, high_str, (10, H - 130),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 150), 2)\r\n\r\n\t\t#cv2.imshow(\"Social Distancing Detector\", frame)\r\n\r\n\t\tcv2.rectangle(frame, (10, H-100 ), (600, H-10), (170, 170, 170), 2)\r\n\t\tcv2.putText(frame, \"COLOR CODE: MASK DETECTION\", (40, H-40),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)\r\n\t\tcv2.putText(frame, \"--- RED : NO MASK\", (420, H-70),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\r\n\t\tcv2.putText(frame, \"--- GREEN : MASK\", (420, H-35),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\r\n\t\t# cv2.putText(frame, \"-- GREEN: SAFE\", (565, 150),\r\n\t\t# \t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\r\n\t\t# detect faces in the frame and determine if they are wearing a\r\n\t\t# face mask or not\r\n\t\t(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\r\n\r\n\t\t# loop over the detected face 
locations and their corresponding\r\n\t\t# locations\r\n\t\tfor (box, pred) in zip(locs, preds):\r\n\t\t\t# unpack the bounding box and predictions\r\n\t\t\t(startX, startY, endX, endY) = box\r\n\t\t\t(mask, withoutMask) = pred\r\n\t\t\t# determine the class label and color we'll use to draw\r\n\t\t\t# the bounding box and text\r\n\t\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\r\n\t\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\r\n\t\t\t# include the probability in the label\r\n\t\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\r\n\t\t\t# display the label and bounding box rectangle on the output\r\n\t\t\t# frame\r\n\t\t\tcv2.putText(frame, label, (startX, startY - 10),\r\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\r\n\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\r\n\r\n\r\n\t\t# show the output frame\r\n\t\tcv2.namedWindow('frame', cv2.WINDOW_NORMAL)\r\n\t\tcv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\r\n\t\tcv2.imshow('frame', frame)\r\n\t\tkey = cv2.waitKey(1) & 0xFF\r\n\t\t# if the `q` key was pressed, break from the loop\r\n\r\n\t\r\n\t\tif key == ord(\"q\"):\r\n\t\t\tbreak\r\n\r\n\r\n\r\n\t# do a bit of cleanup\r\n\tcv2.destroyAllWindows()\r\n\tvs.stop()\r\n\r\n"
] | [
[
"numpy.ones",
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.random.seed",
"numpy.argmax",
"numpy.array"
]
] |
fmamitrotta/pyNastran | [
"90f957887a4f68f8e58b07c15e1ac69c66b9c6f4"
] | [
"pyNastran/op2/tables/geom/ept.py"
] | [
"\"\"\"\ndefines readers for BDF objects in the OP2 EPT/EPTS table\n\"\"\"\n#pylint: disable=C0103,R0914\nfrom __future__ import annotations\nfrom struct import unpack, Struct\nfrom functools import partial\nfrom typing import Tuple, List, TYPE_CHECKING\n\nimport numpy as np\n\n#from pyNastran import is_release\nfrom pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSML\nfrom pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBEND, PBEAM3\nfrom pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP\nfrom pyNastran.bdf.cards.properties.bush import PBUSH, PBUSHT\nfrom pyNastran.bdf.cards.properties.damper import PDAMP, PVISC\nfrom pyNastran.bdf.cards.properties.properties import PFAST, PGAP\nfrom pyNastran.bdf.cards.properties.rods import PROD, PTUBE\nfrom pyNastran.bdf.cards.properties.shell import PSHEAR, PSHELL, PCOMP\nfrom pyNastran.bdf.cards.properties.solid import PSOLID\nfrom pyNastran.bdf.cards.properties.springs import PELAS, PELAST\n\nfrom pyNastran.bdf.cards.thermal.thermal import PCONV, PHBDY, PCONVM\n# PCOMPG, PBUSH1D, PBEAML, PBEAM3\nfrom pyNastran.op2.op2_interface.op2_reader import (\n mapfmt, reshape_bytes_block_size) # reshape_bytes_block,\nfrom .utils import get_minus1_start_end\nfrom .geom2 import DoubleCardError\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.op2.op2_geom import OP2Geom\n\n\nclass EPT:\n \"\"\"defines methods for reading op2 properties\"\"\"\n\n @property\n def size(self) -> int:\n return self.op2.size\n @property\n def factor(self) -> int:\n return self.op2.factor\n\n def _read_fake(self, data: bytes, n: int) -> int:\n return self.op2._read_fake(data, n)\n\n def read_ept_4(self, data: bytes, ndata: int):\n return self.op2._read_geom_4(self.ept_map, data, ndata)\n\n def __init__(self, op2: OP2Geom):\n self.op2 = op2\n self.ept_map = {\n (3201, 32, 55): ['NSM', self._read_nsm], # record 2\n (52, 20, 181): ['PBAR', self._read_pbar], # record 11 - buggy\n (9102, 91, 52): ['PBARL', self._read_pbarl], # record 12 - almost there...\n (2706, 27, 287): ['PCOMP', self._read_pcomp], # record 22 - buggy\n (302, 3, 46): ['PELAS', self._read_pelas], # record 39\n (2102, 21, 121): ['PGAP', self._read_pgap], # record 42\n (902, 9, 29): ['PROD', self._read_prod], # record 49\n (1002, 10, 42): ['PSHEAR', self._read_pshear], # record 50\n (2402, 24, 281): ['PSOLID', self._read_psolid], # record 51\n (2302, 23, 283): ['PSHELL', self._read_pshell], # record 52\n (1602, 16, 30): ['PTUBE', self._read_ptube], # record 56\n\n (5402, 54, 262): ['PBEAM', self._read_pbeam], # record 14 - not done\n (9202, 92, 53): ['PBEAML', self._read_pbeaml], # record 15\n (2502, 25, 248): ['PBEND', self._read_pbend], # record 16 - not done\n (1402, 14, 37): ['PBUSH', self._read_pbush], # record 19 - not done\n (3101, 31, 219): ['PBUSH1D', self._read_pbush1d], # record 20 - not done\n (152, 19, 147): ['PCONEAX', self._read_pconeax], # record 24 - not done\n (11001, 110, 411): ['PCONV', self._read_pconv], # record 25 - not done\n # record 26\n (202, 2, 45): ['PDAMP', self._read_pdamp], # record 27 - not done\n (2802, 28, 236): ['PHBDY', self._read_phbdy], # record 43 - not done\n (402, 4, 44): ['PMASS', self._read_pmass], # record 48\n (1802, 18, 31): ['PVISC', self._read_pvisc], # record 59\n (10201, 102, 400): ['PVAL', self._read_pval], # record 58 - not done\n (2606, 26, 289): ['VIEW', self._read_view], # record 62 - not done\n (3201, 32, 991) : ['NSM', self._read_nsm_2], # record\n (3301, 33, 992) : ['NSM1', self._read_nsm1], # record\n (3701, 37, 995) : ['NSML1', 
self._read_nsml1_nx], # record\n (3601, 36, 62): ['NSML1', self._read_nsml1_msc], # record 7\n (15006, 150, 604): ['PCOMPG', self._read_pcompg], # record\n\n (702, 7, 38): ['PBUSHT', self._read_pbusht], # record 1\n (3301, 33, 56): ['NSM1', self._read_fake], # record 3\n (3401, 34, 57) : ['NSMADD', self._read_fake], # record 5\n (3501, 35, 58): ['NSML', self._read_fake], # record 6\n (3501, 35, 994) : ['NSML', self._read_nsml],\n (1502, 15, 36): ['PAABSF', self._read_fake], # record 8\n (8300, 83, 382): ['PACABS', self._read_fake], # record 9\n (8500, 85, 384): ['PACBAR', self._read_fake], # record 10\n (5403, 55, 349): ['PBCOMP', self._read_pbcomp], # record 13\n (13301, 133, 509): ['PBMSECT', self._read_fake], # record 17\n (2902, 29, 420): ['PCONVM', self._read_pconvm], # record 26\n (1202, 12, 33): ['PDAMPT', self._read_pdampt], # record 28\n (8702, 87, 412): ['PDAMP5', self._read_pdamp5], # record 29\n (6802, 68, 164): ['PDUM8', self._read_fake], # record 37\n (6902, 69, 165): ['PDUM9', self._read_fake], # record 38\n (1302, 13, 34): ['PELAST', self._read_pelast], # record 41\n (12001, 120, 480): ['PINTC', self._read_fake], # record 44\n (12101, 121, 484): ['PINTS', self._read_fake], # record 45\n (4606, 46, 375): ['PLPLANE', self._read_plplane], # record 46\n (4706, 47, 376): ['PLSOLID', self._read_plsolid], # record 47\n (10301, 103, 399): ['PSET', self._read_pset], # record 57\n (3002, 30, 415): ['VIEW3D', self._read_fake], # record 63\n\n (13501, 135, 510) : ['PFAST', self._read_pfast_msc], # MSC-specific\n (3601, 36, 55) : ['PFAST', self._read_pfast_nx], # NX-specific\n (3801, 38, 979) : ['PPLANE', self._read_pplane],\n (11801, 118, 560) : ['PWELD', self._read_fake],\n (3401, 34, 993) : ['NSMADD', self._read_nsmadd],\n (9300, 93, 684) : ['ELAR', self._read_fake],\n (9400, 94, 685) : ['ELAR2', self._read_fake],\n (16006, 160, 903) : ['PCOMPS', self._read_fake],\n\n # MSC-specific\n (14602, 146, 692): ['PSLDN1', self._read_fake],\n (16502, 165, 916): ['PAXSYMH', self._read_fake],\n (13201, 132, 513): ['PBRSECT', self._read_fake],\n\n (13701, 137, 638): ['PWSEAM', self._read_fake],\n (7001, 70, 632): ['???', self._read_fake],\n (15106, 151, 953): ['PCOMPG1', self._read_fake],\n (3901, 39, 969): ['PSHL3D', self._read_fake],\n (17006, 170, 901): ['MATCID', self._read_fake],\n\n (9601, 96, 691): ['PJOINT', self._read_fake],\n (16502, 165, 916): ['???', self._read_fake],\n\n (9701, 97, 692): ['PJOINT2', self._read_fake],\n (13401, 134, 611): ['PBEAM3', self._read_pbeam3],\n (8901, 89, 905): ['PSOLCZ', self._read_fake],\n (9801, 98, 698): ['DESC', self._read_desc],\n #(9701, 97, 692): ['???', self._read_fake],\n #(9701, 97, 692): ['???', self._read_fake],\n #(9701, 97, 692): ['???', self._read_fake],\n\n }\n\n def _add_op2_property(self, prop):\n \"\"\"helper method for op2\"\"\"\n op2 = self.op2\n #if prop.pid > 100000000:\n #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))\n #print(str(prop)[:-1])\n ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')\n pid = prop.pid\n allow_overwrites = (\n ntables > 1 and\n pid in op2.properties and\n op2.properties[pid].type == prop.type)\n op2._add_methods._add_property_object(prop, allow_overwrites=allow_overwrites)\n\n def _add_op2_property_mass(self, prop):\n \"\"\"helper method for op2\"\"\"\n op2 = self.op2\n #if prop.pid > 100000000:\n #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))\n #print(str(prop)[:-1])\n ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')\n 
pid = prop.pid\n allow_overwrites = (\n ntables > 1 and\n pid in op2.properties_mass and\n op2.properties_mass[pid].type == prop.type)\n op2._add_methods._add_property_mass_object(prop, allow_overwrites=allow_overwrites)\n\n def _add_pconv(self, prop: PCONV) -> None:\n if prop.pconid > 100000000:\n raise RuntimeError('bad parsing pconid > 100000000...%s' % str(prop))\n self.op2._add_methods._add_convection_property_object(prop)\n\n# HGSUPPR\n\n def _read_desc(self, data: bytes, n: int) -> int:\n \"\"\"\n RECORD – DESC(9801,98,698)\n\n Word Name Type Description\n 1 DID I Description identification number\n 2 NWORDS I Number of words for the description string\n 3 DESC CHAR4 Description\n Words 3 repeats NWORDS times\n\n data = (1, 14, 'FACE CONTACT(1) ')\n \"\"\"\n op2 = self.op2\n assert self.size == 4, 'DESC size={self.size} is not supported'\n #op2.show_data(data[n:], types='ifs')\n struct_2i = Struct(op2._endian + b'2i')\n while n < len(data):\n\n datai = data[n:n+8]\n desc_id, nwords = struct_2i.unpack(datai)\n #print(desc_id, nwords)\n ndatai = 8 + nwords * 4\n word_bytes = data[n+8:n+ndatai]\n word = word_bytes.decode('ascii').rstrip()\n assert len(word_bytes) == nwords * 4\n #print('word_bytes =', word_bytes)\n op2.log.warning(f'geom skipping DESC={desc_id}: {word!r}')\n n += ndatai\n assert n == len(data), n\n return n\n\n def _read_nsml(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n RECORD – NSML(3501, 35, 994)\n\n Defines a set of lumped nonstructural mass by ID.\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP(2) CHAR4 Set of properties or elements\n 4 ID I Property of element identification number\n 5 VALUE RS Lumped nonstructural mass value\n Words 4 and 5 repeat until -1 occurs\n\n ints = (3, ELEMENT, 0, 200, 0.7, -1, 4, PSHELL, 0, 6401, 4.2, -1)\n floats = (3, ELEMENT, 0.0, 200, 0.7, -1, 4, PSHELL, 0.0, 6401, 4.2, -1)\n\n \"\"\"\n op2 = self.op2\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -1, ints[i1]\n sid = ints[i0]\n prop_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]\n #print(sid, prop_bytes)\n ids = ints[i0+4:i1:2].tolist()\n values = floats[i0+5:i1:2].tolist()\n #print(ids, values)\n assert len(ids) == len(values)\n nsm_type = prop_bytes.decode('latin1').rstrip()\n nsml = op2.add_nsml(sid, nsm_type, ids, values)\n #print(nsml)\n str(nsml)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSML'] = ncards\n return n\n\n def _read_nsmadd(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n (3401, 34, 993)\n\n RECORD – NSMADD(3401,34,993)\n Combines the nonstructural mass inputs.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 ID I Set of properties or elements\n Word 2 repeats until End of Record\n\n (1, 2, 3, 4, -1)\n \"\"\"\n op2 = self.op2\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n istart = [0] + list(iend + 1)\n size = self.size\n for (i0, i1) in zip(istart, iend):\n assert ints[i1] == -1, ints[i1]\n sid, *nsms = ints[i0:i1]\n nsmadd = op2.add_nsmadd(sid, nsms)\n #print(nsmadd)\n str(nsmadd)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSMADD'] = ncards\n return n\n\n def _read_nsml1_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n 
NSML1(3701, 37, 995)\n Alternate form of NSML entry. Defines lumped nonstructural mass entries by VALUE, ID list.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements\n 4 VALUE RS Lumped nonstructural mass value\n 5 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 6 ID I Property of element identification number\n Word 6 repeats until -1 occurs\n SPECOPT=2 All\n 6 ALL(2) CHAR4 Keyword ALL\n Words 6 and 7 repeat until -1 occurs\n SPECOPT=3 Thru range\n 6 ID1 I Starting identification number\n 7 THRU(2) CHAR4 Keyword THRU\n 9 ID2 I Ending identification number\n Words 6 through 9 repeat until -1 occurs\n SPECOPT=4 Thru range with by\n 6 ID1 I Starting identification number\n 7 THRU(2) CHAR4 Keyword THRU\n 9 ID2 I Ending identification number\n 10 BY(2) CHAR4 Keyword BY\n 12 N I Increment\n Words 6 through 12 repeat until -1 occurs\n\n data = (\n 3701, 37, 995,\n 1, ELEMENT, 466.2,\n 3, 249311, THRU, 250189, -1,\n 3, 250656, THRU, 251905, -1,\n 3, 270705, THRU, 275998, -1,\n 3, 332687, THRU, 334734, -1,\n -2,\n\n 2, ELEMENT, 77.7,\n 3, 225740, THRU 227065, -1,\n 3, 227602, THRU, 228898, -1,\n 3, 229435, THRU, 230743, -1,\n 3, 231280, THRU, 233789, -1,\n 3, 233922, THRU, 235132, -1,\n 3, 235265, THRU, 236463, -1,\n 3, 338071, THRU, 341134, -1, -2)\n \"\"\"\n #ints = (1, ELEMENT, 466.2,\n # 3, 249311, THRU, 250189, -1,\n # 3, 250656, THRU, 251905, -1,\n # 3, 270705, THRU, 275998, -1,\n # 3, 332687, THRU, 334734, -1,\n # -2,\n #\n # 2, ELEMENT, 77.7,\n # 3, 225740, THRU 227065, -1,\n # 3, 227602, THRU, 228898, -1,\n # 3, 229435, THRU, 230743, -1,\n # 3, 231280, THRU, 233789, -1,\n # 3, 233922, THRU, 235132, -1,\n # 3, 235265, THRU, 236463, -1,\n # 3, 338071, THRU, 341134, -1, -2)\n op2 = self.op2\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n iminus2 = np.where(ints == -2)[0]\n istart = [0] + list(iminus2[:-1] + 1)\n iend = iminus2\n #print(istart, iend)\n assert len(data[n:]) > 12, data[n:]\n #op2.show_data(data[n:], types='ifs')\n\n ncards = 0\n istart = [0] + list(iend + 1)\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -2, ints[i1]\n sid = ints[i0]\n nsm_type = data[n0+(i0+1)*size:n0+(i0+2)*size].decode('latin1').rstrip()\n value = float(floats[i0+3])\n #print(f'sid={sid} nsm_type={nsm_type} value={value}')\n\n iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]\n #print('-1', iminus1)\n #print('-2', iminus2)\n istart2 = [i0 + 4] + list(iminus1[:-1] + 1)\n iend2 = iminus1\n #print(istart2, iend2)\n\n for istarti, iendi in zip(istart2, iend2):\n #print(istarti, iendi)\n spec_opt = ints[istarti] # 4\n #print(f'ints[{istarti}] = spec_opt = {spec_opt}')\n if spec_opt == 1:\n # 6 ID I Property of element identification number\n\n ivalues = list(range(istarti, iendi))\n #print('ivalues =', ivalues)\n pid_eids = ints[ivalues].tolist()\n #print('pid_eids =', pid_eids)\n elif spec_opt == 3:\n # datai = (3, 249311, 'THRU ', 250189)\n #print(f'i0={i0}')\n #datai = data[n0+(i0+6)*size:n0+i1*size]\n #op2.show_data(datai)\n ids = ints[istarti:iendi]\n istart = ids[1]\n iend = ids[-1]\n pid_eids = list(range(istart, iend+1))\n else:\n raise NotImplementedError(spec_opt)\n\n if nsm_type == 'ELEM':\n nsm_type = 'ELEMENT'\n #for pid_eid in pid_eids:\n #nsml = op2.add_nsml1(sid, nsm_type, pid_eids, [value])\n assert len(pid_eids) > 0, pid_eids\n nsml1 = 
op2.add_nsml1(sid, nsm_type, value, pid_eids)\n #print(nsml1)\n str(nsml1)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSML'] = ncards\n return n\n\n def _read_nsml1_msc(self, data: bytes, n: int) -> int:\n r\"\"\"\n NSML1(3601, 36, 62)\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of property or elements\n 3 VALUE RS Lumped nonstructural mass value\n 4 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 5 IDs , =FLG1LIST in ixidlst.prm\n 6 ID I Property or element ID\n Word 6 repeats until End of Record\n SPECOPT=2 means ALL, =FLG1ALL in ixidlst.prm\n 5 ALL(2) CHAR4 Keyword ALL\n Words 5 through 6 repeat until End of Record\n SPECOPT=3 means THRU range, =FLG1THRU in ixidlst.prm\n 5 ID1 I Starting ID\n 6 THRU(2) CHAR4 Keyword THRU\n 8 ID2 I Ending ID\n Words 5 through 8 repeat until End of Record\n SPECOPT=4 means THRU range with BY, =FLG1THBY in ixidlst.prm\n 5 ID1 I Starting ID\n 6 THRU(2) CHAR4 Keyword THRU\n 8 ID2 I Ending ID\n 9 BY(2) CHAR4 Keyword BY\n 11 N I Increment\n Words 5 through 11 repeat until End of Record\n End SPECOPT\n Words 4 through max repeat until End of Record\n\n C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post2\\elsum15.op2\n\n data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n\n \"\"\"\n op2 = self.op2\n op2.log.info(f'geom skipping NSML1 in {op2.table_name}; ndata={len(data)-12}')\n #op2.show_data(data[n:], types='ifs')\n #bbb\n return len(data)\n\n def _read_nsm1(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM1(3301, 33, 992)\n\n Defines the properties of a nonstructural mass.\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements\n 4 ORIGIN I Entry origin\n 5 VALUE RS Nonstructural mass value\n 6 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 7 ID I\n Word 7 repeats until -1 occurs\n SPECOPT=2 All\n 7 ALL(2) CHAR4\n Words 7 and 8 repeat until -1 occurs\n SPECOPT=3 Thru range\n 7 ID I\n 8 THRU(2) CHAR4\n 10 ID I\n Words 7 through 10 repeat until -1 occurs\n SPECOPT=4 Thru range with by\n 7 ID I\n 8 THRU(2) CHAR4\n 10 ID I\n 11 BY(2) CHAR4\n 13 N I\n Words 7 through 13 repeat until -1 occurs\n\n data = (3, PCOMP, 0, 0.37, 2, ALL, -1,\n 4, ELEMENT, 2, 2.1, 1, 3301, -1)\n\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:], types='ifs')\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n assert ints[i1] == -1, ints[i1]\n # 1 SID I Set identification number\n sid = ints[i0]\n\n # 2 PROP CHAR4 Set of properties\n # 3 TYPE CHAR4 Set of elements\n # 4 ORIGIN I Entry origin\n # 5 VALUE RS Nonstructural mass value\n # 6 SPECOPT I Specification option\n nsm_type = data[n0+(i0+1)*size:n0+(i0+3)*size].decode('latin1').rstrip()\n zero_two = ints[i0+3]\n value = float(floats[i0+4])\n spec_opt = ints[i0+5]\n assert zero_two in [0, 2], zero_two\n #nii = 6\n #print(ints[i0+nii:i1])\n #print(floats[i0+nii:i1])\n #print(sid, nsm_type, value, spec_opt)\n\n iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]\n #print('-1', iminus1)\n #print('-2', iminus2)\n istart2 = [i0 + 5] + list(iminus1[:-1] + 1)\n iend2 = iminus1\n #print(istart2, iend2)\n\n if spec_opt == 1:\n # 7 ID I\n ids = ints[i0+6:i1]\n elif spec_opt == 2:\n word = data[n0+(i0+6)*size:n0+i1*size]\n ids = word\n elif spec_opt == 3: # thru\n # datai = (249311, 'THRU ', 250189)\n #datai = 
data[n0+(i0+6)*size:n0+i1*size]\n ids = ints[i0+6:i1]\n istart = ids[0]\n iend = ids[-1]\n ids = list(range(istart, iend+1))\n else:\n raise NotImplementedError(spec_opt)\n #print(sid, nsm_type, zero_two, value, ids)\n #if nsm_type == 'ELEM':\n #nsm_type = 'ELEMENT'\n #for pid_eid in pid_eids:\n #nsml = self.add_nsml1(sid, nsm_type, pid_eids, [value])\n nsm1 = op2.add_nsm1(sid, nsm_type, value, ids)\n #print(nsm1)\n str(nsm1)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSM1'] = ncards\n return n\n\n def _read_nsm(self, data: bytes, n: int) -> int:\n \"\"\"NSM\"\"\"\n op2 = self.op2\n n = op2.reader_geom2._read_dual_card(\n data, n,\n self._read_nsm_nx, self._read_nsm_msc,\n 'NSM', op2._add_methods._add_nsm_object)\n return n\n\n def _read_nsm_2(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n NSM(3201, 32, 991)\n\n RECORD – NSM(3201,32,991)\n Defines the properties of a nonstructural mass.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements <---- not right...it's an integer and not used...\n 4 ID I Property or element identification number\n 5 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n\n NSM,2,conrod,1007,0.3\n\n data = (2, CONROD, 0, 1007, 0.3, -1,\n 2, ELEMENT, 0, 200, 0.20, -1,\n 3, PSHELL, 0, 3301, 0.20, -1,\n 3, ELEMENT, 2, 200, 1.0, -1,\n 4, PSHELL, 2, 6401, 4.2, -1)\n \"\"\"\n op2 = self.op2\n n0 = n\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -1, ints[i1]\n sid = ints[i0]\n prop_type = data[n0+(i0+1)*size:n0+(i0+3)*size]\n elem_type = data[n0+(i0+3)*size:n0+(i0+4)*size]\n nsm_type = prop_type.decode('latin1').rstrip()\n dunno_int = ints[i0+3]\n #print(ints[i0+4:i1])\n #print(floats[i0+4:i1])\n ids = ints[i0+4:i1:2].tolist()\n values = floats[i0+5:i1:2].tolist()\n assert len(ids) == len(values)\n assert dunno_int in [0, 2], (sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)\n #print(sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)\n nsm = op2.add_nsm(sid, nsm_type, ids, values)\n #print(nsm[0])\n str(nsm)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSM'] = ncards\n return n\n\n def _read_nsm_msc(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM(3201,32,55) - the marker for Record 2\n\n MSC\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of property or elements\n 3 ID I Property or element identification number\n 4 VALUE RS Nonstructural mass value\n ORIGIN=0 NSM Bulk Data entry\n 5 ID I Property or element ID\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n ORIGIN=2 NSML Bulk Data entry\n 5 ID I Property or element ID\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n Words 3 through 4 repeat until End of Record\n \"\"\"\n op2 = self.op2\n properties = []\n struct1 = Struct(op2._endian + b'i 4s if')\n ndelta = 16\n\n i = 0\n ints = np.frombuffer(data[n:], op2.idtype).copy()\n floats = np.frombuffer(data[n:], op2.fdtype).copy()\n\n while n < len(data):\n edata = data[n:n+ndelta]\n out = struct1.unpack(edata)\n (sid, prop_set, pid, value) = out\n # 538976312\n assert pid < 100000000\n i += 4\n n += ndelta\n\n prop_set = prop_set.decode('utf8').rstrip(' ') # \\x00\n values = [value]\n 
#print('ints[i:]=', ints[i:])\n while ints[i] != -1:\n value2 = floats[i]\n values.append(value2)\n n += 4\n i += 1\n op2.log.info(\"MSC: NSM-sid=%s prop_set=%s pid=%s values=%s\" % (\n sid, prop_set, pid, values))\n prop = NSM.add_op2_data([sid, prop_set, pid, value])\n #op2._add_methods._add_nsm_object(prop)\n properties.append(prop)\n\n # handle the trailing -1\n i += 1\n n += 4\n return n, properties\n\n def _read_nsm_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM(3201,32,55) - the marker for Record 2\n\n 1 SID I Set identification number\n 2 PROP(2) CHAR4 Set of properties or elements\n 4 ORIGIN I Entry origin\n 5 ID I Property or element identification number\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n \"\"\"\n op2 = self.op2\n properties = []\n\n #NX: C:\\Users\\sdoyle\\Dropbox\\move_tpl\\nsmlcr2s.op2\n struct1 = Struct(op2._endian + b'i 8s ii f')\n ndelta = 24\n #op2.show_data(data[12:], 'ifs')\n\n i = 0\n ints = np.frombuffer(data[n:], op2.idtype).copy()\n floats = np.frombuffer(data[n:], op2.fdtype).copy()\n\n unused_packs = break_by_minus1(ints)\n #for pack in packs:\n #print(pack)\n\n #ipack = 0\n while n < len(data):\n #print('ints[i:]=', ints[i:].tolist())\n #i1, i2 = packs[ipack]\n #print('idata=%s' % idata[i1:i2])\n #print('fdata=%s' % fdata[i1:i2])\n #print(idata[i1:i2])\n edata = data[n:n+ndelta]\n out = struct1.unpack(edata)\n (sid, prop_set, origin, pid, value) = out\n # 538976312\n assert pid < 100000000\n i += 6\n n += ndelta\n\n prop_set = prop_set.decode('utf8').rstrip(' ') # \\x00\n pids = [pid]\n values = [value]\n #print('ints[i:]=', ints[i:].tolist())\n while ints[i] != -1:\n pid = ints[i]\n value2 = floats[i+1]\n assert pid != -1\n pids.append(pid)\n values.append(value2)\n n += 8\n i += 2\n\n for pid, value in zip(pids, values):\n if origin == 0:\n #op2.log.info(\"NX: NSM-sid=%s prop_set=%s pid=%s values=%s\" % (\n #sid, prop_set, pid, values))\n prop = NSM.add_op2_data([sid, prop_set, pid, value])\n elif origin == 2:\n #op2.log.info(\"NX: NSML-sid=%s prop_set=%s pid=%s values=%s\" % (\n #sid, prop_set, pid, values))\n prop = NSML.add_op2_data([sid, prop_set, pid, value])\n\n #print(prop.rstrip(), pid, value)\n #op2._add_methods._add_nsm_object(prop)\n properties.append(prop)\n #print('----')\n\n # handle the trailing -1\n i += 1\n n += 4\n #ipack += 1\n return n, properties\n\n# NSM1\n# NSML1\n# NSMADD\n# NSML\n# NSML1\n# PAABSF\n# PACABS\n# PACBAR\n\n def _read_pbar(self, data: bytes, n: int) -> int:\n \"\"\"\n PBAR(52,20,181) - the marker for Record 11\n .. 
warning:: this makes a funny property...\n\n MSC 2016/NX10\n\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 NSM RS Nonstructural mass per unit length\n 8 FE RS\n 9 C1 RS Stress recovery location at point C in element y-axis\n 10 C2 RS Stress recovery location at point C in element z-axis\n 11 D1 RS Stress recovery location at point D in element y-axis\n 12 D2 RS Stress recovery location at point D in element z-axis\n 13 E1 RS Stress recovery location at point E in element y-axis\n 14 E2 RS Stress recovery location at point E in element z-axis\n 15 F1 RS Stress recovery location at point F in element y-axis\n 16 F2 RS Stress recovery location at point F in element z-axis\n 17 K1 RS Area factor for shear in plane 1\n 18 K2 RS Area factor for shear in plane 2\n 19 I12 RS Area product of inertia for plane 1 and 2\n \"\"\"\n op2 = self.op2\n ntotal = 76 * self.factor # 19*4\n struct1 = Struct(mapfmt(op2._endian + b'2i17f', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, mid, a, I1, I2, J, nsm, fe, c1, c2, d1, d2,\n #e1, e2, f1, f2, k1, k2, I12) = out\n prop = PBAR.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PBAR'] = nentries\n return n\n\n def _read_pbarl(self, data: bytes, n: int) -> int:\n \"\"\"\n PBARL(9102,91,52) - the marker for Record 12\n TODO: buggy\n It's possible to have a PBARL and a PBAR at the same time.\n NSM is at the end of the element.\n \"\"\"\n op2 = self.op2\n valid_types = {\n 'ROD': 1,\n 'TUBE': 2,\n 'TUBE2': 2,\n 'I': 6,\n 'CHAN': 4,\n 'T': 4,\n 'BOX': 4,\n 'BAR': 2,\n 'CROSS': 4,\n 'H': 4,\n 'T1': 4,\n 'I1': 4,\n 'CHAN1': 4,\n 'Z': 4,\n 'CHAN2': 4,\n \"T2\": 4,\n 'BOX1': 6,\n 'HEXA': 3,\n 'HAT': 4,\n 'HAT1': 5,\n 'DBOX': 10, # was 12\n #'MLO TUBE' : 2,\n } # for GROUP=\"MSCBML0\"\n\n size = self.size\n ntotal = 28 * self.factor # 7*4 - ROD - shortest entry...could be buggy... 
# TODO fix this\n if size == 4:\n struct1 = Struct(op2._endian + b'2i 8s 8s f')\n else:\n struct1 = Struct(op2._endian + b'2q 16s 16s d')\n\n #nentries = (len(data) - n) // ntotal\n #print(self.show_ndata(80))\n ndata = len(data)\n\n while ndata - n > ntotal:\n edata = data[n:n+ntotal]\n n += ntotal\n\n out = struct1.unpack(edata)\n (pid, mid, group, beam_type, value) = out\n if pid > 100000000 or pid < 1:\n op2.log.debug(\" pid=%s mid=%s group=%r beam_type=%r value=%s\" % (\n pid, mid, group, beam_type, value))\n raise RuntimeError('bad parsing...')\n\n beam_type = reshape_bytes_block_size(beam_type, size=size)\n group = reshape_bytes_block_size(group, size=size)\n data_in = [pid, mid, group, beam_type, value]\n\n expected_length = valid_types[beam_type]\n iformat = op2._endian + b'%if' % expected_length\n\n ndelta = expected_length * 4\n dims_nsm = list(unpack(iformat, data[n:n+ndelta]))\n data_in += dims_nsm\n #print(\" pid=%s mid=%s group=%r beam_type=%r value=%s dims_nsm=%s\" % (\n #pid, mid, group, beam_type, value, dims_nsm))\n\n # TODO why do i need the +4???\n # is that for the nsm?\n #min_len = expected_length * 4 + 4\n #if len(data)\n #data = data[n + expected_length * 4 + 4:]\n n += ndelta\n\n #prin( \"len(out) = \",len(out)))\n #print(\"PBARL = %s\" % data_in)\n prop = PBARL.add_op2_data(data_in) # last value is nsm\n pid = prop.pid\n if pid in op2.properties:\n #op2.log.debug(\"removing:\\n%s\" % op2.properties[pid])\n op2._type_to_id_map['PBAR'].remove(pid)\n del op2.properties[pid]\n self._add_op2_property(prop)\n #op2.properties[pid] = prop\n #print(prop.get_stats())\n #print(op2.show_data(data[n-8:-100]))\n\n # the PBARL ends with a -1 flag\n #value, = unpack(op2._endian + b'i', data[n:n+4])\n n += 4 * self.factor\n if len(op2._type_to_id_map['PBAR']) == 0 and 'PBAR' in op2.card_count:\n del op2._type_to_id_map['PBAR']\n del op2.card_count['PBAR']\n op2.increase_card_count('PBARL')\n #assert len(data) == n\n if self.size == 8:\n n += 16\n #n += 8 # same for 32/64 bit - not 100% that it's always active\n return n\n\n def _read_pbcomp(self, data: bytes, n: int) -> int:\n \"\"\"\n PBCOMP(5403, 55, 349)\n\n pid mid A I1 I2 I12 J NSM\n PBCOMP 3 2 2.00E-4 6.67E-9 1.67E-9 0.0 4.58E-9 0.0 +\n pid mid\n floats = (3, 2, 0.0002, 6.67e-09, 1.67e-09, 0.0, 4.58e-09, 0.0, 1.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n ints = (3, 2, 0.0002, 6.67E-9, 1.67E-9, 0, 4.58E-9, 0, 1.0, 1.0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n\n \"\"\"\n op2 = self.op2\n struct1 = Struct(mapfmt(op2._endian + b'2i 12f i', self.size))\n struct2 = Struct(mapfmt(op2._endian + b'3f 2i', self.size))\n nproperties = 0\n ntotal1 = 60 * self.factor # 4*15\n ntotal2 = 20 * self.factor\n\n ndata = len(data)\n #print(ntotal1, ntotal2)\n if self.factor == 2:\n op2.show_data(data[12*self.factor:], types='qd')\n #print(len(data[12*self.factor:]))\n while n < ndata:\n #op2.log.debug(f\"n={n} ndata={ndata}\")\n edata = data[n:n+ntotal1]\n #if len(edata) == ntotal1:\n data1 = struct1.unpack(edata)\n #else:\n #op2.show_data(edata, types='qdi')\n #n += ntotal2\n #continue\n nsections = data1[-1]\n if op2.is_debug_file:\n (pid, mid, a, i1, i2, i12, j, nsm, k1, k2,\n m1, m2, n1, n2, unused_nsections) = data1\n op2.log.info(f'PBCOMP pid={pid} mid={mid} nsections={nsections} '\n f'k1={k1} k2={k2} m=({m1},{m2}) n=({n1},{n2})\\n')\n #if pid > 0 and nsections == 0:\n #print('n1')\n #n += ntotal1\n 
#continue\n #if pid == 0 and nsections == 0:\n #print('n2')\n #n += ntotal2\n #continue\n\n data2 = []\n n += ntotal1\n if nsections in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:\n # 16 Y RS Lumped area location along element's y-axis\n # 17 Z RS Lumped area location along element's z-axis\n # 18 C RS Fraction of the total area for the lumped area\n # 19 MID I Material identification number\n # 20 UNDEF None\n # Words 16 through 20 repeat NSECT times\n for unused_i in range(nsections):\n datai = data[n:n+ntotal2]\n xi, yi, ci, mid, unused_null = struct2.unpack(datai)\n data2.append((xi, yi, ci, mid))\n n += ntotal2\n else:\n op2.log.error(f'PBCOMP={data1[0]} has no sections; check your bdf')\n return n\n #raise NotImplementedError('PBCOMP nsections=%r' % nsections)\n\n if op2.is_debug_file:\n op2.binary_debug.write(' PBCOMP: %s\\n' % str([data1, data2]))\n msg = (\n ' i=%-2s so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n 'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (\n nsections, None, -9999., a, i1, i2, i12, j, nsm,\n None, None, None, None, None, None, None, None,)\n )\n op2.log.debug(msg)\n #op2.log.debug(data1)\n #op2.log.debug(data2)\n\n data_in = [data1, data2]\n prop = PBCOMP.add_op2_data(data_in)\n pid = data1[0]\n if pid in op2.properties:\n op2._type_to_id_map['PBEAM'].remove(pid)\n del op2.properties[pid]\n\n self._add_op2_property(prop)\n nproperties += 1\n #print(f\"n={n} ndata={ndata}\")\n assert nproperties > 0, 'PBCOMP nproperties=%s' % (nproperties)\n if len(op2._type_to_id_map['PBEAM']) == 0 and 'PBEAM' in op2.card_count:\n del op2._type_to_id_map['PBEAM']\n del op2.card_count['PBEAM']\n op2.card_count['PBCOMP'] = nproperties\n return n\n\n def _read_pbeam(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEAM(5402,54,262) - the marker for Record 14\n .. 
todo:: add object\n \"\"\"\n op2 = self.op2\n cross_section_type_map = {\n 0 : 'variable',\n 1 : 'constant',\n 2 : '???',\n }\n\n struct1 = Struct(mapfmt(op2._endian + b'4if', self.size))\n struct2 = Struct(mapfmt(op2._endian + b'16f', self.size))\n struct3 = Struct(mapfmt(op2._endian + b'16f', self.size))\n unused_ntotal = 768 # 4*(5+16*12)\n #nproperties = (len(data) - n) // ntotal\n #assert nproperties > 0, 'ndata-n=%s n=%s datai\\n%s' % (len(data)-n, n, op2.show_data(data[n:100+n]))\n ndata = len(data)\n #op2.show_data(data[12:], 'if')\n #assert ndata % ntotal == 0, 'ndata-n=%s n=%s ndata%%ntotal=%s' % (len(data)-n, n, ndata % ntotal)\n nproperties = 0\n\n ntotal1 = 20 * self.factor\n ntotal2 = 64 * self.factor\n while n < ndata:\n #while 1: #for i in range(nproperties):\n edata = data[n:n+ntotal1]\n n += ntotal1\n data_in = list(struct1.unpack(edata))\n #if op2.is_debug_file:\n #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s\\n' % tuple(data_in))\n (pid, unused_mid, unused_nsegments, ccf, unused_x) = data_in\n #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s' % tuple(data_in))\n\n # Constant cross-section flag: 1=yes and 0=no\n # what is 2?\n if ccf not in [0, 1, 2]:\n msg = (' PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s; '\n 'ccf must be in [0, 1, 2]\\n' % tuple(data_in))\n raise ValueError(msg)\n\n cross_section_type = cross_section_type_map[ccf]\n #print('cross_section_type = %s' % cross_section_type)\n\n is_pbcomp = False\n is_bad_so = False\n\n so = []\n xxb = []\n for i in range(11):\n edata = data[n:n+ntotal2]\n if len(edata) != ntotal2:\n endpack = []\n raise RuntimeError(f'PBEAM unexpected length i={i:d}...')\n n += ntotal2\n pack = struct2.unpack(edata)\n (soi, xxbi, a, i1, i2, i12, j, nsm, c1, c2,\n d1, d2, e1, e2, f1, f2) = pack\n xxb.append(xxbi)\n so.append(soi)\n\n if soi == 0.0:\n so_str = 'NO'\n elif soi == 1.0:\n so_str = 'YES'\n else:\n so_str = str(soi)\n is_bad_so = True\n #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; soi not in 0.0 or 1.0' % (\n #pid, i, xxb, soi)\n #raise NotImplementedError(msg)\n\n #if xxb != 0.0:\n #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; xxb not in 0.0 or 1.0' % (\n #pid, i, xxb, soi)\n #raise NotImplementedError(msg)\n\n pack2 = (so_str, xxbi, a, i1, i2, i12, j, nsm, c1, c2,\n d1, d2, e1, e2, f1, f2)\n data_in.append(pack2)\n if op2.is_debug_file:\n op2.binary_debug.write(f' {pack}\\n')\n msg = (\n ' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n 'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))\n )\n op2.binary_debug.write(msg)\n #msg = (\n #' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n #'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))\n #)\n #print(msg)\n\n edata = data[n:n+ntotal2]\n if len(edata) != ntotal2:\n endpack = []\n raise RuntimeError('PBEAM unexpected length 2...')\n endpack = struct3.unpack(edata)\n n += ntotal2\n\n assert len(endpack) == 16, endpack\n #(k1, k2, s1, s2, nsia, nsib, cwa, cwb, # 8\n #m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b) = endpack # 8 -> 16\n if op2.is_debug_file:\n op2.binary_debug.write(' k=[%s,%s] s=[%s,%s] nsi=[%s,%s] cw=[%s,%s] '\n 'ma=[%s,%s] mb=[%s,%s] na=[%s,%s] nb=[%s,%s]' % (\n tuple(endpack)))\n data_in.append(endpack)\n\n if is_bad_so:\n #if soi < 0.:\n xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])\n so_str = ', '.join(['%g' % soi for soi in so])\n msg = (f'PBEAM pid={pid} i={i} soi=[{so_str}]; '\n 'soi not 0.0 or 1.0; assuming PBCOMP & dropping')\n op2.log.error(msg)\n is_pbcomp = True\n\n if min(xxb) 
< 0.0 or max(xxb) > 1.0:\n xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])\n msg = (f'PBEAM pid={pid} i={i} x/xb=[{xxb_str}]; '\n 'x/xb must be between 0.0 and 1.0; assuming PBCOMP & dropping')\n op2.log.error(msg)\n is_pbcomp = True\n\n if is_pbcomp:\n continue\n if pid in op2.properties:\n if op2.properties[pid].type == 'PBCOMP':\n continue\n\n prop = PBEAM.add_op2_data(data_in)\n nproperties += 1\n self._add_op2_property(prop)\n if nproperties:\n op2.card_count['PBEAM'] = nproperties\n return n\n\n def _read_pbeaml(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEAML(9202,92,53)\n\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 GROUP(2) CHAR4 Cross-section group name\n 5 TYPE(2) CHAR4 Cross section type\n 7 VALUE RS Cross section values for XXB, SO, NSM, and dimensions\n Word 7 repeats until (-1) occurs\n \"\"\"\n op2 = self.op2\n #strs = numpy.core.defchararray.reshapesplit(data, sep=\",\")\n #ints = np.frombuffer(data[n:], self._uendian + 'i').copy()\n #floats = np.frombuffer(data[n:], self._uendian + 'f').copy()\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n size = self.size\n nproperties = len(istart)\n if size == 4:\n struct1 = Struct(op2._endian + b'2i 8s 8s')\n else:\n struct1 = Struct(op2._endian + b'2q 16s 16s')\n\n for unused_i, (istarti, iendi) in enumerate(zip(istart, iend)):\n idata = data[n+istarti*size : n+(istarti+6)*size]\n pid, mid, group, beam_type = struct1.unpack(idata)\n group = group.decode('latin1').strip()\n beam_type = beam_type.decode('latin1').strip()\n fvalues = floats[istarti+6: iendi]\n if op2.is_debug_file:\n op2.binary_debug.write(' %s\\n' % str(fvalues))\n op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')\n op2.log.debug(fvalues)\n #op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')\n data_in = [pid, mid, group, beam_type, fvalues]\n prop = PBEAML.add_op2_data(data_in)\n if pid in op2.properties:\n # this is a fake PSHELL\n propi = op2.properties[pid]\n assert propi.type in ['PBEAM'], propi.get_stats()\n nproperties -= 1\n continue\n self._add_op2_property(prop)\n if nproperties:\n op2.card_count['PBEAML'] = nproperties\n return len(data)\n\n def _read_pbend(self, data: bytes, n: int) -> int:\n \"\"\"PBEND\"\"\"\n op2 = self.op2\n n = op2.reader_geom2._read_dual_card(\n data, n,\n self._read_pbend_nx, self._read_pbend_msc,\n 'PBEND', op2._add_methods._add_property_object)\n return n\n\n def _read_pbend_msc(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEND\n\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 FSI I flexibility and stress intensification factors\n 8 RM RS Mean cross-sectional radius of the curved pipe\n 9 T RS Wall thickness of the curved pipe\n 10 P RS Internal pressure\n 11 RB RS Bend radius of the line of centroids\n 12 THETAB RS Arc angle of element\n 13 C1 RS Stress recovery location at point C in element y-axis\n 14 C2 RS Stress recovery location at point C in element z-axis\n 15 D1 RS Stress recovery location at point D in element y-axis\n 16 D2 RS Stress recovery location at point D in element z-axis\n 17 E1 RS Stress recovery location at point E in element y-axis\n 18 E2 RS Stress recovery location at point E in element z-axis\n 19 F1 RS 
Stress recovery location at point F in element y-axis\n 20 F2 RS Stress recovery location at point F in element z-axis\n 21 K1 RS Area factor for shear in plane 1\n 22 K2 RS Area factor for shear in plane 2\n 23 NSM RS Nonstructural mass per unit length\n 24 RC RS Radial offset of the geometric centroid\n 25 ZC RS Offset of the geometric centroid\n 26 DELTAN I Radial offset of the neutral axis from the geometric\n centroid\n \"\"\"\n op2 = self.op2\n ntotal = 104 # 26*4\n struct1 = Struct(op2._endian + b'2i 4f i 18f f') # delta_n is a float, not an integer\n nproperties = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n properties = []\n for unused_i in range(nproperties):\n edata = data[n:n+104]\n out = struct1.unpack(edata)\n (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,\n delta_n) = out\n beam_type = fsi\n\n if (area, rm, t, p) == (0., 0., 0., 0.):\n area = None\n rm = None\n t = None\n p = None\n delta_n = None\n beam_type = 2\n if delta_n == 0:\n #: Radial offset of the neutral axis from the geometric\n #: centroid, positive is toward the center of curvature\n delta_n = None\n pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,\n nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)\n #print(pbend)\n pbend.validate()\n\n properties.append(pbend)\n n += ntotal\n return n, properties\n\n def _read_pbend_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEND\n\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 FSI I Flexibility and stress intensification factors\n 8 RM RS Mean cross-sectional radius of the curved pipe\n 9 T RS Wall thickness of the curved pipe\n 10 P RS Internal pressure\n 11 RB RS Bend radius of the line of centroids\n 12 THETAB RS Arc angle of element\n 13 C1 RS Stress recovery location at point C in element y-axis\n 14 C2 RS Stress recovery location at point C in element z-axis\n 15 D1 RS Stress recovery location at point D in element y-axis\n 16 D2 RS Stress recovery location at point D in element z-axis\n 17 E1 RS Stress recovery location at point E in element y-axis\n 18 E2 RS Stress recovery location at point E in element z-axis\n 19 F1 RS Stress recovery location at point F in element y-axis\n 20 F2 RS Stress recovery location at point F in element z-axis\n 21 K1 RS Area factor for shear in plane 1\n 22 K2 RS Area factor for shear in plane 2\n 23 NSM RS Nonstructural mass per unit length\n 24 RC RS Radial offset of the geometric centroid\n 25 ZC RS Offset of the geometric centroid\n 26 DELTAN RS Radial offset of the neutral axis from the geometric\n centroid\n 27 SACL RS Miter spacing at center line.\n 28 ALPHA RS One-half angle between the adjacent miter axis\n (Degrees).\n 29 FLANGE I For FSI=5, defines the number of flanges attached.\n 30 KX RS For FSI=6, the user defined flexibility factor for the\n torsional moment.\n 31 KY RS For FSI=6, the user defined flexibility factor for the\n out-of-plane bending moment.\n 32 KZ RS For FSI=6, the user defined flexbility factor for the\n in-plane bending moment.\n 33 Not used\n \"\"\"\n op2 = self.op2\n #op2.log.info('geom skipping PBEND in EPT')\n #return len(data)\n ntotal = 132 # 33*4\n struct1 = Struct(op2._endian + b'2i 4f i 21f i 4f')\n nproperties = 
(len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n properties = []\n for unused_i in range(nproperties):\n edata = data[n:n+132]\n out = struct1.unpack(edata)\n (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,\n delta_n, unused_sacl, unused_alpha, unused_flange,\n unused_kx, unused_ky, unused_kz, unused_junk,) = out\n beam_type = fsi\n\n pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,\n nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)\n pbend.validate()\n properties.append(pbend)\n n += ntotal\n return n, properties\n\n# PBMSECT\n# PBRSECT\n\n def _read_pbush(self, data: bytes, n: int) -> int:\n \"\"\"\n The PBUSH card is different between MSC and NX Nastran.\n\n DMAP NX 11\n ----------\n NX has 23 fields in NX 11-NX 2019.2 (same as MSC 2005)\n NX has 18 fields in the pre-2001 format\n\n DMAP MSC 2005\n -------------\n MSC has 23 fields in 2005\n MSC has 18 fields in the pre-2001 format\n\n DMAP MSC 2016\n -------------\n MSC has 24 fields in 2016.1\n MSC has 18 fields in the pre-2001 format\n\n DMAP MSC 2021\n -------------\n MSC has 27 fields in 2021\n\n \"\"\"\n op2 = self.op2\n card_name = 'PBUSH'\n card_obj = PBUSH\n methods = {\n 72 : self._read_pbush_nx_72, # 72=4*18\n 92 : self._read_pbush_msc_92, # 92=4*23\n 96 : self._read_pbush_msc_96, # 96=4*24\n 108 : self._read_pbush_msc_108, # 108=4*27\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, self._add_op2_property,\n methods, data, n)\n except DoubleCardError:\n nx_method = partial(self._read_pbush_nx_72, card_obj)\n msc_method = partial(self._read_pbush_msc_92, card_obj)\n n = op2.reader_geom2._read_dual_card(\n data, n,\n nx_method, msc_method,\n card_name, self._add_op2_property)\n\n # we're listing nx twice because NX/MSC used to be consistent\n # the new form for MSC is not supported\n #n = self._read_dual_card(data, n, self._read_pbush_nx, self._read_pbush_msc,\n #'PBUSH', self._add_op2_property)\n return n\n\n def _read_pbush_nx_72(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"\n PBUSH(1402,14,37) - 18 fields\n legacy MSC/NX format\n \"\"\"\n op2 = self.op2\n ntotal = 72 * self.factor\n struct1 = Struct(mapfmt(op2._endian + b'i17f', self.size))\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, sa, st, ea, et) = out\n #op2.log.debug(out)\n assert pid > 0, pid\n g2 = g3 = g4 = g5 = g6 = g1\n data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6, sa, st, ea, et)\n prop = PBUSH.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_92(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"PBUSH(1402,14,37) - 23 fields\n\n MSC 2005r2 to <MSC 2016\n \"\"\"\n op2 = self.op2\n ntotal = 92 * self.factor # 23*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f', self.size))\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} 
leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_96(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"PBUSH(1402,14,37) - 24 fields\n\n MSC 2016.1? to 2020\n \"\"\"\n op2 = self.op2\n ntotal = 96 * self.factor # 24*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f f', self.size))\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et, mass) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_108(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"\n PBUSH(1402,14,37) - 27 fields\n MSC 2021 to current\n\n ints = (1402, 14, 37, 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0,\n -1577048263, -1577048263, -1577048263, -1577048263, -1577048263, 1065353216, 1065353216, 1065353216, 1065353216, 0, 0, 0, 0)\n floats = (1402, 14, 37,\n 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0.0,\n -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 108 * self.factor # 27*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f 4f', self.size))\n #op2.show_data(data, types='ifs')\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n str(prop)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush1d(self, data: bytes, n: int) -> int:\n \"\"\"\n Record 18 -- PBUSH1D(3101,31,219)\n\n 1 PID I Property identification number\n 2 K RS Stiffness\n 3 C RS Viscous Damping\n 4 M RS Mass\n 5 ALPHA RS Temperature coefficient\n 6 SA RS Stress recovery coefficient\n 7 EA/SE RS Strain recovery coefficient\n\n 8 TYPEA I Shock data type:0=Null, 1=Table, 2=Equation\n 9 CVT RS Coefficient of translation velocity tension\n 10 CVC RS Coefficient of translation velocity compression\n 11 EXPVT RS Exponent of velocity tension\n 12 EXPVC RS Exponent of velocity compression\n 13 IDTSU I TABLEDi or DEQATN entry identification number for scale factor vs displacement\n 14 IDTCU I DEQATN entry identification number for scale factor vs displacement\n 15 IDTSUD I DEQATN entry identification number for derivative tension\n 16 IDCSUD I DEQATN entry identification 
number for derivative compression\n\n 17 TYPES I Spring data type: 0=Null, 1=Table, 2=Equation\n 18 IDTS I TABLEDi or DEQATN entry identification number for tension compression\n 19 IDCS I DEQATN entry identification number for compression\n 20 IDTDU I DEQATN entry identification number for scale factor vs displacement\n 21 IDCDU I DEQATN entry identification number for force vs displacement\n\n 22 TYPED I Damper data type: 0=Null, 1=Table, 2=Equation\n 23 IDTD I TABLEDi or DEQATN entry identification number for tension compression\n 24 IDCD I DEQATN entry identification number for compression\n 25 IDTDV I DEQATN entry identification number for scale factor versus velocity\n 26 IDCDV I DEQATN entry identification number for force versus velocity\n\n 27 TYPEG I General data type: 0=Null, 1=Table, 2=Equation\n 28 IDTG I TABLEDi or DEQATN entry identification number for tension compression\n 29 IDCG I DEQATN entry identification number for compression\n 30 IDTDU I DEQATN entry identification number for scale factor versus displacement\n 31 IDCDU I DEQATN entry identification number for force versus displacement\n 32 IDTDV I DEQATN entry identification number for scale factor versus velocity\n 33 IDCDV I DEQATN entry identification number for force vs velocity\n\n 34 TYPEF I Fuse data type: 0=Null, 1=Table\n 35 IDTF I TABLEDi entry identification number for tension\n 36 IDCF I TABLEDi entry identification number for compression\n\n 37 UT RS Ultimate tension\n 38 UC RS Ultimate compression\n \"\"\"\n op2 = self.op2\n type_map = {\n 0 : None, # NULL\n 1 : 'TABLE',\n 2 : 'EQUAT',\n }\n ntotal = 152 * self.factor # 38*4\n struct1 = Struct(mapfmt(op2._endian + b'i 6f i 4f 24i 2f', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid, k, c, m, unused_alpha, sa, se,\n typea, cvt, cvc, expvt, expvc, idtsu, idtcu, idtsud, idcsud,\n types, idts, idcs, idtdus, idcdus,\n typed, idtd, idcd, idtdvd, idcdvd,\n typeg, idtg, idcg, idtdug, idcdug, idtdvg, idcdvg,\n typef, idtf, idcf,\n unused_ut, unused_uc) = out\n # test_op2_other_05\n #pbush1d, 204, 1.e+5, 1000., , , , , , +pb1\n #+pb1, spring, table, 205, , , , , , +pb2\n #+pb2, damper, table, 206\n #pid=204 k=100000.0 c=1000.0 m=0.0 sa=nan se=nan\n\n\n msg = f'PBUSH1D pid={pid} k={k} c={c} m={m} sa={sa} se={se}'\n optional_vars = {}\n typea_str = type_map[typea]\n types_str = type_map[types]\n typed_str = type_map[typed]\n unused_typeg_str = type_map[typeg]\n unused_typef_str = type_map[typef]\n\n if min([typea, types, typed, typeg, typef]) < 0:\n raise RuntimeError(f'typea={typea} types={types} typed={typed} typeg={typeg} typef={typef}')\n if typea in [1, 2]: # SHOCKA?\n #pbush1d, 204, 1.e+5, 1000., , , , , , +pb4\n #+pb4, shocka, table, 1000., , 1., , 214, , +pb41\n #+pb41, spring, table, 205\n\n idts = idtsu # if typea_str == 'TABLE' else 0\n idets = idtsu # if typea_str == 'EQUAT' else 0\n optional_vars['SHOCKA'] = [typea_str, cvt, cvc, expvt, expvc,\n idts, idets, idtcu, idtsud, idcsud]\n #(shock_type, shock_cvt, shock_cvc, shock_exp_vt, shock_exp_vc,\n #shock_idts, shock_idets, shock_idecs, shock_idetsd, shock_idecsd\n #)\n #print('shock_idts, shock_idets', typea_str, idtsu, idtsu)\n msg += (\n f' SHOCKA type={typea} cvt={cvt} cvc={cvc} expvt={expvt} expvc={expvc}\\n'\n f' idtsu={idtsu} (idts={idts} idets={idets}) idtcu={idtcu} idtsud={idtsud} idcsud={idcsud}')\n if types in [1, 2]: # SPRING: Spring data type: 0=Null, 1=Table, 2=Equation\n #(spring_type, 
spring_idt, spring_idc, spring_idtdu, spring_idcdu) = values\n # SPRING, TYPE IDT IDC IDTDU IDCDU\n optional_vars['SPRING'] = [types_str, idts, idcs, idtdus, idcdus]\n msg += f' SPRING type={types} idt={idts} idc={idcs} idtdu={idtdus} idcdu={idcdus}'\n if typed in [1, 2]: # Damper data type: 0=Null, 1=Table, 2=Equation\n optional_vars['DAMPER'] = [typed_str, idtd, idcd, idtdvd, idcdvd]\n msg += f' DAMPER type={typed} idt={idtd} idc={idtd} idtdv={idtdvd} idcdv={idcdvd}'\n if typeg in [1, 2]: # general, GENER?: 0=Null, 1=Table 2=Equation\n # C:\\NASA\\m4\\formats\\git\\examples\\move_tpl\\ar29scbt.bdf\n #pbush1d, 206, 1.e+3, 10., , , , , , +pb6\n #+pb6, gener, equat, 315, , 3015, , 3016\n msg += f' GENER type={typeg} idt={idtg} idc={idcg} idtdu={idtdug} idcdu={idcdug} idtdv={idtdvg} idcdv={idcdvg}'\n optional_vars['GENER'] = [idtg, idcg, idtdug, idcdug, idtdvg, idcdvg]\n if typef in [1, 2]: # Fuse data type: 0=Null, 1=Table\n raise NotImplementedError(f'typef={typef} idtf={idtf} idcf={idcf}')\n\n if op2.is_debug_file:\n op2.binary_debug.write(msg)\n\n pbush1d = op2.add_pbush1d(pid, k=k, c=c, m=m, sa=sa, se=se,\n optional_vars=optional_vars,)\n str(pbush1d)\n n += ntotal\n op2.card_count['PBUSH1D'] = nentries\n return n\n\n #def _read_pbusht(self, data: bytes, n: int) -> int:\n #\"\"\"reads the PBUSHT(702, 7, 38)\"\"\"\n #n, props = self._read_pbusht_nx(data, n)\n #for prop in props:\n ##print(prop)\n #op2._add_pbusht_object(prop)\n #return n\n\n def _read_pbusht(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 12 / MSC 2005\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID(6) I TABLEDi entry identification number for structural damping\n 20 TKNID(6) I TABLEDi entry identification numbers for force versus deflection\n\n old style\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID I TABLEDi entry identification number for structural damping\n 15 TKNID(6) I TABLEDi entry IDs for force versus deflection\n \"\"\"\n op2 = self.op2\n card_name = 'PBUSHT'\n card_obj = PBUSHT\n methods = {\n 80 : self._read_pbusht_80,\n 100 : self._read_pbusht_100,\n 136 : self._read_pbusht_136,\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, op2._add_methods._add_pbusht_object,\n methods, data, n)\n except DoubleCardError:\n raise\n op2.log.warning(f'try-except {card_name}')\n #n = self._read_split_card(data, n,\n #self._read_cquad8_current, self._read_cquad8_v2001,\n #card_name, self.add_op2_element)\n #nelements = op2.card_count['CQUAD8']\n #op2.log.debug(f'nCQUAD8 = {nelements}')\n\n #n = self._read_dual_card(data, n, self._read_ctriax_8, self._read_ctriax_9,\n #'CTRIAX', self.add_op2_element)\n return n\n\n def _read_pbusht_nx_old(self, data: bytes, n: int) -> int:\n op2 = self.op2\n #op2.show_data(data[12:])\n ndata = (len(data) - n) // self.factor\n\n if ndata % 100 == 0 and ndata % 80 == 0:\n op2.log.warning(f\"skipping PBUSHT in EPT because nfields={ndata//4}, which is \"\n 'nproperties*25 or nproperties*20')\n return len(data), []\n if ndata % 100 == 0:\n n, props = self._read_pbusht_100(data, n)\n elif ndata % 80 == 0:\n n, props = self._read_pbusht_80(data, n)\n else:\n # C:\\MSC.Software\\msc_nastran_runs\\mbsh14.op2\n # ints = (1,\n # 51, 51, 0, 0, 
0, 0,\n # 61, 61, 0, 0, 0, 0,\n # 0, 0, 0, 0, 0, 0,\n # 0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0,\n # 7,\n # 51, 51, 0, 0, 0, 0,\n # 61, 61, 0, 0, 0, 0,\n # 0, 0, 0, 0, 0, 0,\n # 0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0)\n # strings = (b\"1 51 51 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00=\\x00\\x00\\x00=\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xac\\xc5'7\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x07\\x00\\x00\\x003\\x00\\x00\\x003\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00=\\x00\\x00\\x00=\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xac\\xc5'7\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\",)\n # ints = (1, 51, 51, 0, 0, 0, 0, 61, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ' ', ' ', 0, 0, ' ', ' ', 0, 0, 1e-5, 0, 0, 0, 0 , 0,\n #\n # 7, 51, 51, 0, 0, 0, 0, 61, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ' ', ' ', 0, 0, ' ', ' ', 0, 0, 1e-5, 0, 0, 0, 0, 0)\n #op2.show_data(data[n:], types='is')\n raise NotImplementedError('You have blank lines in your PBUSHT')\n return n, props\n\n def _read_pbusht_80(self, card_obj, data: bytes, n: int) -> int:\n \"\"\"\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID I TABLEDi entry identification number for structural damping\n 15 TKNID(6) I TABLEDi entry identification numbers for force versus deflection\n 16,17,18,19,20\n ???\n \"\"\"\n op2 = self.op2\n ntotal = 80 * self.factor\n struct1 = Struct(op2._endian + b'20i')\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid,\n #k1, k2, k3, k4, k5, k6,\n #b1, b2, b3, b4, b5, b6,\n #g1, sa, st, ea, et) = out\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1,\n n1, n2, n3, n4, n5, n6) = out\n g2 = g3 = g4 = g5 = g6 = g1\n k_tables = [k1, k2, k3, k4, k5, k6]\n b_tables = [b1, b2, b3, b4, b5, b6]\n ge_tables = [g1, g2, g3, g4, g5, g6]\n kn_tables = [n1, n2, n3, n4, n5, n6]\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbusht_100(self, card_obj, data: bytes, n: int) -> int:\n op2 = self.op2\n props = []\n ntotal = 100 * self.factor\n struct1 = Struct(mapfmt(op2._endian + b'25i', self.size))\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6,\n n1, n2, n3, n4, n5, n6) = out\n k_tables 
= [k1, k2, k3, k4, k5, k6]\n b_tables = [b1, b2, b3, b4, b5, b6]\n ge_tables = [g1, g2, g3, g4, g5, g6]\n kn_tables = [n1, n2, n3, n4, n5, n6]\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbusht_136(self, card_obj, data: bytes, n: int) -> int:\n r\"\"\"not 100%\n\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID(6) I TABLEDi entry identification number for structural damping\n 20 TKNID(6) I TABLEDi entry IDs for force vs. deflection\n 26 FDC(2) CHAR4 Force deflection curve rule\n 28 FUSE I Failure level\n 29 DIR I Fuse direction\n 30 OPTION(2) CHAR4 Failure mode\n 32 LOWER RS Lower failure bound\n 33 UPPER RS Upper failure bound\n 34 FRATE RS FACTOR of scales the stiffness\n 35 LRGR I Controls large rotation\n 36 UNDEF(4) none\n\n # C:\\MSC.Software\\msc_nastran_runs\\mbsh14.op2\n PBUSHT\t1\t K\t51\t51\n B\t61\t61\n PBUSHT\t7\t K\t51\t51\n B\t61\t61\n\n 538976288 = ' '\n ints = (\n 702, 7, 38,\n 1, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0,\n 7, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0)\n floats = (\n 702, 7, 38,\n 1, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0,\n 7, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n props = []\n ntotal = 136 * self.factor # k b g n fdc\n struct1 = Struct(mapfmt(op2._endian + b'i 6i 6i 6i 6i 4s 2i i 5i', self.size))\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6,\n n1, n2, n3, n4, n5, n6,\n word1, a, word2, c, *other) = out\n\n\n k_tables = [ki if ki != 538976288 else 0\n for ki in [k1, k2, k3, k4, k5, k6]]\n\n b_tables = [bi if bi != 538976288 else 0\n for bi in [b1, b2, b3, b4, b5, b6]]\n ge_tables = [gei if gei != 538976288 else 0\n for gei in [g1, g2, g3, g4, g5, g6]]\n kn_tables = [kni if kni != 538976288 else 0\n for kni in [n1, n2, n3, n4, n5, n6]]\n op2.log.warning(\n f'PBUSHT: pid={pid} '\n f'k={k_tables} '\n f'b={b_tables} '\n f'ge={ge_tables} '\n f'n={kn_tables} ' +\n 'words=' + str([word1, a, word2, c]) +\n f' other={other}')\n assert sum(other) == 0, other\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pcomp(self, data: bytes, n: int) -> int:\n r\"\"\"\n PCOMP(2706,27,287) - the marker for Record 22\n\n standard:\n EPTS; 64-bit: C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post1\\cqrdbxdra3lg.op2\n\n optistruct:\n ints = (2706, 27, 287,\n 5,\n 3, -2.75, 0, 0, 1, 0, 0,\n 2, 0.25, 0, 2, # why is sout=2?\n 3, 5.0, 0, 3, # why is sout=3?\n 2, 0.25, 0, 2, # why is sout=2?\n\n 6, 5, -3.0, 0, 0, 1, 0, 0,\n 2, 0.25, 0, 2,\n 2, 0.25, 0, 2,\n 3, 5.0, 0, 3,\n 2, 0.25, 0, 2,\n 2, 0.25, 0, 2, 7, 7, 
-1068498944, 0, 0, 1, 0, 0, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 3, 5.0, 0, 3, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2)\n floats = (2706, 27, 287,\n 5, 3, -2.75, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 6, 5, -3.0, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 9.80908925027372e-45, 9.80908925027372e-45, -3.25, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2)\n \"\"\"\n op2 = self.op2\n if self.size == 4:\n n2, props = self._read_pcomp_32_bit(data, n)\n nproperties = len(props)\n for prop in props:\n self._add_op2_property(prop)\n op2.card_count['PCOMP'] = nproperties\n else:\n n2 = op2.reader_geom2._read_dual_card(\n data, n, self._read_pcomp_32_bit,\n self._read_pcomp_64_bit,\n 'PCOMP', self._add_op2_property)\n return n2\n\n def _read_pcomp_64_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:\n \"\"\"\n PCOMP(2706,27,287) - the marker for Record 22\n\n 1 PID I Property identification number\n 2 N(C) I Number of plies\n 3 Z0 RS Distance from the reference plane to the bottom surface\n 4 NSM RS Nonstructural mass per unit area\n 5 SB RS Allowable shear stress of the bonding material\n 6 FT I Failure theory\n 7 TREF RS Reference temperature\n 8 GE RS Damping coefficient\n\n 9 MID I Material identification number\n 10 T RS Thicknesses of the ply\n 11 THETA RS Orientation angle of the longitudinal direction of the ply\n 12 SOUT I Stress or strain output request of the ply\n Words 9 through 12 repeat N times\n\n TODO:\n 64-bit bug: why is the number of plies 0???\n\n doubles (float64) = (\n 1, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1,\n 21, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1)\n long long (int64) = (\n 1, 0, 1.7368e-18, 0, 1.0, 3, 0, 0, 1, 4592590756007337001, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1,\n 21, 0, 4341475431749739292, 0, 4607182418800017408, 3, 0, 0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1)\n\n doubles (float64) = (5e-324, 0.0, -0.005, 0.0, 0.0, 0.0, 0.0, 0.0,\n 4e-323, 0.005, 0.0, 5e-324,\n 4e-323, 0.005, 0.0, 5e-324,\n nan, nan, nan, nan)\n long long (int64) = (1, 0, -4650957407178058629, 0, 0, 0, 0, 0,\n 8, 4572414629676717179, 0, 1,\n 8, 4572414629676717179, 0, 1,\n -1, -1, -1, -1)\n\n C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post2\\dbxdr12lg.op2\n data = (3321, 2, -0.5, 0.0, 1.0, 4, 0.0, 0.0,\n 3, 0.5, 0, 1,\n 3, 0.5, 0, 1)\n \"\"\"\n op2 = self.op2\n op2.to_nx(' because PCOMP-64 was found')\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))\n ntotal1 = 32 * self.factor\n s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))\n\n four_minus1 = Struct(mapfmt(op2._endian + b'4i', self.size))\n ndata = len(data)\n ntotal2 = 16 * self.factor\n props = []\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out\n assert pid > 0\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n assert isinstance(nlayers, int), out\n #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n #f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n n += ntotal1\n\n # None, 
'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n is_symmetrical = 'NO'\n #if nlayers < 0:\n #is_symmetrical = 'SYM'\n #nlayers = abs(nlayers)\n\n mids = []\n T = []\n thetas = []\n souts = []\n edata2 = data[n:n+ntotal2]\n idata = four_minus1.unpack(edata2)\n while idata != (-1, -1, -1, -1):\n (mid, t, theta, sout) = s2.unpack(edata2)\n mids.append(mid)\n T.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\\n')\n n += ntotal2\n #print(f' mid={mid} t={t} theta={theta} sout={sout}')\n edata2 = data[n:n+ntotal2]\n if n == ndata:\n op2.log.warning(' no (-1, -1, -1, -1) flag was found to close the PCOMPs')\n break\n idata = four_minus1.unpack(edata2)\n\n if self.size == 4:\n assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s Tref=%s ge=%s' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge)\n else:\n assert nlayers == 0, nlayers\n nlayers = len(mids)\n\n data_in = [\n pid, z0, nsm, sb, ft, tref, ge,\n is_symmetrical, mids, T, thetas, souts]\n prop = PCOMP.add_op2_data(data_in)\n nproperties += 1\n n += ntotal2\n props.append(prop)\n return n, props\n\n def _read_pcomp_32_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]: # pragma: no cover\n \"\"\"PCOMP(2706,27,287) - the marker for Record 22\"\"\"\n op2 = self.op2\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))\n ntotal1 = 32 * self.factor\n s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))\n\n ndata = len(data)\n ntotal2 = 16 * self.factor\n props = []\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out\n assert pid > 0\n\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n assert isinstance(nlayers, int), out\n #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n #f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n n += ntotal1\n\n mids = []\n T = []\n thetas = []\n souts = []\n\n # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n is_symmetrical = 'NO'\n if nlayers < 0:\n is_symmetrical = 'SYM'\n nlayers = abs(nlayers)\n assert nlayers > 0, out\n\n assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge)\n\n if op2.is_debug_file:\n op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s\\n' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge))\n #if op2._nastran_format == 'optistruct':\n #print(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge))\n for unused_ilayer in range(nlayers):\n (mid, t, theta, sout) = s2.unpack(data[n:n+ntotal2])\n if op2._nastran_format == 'optistruct':\n #print(f' mid={mid} t={t} theta={theta} sout={sout}')\n if sout in [2, 3]: # TODO: Why is this 2/3?\n sout = 1 # YES\n\n mids.append(mid)\n assert mid > 0\n\n T.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\\n')\n n += ntotal2\n\n data_in = [\n pid, z0, nsm, sb, ft, tref, ge,\n is_symmetrical, mids, T, thetas, souts]\n prop = PCOMP.add_op2_data(data_in)\n #print(prop)\n props.append(prop)\n nproperties += 1\n return n, props\n\n def _read_pcompg(self, data: bytes, n: int) -> int:\n \"\"\"\n PCOMP(2706,27,287)\n\n 1 PID I Property identification number\n 2 LAMOPT I Laminate option\n 3 Z0 RS Distance 
from the reference plane to the bottom surface\n 4 NSM RS Nonstructural mass per unit area\n 5 SB RS Allowable shear stress of the bonding material\n 6 FT I Failure theory\n 7 TREF RS Reference temperature\n 8 GE RS Damping coefficient\n\n 9 GPLYIDi I Global ply IDs.\n 10 MID I Material identification number\n 11 T RS Thicknesses of the ply\n 12 THETA RS Orientation angle of the longitudinal direction of the ply\n 13 SOUT I Stress or strain output request of the ply\n Words 9 through 13 repeat N times (until -1, -1, -1, -1, -1 as Nplies doesn't exist...)\n\n float = (15006, 150, 604,\n 5, 0.0, 1.7368e-18, 0.0, 0.0, 0.0, 20.0, 0.0,\n 5e-324, 5e-324, 2.0, 0.0, 0.0,\n 1e-323, 1e-323, 3.0, 0.0, 0.0,\n 1.5e-323, 1e-323, 3.0, 0.0, 0.0,\n 2e-323, 5e-324, 2.0, 0.0, 0.0,\n nan, nan, nan, nan, nan)\n int = (15006, 150, 604,\n 5, 0, 1.7368e-18, 0, 0, 0, 20.0, 0,\n 1, 1, 4611686018427387904, 0, 0,\n 2, 2, 4613937818241073152, 0, 0,\n 3, 2, 4613937818241073152, 0, 0,\n 4, 1, 4611686018427387904, 0, 0,\n -1, -1, -1, -1, -1)\n\n \"\"\"\n op2 = self.op2\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i 3f i 2f', self.size))\n s2 = Struct(mapfmt(op2._endian + b'2i 2f i', self.size))\n struct_i5 = Struct(mapfmt(op2._endian + b'5i', self.size))\n\n # lam - SYM, MEM, BEND, SMEAR, SMCORE, None\n lam_map = {\n 0 : None,\n # MEM\n # BEND\n # SMEAR\n # SMCORE\n }\n\n # ft - HILL, HOFF, TSAI, STRN, None\n ft_map = {\n 0 : None,\n # HILL\n # HOFF\n 3 : 'TSAI',\n # STRN\n }\n # sout - YES, NO\n sout_map = {\n 0 : 'NO',\n 1 : 'YES',\n }\n ndata = len(data)\n #op2.show_data(data, types='qd')\n ntotal1 = 32 * self.factor\n ntotal2 = 20 * self.factor\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, lam_int, z0, nsm, sb, ft_int, tref, ge) = out\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} '\n f'sb={sb} ft_int={ft_int} tref={tref} ge={ge}')\n #print(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} sb={sb} '\n #f'ft_int={ft_int} tref={tref} ge={ge}')\n assert isinstance(lam_int, int), out\n assert pid > -1, out\n n += ntotal1\n\n mids = []\n thicknesses = []\n thetas = []\n souts = []\n global_ply_ids = []\n\n # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n #is_symmetrical = 'NO'\n #if nlayers < 0:\n #is_symmetrical = 'SYM'\n #nlayers = abs(nlayers)\n #assert nlayers > 0, out\n\n #assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge)\n\n #if op2.is_debug_file:\n #op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s\\n' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge))\n ilayer = 0\n while ilayer < 1000:\n ints5 = struct_i5.unpack(data[n:n+ntotal2])\n if ints5 == (-1, -1, -1, -1, -1):\n if op2.is_debug_file:\n op2.binary_debug.write(' global_ply=%-1 mid=%-1 t=%-1 theta=%-1 sout=-1\\n')\n break\n (global_ply, mid, t, theta, sout_int) = s2.unpack(data[n:n+ntotal2])\n #print(' ', (global_ply, mid, t, theta, sout_int))\n try:\n sout = sout_map[sout_int]\n except KeyError:\n op2.log.error('cant parse global_ply=%s sout=%s; assuming 0=NO' % (\n global_ply, sout_int))\n sout = 'NO'\n\n global_ply_ids.append(global_ply)\n mids.append(mid)\n thicknesses.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(' global_ply=%s mid=%s t=%s theta=%s sout_int=%s sout=%r\\n' % (\n global_ply, mid, t, theta, sout_int, sout))\n n += ntotal2\n ilayer += 1\n n += ntotal2\n\n try:\n ft = 
ft_map[ft_int]\n except KeyError:\n op2.log.error('pid=%s cant parse ft=%s; should be HILL, HOFF, TSAI, STRN'\n '...skipping' % (pid, ft_int))\n continue\n\n try:\n lam = lam_map[lam_int]\n except KeyError:\n op2.log.error('pid=%s cant parse lam=%s; should be HILL, HOFF, TSAI, STRN'\n '...skipping' % (pid, lam_int))\n continue\n\n # apparently Nastran makes duplicate property ids...\n if pid in op2.properties and op2.properties[pid].type == 'PCOMP':\n del op2.properties[pid]\n\n op2.add_pcompg(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,\n nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0, comment='')\n nproperties += 1\n op2.card_count['PCOMPG'] = nproperties\n return n\n\n# PCOMPA\n\n def _read_pconeax(self, data: bytes, n: int) -> int:\n \"\"\"\n (152,19,147) - Record 24\n \"\"\"\n self.op2.log.info('geom skipping PCONEAX in EPT')\n return len(data)\n\n def _read_pconv(self, data: bytes, n: int) -> int:\n \"\"\"common method for reading PCONVs\"\"\"\n op2 = self.op2\n #n = self._read_dual_card(data, n, self._read_pconv_nx, self._read_pconv_msc,\n #'PCONV', self._add_pconv)\n\n card_name = 'PCONV'\n card_obj = PCONV\n methods = {\n 16 : self._read_pconv_nx_16, # 16=4*4\n 56 : self._read_pconv_msc_56, # 56=4*14\n }\n try:\n n, elements = op2.reader_geom2._read_double_card_load(\n card_name, card_obj,\n methods, data, n)\n except DoubleCardError:\n nx_method = partial(self._read_pconv_nx_16, card_obj)\n msc_method = partial(self._read_pconv_msc_56, card_obj)\n n, elements = op2._read_dual_card_load(\n data, n,\n nx_method, msc_method,\n card_name, self._add_op2_property)\n\n nelements = len(elements)\n for prop in elements:\n key = prop.pconid\n if key in op2.convection_properties:\n prop_old = op2.convection_properties[key]\n if prop != prop_old:\n op2.log.warning(prop.raw_fields())\n op2.log.warning(prop_old.raw_fields())\n op2.log.warning(f'PCONV pconid={key}; old, new\\n{prop_old}{prop}')\n # this will fail due to a duplicate id\n self._add_pconv(prop)\n #else:\n # already exists\n else:\n self._add_pconv(prop)\n op2.card_count['PCONV'] = nelements\n\n return n\n\n def _read_pconv_nx_16(self, card_obj: PCONV, data: bytes, n: int) -> int:\n \"\"\"\n (11001,110,411)- NX version\n \"\"\"\n op2 = self.op2\n ntotal = 16 # 4*4\n struct_3if = Struct(op2._endian + b'3if')\n nentries = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n props = []\n for unused_i in range(nentries):\n out = struct_3if.unpack(data[n:n+ntotal])\n (pconid, mid, form, expf) = out\n ftype = tid = chlen = gidin = ce = e1 = e2 = e3 = None\n data_in = (pconid, mid, form, expf, ftype, tid, chlen,\n gidin, ce, e1, e2, e3)\n\n prop = PCONV.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pconv_msc_56(self, card_obj: PCONV, data: bytes, n: int) -> int:\n \"\"\"\n (11001,110,411)- MSC version - Record 25\n \"\"\"\n op2 = self.op2\n ntotal = 56 # 14*4\n s = Struct(op2._endian + b'3if 4i fii 3f')\n nentries = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n props = []\n for unused_i in range(nentries):\n out = s.unpack(data[n:n+ntotal])\n (pconid, mid, form, expf, ftype, tid, unused_undef1, unused_undef2, chlen,\n gidin, ce, e1, e2, e3) = out\n data_in = (pconid, mid, form, expf, ftype, tid, chlen,\n gidin, ce, e1, e2, e3)\n\n prop = PCONV.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pconvm(self, data: bytes, n: int) -> int:\n \"\"\"Record 24 -- PCONVM(2902,29,420)\n\n 1 PID I 
Property identification number\n 2 MID I Material identification number\n 3 FORM I Type of formula used for free convection\n 4 FLAG I Flag for mass flow convection\n 5 COEF RS Constant coefficient used for forced convection\n 6 EXPR RS Reynolds number convection exponent\n 7 EXPPI RS Prandtl number convection exponent into the working fluid\n 8 EXPPO RS Prandtl number convection exponent out of the working fluid\n \"\"\"\n op2 = self.op2\n ntotal = 32 # 8*4\n structi = Struct(op2._endian + b'4i 4f')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = structi.unpack(data[n:n+ntotal])\n if out != (0, 0, 0, 0, 0., 0., 0., 0.):\n (pconid, mid, form, flag, coeff, expr, expri, exppo) = out\n #print(out)\n prop = PCONVM(pconid, mid, coeff, form=form, flag=flag,\n expr=expr, exppi=expri, exppo=exppo, comment='')\n op2._add_methods._add_convection_property_object(prop)\n n += ntotal\n op2.card_count['PCONVM'] = nentries\n return n\n\n def _read_pdamp(self, data: bytes, n: int) -> int:\n \"\"\"\n PDAMP(202,2,45) - the marker for Record ???\n \"\"\"\n op2 = self.op2\n ntotal = 8 * self.factor # 2*4\n struct_if = Struct(mapfmt(op2._endian + b'if', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = struct_if.unpack(data[n:n+ntotal])\n #(pid, b) = out\n prop = PDAMP.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PDAMP'] = nentries\n return n\n\n def _read_pdampt(self, data: bytes, n: int) -> int: # 26\n self.op2.log.info('geom skipping PDAMPT in EPT')\n return len(data)\n\n def _read_pdamp5(self, data: bytes, n: int) -> int: # 26\n self.op2.log.info('geom skipping PDAMP5 in EPT')\n return len(data)\n\n# PDUM1\n# PDUM2\n# PDUM3\n# PDUM4\n# PDUM5\n# PDUM6\n# PDUM7\n# PDUM8\n# PDUM9\n\n def _read_pelas(self, data: bytes, n: int) -> int:\n \"\"\"PELAS(302,3,46) - the marker for Record 39\"\"\"\n op2 = self.op2\n struct_i3f = Struct(mapfmt(op2._endian + b'i3f', self.size))\n ntotal = 16 * self.factor # 4*4\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_i3f.unpack(edata)\n #(pid, k, ge, s) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PELAS=%s\\n' % str(out))\n prop = PELAS.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PELAS'] = nproperties\n return n\n\n def _read_pfast_msc(self, data: bytes, n: int) -> int:\n r\"\"\"\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material property identification number\n 3 D RS Diameter of the fastener\n 4 CONNBEH I Connection behavior (0=FF/F, 1=FR, 10=RF/R, 11=RR)\n 5 CONNTYPE I Connection type (0=clamp, 1=hinge, 2=bolt)\n 6 EXTCON I External constraint flag (0=off, 1=on)\n 7 CONDTYPE I Condition type (0=rigid, 1=equivalent)\n 8 WELDTYPE I Weld type (0=spot weld, 1=but seam, 2=T-seam)\n\n 9 MINLEN RS Minimum length of spot weld\n 10 MAXLEN RS Maximum length of spot weld\n 11 GMCHK I Perform geometry check\n 12 SPCGS I SPC the master grid GS\n 13 CMASS RS Concentrated mass\n 14 GE RS Structureal Damping\n\n 15 UNDEF(3) none Not used\n 18 MCID I Element stiffness coordinate system\n 19 MFLAG I Defined the coordinate system type\n 20 KT(3) RS Stiffness values in direction 1\n 23 KR(3) RS Rotation stiffness values in direction 1\n\n C:\\MSC.Software\\msc_nastran_runs\\cfmass.op2\n pid mid D con con ext cond weld min max chk spc cmass ge und und und mcid mfag kt1 kt2 kt3 kr1 kr2 kr3\n ints = (99, 0, 0.1, 0, 0, 0, 0, -1, 
0.2, 5.0, 0, 0, 7.9, 0, 0, 0, 0, -1, 0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)\n floats = (99, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, -1, 0.2, 5.0, 0.0, 0.0, 7.9, 0.0, 0.0, 0.0, 0.0, -1, 0.0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:], types='ifs')\n #ntotal = 92 * self.factor # 26*4\n #struct1 = Struct(op2._endian + b'ifii 3f')\n\n ntotal = 100 * self.factor # 25*4\n struct1 = Struct(op2._endian + b'2if 5i 2f2i2f 3i 2i 6f')\n ndatai = len(data) - n\n nproperties = ndatai // ntotal\n delta = ndatai % ntotal\n assert delta == 0, 'len(data)-n=%s n=%s' % (ndatai, ndatai / 100.)\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PFAST=%s\\n' % str(out))\n (pid, d, mcid, unused_connbeh, unused_conntype, unused_extcon,\n unused_condtype, unused_weldtype, unused_minlen, unused_maxlen,\n unused_gmcheck, unused_spcgs, mass, ge,\n unused_aa, unused_bb, unused_cc, mcid, mflag,\n kt1, kt2, kt3, kr1, kr2, kr3) = out\n\n data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,\n kr1, kr2, kr3, mass, ge)\n prop = PFAST.add_op2_data(data_in)\n str(prop)\n #print(prop)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PFAST'] = nproperties\n return n\n\n def _read_pfast_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n PFAST(3601,36,55)\n NX only\n \"\"\"\n op2 = self.op2\n ntotal = 48\n struct1 = Struct(op2._endian + b'ifii 8f')\n nproperties = (len(data) - n) // ntotal\n delta = (len(data) - n) % ntotal\n assert delta == 0, 'len(data)-n=%s n=%s' % (len(data) - n, (len(data) - n) / 48.)\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PFAST=%s\\n' % str(out))\n (pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge) = out\n\n data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,\n kr1, kr2, kr3, mass, ge)\n prop = PFAST.add_op2_data(data_in)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PFAST'] = nproperties\n op2.to_nx(' because PFAST-NX was found')\n return n\n\n def _read_pelast(self, data: bytes, n: int) -> int:\n \"\"\"\n Record 41 -- PELAST(1302,13,34)\n\n 1 PID I Property identification number\n 2 TKID I TABLEDi entry identification number for stiffness\n 3 TGEID I TABLEDi entry identification number for structural\n damping\n 4 TKNID I TABLEDi entry\n \"\"\"\n op2 = self.op2\n ntotal = 16 * self.factor\n struct_4i = Struct(mapfmt(op2._endian + b'4i', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_4i.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PELAST=%s\\n' % str(out))\n #(pid, tkid, tgeid, tknid) = out\n prop = PELAST.add_op2_data(out)\n op2._add_methods._add_pelast_object(prop)\n n += ntotal\n op2.card_count['PELAST'] = nproperties\n return n\n\n def _read_pgap(self, data: bytes, n: int) -> int:\n \"\"\"\n PGAP(2102,21,121) - the marker for Record 42\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor\n struct_i10f = Struct(mapfmt(op2._endian + b'i10f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_i10f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PGAP=%s\\n' % str(out))\n #(pid,u0,f0,ka,kb,kt,mu1,mu2,tmax,mar,trmin) = out\n prop = PGAP.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n 
op2.card_count['PGAP'] = nproperties\n return n\n\n def _read_phbdy(self, data: bytes, n: int) -> int:\n \"\"\"\n PHBDY(2802,28,236) - the marker for Record 43\n \"\"\"\n op2 = self.op2\n struct_i3f = Struct(op2._endian + b'ifff')\n nproperties = (len(data) - n) // 16\n for unused_i in range(nproperties):\n edata = data[n:n+16]\n out = struct_i3f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PHBDY=%s\\n' % str(out))\n #(pid, af, d1, d2) = out\n prop = PHBDY.add_op2_data(out)\n op2._add_methods._add_phbdy_object(prop)\n n += 16\n op2.card_count['PHBDY'] = nproperties\n return n\n\n def _read_pintc(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping PINTC in EPT')\n return len(data)\n\n def _read_pints(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping PINTS in EPT')\n return len(data)\n\n def _read_pbeam3(self, data: bytes, n: int) -> int:\n op2 = self.op2\n card_name = 'PBUSHT'\n card_obj = PBUSHT\n methods = {\n 264 : self._read_pbeam3_264,\n 456 : self._read_pbeam3_456,\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, self._add_op2_property,\n methods, data, n)\n except DoubleCardError:\n raise\n op2.log.warning(f'try-except {card_name}')\n return n\n\n def _read_pbeam3_456(self, card_obj, data: bytes, n: int) -> int:\n r\"\"\"\n\n # per C:\\MSC.Software\\msc_nastran_runs\\b3plod3.op2\n ints = (2201, 1, 1.0, 0.1833, 0.0833, 0, -1.0, 0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,\n 2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2901, 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5,\n 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n floats = (2201, 1, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,\n 2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2901, 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5,\n 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:])\n ntotal = 456 * self.factor # 114*4\n #\n struct1 = Struct(mapfmt(op2._endian +\n b'2i' # pid, mid\n 
b'3f' # A, Iy, Iz\n b'5f' # # a, b, c, d, e\n b'5f fi 14f i' #fj ki 14f i\n b'2i3f' #aa-ee - good\n b'5f' #ff-jj\n b'5f' #kk-oo\n b'5f' #pp-tt\n b'6f' #uu-zz\n b'5f' #aaa-eee\n b'4i' #fff-iii\n # jjj-ooo\n b'2f iii f'\n # ppp-ttt\n b'5f'\n # uuu-zzz\n b'6f'\n b'30f', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n\n props = []\n for unused_i in range(nentries):\n #print(n, ntotal)\n datai = data[n:n+ntotal]\n #op2.show_data(datai, types='ifqd')\n n += ntotal\n\n (pid, mid, A, iz, iy,\n a, b, c, d, e,\n f, g, h, i, j,\n k, inta, l, m, ni, o, p, q, r, s, t, u, v, w, x, y, z,\n aa, bb, cc, dd, ee,\n ff, gg, hh, ii, jj,\n kk, ll, mm, nn, oo,\n pp, qq, rr, ss, tt,\n uu, vv, ww, xx, yy, zz,\n aaa, bbb, ccc, ddd, eee,\n fff, ggg, hhh, iii,\n jjj, kkk, lll, mmm, nnn, ooo,\n ppp, qqq, rrr, sss, ttt,\n uuu, vvv, www, xxx, yyy, zzz,\n *other) = struct1.unpack(datai)\n #print(pid, mid, A, iz, iy)\n #print('a-e', (a, b, c, d, e))\n #print('f-j', (f, g, h, i, j))\n #print(k, inta, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z)\n #print('aa-ee', (aa, bb, cc, dd, ee))\n #print('ff-jj', (ff, gg, hh, ii, jj))\n #print('kk-oo', (kk, ll, mm, nn, oo))\n #print('pp-tt', (pp, qq, rr, ss, tt))\n #print('uu-zz', (uu, vv, ww, xx, yy, zz))\n #print('aaa-eee', (aaa, bbb, ccc, ddd, eee))\n #print('fff-jjj', (fff, ggg, hhh, iii))\n #print('jjj-ooo', (jjj, kkk, lll, mmm, nnn, ooo))\n #print('ppp-ttt', (ppp, qqq, rrr, sss, ttt))\n #print('uuu-zzz', (uuu, vvv, www, xxx, yyy, zzz))\n\n if mid == 0:\n continue\n #assert sum(other) < 100, other\n prop = PBEAM3(\n pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,\n so=None,\n cy=None, cz=None,\n dy=None, dz=None,\n ey=None, ez=None,\n fy=None, fz=None,\n ky=1., kz=1.,\n ny=None, nz=None, my=None, mz=None,\n nsiy=None, nsiz=None, nsiyz=None,\n cw=None, stress='GRID',\n w=None, wy=None, wz=None, comment='')\n assert pid > 0, prop.get_stats()\n assert mid > 0, prop.get_stats()\n str(prop)\n props.append(prop)\n #self._add_op2_property(prop)\n #op2.card_count['PBEAM3'] = nentries\n return n, props\n\n def _read_pbeam3_264(self, card_obj, data: bytes, n: int) -> int:\n \"\"\"\n TODO: partial\n # per test_cbeam_cbeam3???\n ints = (2901, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0)\n floats = (2901, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 264 * self.factor # 66*4\n # p/m ayz ae fj ki 14f i\n struct1 = Struct(mapfmt(op2._endian + b'2i 3f 5f 5f fi 14f i 30f 4i', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n\n props = []\n for unused_i in range(nentries):\n pid, mid, A, iz, iy, a, b, c, d, e, f, g, h, i, j, k, inta, *other = struct1.unpack(data[n:n+ntotal])\n #print(pid, mid, A, iz, iy)\n #print((a, b, c, d, e))\n #print((f, g, h, i, j))\n #print(k, inta)\n assert sum(other) < 100, other\n prop = PBEAM3(\n pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,\n so=None,\n cy=None, cz=None,\n dy=None, dz=None,\n ey=None, ez=None,\n fy=None, fz=None,\n ky=1., kz=1.,\n ny=None, nz=None, 
my=None, mz=None,\n nsiy=None, nsiz=None, nsiyz=None,\n cw=None, stress='GRID',\n w=None, wy=None, wz=None, comment='')\n assert pid > 0, prop.get_stats()\n assert mid > 0, prop.get_stats()\n str(prop)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pplane(self, data: bytes, n: int) -> int:\n \"\"\"\n RECORD – PPLANE(3801,38,979)\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 T RS Default membrane thickness for Ti on the connection entry\n 4 NSM RS Nonstructural mass per unit area\n 5 FOROPT I Formulation option number\n 6 CSOPT I Reserved for coordinate system definition of plane\n 7 UNDEF(2) None\n\n ints = (1, 1, 1.0, 0, 0, 0, 0, 0, 2, 2, 1.0, 0, 0, 0, 0, 0)\n floats = (1, 1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 2, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 32 * self.factor # 8*4\n struct1 = Struct(mapfmt(op2._endian + b'2i 2f 4i', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n for unused_i in range(nentries):\n out = struct1.unpack(data[n:n+ntotal])\n pid, mid, t, nsm, foropt, csopt = out[:6]\n #print(out)\n assert csopt == 0, csopt\n pplane = op2.add_pplane(pid, mid, t=t, nsm=nsm,\n formulation_option=foropt)\n pplane.validate()\n #print(pplane)\n str(pplane)\n n += ntotal\n op2.card_count['PLPLANE'] = nentries\n return n\n\n def _read_plplane(self, data: bytes, n: int) -> int:\n \"\"\"\n PLPLANE(4606,46,375)\n\n NX 10\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 CID I Coordinate system identification number\n 4 STR CHAR4 Location of stress and strain output\n 5 T RS Default membrane thickness for Ti on the connection entry\n 6 CSOPT I Reserved for coordinate system definition of plane\n 7 UNDEF(5) None\n\n MSC 2016\n PID I Property identification number\n 2 MID I Material identification number\n 3 CID I Coordinate system identification number\n 4 STR CHAR4 Location of stress and strain output\n 5 UNDEF(7 ) none Not used\n\n .. warning:: CSOPT ad T are not supported\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor # 4*11\n if self.size == 4:\n s = Struct(op2._endian + b'3i 4s f 6i')\n else:\n s = Struct(op2._endian + b'3q 8s d 6q')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = s.unpack(data[n:n+ntotal])\n pid, mid, cid, location, unused_t, unused_csopt = out[:6]\n location = location.decode('latin1')\n #op2.show_data(data[n:n+ntotal], 'ifs')\n op2.add_plplane(pid, mid, cid=cid, stress_strain_output_location=location)\n n += ntotal\n op2.card_count['PLPLANE'] = nentries\n return n\n\n def _read_plsolid(self, data: bytes, n: int) -> int:\n \"\"\"\n MSC 2016\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 STR CHAR4 Location of stress and strain output\n 4 UNDEF(4 ) none Not used\n\n NX 10\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 STR CHAR4 Location of stress and strain output\n 4 CSOPT I Reserved for coordinate system definition of plane\n 5 UNDEF(3) None\n\n .. 
warning:: CSOPT is not supported\n \"\"\"\n op2 = self.op2\n ntotal = 28 * self.factor # 4*7\n if self.size == 4:\n struct1 = Struct(op2._endian + b'2i 4s 4i')\n else:\n struct1 = Struct(op2._endian + b'2q 8s 4q')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = struct1.unpack(data[n:n+ntotal])\n pid, mid, location, unused_csopt, unused_null_a, unused_null_b, unused_null_c = out\n location = location.decode('latin1')\n #op2.show_data(data[n:n+ntotal], 'ifs')\n op2.add_plsolid(pid, mid, stress_strain=location, ge=0.)\n n += ntotal\n op2.card_count['PLSOLID'] = nentries\n return n\n\n def _read_pmass(self, data: bytes, n: int) -> int:\n \"\"\"\n PMASS(402,4,44) - the marker for Record 48\n \"\"\"\n op2 = self.op2\n ntotal = 8 * self.factor # 2*4\n struct_if = Struct(mapfmt(op2._endian + b'if', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n + ntotal]\n out = struct_if.unpack(edata)\n #out = (pid, mass)\n if op2.is_debug_file:\n op2.binary_debug.write(' PMASS=%s\\n' % str(out))\n prop = PMASS.add_op2_data(out)\n self._add_op2_property_mass(prop)\n n += ntotal\n return n\n\n def _read_prod(self, data: bytes, n: int) -> int:\n \"\"\"\n PROD(902,9,29) - the marker for Record 49\n \"\"\"\n op2 = self.op2\n ntotal = 24 * self.factor # 6*4\n struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_2i4f.unpack(edata)\n #(pid, mid, a, j, c, nsm) = out\n prop = PROD.add_op2_data(out)\n if op2.is_debug_file:\n op2.binary_debug.write(' PROD=%s\\n' % str(out))\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PROD'] = nproperties\n return n\n\n def _read_pshear(self, data: bytes, n: int) -> int:\n \"\"\"\n PSHEAR(1002,10,42) - the marker for Record 50\n \"\"\"\n op2 = self.op2\n ntotal = 24 * self.factor\n struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_2i4f.unpack(edata)\n #(pid, mid, t, nsm, f1, f2) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PSHEAR=%s\\n' % str(out))\n prop = PSHEAR.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PSHEAR'] = nproperties\n return n\n\n def _read_pshell(self, data: bytes, n: int) -> int:\n \"\"\"\n PSHELL(2302,23,283) - the marker for Record 51\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor # 11*4\n s = Struct(mapfmt(op2._endian + b'iififi4fi', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = s.unpack(edata)\n (pid, mid1, unused_t, mid2, unused_bk, mid3, unused_ts,\n unused_nsm, unused_z1, unused_z2, mid4) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PSHELL=%s\\n' % str(out))\n prop = PSHELL.add_op2_data(out)\n n += ntotal\n\n if pid in op2.properties:\n # this is a fake PSHELL\n propi = op2.properties[pid]\n if prop == propi:\n op2.log.warning(f'Fake PSHELL {pid:d} (skipping):\\n{propi}')\n nproperties -= 1\n continue\n #assert propi.type in ['PCOMP', 'PCOMPG'], propi.get_stats()\n op2.log.error(f'PSHELL {pid:d} is also {propi.type} (skipping PSHELL):\\n{propi}{prop}')\n nproperties -= 1\n continue\n #continue\n #if max(pid, mid1, mid2, mid3, mid4) > 1e8:\n #self.big_properties[pid] = prop\n #else:\n self._add_op2_property(prop)\n if nproperties:\n 
op2.card_count['PSHELL'] = nproperties\n return n\n\n def _read_psolid(self, data: bytes, n: int) -> int:\n \"\"\"\n PSOLID(2402,24,281) - the marker for Record 52\n \"\"\"\n op2 = self.op2\n #print(\"reading PSOLID\")\n if self.size == 4:\n ntotal = 28 # 7*4\n struct_6i4s = Struct(op2._endian + b'6i4s')\n else:\n ntotal = 28 * 2\n struct_6i4s = Struct(op2._endian + b'6q8s')\n\n nproperties = (len(data) - n) // ntotal\n nproperties_found = 0\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_6i4s.unpack(edata)\n #(pid, mid, cid, inp, stress, isop, fctn) = out\n #data_in = [pid, mid, cid, inp, stress, isop, fctn]\n if op2.is_debug_file:\n op2.binary_debug.write(' PSOLID=%s\\n' % str(out))\n\n n += ntotal\n fctn = out[-1]\n if fctn == b'FAKE':\n op2.log.warning(' PSOLID=%s; is this a PCOMPLS?' % str(out))\n continue\n prop = PSOLID.add_op2_data(out)\n self._add_op2_property(prop)\n nproperties_found += 1\n op2.card_count['PSOLID'] = nproperties_found\n return n\n\n# PSOLIDL\n# PTRIA6\n# PTSHELL\n\n def _read_ptube(self, data: bytes, n: int) -> int:\n \"\"\"\n PTUBE(1602,16,30) - the marker for Record 56\n\n .. todo:: OD2 only exists for heat transfer...\n how do i know if there's heat transfer at this point?\n I could store all the tubes and add them later,\n but what about themal/non-thermal subcases?\n\n .. warning:: assuming OD2 is not written (only done for thermal)\n \"\"\"\n op2 = self.op2\n struct_2i3f = Struct(op2._endian + b'2i3f')\n nproperties = (len(data) - n) // 20\n for unused_i in range(nproperties):\n edata = data[n:n+20] # or 24???\n out = struct_2i3f.unpack(edata)\n (pid, mid, OD, t, nsm) = out\n data_in = [pid, mid, OD, t, nsm]\n if op2.is_debug_file:\n op2.binary_debug.write(' PTUBE=%s\\n' % str(out))\n prop = PTUBE.add_op2_data(data_in)\n self._add_op2_property(prop)\n n += 20\n op2.card_count['PTUBE'] = nproperties\n return n\n\n def _read_pset(self, data: bytes, n: int) -> int:\n op2 = self.op2\n struct_5i4si = Struct(op2._endian + b'5i4si')\n nentries = 0\n while n < len(data):\n edata = data[n:n+28]\n out = struct_5i4si.unpack(edata)\n #print(out)\n idi, poly1, poly2, poly3, cid, typei, typeid = out\n typei = typei.rstrip().decode('latin1')\n assert typei in ['SET', 'ELID'], (idi, poly1, poly2, poly3, cid, typei, typeid)\n if op2.is_debug_file:\n op2.binary_debug.write(' PVAL=%s\\n' % str(out))\n #print(idi, poly1, poly2, poly3, cid, typei, typeid)\n typeids = []\n n += 28\n while typeid != -1:\n typeids.append(typeid)\n typeid, = op2.struct_i.unpack(data[n:n+4])\n n += 4\n #print(val)\n #print(typeids)\n # PSET ID POLY1 POLY2 POLY3 CID SETTYP ID\n if len(typeids) == 1:\n typeids = typeids[0]\n op2.add_pset(idi, poly1, poly2, poly3, cid, typei, typeids)\n op2.card_count['PSET'] = nentries\n return n\n\n def _read_pval(self, data: bytes, n: int) -> int:\n \"\"\"\n PVAL(10201,102,400)\n\n Word Name Type Description\n 1 ID I p-value set identification number\n 2 POLY1 I Polynomial order in 1 direction of the CID system\n 3 POLY2 I Polynomial order in 2 direction of the CID system\n 4 POLY3 I Polynomial order in 2 direction of the CID system\n 5 CID I Coordinate system identification number\n 6 TYPE CHAR4 Type of set provided: \"SET\" or \"ELID\"\n 7 TYPEID I SET identification number or element identification\n number with this p-value specification.\n Words 1 through 7 repeat until End of Record\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:])\n if self.size == 4:\n struct_5i4si = Struct(op2._endian + b'5i 4s i')\n struct_i = 
op2.struct_i\n else:\n struct_5i4si = Struct(op2._endian + b'5q 8s q')\n struct_i = op2.struct_q\n\n nentries = 0\n ntotal = 28 * self.factor\n size = self.size\n while n < len(data):\n edata = data[n:n+ntotal]\n out = struct_5i4si.unpack(edata)\n #print(out)\n idi, poly1, poly2, poly3, cid, typei, typeid = out\n typei = typei.rstrip().decode('latin1')\n assert typei in ['SET', 'ELID'], f'idi={idi} poly1={poly1} poly2={poly2} poly3={poly3} cid={cid} typei={typei} typeid={typeid}'\n if op2.is_debug_file:\n op2.binary_debug.write(' PVAL=%s\\n' % str(out))\n #print(idi, poly1, poly2, poly3, cid, typei, typeid)\n typeids = []\n n += ntotal\n while typeid != -1:\n typeids.append(typeid)\n typeid, = struct_i.unpack(data[n:n+size])\n n += size\n #print(val)\n #print(typeids)\n # PVAL ID POLY1 POLY2 POLY3 CID SETTYP ID\n op2.add_pval(idi, poly1, poly2, poly3, cid, typei, typeids)\n op2.card_count['PVAL'] = nentries\n return n\n\n def _read_pvisc(self, data: bytes, n: int) -> int:\n \"\"\"PVISC(1802,18,31) - the marker for Record 39\"\"\"\n op2 = self.op2\n struct_i2f = Struct(op2._endian + b'i2f')\n nproperties = (len(data) - n) // 12\n for unused_i in range(nproperties):\n edata = data[n:n+12]\n out = struct_i2f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PVISC=%s\\n' % str(out))\n #(pid, ce, cr) = out\n prop = PVISC.add_op2_data(out)\n self._add_op2_property(prop)\n n += 12\n op2.card_count['PVISC'] = nproperties\n return n\n\n# PWELD\n# PWSEAM\n def _read_view(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping VIEW in EPT')\n return len(data)\n\n def _read_view3d(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping VIEW3D in EPT')\n return len(data)\n\ndef break_by_minus1(idata):\n \"\"\"helper for ``read_nsm_nx``\"\"\"\n i1 = 0\n i = 0\n i2 = None\n packs = []\n for idatai in idata:\n #print('data[i:] = ', data[i:])\n if idatai == -1:\n i2 = i\n packs.append((i1, i2))\n i1 = i2 + 1\n i += 1\n continue\n i += 1\n #print(packs)\n return packs\n"
] | [
[
"numpy.where",
"numpy.frombuffer"
]
] |
FelipeH92/Task-Space-Control-Vision | [
"77d9f709d7cb0afb50ef9baf6ba39304aca445e5"
] | [
"Experiments/src/Task Control - Python/UR5Class.py"
] | [
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n## @package UR5\r\n# Documentação para o pacote de classes UR5.\r\n#\r\n# Documentação do código produzido para controle do manipulador UR5 e geração e controle de suas posições.\r\n# Cada código aqui documentado possui uma breve descrição de sua função, suas entradas e saídas.\r\nimport numpy as np\r\nfrom numpy.linalg import inv\r\nfrom numpy.linalg import norm\r\nfrom numpy.linalg import pinv\r\nfrom scipy.signal import butter,lfilter\r\nfrom scipy.signal import freqz\r\nimport struct\r\nimport time\r\nimport csv\r\nimport Transformations as tf\r\nimport os\r\n\r\n## Documentação da Classe UR5Class para controle remoto do manipulador Universal Robots 5 (UR5).\r\n#\r\n# Essa classe é responsável por interpretar os dados recebidos pela caixa de controle do UR5 e controlar seu funcionamento ao longo do projeto.\r\n# A ela cabe as funções dos cálculos de cinemática direta e inversa para as diversas posições do robô, interpretar os dados do robô, verificar\r\n# seu estado de segurança e funcionamento, assim como realizar qualquer cálculo de calibração ou posição necessário.\r\nclass UR5Class:\r\n _standard_DH = np.mat([[0,-.425,-.39225,0,0,0], [1.570796327, 0, 0, 1.570796327, -1.570796327, 0], [.089159,0,0,.10915,.09465,.0823], [0, 0, 0, 0, 0, 0]])\r\n # _standard_DH é a tabela DH tradicional do Robô. As linhas correspondem respectivamente a (a, alpha, d,q)\r\n \r\n _robot_data = []\r\n # Lista vazia para receber os dados do robô\r\n\r\n _data_pack_max = 133\r\n # Tamanho maximo e esperado de valores recebidos em lista no pacote de dados\r\n processTimeList = []\r\n\r\n errorDB = []\r\n error_D_DB = []\r\n wDB = []\r\n u = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorSaturation = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorPrevious = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorSum = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n\r\n normErro = np.zeros(6,dtype=np.float64)\r\n\r\n ## Construtor da classe.\r\n # @param self O ponteiro do objeto.\r\n # @param delta_DH Os dados de calibração da matriz Denavit-Hartenberg do robô a ser controlado. 
\r\n def __init__(self, delta_DH = np.zeros((5,6))):\r\n self.delta_standard_DH = delta_DH\r\n\r\n self._effective_a = self._standard_DH[0,:] + self.delta_standard_DH[0,:]\r\n self._effective_alpha = self._standard_DH[1,:] + self.delta_standard_DH[1,:]\r\n self._effective_d = self._standard_DH[2,:] + self.delta_standard_DH[2,:]\r\n self._effective_q = np.array(self._standard_DH[3,:] + self.delta_standard_DH[3,:])\r\n \r\n # Os dados efetivos equivalem aos dados esperados do UR5 mais os dados de calibração do robô específico.\r\n\r\n Rot_x_1 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,0]), -np.sin(self._effective_alpha[0,0]), 0], [0, np.sin(self._effective_alpha[0,0]), np.cos(self._effective_alpha[0,0]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_2 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,1]), -np.sin(self._effective_alpha[0,1]), 0], [0, np.sin(self._effective_alpha[0,1]), np.cos(self._effective_alpha[0,1]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_3 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,2]), -np.sin(self._effective_alpha[0,2]), 0], [0, np.sin(self._effective_alpha[0,2]), np.cos(self._effective_alpha[0,2]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_4 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,3]), -np.sin(self._effective_alpha[0,3]), 0], [0, np.sin(self._effective_alpha[0,3]), np.cos(self._effective_alpha[0,3]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_5 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,4]), -np.sin(self._effective_alpha[0,4]), 0], [0, np.sin(self._effective_alpha[0,4]), np.cos(self._effective_alpha[0,4]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_6 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,5]), -np.sin(self._effective_alpha[0,5]), 0], [0, np.sin(self._effective_alpha[0,5]), np.cos(self._effective_alpha[0,5]), 0], [ 0, 0, 0, 1]])\r\n\r\n Trans_d_1 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,0]], [0, 0, 0, 1]])\r\n Trans_d_2 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,1]], [0, 0, 0, 1]])\r\n Trans_d_3 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,2]], [0, 0, 0, 1]])\r\n Trans_d_4 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,3]], [0, 0, 0, 1]])\r\n Trans_d_5 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,4]], [0, 0, 0, 1]])\r\n Trans_d_6 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,5]], [0, 0, 0, 1]])\r\n\r\n Trans_a_1 = np.mat([[1, 0, 0, self._effective_a[0,0]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_2 = np.mat([[1, 0, 0, self._effective_a[0,1]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_3 = np.mat([[1, 0, 0, self._effective_a[0,2]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_4 = np.mat([[1, 0, 0, self._effective_a[0,3]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_5 = np.mat([[1, 0, 0, self._effective_a[0,4]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_6 = np.mat([[1, 0, 0, self._effective_a[0,5]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n\r\n self._A_0_1 = Trans_d_1 * Trans_a_1 * Rot_x_1\r\n self._A_0_2 = Trans_d_2 * Trans_a_2 * Rot_x_2\r\n self._A_0_3 = Trans_d_3 * Trans_a_3 * Rot_x_3\r\n self._A_0_4 = Trans_d_4 * Trans_a_4 * Rot_x_4\r\n self._A_0_5 = Trans_d_5 * Trans_a_5 * Rot_x_5\r\n self._A_0_6 = Trans_d_6 * Trans_a_6 * Rot_x_6\r\n # Transformações comuns, indiferentes a movimentação, utilizadas em cálculos futuros.\r\n\r\n return\r\n ## Método que recebe e configura o pacote de dados do robô.\r\n # @param self O ponteiro do 
objeto.\r\n # @param data O pacote de dados recebido pela conexão Ethernet com o robô.\r\n def setRobotData(self, data):\r\n size = len(data)\r\n self._robot_data = []\r\n # O primeiro dado recebido, de tempo, é um inteiro de 4 bytes.\r\n self._robot_data.append(struct.unpack('!i', data[0:4]))\r\n i = 4\r\n # O resto dos dados recebidos vem em formato de double de 8 bytes.\r\n while i < size:\r\n self._robot_data.append(struct.unpack('!d', data[i:i+8])[0])\r\n i += 8\r\n # Já atualiza os dados de juntas do robô.\r\n if (size < (4+(34*8))):\r\n print(\"[WARNING] Data size smaller than expected. Bytes: \" + str(size))\r\n return\r\n\r\n self._effective_q = np.array(self._robot_data[32:38]) + self.delta_standard_DH[3,:]\r\n return \r\n # setRobotData recebe o pacote de 1060 bytes e os separa nos 160 valores da lista de dados.\r\n\r\n def setRobotDataRTDE(self, data):\r\n\r\n #print(data.actual_TCP_pose)\r\n self._robot_data[1] = np.asarray(data.timestamp, dtype = np.float64)\r\n self._robot_data[2:8] = np.asarray(data.target_q, dtype = np.float64)\r\n self._robot_data[8:14] = np.asarray(data.target_qd, dtype = np.float64)\r\n\r\n self._robot_data[32:38] = np.asarray(data.actual_q, dtype = np.float64)\r\n self._robot_data[38:44] = np.asarray(data.actual_qd, dtype = np.float64)\r\n\r\n self._robot_data[56:62] = np.asarray(data.actual_TCP_pose, dtype = np.float64)\r\n\r\n self._robot_data[62:68] = np.asarray(data.actual_TCP_speed, dtype = np.float64)\r\n self._robot_data[68:74] = np.asarray(data.actual_TCP_force, dtype = np.float64)\r\n\r\n self._robot_data[74:80] = np.asarray(data.target_TCP_pose, dtype = np.float64)\r\n self._robot_data[80:86] = np.asarray(data.target_TCP_speed, dtype = np.float64)\r\n\r\n self._robot_data[102] = np.asarray(data.safety_mode, dtype = np.int32)\r\n\r\n self._robot_data[132] = np.asarray(data.runtime_state, dtype = np.uint32)\r\n\r\n\r\n\r\n q = np.asarray(data.actual_q)\r\n\r\n self._effective_q = q + self.delta_standard_DH[3,:]\r\n # <field name=\"timestamp\" type=\"DOUBLE\"/>\r\n # <field name=\"target_q\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_qd\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_qdd\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_current\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_moment\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_q\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_qd\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_current\" type=\"VECTOR6D\"/>\r\n # <field name=\"joint_control_output\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_pose\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_speed\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_force\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_TCP_pose\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_TCP_speed\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_digital_input_bits\" type=\"UINT64\"/>\r\n # <field name=\"joint_temperatures\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_execution_time\" type=\"DOUBLE\"/>\r\n # <field name=\"robot_mode\" type=\"INT32\"/>\r\n # <field name=\"joint_mode\" type=\"VECTOR6INT32\"/>\r\n # <field name=\"safety_mode\" type=\"INT32\"/>\r\n # <field name=\"actual_tool_accelerometer\" type=\"VECTOR3D\"/>\r\n # <field name=\"speed_scaling\" type=\"DOUBLE\"/>\r\n # <field name=\"target_speed_fraction\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_momentum\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_main_voltage\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_robot_voltage\" type=\"DOUBLE\"/>\r\n # <field 
name=\"actual_robot_current\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_joint_voltage\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_digital_output_bits\" type=\"UINT64\"/>\r\n # <field name=\"runtime_state\" type=\"UINT32\"/>\r\n return\r\n\r\n ## Retorna verdadeiro ou falso para o estado de segurança do robô.\r\n # @param self O ponteiro do objeto.\r\n def checkSafety(self):\r\n try:\r\n if self._robot_data[102] == 1:\r\n safety = True\r\n else:\r\n safety = False\r\n return safety\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # checkSafety verifica se a variável de segurança do robô está apta a operar\r\n\r\n ## Retorna verdadeiro ou falso para o estado de operação do robô.\r\n # @param self O ponteiro do objeto.\r\n def programStateCheck(self):\r\n try:\r\n if self._robot_data[132] == 1:\r\n state = True\r\n else:\r\n state = False\r\n return state\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # programStateCheck verifica se a variável de estado do robô está apta a operar\r\n\r\n ## Imprime em prompt de comando as 133 informações recebidas pelo pacote de dados do robô.\r\n # @param self O ponteiro do objeto.\r\n def printRobotData(self):\r\n size = len(self._robot_data)\r\n\r\n if size == self._datapackmax:\r\n print(\"[INFO] Message Size in Bytes: \" + str(self._robot_data[0]))\r\n print(\"[INFO] Time: \" + str(self._robot_data[1]))\r\n print(\"[INFO] q target\" + str(self._robot_data[2:8]))\r\n print(\"[INFO] qd target\" + str(self._robot_data[8:14]))\r\n print(\"[INFO] qdd target\" + str(self._robot_data[14:20]))\r\n print(\"[INFO] I target\" + str(self._robot_data[20:26]))\r\n print(\"[INFO] M target\" + str(self._robot_data[26:32]))\r\n print(\"[INFO] q actual\" + str(self._robot_data[32:38]))\r\n print(\"[INFO] qd actual\" + str(self._robot_data[38:44]))\r\n print(\"[INFO] I actual\" + str(self._robot_data[44:50]))\r\n print(\"[INFO] I control\" + str(self._robot_data[50:56]))\r\n print(\"[INFO] Tool Vector Actual\" + str(self._robot_data[56:62]))\r\n print(\"[INFO] TCP Speed Actual\" + str(self._robot_data[62:68]))\r\n print(\"[INFO] TCP Force\" + str(self._robot_data[68:74]))\r\n print(\"[INFO] Tool Vector Target\" + str(self._robot_data[74:80]))\r\n print(\"[INFO] TCP Speed Target\" + str(self._robot_data[80:86]))\r\n print(\"[INFO] digital input bits\" + str(self._robot_data[86]))\r\n print(\"[INFO] Motor Temperatures\" + str(self._robot_data[87:93]))\r\n print(\"[INFO] Controller Timer\" + str(self._robot_data[93]))\r\n print(\"[INFO] Test Value\" + str(self._robot_data[94]))\r\n print(\"[INFO] Robot Mode\" + str(self._robot_data[95]))\r\n print(\"[INFO] Joint Modes\" + str(self._robot_data[96:102]))\r\n print(\"[INFO] Safety Mode\" + str(self._robot_data[102]))\r\n print(\"[INFO] Tool Acceleration Values\" + str(self._robot_data[109:112]))\r\n print(\"[INFO] Speed Scaling\" + str(self._robot_data[118]))\r\n print(\"[INFO] Linear Momentum Norm\" + str(self._robot_data[119]))\r\n print(\"[INFO] V Main\" + str(self._robot_data[122]))\r\n print(\"[INFO] V Robot\" + str(self._robot_data[123]))\r\n print(\"[INFO] I Robot\" + str(self._robot_data[124]))\r\n print(\"[INFO] V actual\" + str(self._robot_data[125:131]))\r\n print(\"[INFO] Digital Outputs\" + str(self._robot_data[131]))\r\n print(\"[INFO] Program State\" + str(self._robot_data[132]))\r\n # Exceção caso o pacote venha menor que 1060 Bytes\r\n else:\r\n print(\"[WARNING] Size of data smaller than expected: \", size)\r\n return\r\n # 
printRobotData imprime em tela todos os valores do pacote de dados traduzido, para depuração\r\n\r\n ## Retorna o vetor de posição do efetuador do robô, em formato [x, y, z, rx, ry, rz].\r\n # @param self O ponteiro do objeto.\r\n def getPositionTarget(self):\r\n try:\r\n array = np.array(self._robot_data[74:80])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getPosition retorna a posição atual do vetor da ferramenta.\r\n\r\n def getPosition(self):\r\n try:\r\n array = np.array(self._robot_data[56:62])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getPosition retorna a posição atual do vetor da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeed(self):\r\n try:\r\n array = np.array(self._robot_data[62:68])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeedTarget(self):\r\n try:\r\n array = np.array(self._robot_data[80:86])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade modular do efetuador do robô, em formato [v].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeedMod(self):\r\n try:\r\n v = np.sqrt(self._robot_data[62]*self._robot_data[62] + self._robot_data[63]*self._robot_data[63] + self._robot_data[64]*self._robot_data[64])\r\n return v\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de posição das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointPosition(self):\r\n try:\r\n array = np.array(self._robot_data[32:38])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n\r\n ## Retorna o vetor de posição das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointPositionTarget(self):\r\n try:\r\n array = np.array(self._robot_data[2:8])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # Retorna o valor das articulações da ferramenta\r\n\r\n ## Retorna o vetor de velocidade das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointSpeed(self):\r\n try:\r\n array = np.array(self._robot_data[38:44])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointSpeedTarget(self):\r\n try:\r\n array = np.array(self._robot_data[8:14])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n def getTCPForce(self):\r\n try:\r\n array = np.array(self._robot_data[68:74])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o tempo atual do robô desde que foi ligado.\r\n # 
@param self O ponteiro do objeto.\r\n def getTime(self):\r\n return self._robot_data[1]\r\n\r\n # Retorna o valor do tempo de uso atual\r\n\r\n ## Realiza a cinemática direta do UR5 para a posição de juntas atual. O método retorna a matriz homogênea 4x4 da posição atual, ou um vetor em RV ou RPY.\r\n # @param self O ponteiro do objeto.\r\n # @param q O vetor de coordenadas de junta.\r\n # @param vector parâmetro que define se o tipo de retorno como vetor de posições em RV.\r\n # @param rpy parâmetro que, juntamente de vector, define o retorno como vetor de posições em RPY.\r\n def ur5_direct_kinematics(self, q, vector = False, rpy = False, apply_offset = False):\r\n\r\n if (apply_offset == True):\r\n # q = q + self.delta_standard_DH[3,:]\r\n q = np.squeeze(np.asarray(q + self.delta_standard_DH[3,:]))\r\n\r\n _rot_z_1 = np.mat([[np.cos(q[0]), -np.sin(q[0]), 0, 0],[np.sin(q[0]), np.cos(q[0]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_2 = np.mat([[np.cos(q[1]), -np.sin(q[1]), 0, 0],[np.sin(q[1]), np.cos(q[1]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_3 = np.mat([[np.cos(q[2]), -np.sin(q[2]), 0, 0],[np.sin(q[2]), np.cos(q[2]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_4 = np.mat([[np.cos(q[3]), -np.sin(q[3]), 0, 0],[np.sin(q[3]), np.cos(q[3]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_5 = np.mat([[np.cos(q[4]), -np.sin(q[4]), 0, 0],[np.sin(q[4]), np.cos(q[4]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_6 = np.mat([[np.cos(q[5]), -np.sin(q[5]), 0, 0],[np.sin(q[5]), np.cos(q[5]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n\r\n # Utiliza as matrizes definidas no construtor e as de rotação das juntas atuais para retornar a matriz final.\r\n self._A_1 = _rot_z_1 * self._A_0_1\r\n self._A_2 = _rot_z_2 * self._A_0_2\r\n self._A_3 = _rot_z_3 * self._A_0_3\r\n self._A_4 = _rot_z_4 * self._A_0_4\r\n self._A_5 = _rot_z_5 * self._A_0_5\r\n self._A_6 = _rot_z_6 * self._A_0_6\r\n\r\n self._H = self._A_1 * self._A_2 * self._A_3 * self._A_4 * self._A_5 * self._A_6\r\n #print self._H\r\n\r\n if (vector == False):\r\n return self._H\r\n else:\r\n vetor = tf.matrix2RotationVector(self._H[0:3,0:3])\r\n array = np.array([self._H[0,3], self._H[1,3], self._H[2,3]], float)\r\n vetor = np.hstack((array,vetor))\r\n #print vetor\r\n if (rpy == False):\r\n return vetor\r\n else:\r\n vetor[3:6] = tf.rotationVector2RollPitchYaw(vetor[3:6])\r\n return vetor\r\n # ur5_direct_kinematics executa a cinemática direta do UR5 e retorna a matriz 4x4 de posição e orientação do UR5\r\n\r\n\r\n def verifyDelta(self, epsilon = 10e-6):\r\n\r\n direct = self.ur5_direct_kinematics(self.getJointPosition(), vector = True, apply_offset = True)\r\n real = self.getPosition()\r\n\r\n diff = tf.computeDifference(real,direct)\r\n\r\n print(\"[INFO] Direct Kinematics calculated with Delta: \" + str(direct))\r\n print(\"[INFO] Direct Kinematics real: \" + str(real))\r\n\r\n error = norm(diff[0:3])\r\n\r\n print(\"[INFO] Error: \", error)\r\n\r\n\r\n if (error < epsilon):\r\n print(\"[INFO] Correct Delta Matrix!\")\r\n return True\r\n else:\r\n print(\"[WARNING] Incorrect Delta Matrix!\")\r\n return False\r\n\r\n\r\n def _DH(self, a, alpha, d, theta):\r\n\r\n Td = np.asmatrix(np.eye(4))\r\n Td[2,3] = d\r\n Ta = np.asmatrix(np.eye(4))\r\n Ta[0,3] = a\r\n Rtheta = tf.Rot_z(theta)\r\n Rtheta = np.mat([[Rtheta[0,0], Rtheta[0,1], Rtheta[0,2], 0], [Rtheta[1,0], Rtheta[1,1], Rtheta[1,2], 0], [Rtheta[2,0], Rtheta[2,1], Rtheta[2,2], 0], [0,0,0,1]])\r\n Ralpha = tf.Rot_x(alpha)\r\n Ralpha = np.mat([[Ralpha[0,0], Ralpha[0,1], Ralpha[0,2], 
0], [Ralpha[1,0], Ralpha[1,1], Ralpha[1,2], 0], [Ralpha[2,0], Ralpha[2,1], Ralpha[2,2], 0], [0,0,0,1]])\r\n\r\n G = Td * Rtheta * Ta * Ralpha\r\n\r\n return G\r\n # _DH retorna uma matrix 4x4 de junta especifica, utilizado na cinemática inversa analítica\r\n\r\n\r\n def _analytic_ur5_inverse_kinematics(self, p):\r\n\r\n\r\n rvMatrix = tf.rotationVector2Matrix(p[3:6])\r\n\r\n gd = np.mat(([[rvMatrix[0,0], rvMatrix[0,1], rvMatrix[0,2], p[0]], [rvMatrix[1,0], rvMatrix[1,1], rvMatrix[1,2], p[1]], [rvMatrix[2,0], rvMatrix[2,1], rvMatrix[2,2], p[2]], [0, 0, 0, 1]]))\r\n\r\n theta = np.zeros((6, 8))\r\n\r\n d1 = self._standard_DH[2,0]\r\n d2 = self._standard_DH[2,1]\r\n d3 = self._standard_DH[2,2]\r\n d4 = self._standard_DH[2,3]\r\n d5 = self._standard_DH[2,4]\r\n d6 = self._standard_DH[2,5]\r\n\r\n a1 = self._standard_DH[0,0]\r\n a2 = self._standard_DH[0,1]\r\n a3 = self._standard_DH[0,2]\r\n a4 = self._standard_DH[0,3]\r\n a5 = self._standard_DH[0,4]\r\n a6 = self._standard_DH[0,5]\r\n\r\n alpha1 = self._standard_DH[1,0]\r\n alpha2 = self._standard_DH[1,1]\r\n alpha3 = self._standard_DH[1,2]\r\n alpha4 = self._standard_DH[1,3]\r\n alpha5 = self._standard_DH[1,4]\r\n alpha6 = self._standard_DH[1,5]\r\n\r\n # Calculating theta1\r\n p05 = gd * np.mat([[0], [0], [-d6], [1]])\r\n p05 = p05 - np.mat([[0], [0], [0], [1]])\r\n psi = np.arctan2(p05[1], p05[0])\r\n p05xy = np.sqrt(p05[1]*p05[1] + p05[0]*p05[0])\r\n if (d4 > p05xy):\r\n print (\"[WARNING] No solution for Theta1: d4 > P05xy\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n d4 = p05xy - 1e-10\r\n try:\r\n phi = np.arccos(d4 / p05xy)\r\n except:\r\n print(\"[ERROR] Division by zero: \" + str(p05xy))\r\n return None\r\n theta[0, 0:4] = np.radians(90) + psi + phi\r\n theta[0, 4:8] = np.radians(90) + psi - phi\r\n theta = np.real(theta)\r\n\r\n # Calculating theta5\r\n cols = np.array([0, 4])\r\n for i in range(0, cols.size):\r\n c = cols[i];\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n except:\r\n print(\"[ERROR] Could not find inverse: \" + str(self._DH(a1, alpha1, d1, theta[0,c])))\r\n return None\r\n T16 = T10 * gd\r\n p16z = T16[2,3]\r\n try:\r\n if (((p16z-d4)/d6) > 1):\r\n print (\"[WARNING] No solution for Theta5: (p16z-d4)/d6) > 1\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n d6 = (p16z-d4) + 1e-10\r\n t5 = np.arccos((p16z-d4)/d6)\r\n except:\r\n print(\"[ERROR] Division by zero: \" + str(d6))\r\n return None\r\n theta[4, c:c+2] = t5\r\n theta[4, c+2:c+4] = -t5\r\n theta = np.real(theta)\r\n\r\n # Calculating theta6\r\n cols = np.array([0, 2, 4, 6])\r\n for i in range(0, cols.size):\r\n c = cols[i]\r\n T01 = self._DH(a1, alpha1, d1, theta[0,c])\r\n try:\r\n T61 = inv(gd) * T01\r\n except:\r\n print(\"[ERROR] Could not find inverse: \" + str(gd))\r\n return None\r\n T61zy = T61[1, 2]\r\n T61zx = T61[0, 2]\r\n t5 = theta[4, c]\r\n if (np.sin(t5) == 0):\r\n theta[5, c:c+2] = 0\r\n else: \r\n theta[5, c:c+2] = np.arctan2(-T61zy/np.sin(t5), T61zx/np.sin(t5))\r\n theta = np.real(theta)\r\n\r\n # Calculating theta3\r\n cols = np.array([0, 2, 4, 6])\r\n for i in range (0, cols.size):\r\n c = cols[i]\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))\r\n T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 1, \" + str(T10))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 2, \" + str(T65))\r\n return 
None\r\n except T54:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 3, \" + str(T54))\r\n return None\r\n T14 = T10 * gd * T65 * T54\r\n p13 = T14 * np.mat([[0], [-d4], [0], [1]])\r\n p13 = p13 - np.mat([[0], [0], [0], [1]])\r\n p13norm2 = norm(p13) * norm(p13)\r\n arg = (p13norm2-a2*a2-a3*a3)/(2*a2*a3)\r\n if (arg > 1 or arg < -1):\r\n print (\"[WARNING] No solution for Theta3: arg < -1 or arg > 1\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n if (arg >1):\r\n arg = 1 - 1e-10\r\n else:\r\n arg = -1 + 1e-10\r\n t3p = np.arccos(arg)\r\n theta[2, c] = t3p\r\n theta[2, c+1] = -t3p\r\n theta = np.real(theta)\r\n\r\n # Calculating theta2 and theta4\r\n cols = np.array([0, 1, 2, 3, 4, 5, 6, 7])\r\n for i in range (0, cols.size):\r\n c = cols[i]\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))\r\n T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta2 inverse 1, \" + str(T10))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta2, inverse 2, \" + str(T65))\r\n return None\r\n except T54:\r\n print(\"[ERROR] Could not find inverse: Theta2, inverse 3, \" + str(T54))\r\n return None\r\n T14 = T10 * gd * T65 * T54\r\n p13 = T14 * np.mat([[0], [-d4], [0], [1]]) - np.mat([[0], [0], [0], [1]])\r\n p13norm = norm(p13)\r\n theta[1, c] = -np.arctan2(p13[1], -p13[0])+np.arcsin(a3*np.sin(theta[2,c])/p13norm)\r\n try:\r\n T32 = inv(self._DH(a3, alpha3, d3, theta[2,c]))\r\n T21 = inv(self._DH(a2, alpha2, d2, theta[1,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta4 inverse 1, \" + str(T32))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta4, inverse 2, \" + str(T21))\r\n return None\r\n T34 = T32 * T21 * T14;\r\n theta[3, c] = np.arctan2(T34[1,0], T34[0,0])\r\n theta = np.real(theta)\r\n\r\n for i in range (0, 5):\r\n for j in range(0,7):\r\n if theta[i,j] > np.pi:\r\n theta[i,j] -= 2*np.pi\r\n elif theta[i,j] < -np.pi:\r\n theta[i,j] += 2*np.pi\r\n\r\n return theta\r\n # _analytic_ur5_inverse_kinematics retorna a matriz 6x8 com as 8 possiveis posições de 6 angulos dos motores que inferem na posição atual do UR5\r\n\r\n ## Cálcula a matriz Jacobiana da relação entre juntas e vetor de pose.\r\n # @param self O ponteiro do objeto.\r\n # @param q_Past Um vetor de juntas inicial a ser aplicado a derivada.\r\n # @param deltaTheta Um vetor de diferença de juntas em um tempo infinitesimal para o cálculo de derivada.\r\n def jacobian(self, q_Past, deltaTheta, rpy = False):\r\n\r\n jacobian_matrix = np.zeros((6,6))\r\n FK_init = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_Past.transpose() + self.delta_standard_DH[3,:])), vector = True, rpy = rpy)\r\n step = deltaTheta\r\n NaN_check = False\r\n\r\n for i in range(0,6):\r\n q_aux = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_aux[i] += step[i]\r\n q_aux = q_Past + q_aux\r\n q_aux = np.squeeze(np.asarray(q_aux.transpose() + self.delta_standard_DH[3,:]))\r\n FK_next = self.ur5_direct_kinematics(q_aux, vector = True, rpy = rpy)\r\n jacobian_matrix[i,:] = (tf.computeDifference(FK_next, FK_init)/(step[i]))\r\n if(np.any(np.isnan(jacobian_matrix[i,:]))):\r\n jacobian_matrix[i,:] = np.zeros(6)\r\n NaN_check = True\r\n \r\n if(NaN_check):\r\n print(\"[WARNING] NaN found on Jacobian.\")\r\n\r\n return jacobian_matrix.transpose()\r\n\r\n def jacobian2(self, q):\r\n\r\n jacobian_matrix = np.zeros((6,6))\r\n\r\n # Atualiza as 
matrizes\r\n\r\n self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])))\r\n\r\n # R^0_{i-1}dot(0,0,1)cross(d^0_n - d^0_{i-1})\r\n\r\n auxRow = np.array([[0],[0],[1]])\r\n # Row 1\r\n\r\n jacobian_matrix[0:3,0] = np.cross(np.dot(np.eye(3),auxRow),self._H[0:3,3],axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,0] = np.dot(np.eye(3),auxRow).transpose()\r\n\r\n # Row 2\r\n \r\n jacobian_matrix[0:3,1] = np.cross(np.dot(self._A_1[0:3,0:3],auxRow),(self._H[0:3,3] - self._A_1[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,1] = np.dot(self._A_1[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 3\r\n\r\n aux = self._A_1 * self._A_2\r\n\r\n jacobian_matrix[0:3,2] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,2] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 4\r\n\r\n aux = aux * self._A_3\r\n\r\n jacobian_matrix[0:3,3] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,3] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 5\r\n\r\n aux = aux * self._A_4\r\n\r\n jacobian_matrix[0:3,4] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,4] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 6\r\n\r\n aux = aux * self._A_5\r\n\r\n jacobian_matrix[0:3,5] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,5] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n return jacobian_matrix\r\n\r\n def jacobianEndEffectorReference(self,jacobian):\r\n\r\n fowardKinematics = self._H\r\n\r\n jacobianTransform = np.eye(6)\r\n #jacobianTransform[0:3,0:3] = fowardKinematics[0:3,0:3].transpose()\r\n jacobianTransform[3:6,3:6] = fowardKinematics[0:3,0:3].transpose()\r\n\r\n newJacobian = np.dot(jacobianTransform,jacobian)\r\n\r\n return newJacobian\r\n\r\n\r\n def jacobianAnalytic(self, q):\r\n\r\n pose = self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])),vector = True, rpy = True)\r\n\r\n jacobian = self.jacobian2(q)\r\n jacobian = self.jacobianEndEffectorReference(jacobian)\r\n\r\n # r = pose[3]\r\n # p = pose[4]\r\n # #y = pose[5]\r\n\r\n # B = np.array([[1,0,np.sin(p)],[0,np.cos(r),-np.cos(p)*np.sin(r)],[0,np.sin(r),np.cos(p)*np.cos(r)]])\r\n # invB = inv(B)\r\n # auxMat = np.eye(6)\r\n # auxMat[3:6,3:6] = invB\r\n\r\n # jacobianAnalytic = np.dot(auxMat,jacobian)\r\n\r\n #jacobianAnalytic = self.jacobianEndEffectorReference(jacobianAnalytic)\r\n\r\n return jacobian\r\n\r\n ## Esse método realiza a cinemática inversa de uma posição espacial para uma das oito configurações possíveis no espaço utilizando aproximação numérica por Newton-Raphson. \r\n # Ele retorna um vetor com as seis juntas que representam a configuração escolhida.\r\n # @param self O ponteiro do objeto.\r\n # @param cartesian_position Vetor [1x6] da posição a ser transformada.\r\n # @param chosen_theta Configuração escolhida. 
Default = 2.\r\n # @param theta Um parametro que pode ser usado como posição proxima inicial para aproximação numérica\r\n # @param rpy Um parâmetro que especifica se a posição cartesiana dada foi em RV ou RPY.\r\n def ur5_inverse_kinematics_newthon_raphson(self, cartesian_position, chosen_theta = 2, theta = np.zeros(6), rpy = False):\r\n\r\n #t = time.clock()\r\n\r\n if (rpy == True):\r\n cartesian_position[3:6] = tf.rollPitchYaw2RotationVector(cartesian_position[3:6])\r\n # A cinemática inversa analitica é inicialmente calculada\r\n if (np.all(theta == 0)):\r\n theta = self._analytic_ur5_inverse_kinematics(cartesian_position)\r\n joint_analytic_IK = theta[:,chosen_theta]\r\n else:\r\n joint_analytic_IK = theta\r\n\r\n NaN_check = np.isnan(joint_analytic_IK) \r\n\r\n if (np.any(NaN_check)):\r\n joint_analytic_IK = self.getJointPosition()\r\n print (\"[WARNING] Nan position found in analytic IK solution, using Actual Joint Position as start position.\")\r\n\r\n # O vetor de juntas inicial a ser corrigido numéricamente é escolhido\r\n \r\n #print joint_analytic_IK\r\n\r\n q_i = np.array([0,0,0,0,0,0], float)\r\n q_i += joint_analytic_IK\r\n \r\n joint_analytic_IK = joint_analytic_IK + self.delta_standard_DH[3,:]\r\n joint_analytic_IK = np.squeeze(np.asarray(joint_analytic_IK))\r\n FK = self.ur5_direct_kinematics(joint_analytic_IK, True)\r\n\r\n\r\n # Transformação de RV para RPY é realizada para se iniciar o cálculo.\r\n cartesian_position_rpy = cartesian_position\r\n erro = tf.computeDifference(cartesian_position_rpy, FK)\r\n \r\n norm_erro = norm(erro)\r\n\r\n episilon = 0.0001*0.0001\r\n max_iteractions = 500\r\n iteraction = 1\r\n q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])\r\n erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])\r\n\r\n delta_theta = np.ones(6)*0.000006\r\n delta_theta = np.array([[delta_theta[0]], [delta_theta[1]],[delta_theta[2]], [delta_theta[3]],[delta_theta[4]], [delta_theta[5]]])\r\n while (norm_erro > episilon):\r\n # Calcula\r\n j = self.jacobian(q_i, delta_theta)\r\n try:\r\n jt = pinv(j)\r\n except:\r\n print(\"[WARNING] Pseudo Inverse with SVD diverged\")\r\n jt = np.dot(j.transpose(),inv(np.dot(j,j.transpose())))\r\n\r\n q_in = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_in = q_i + np.dot(jt,erro)\r\n\r\n delta_theta = q_in - q_i\r\n q_i = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_i += q_in\r\n q_i = np.squeeze(np.asarray(q_i.transpose()))\r\n FK = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_i + self.delta_standard_DH[3,:])), True)\r\n erro = tf.computeDifference(cartesian_position_rpy, FK)\r\n norm_erro = norm(erro)\r\n\r\n erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])\r\n \r\n q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])\r\n \r\n iteraction += 1\r\n if (iteraction > max_iteractions):\r\n print (\"[ERROR] Maximum interactions reached.\")\r\n break\r\n\r\n #t2 = time.clock()\r\n\r\n #print (\"Tempo de convergencia NRa: \", t2 - t)\r\n\r\n q_i = q_i.transpose()\r\n q_aux = np.array([q_i[0,0],q_i[0,1],q_i[0,2],q_i[0,3],q_i[0,4],q_i[0,5]], float)\r\n\r\n return q_aux\r\n\r\n ## Esse método realiza a cinemática inversa de uma posição espacial para uma das oito configurações possíveis no espaço utilizando aproximação numérica por Cyclic Coordinate Descent. \r\n # Ele retorna um vetor com as seis juntas que representam a configuração escolhida. 
Obs.: Lento.\r\n # @param self O ponteiro do objeto.\r\n # @param cartesian_position Vetor [1x6] da posição a ser transformada.\r\n # @param chosen_theta Configuração escolhida. Default = 2.\r\n \r\n def ur5_inverse_kinematics_ccd(self, cartesian_position, chosen_theta = 2):\r\n\r\n # A cinemática inversa analitica é inicialmente calculada\r\n\r\n t = time.clock()\r\n\r\n theta = self._analytic_ur5_inverse_kinematics(cartesian_position)\r\n\r\n # O vetor de juntas inicial a ser corrigido numéricamente é escolhido\r\n joint_analytic_IK = theta[:,chosen_theta]\r\n\r\n self._effective_q = joint_analytic_IK + self.delta_standard_DH[3,:]\r\n Initial_DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)\r\n Initial_DK[3:6] = tf.rotationVector2RollPitchYaw(Initial_DK[3:6])\r\n # Cyclic Coordinate Descent\r\n cartesian_position_rpy = np.hstack((cartesian_position[0:3], tf.rotationVector2RollPitchYaw(cartesian_position[3:6])))\r\n\r\n # Constantes a serem utilizadas\r\n epsilon = 0.0001\r\n quad_epsilon = epsilon*epsilon\r\n joint_count = 5\r\n max_interection = 5000\r\n interection_count = 1\r\n interection_count_joint = 1\r\n direction = 1\r\n min_step = 0.000017\r\n max_step = 0.1\r\n alpha_step = max_step\r\n\r\n Radius = np.sqrt(cartesian_position[0:3].transpose()*cartesian_position[0:3])\r\n\r\n joint_interact = np.zeros(6)\r\n joint_interact += joint_analytic_IK\r\n\r\n # Erros Iniciais\r\n\r\n Error_Position = cartesian_position[0:3] - Initial_DK[0:3]\r\n Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))\r\n\r\n Error_Rotation = tf.computeDifference(cartesian_position_rpy[3:6],Initial_DK[3:6], True)\r\n Linear_Rotation_Error = Radius*Error_Rotation\r\n Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))\r\n\r\n erro_quad = (Mean_Position + Mean_Rotation)/2\r\n\r\n erro_quad_aux = erro_quad\r\n\r\n # Correção numérica.\r\n while erro_quad > quad_epsilon:\r\n \r\n joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n\r\n self._effective_q = joint_interact + self.delta_standard_DH[3,:]\r\n\r\n DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)\r\n DK[3:6] = rotationVector2RollPitchYaw(DK[3:6])\r\n\r\n Error_Position = cartesian_position[0:3] - DK[0:3] \r\n Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))\r\n\r\n Error_Rotation = computeDifference(cartesian_position_rpy[3:6],DK[3:6], True)\r\n Linear_Rotation_Error = Radius*Error_Rotation\r\n Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))\r\n\r\n erro_quad = (Mean_Position + Mean_Rotation)/2\r\n\r\n if erro_quad > erro_quad_aux:\r\n if interection_count_joint == 1:\r\n direction = -1*direction\r\n joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n interection_count_joint = 0\r\n error_direction = erro_quad\r\n else:\r\n if alpha_step > min_step:\r\n joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n alpha_step = alpha_step/2\r\n interection_count_joint = 1\r\n else:\r\n joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n alpha_step = max_step\r\n interection_count_joint = 1\r\n joint_count -=1\r\n if joint_count < 0:\r\n joint_count = 5\r\n interection_count +=1\r\n else:\r\n alpha_step = alpha_step/2\r\n interection_count_joint = 1\r\n erro_quad_aux = erro_quad\r\n\r\n #if interection_count_joint == 1:\r\n 
#if erro_quad < erro_quad_aux:\r\n #erro_quad_aux = erro_quad\r\n #interection_count_joint += 1\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step/2\r\n #else:\r\n #direction = -1*direction\r\n #joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n #interection_count_joint += 1\r\n #else:\r\n #if erro_quad < erro_quad_aux:\r\n #erro_quad_aux = erro_quad\r\n #interection_count_joint += 1\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step/2\r\n #else:\r\n #if (alpha_step < 0.000017)\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step*2\r\n #joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n #alpha_step = np.pi\r\n #interection_count_joint = 1\r\n #joint_count -=1\r\n #if joint_count < 0:\r\n #joint_count = 5\r\n #interection_count +=1\r\n #else: \r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #interection_count_joint = 1\r\n #joint_count -=1\r\n #if joint_count < 0:\r\n #joint_count = 5\r\n #interection_count +=1\r\n if interection_count > max_interection:\r\n print (\"[ERROR] Maximum interations reached.\")\r\n break\r\n\r\n t2 = time.clock()\r\n\r\n print (\"[INFO] CCD Total time: \"+ str(t2 - t))\r\n\r\n return joint_interact\r\n\r\n\r\n def getMeanValueVector(self, vectorArray):\r\n\r\n print(\"[INFO] Mean Value: Array, Mean, \" + str(vectorArray) + \", \" + str(np.mean(vectorArray, axis = 0, dtype=np.float64)))\r\n\r\n\r\n def controlLoopTranspose(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n self.normErro = norm(poseError)\r\n\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = np.dot(np.dot(jacob.transpose(),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def controlLoopPseudoInverse(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n self.normErro = norm(poseError)\r\n\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = 
np.dot(np.dot(pinv(jacob),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n\r\n def controlLoopInverse(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n\r\n self.normErro = norm(poseError)\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = np.dot(np.dot(inv(jacob),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def controlLoopDLS(self, desiredPose, poseActual = None, step = 0.008, jointSpeedReference = np.array([0, 0, 0, 0, 0, 0]), cartesianSpeedReference = np.array([0, 0, 0, 0, 0, 0])):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n #print(self.getPosition())\r\n #print(self.getJointPosition())\r\n\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n print('[INFO][ControlLoopDLS] NaN found on control')\r\n np.nan_to_num(rotationError, False)\r\n\r\n # Error Calculation\r\n\r\n #Kp\r\n error = np.hstack((poseError, rotationError))\r\n\r\n #Kd\r\n error_D = (error - self.errorPrevious)/step\r\n self.error_D_DB.append(error_D)\r\n self.errorPrevious = error\r\n errorFiltered = butter_lowpass_filter(np.asarray(self.error_D_DB, dtype=np.float32), 3, 125, order=2)\r\n error_D = errorFiltered[errorFiltered.shape[0]-1]\r\n \r\n #Ki\r\n self.errorSum = self.errorSum + error\r\n # for i in range(0,6):\r\n # if (self.errorSum[i] > 0.1):\r\n # self.errorSum[i] = 0.1\r\n # elif(self.errorSum[i] < -0.1):\r\n # self.errorSum[i] = -0.1\r\n\r\n # print('Error Sum ' + str(self.errorSum))\r\n # if (len(self.errorDB) > 1000):\r\n # self.errorSum = self.errorSum - np.asarray(self.errorDB[len(self.errorDB) - 1000], dtype=np.float32)\r\n\r\n #DB\r\n self.normErro = norm(poseError)\r\n self.errorDB.append(error)\r\n\r\n #jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)\r\n #jacob = self.jacobian2(self.getJointPosition())\r\n jacob = self.jacobianAnalytic(self.getJointPosition())\r\n\r\n # Control\r\n\r\n Kp = 5*np.eye(6,6) #10 #5\r\n # Kp[0,0] = 1.5\r\n # Kp[1,1] = 1.5\r\n # Kp[2,2] = 1.5\r\n # Kp[0,3] = 0.2#0.5\r\n # Kp[0,4] = 0.1#0.5\r\n # Kp[0,5] = 0.1#0.5\r\n # Kp[1,3] = 0#0.5\r\n # Kp[1,4] = 0#0.5\r\n # Kp[1,5] = 0#0.5\r\n # Kp[2,3] = 0#0.5\r\n # Kp[2,4] = 0#0.5\r\n # Kp[2,5] = 0#0.5\r\n 
#Kp[3,3] = 16#0.5\r\n # Kp[3,4] = 0#0.5\r\n # Kp[3,5] = 0#0.5\r\n # Kp[4,3] = 0#0.5\r\n #Kp[4,4] = 16#0.5\r\n # Kp[4,5] = 0#0.5\r\n # Kp[5,3] = 0#0.5\r\n # Kp[5,4] = 0#0.5\r\n #Kp[5,5] = 16#0.5\r\n\r\n Kd = 2*np.eye(6,6)\r\n\r\n # Kd[3,3] = 0.1\r\n # Kd[4,4] = 0.1\r\n # Kd[5,5] = 0.1\r\n\r\n Ki = 0.25*np.eye(6,6)\r\n # Ki[3,3] = 0.00055 #0.55\r\n # Ki[4,4] = 0.00055\r\n # Ki[5,5] = 0.00055\r\n # WindupUpperLimit = np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])\r\n # WindupLowerLimit = -np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])\r\n\r\n k0 = 0.01\r\n\r\n w0 = 0.01\r\n\r\n \r\n\r\n KpControl = np.dot(Kp,error.transpose())\r\n KdControl = np.dot(Kd,error_D.transpose())\r\n KiControl = np.dot(Ki,self.errorSum.transpose())\r\n # print(KiControl)\r\n # print('\\n')\r\n # for i in range(0,6):\r\n # if (KiControl[i] > 0.02):\r\n # KiControl[i] = 0.02\r\n # elif(KiControl[i] < -0.02):\r\n # KiControl[i] = -0.02\r\n ControlSum = KpControl + cartesianSpeedReference #+ KiControl\r\n\r\n t1 = time.perf_counter()\r\n \r\n w = np.sqrt(np.linalg.det(np.dot(jacob,jacob.transpose())))\r\n\r\n if (w < w0):\r\n lamb = k0*(np.power((1 - (w/w0)),2))\r\n print('[WARNING] Near Singularity: ' + str(w))\r\n else:\r\n lamb = 0\r\n\r\n lamb2 = lamb*np.eye(6,6)\r\n invJacob = np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2))\r\n t2 = time.perf_counter()\r\n \r\n #t1 = time.perf_counter()\r\n #invJacob = inv(jacob)\r\n #t2 = time.perf_counter()\r\n\r\n\r\n JacobianProcessTime = t2 - t1\r\n self.processTimeList.append(JacobianProcessTime)\r\n \r\n\r\n\r\n self.wDB.append(w)\r\n #invJacob = jacob.transpose()\r\n jointControl = np.dot(invJacob,ControlSum) #np.dot(np.dot(np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2)),Kp),error.transpose())\r\n\r\n #jointControl = jointControl + jointSpeedReference\r\n\r\n # for i in range(0,6):\r\n # if (jointControl[i] > WindupUpperLimit[i]):\r\n # self.u[i] = WindupUpperLimit[i]\r\n # elif(jointControl[i] < WindupLowerLimit[i]):\r\n # self.u[i] = WindupLowerLimit[i]\r\n # else:\r\n # self.u[i] = jointControl[i]\r\n\r\n # self.errorSaturation = jointControl - self.u\r\n # print(self.errorSaturation)\r\n\r\n # print('Error Sum windup' + str((np.dot(jacob,jointControl) - KpControl)/Ki[0,0]))\r\n\r\n # for i in range(0,6):\r\n # if (jointControl[i] > 0.4):\r\n # jointControl[i] = 0.4\r\n # elif (jointControl[i] < -0.4):\r\n # jointControl[i] = -0.4\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def speedTransform(self, desiredSpeed, q = None, step = 0.008):\r\n\r\n if (q == None):\r\n q = self.getJointPosition()\r\n\r\n #jacobian = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)\r\n #jacobian = self.jacobian2(q)\r\n jacobian = self.jacobianAnalytic(q)\r\n\r\n jointSpeed = np.dot(inv(jacobian),desiredSpeed.transpose())\r\n\r\n return jointSpeed\r\n\r\ndef butter_lowpass(cutoff, fs, order=5):\r\n nyq = 0.5 * fs\r\n normal_cutoff = cutoff / nyq\r\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\r\n return b, a\r\n\r\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\r\n b, a = butter_lowpass(cutoff, fs, order=order)\r\n y = lfilter(b, a, data)\r\n return y"
] | [
[
"numpy.ones",
"numpy.any",
"numpy.asarray",
"numpy.nan_to_num",
"scipy.signal.butter",
"numpy.concatenate",
"numpy.arccos",
"numpy.cos",
"numpy.isnan",
"numpy.mat",
"numpy.mean",
"numpy.eye",
"numpy.zeros",
"numpy.hstack",
"numpy.all",
"numpy.power",
"numpy.linalg.pinv",
"numpy.array",
"numpy.linalg.norm",
"numpy.arctan2",
"numpy.radians",
"numpy.linalg.inv",
"scipy.signal.lfilter",
"numpy.sqrt",
"numpy.sin",
"numpy.dot",
"numpy.real"
]
] |
piojanu/tf_utils | [
"169bd3334dd11954cf8f411f2c918f76cd609fab"
] | [
"samples/mnist_vae.py"
] | [
"import argparse\nimport io\nimport os.path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tqdm import tqdm\n\nfrom tf_utils import AttrDict, attrdict_from_yaml, lazy_property_with_scope, share_variables\n\ntfd = tfp.distributions\ntfl = tf.layers\n\n\nclass Model(object):\n def __init__(self, data, config):\n # Initialize attributes\n self.data = data\n self.data_shape = list(self.data.shape[1:])\n self.config = config\n\n # Build model\n self.prior\n self.posterior\n self.code\n self.likelihood\n self.sample\n self.samples\n self.log_prob\n self.divergence\n self.elbo\n self.loss\n self.optimiser\n self.gradients\n self.optimise\n\n # Define summaries\n self.summary\n self.images\n\n @lazy_property_with_scope\n def prior(self):\n \"\"\"Standard normal distribution prior.\"\"\"\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(self.config.code_size),\n scale_diag=tf.ones(self.config.code_size))\n\n @lazy_property_with_scope(scope_name=\"encoder\")\n def posterior(self):\n \"\"\"a.k.a the encoder\"\"\"\n x = tfl.Flatten()(self.data)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n loc = tfl.Dense(self.config.code_size)(x)\n scale = tfl.Dense(self.config.code_size, activation='softplus')(x)\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)\n\n @lazy_property_with_scope\n def code(self):\n \"\"\"Code sample from the posterior.\"\"\"\n return self.posterior.sample()\n\n @lazy_property_with_scope(scope_name=\"decoder\")\n def likelihood(self):\n \"\"\"a.k.a the decoder.\"\"\"\n return self._make_decoder(self.code)\n\n @lazy_property_with_scope\n def sample(self):\n \"\"\"Sample example.\"\"\"\n return self._make_decoder(self.prior.sample(1))\n\n @lazy_property_with_scope\n def samples(self):\n \"\"\"Generated examples.\"\"\"\n return self._make_decoder(self.prior.sample(self.config.n_samples)).mean()\n\n @lazy_property_with_scope\n def log_prob(self):\n \"\"\"Log. likelihood of data under code sampled from posterior.\"\"\"\n return self.likelihood.log_prob(self.data)\n\n @lazy_property_with_scope\n def divergence(self):\n \"\"\"KL divergence between posterior and prior.\"\"\"\n return tfd.kl_divergence(self.posterior, self.prior)\n\n @lazy_property_with_scope\n def elbo(self):\n \"\"\"Evidence lower bound with a Lagrangian multiplier beta.\"\"\"\n return self.log_prob - self.config.beta * self.divergence\n\n @lazy_property_with_scope\n def loss(self):\n \"\"\"Negative ELBO reduced over the whole batch and every pixel.\"\"\"\n return -tf.reduce_mean(self.elbo)\n\n @lazy_property_with_scope\n def optimiser(self):\n \"\"\"ADAM optimiser.\"\"\"\n return tf.train.AdamOptimizer(self.config.learning_rate)\n\n @lazy_property_with_scope\n def gradients(self):\n \"\"\"Variables values and gradients of the loss (negative ELBO).\"\"\"\n return self.optimiser.compute_gradients(self.loss)\n\n @lazy_property_with_scope\n def optimise(self):\n \"\"\"Optimise the loss op. (apply gradients).\"\"\"\n return self.optimiser.apply_gradients(self.gradients)\n\n @lazy_property_with_scope\n def summary(self):\n \"\"\"Merged the model's summaries.\"\"\"\n return tf.summary.merge(self._define_summaries())\n\n @lazy_property_with_scope\n def images(self):\n \"\"\"Image summary of generated examples.\"\"\"\n images = tf.reshape(self.samples, (-1, self.samples.shape[2])) # Create col. 
of images\n images = tf.expand_dims(images, axis=0) # Add batch dim.\n images = tf.expand_dims(images, axis=-1) # Add channel dim.\n return tf.summary.image(\"samples\", images, max_outputs=1)\n\n @share_variables\n def _make_decoder(self, code):\n \"\"\"Build decoder network.\"\"\"\n x = tfl.Dense(self.config.hidden_size, activation='relu')(code)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n logits = tfl.Dense(np.product(self.data_shape))(x)\n logits = tf.reshape(logits, [-1] + self.data_shape)\n return tfd.Independent(tfd.Bernoulli(logits), 2)\n\n def _define_summaries(self):\n \"\"\"Define the model's summaries.\"\"\"\n summaries = []\n\n # Learning rate\n summaries.append(tf.summary.scalar(\"learning_rate\",\n self.optimiser._lr))\n\n # ELBO and loss\n summaries.append(tf.summary.histogram(\"evidence/lower_bound_log_prob/image\",\n self.elbo))\n summaries.append(tf.summary.scalar(\"mean/evidence/lower_bound_log_prob/image\",\n tf.reduce_mean(self.elbo)))\n summaries.append(tf.summary.scalar(\"loss\",\n self.loss))\n\n # KL divergence\n summaries.append(tf.summary.histogram(\"divergence\",\n self.divergence))\n summaries.append(tf.summary.scalar(\"mean/divergence\",\n tf.reduce_mean(self.divergence)))\n\n # Gradients and variables norm\n gradients, variables = list(zip(*self.gradients))\n for gradient, variable in zip(gradients, variables):\n summaries.append(tf.summary.histogram(\"gradients/batch_norm/\" + variable.name,\n tf.norm(gradient, axis=0)))\n summaries.append(tf.summary.histogram(\"variables/batch_norm/\" + variable.name,\n tf.norm(variable, axis=0)))\n summaries.append(tf.summary.scalar(\"gradients/global_norm\",\n tf.global_norm(gradients)))\n summaries.append(tf.summary.scalar(\"variables/global_norm\",\n tf.global_norm(variables)))\n\n # Prior and posterior entropy\n summaries.append(tf.summary.histogram(\"prior/entropy\",\n self.prior.entropy()))\n summaries.append(tf.summary.scalar(\"mean/prior/entropy\",\n tf.reduce_mean(self.prior.entropy())))\n summaries.append(tf.summary.histogram(\"posterior/entropy\",\n self.posterior.entropy()))\n summaries.append(tf.summary.scalar(\"mean/posterior/entropy\",\n tf.reduce_mean(self.posterior.entropy())))\n\n # Prior and posterior log_prob\n summaries.append(tf.summary.histogram(\"prior/log_prob/image\",\n self.sample.log_prob(self.data)))\n summaries.append(tf.summary.scalar(\"mean/prior/log_prob/image\",\n tf.reduce_mean(self.sample.log_prob(self.data))))\n summaries.append(tf.summary.histogram(\"posterior/log_prob/image\",\n self.log_prob))\n summaries.append(tf.summary.scalar(\"mean/posterior/log_prob/image\",\n tf.reduce_mean(self.log_prob)))\n\n return summaries\n\n\ndef plot_codes(codes, labels):\n # Scatter plot\n fig, ax = plt.subplots()\n ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)\n ax.set_aspect('equal')\n ax.set_xlim(codes.min() - .1, codes.max() + .1)\n ax.set_ylim(codes.min() - .1, codes.max() + .1)\n ax.tick_params(\n axis='both', which='both', left=False, bottom=False,\n labelleft=False, labelbottom=False)\n\n # Save to io buffer\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n buf.seek(0)\n\n # Create image summary\n image = tf.Summary.Image(encoded_image_string=buf.getvalue())\n summary = tf.Summary(value=[tf.Summary.Value(tag=\"images/codes/image\", image=image)])\n return summary\n\n\ndef create_datasets(train_set, test_set, config):\n train_dataset = tf.data.Dataset.from_tensor_slices(\n tf.convert_to_tensor(train_set, dtype=tf.float32)) \\\n .map(lambda x: x / 255) 
\\\n .shuffle(train_set.shape[0]) \\\n .batch(config.batch_size)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(\n tf.convert_to_tensor(test_set, dtype=tf.float32)) \\\n .map(lambda x: x / 255) \\\n .batch(test_set.shape[0])\n\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,\n train_dataset.output_shapes)\n\n next_batch = iterator.get_next()\n train_init_op = iterator.make_initializer(train_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n\n return next_batch, train_init_op, test_init_op\n\n\ndef train(model, train_init_op, test_init_op, test_labels, config):\n with tf.train.MonitoredSession() as sess:\n summary_writer_train = tf.summary.FileWriter(\n os.path.join(config.logs_dir, \"train\"), sess.graph)\n summary_writer_test = tf.summary.FileWriter(\n os.path.join(config.logs_dir, \"test\"))\n\n step = 0\n for epoch in tqdm(range(config.epochs)):\n # Test\n sess.run(test_init_op)\n test_summary, test_images, test_codes = sess.run(\n [model.summary, model.images, model.code])\n summary_writer_test.add_summary(test_summary, step)\n summary_writer_test.add_summary(test_images, step)\n\n # Plot codes\n # TODO: Use TensorBoard projector.\n codes = plot_codes(test_codes, test_labels)\n summary_writer_test.add_summary(codes, step)\n\n # Train\n # TODO: Add tfu.loop that will run whole epoch, have callbacks and reduce returns.\n sess.run(train_init_op)\n while True:\n try:\n fetches = AttrDict({\"optimise\": model.optimise})\n if step % config.log_every == 0:\n fetches.summary = model.summary\n\n returns = sess.run(fetches)\n if \"summary\" in returns:\n summary_writer_train.add_summary(returns.summary, step)\n\n step += 1\n except tf.errors.OutOfRangeError:\n break\n\n summary_writer_train.close()\n summary_writer_test.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Train VAE for MNIST dataset.\")\n parser.add_argument('--config', type=str, default=\"\", help=\"YAML formatted configuration\")\n user_config_json = parser.parse_args().config\n\n default_config = AttrDict({\n \"batch_size\": 100,\n \"epochs\": 20,\n \"n_samples\": 10,\n \"hidden_size\": 200,\n \"code_size\": 2,\n \"beta\": 1.,\n \"learning_rate\": 0.001,\n \"logs_dir\": \"./logs\",\n \"log_every\": 100\n })\n config = default_config.nested_update(attrdict_from_yaml(user_config_json))\n\n (train_set, _), (test_set, test_labels) = tf.keras.datasets.mnist.load_data()\n # TODO: Use whole test set, but batch it like train set and average summaries.\n # https://stackoverflow.com/questions/40788785/how-to-average-summaries-over-multiple-batches\n train_set, test_set, test_labels = train_set[:], test_set[:5000], test_labels[:5000]\n\n next_batch, train_init_op, test_init_op = create_datasets(train_set, test_set, config)\n\n model = Model(next_batch, config)\n train(model, train_init_op, test_init_op, test_labels, config)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.summary.histogram",
"tensorflow.zeros",
"tensorflow.norm",
"tensorflow.Summary.Value",
"tensorflow.reshape",
"tensorflow.summary.image",
"tensorflow.train.AdamOptimizer",
"tensorflow.expand_dims",
"tensorflow.reduce_mean",
"matplotlib.pyplot.subplots",
"tensorflow.ones",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.product",
"tensorflow.convert_to_tensor",
"tensorflow.global_norm",
"tensorflow.data.Iterator.from_structure",
"tensorflow.train.MonitoredSession"
]
] |
pkyIntelligence/FasterRCNN | [
"230953938efdba8f8c127fcc0bb746fcce8d9463"
] | [
"FasterRCNN/layers/roi_align.py"
] | [
"import torch\nimport math\n\nfrom torch import nn\nfrom ..utils.utils import point_interpolate\n\n\nclass ROIAlign(nn.Module):\n def __init__(self, output_size, spatial_scale, sampling_ratio):\n \"\"\"\n Args:\n output_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sampling_ratio (int): number of inputs samples to take for each output\n sample. 0 to take samples densely.\n\n Note:\n point interpolate already accounts for alignment, just make sure the continuous coordinates are correct\n \"\"\"\n super(ROIAlign, self).__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n\n def forward(self, input, rois):\n \"\"\"\n Args:\n input: NCHW images\n rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.\n\n returns: ROIAligned output, shape = (B, Channels, self.output_size[0], self.output_size[1])\n \"\"\"\n assert rois.dim() == 2 and rois.size(1) == 5\n\n batch_indices, rois_only = torch.split(rois, split_size_or_sections=[1, 4], dim=1)\n batch_indices = batch_indices.squeeze().long()\n rois_only = rois_only * self.spatial_scale\n\n n_rois = len(batch_indices)\n\n pooled_height = self.output_size[0]\n pooled_width = self.output_size[1]\n\n channels = input.shape[1]\n\n output = input.new_zeros(size=(rois.shape[0], channels, pooled_height, pooled_width))\n\n for i in range(n_rois):\n batch_index = batch_indices[i]\n roi = rois_only[i]\n\n roi_start_w = roi[0]\n roi_start_h = roi[1]\n roi_end_w = roi[2]\n roi_end_h = roi[3]\n\n roi_width = roi_end_w - roi_start_w\n roi_height = roi_end_h - roi_start_h\n\n roi_width = max(roi_width, 1.)\n roi_height = max(roi_height, 1.)\n\n bin_size_h = roi_height / pooled_height\n bin_size_w = roi_width / pooled_width\n\n roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_height / pooled_height)\n roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_width / pooled_width)\n\n count = max(roi_bin_grid_h * roi_bin_grid_w, 1)\n\n # Construct Pooled ROI for all channels\n for ph in range(pooled_height):\n for pw in range(pooled_width):\n pooled_sum = input.new_zeros(size=(channels, ))\n\n for sample_h in range(roi_bin_grid_h):\n y = roi_start_h + ph * bin_size_h + ((sample_h + 0.5) / roi_bin_grid_h) * bin_size_h\n\n for sample_w in range(roi_bin_grid_w):\n x = roi_start_w + pw * bin_size_w + ((sample_w + 0.5) / roi_bin_grid_w) * bin_size_w\n\n sampled_point = point_interpolate(input[batch_index], torch.Tensor([x, y]))\n pooled_sum = pooled_sum + sampled_point\n\n output[i, :, ph, pw] = pooled_sum / count\n\n return output\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + \"(\"\n tmpstr += \"output_size=\" + str(self.output_size)\n tmpstr += \", spatial_scale=\" + str(self.spatial_scale)\n tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio)\n tmpstr += \", aligned=\" + str(self.aligned)\n tmpstr += \")\"\n return tmpstr\n"
] | [
[
"torch.Tensor",
"torch.split"
]
] |
dhingratul/RNN | [
"9e1ac582dbf8251769817b34fc9d791fa8c20376"
] | [
"Memory_RNN.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 11:28:50 2017\n\n@author: dhingratul\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nimport tensorflow as tf\nimport helpers\n# hyperparams\nnum_epochs = 10000\ntotal_series_length = 100\ntruncated_backprop_length = 5\nstate_size = 4 # Number of neurons in the hidden layer\nnum_classes = 2 # Data is binary, 0 / 1 = Two Classes\nbatch_size = 8\nnum_batches = total_series_length//batch_size//truncated_backprop_length\n\n# Step 1 - Data Generation\n# Generate integers and corresponding binary numbers randomly selected in a\n# range of 10,000. The data points are zero padded so as to make a constant\n# lenght of 100\n\nshift_batch = 0\n\n\ndef generateData(shift_batch):\n vector_size = 100\n batches = helpers.random_sequences(length_from=3, length_to=8,\n vocab_lower=0, vocab_upper=2,\n batch_size=vector_size)\n batch = next(batches)\n x, _ = helpers.batch(batch)\n if shift_batch == 0: # Learning the same sequence\n y = x\n else:\n y_inter2 = helpers.shifter(batch, shift_batch)\n y, _ = helpers.batch(y_inter2)\n return x, y\n\n# Step 2 - Build the Model\nbatchX_placeholder = tf.placeholder(\n tf.float32, [batch_size, truncated_backprop_length])\nbatchY_placeholder = tf.placeholder(\n tf.int32, [batch_size, truncated_backprop_length])\ninit_state = tf.placeholder(tf.float32, [batch_size, state_size])\n\n# Randomly initialize weights\nW = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)\nb = tf.Variable(np.zeros((1, state_size)), dtype=tf.float32)\n\nW2 = tf.Variable(np.random.rand(state_size, num_classes), dtype=tf.float32)\nb2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)\n# Unpack columns\ninputs_series = tf.unstack(batchX_placeholder, axis=1)\nlabels_series = tf.unstack(batchY_placeholder, axis=1)\n# Forward pass\n# State placeholder\ncurrent_state = init_state\n# series of states through time\nstates_series = []\n\n# For each set of inputs, forward pass through the network to get new state\n# values and store all states in memory\nfor current_input in inputs_series:\n current_input = tf.reshape(current_input, [batch_size, 1])\n # Concatenate state and input data\n input_and_state_concatenated = tf.concat(\n axis=1, values=[current_input, current_state])\n next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)\n # Store the state in memory\n states_series.append(next_state)\n # Set current state to next one\n current_state = next_state\n# Calculate loss\nlogits_series = [tf.matmul(state, W2) + b2 for state in states_series]\n# Softmax Non-linearity\npredictions_series = [tf.nn.softmax(logits) for logits in logits_series]\n\n# Measure loss, calculate softmax again on logits, then compute cross entropy\nlosses = [tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels) for logits,\n labels in zip(logits_series, labels_series)]\n# Average Loss\ntotal_loss = tf.reduce_mean(losses)\n# Use adagrad for minimization\ntrain_step = tf.train.AdagradOptimizer(0.2).minimize(total_loss)\n# Step 3 Training the network\nwith tf.Session() as sess:\n y = np.zeros([batch_size])\n sess.run(tf.global_variables_initializer())\n loss_list = []\n for epoch_idx in range(num_epochs):\n # Generate new data at every epoch\n x, y = generateData(shift_batch)\n while (len(y) > 8 or len(y) < 8):\n x, y = generateData(shift_batch)\n # Empty hidden state\n _current_state = np.zeros((batch_size, state_size))\n\n print(\"epoch\", epoch_idx)\n for 
batch_idx in range(num_batches):\n # layers unrolled to a limited number of time-steps:\n # truncated length\n start_idx = batch_idx * truncated_backprop_length\n end_idx = start_idx + truncated_backprop_length\n\n batchX = x[:, start_idx:end_idx]\n batchY = y[:, start_idx:end_idx]\n # Run the computation graph, give it the values\n _total_loss, _train_step, _current_state, _predictions_series = \\\n sess.run(\n [total_loss, train_step, current_state,\n predictions_series],\n feed_dict={\n batchX_placeholder: batchX,\n batchY_placeholder: batchY,\n init_state: _current_state\n })\n # print(batchX, batchY)\n loss_list.append(_total_loss)\n\n if batch_idx % 100 == 0:\n print(\"Loss\", _total_loss)\n"
] | [
[
"tensorflow.placeholder",
"numpy.zeros",
"tensorflow.unstack",
"tensorflow.reshape",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.matmul",
"tensorflow.train.AdagradOptimizer",
"numpy.random.rand",
"tensorflow.concat",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.Session",
"tensorflow.nn.softmax"
]
] |
ashok-arjun/few-shot-ssl-public | [
"3cf522031aa40b4ffb61e4693d0b48fdd5669276"
] | [
"fewshot/data/compress_tiered_imagenet.py"
] | [
"# Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,\n# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# =============================================================================\nimport cv2\nimport numpy as np\nimport six\nimport sys\nimport pickle as pkl\n\nfrom tqdm import tqdm\n\n\ndef compress(path, output):\n with np.load(path, mmap_mode=\"r\") as data:\n images = data[\"images\"]\n array = []\n for ii in tqdm(six.moves.xrange(images.shape[0]), desc='compress'):\n im = images[ii]\n im_str = cv2.imencode('.png', im)[1]\n array.append(im_str)\n with open(output, 'wb') as f:\n pkl.dump(array, f, protocol=pkl.HIGHEST_PROTOCOL)\n\n\ndef decompress(path, output):\n with open(output, 'rb') as f:\n array = pkl.load(f)\n images = np.zeros([len(array), 84, 84, 3], dtype=np.uint8)\n for ii, item in tqdm(enumerate(array), desc='decompress'):\n im = cv2.imdecode(item, 1)\n images[ii] = im\n np.savez(path, images=images)\n\n\ndef main():\n if sys.argv[1] == 'compress':\n compress(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == 'decompress':\n decompress(sys.argv[2], sys.argv[3])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.savez"
]
] |
selimfirat/pysad | [
"dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede"
] | [
"tests/transform/preprocessing/test_instance_unit_norm_scaler.py"
] | [
"\n\ndef test_instance_unit_norm_scaler():\n import numpy as np\n from pysad.transform.preprocessing import InstanceUnitNormScaler\n\n X = np.random.rand(100, 25)\n scaler = InstanceUnitNormScaler()\n\n scaled_X = scaler.fit_transform(X)\n assert np.all(np.isclose(np.linalg.norm(scaled_X, axis=1), 1.0))\n\n scaler = scaler.fit(X)\n scaled_X = scaler.transform(X)\n assert np.all(np.isclose(np.linalg.norm(scaled_X, axis=1), 1.0))\n"
] | [
[
"numpy.linalg.norm",
"numpy.random.rand"
]
] |
corganhejijun/frontal-trans | [
"1509babf2447a53a772703b09cb6a2daec6968a7"
] | [
"test_sample.py"
] | [
"# -*- coding: utf-8 -*- \nimport os\nimport cv2\nfrom scipy import misc\nfrom PIL import Image\n\nsample_path = 'datasets/celeb_train/lfw_trans'\ndest_path = sample_path + \"/../dest\"\nmiddleSize = 64\nimgSize = 256\nkernel_size = (5, 5)\nsigma = 5\n\nif not os.path.exists(dest_path):\n os.mkdir(dest_path)\n\nfileList = os.listdir(sample_path)\nfor index, file in enumerate(fileList):\n imgPath = os.path.join(sample_path, file)\n if os.path.isdir(imgPath):\n continue\n print(\"procesing \" + file + \" \" + str(index+1) + '/' + str(len(fileList)))\n img = cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2RGB)\n img = misc.imresize(img, (middleSize, middleSize), interp='bilinear')\n img = misc.imresize(img, (imgSize, imgSize), interp='bilinear')\n img = cv2.GaussianBlur(img, kernel_size, sigma)\n combineImg = Image.new('RGB', (img.shape[0]*2, img.shape[0]))\n combineImg.paste(Image.fromarray(img), (0,0))\n combineImg.paste(Image.fromarray(img), (img.shape[0]+1,0))\n misc.imsave(os.path.join(dest_path, file), combineImg)\n"
] | [
[
"scipy.misc.imresize"
]
] |
YusrilHasanuddin/bangkit-capstone-CAP0166 | [
"51742f7af47fa285154793a6ea74de1d78d945b3"
] | [
"ml-project/extract_face_yusril.py"
] | [
"import sys\nimport os\nimport traceback\nfrom PIL import Image\nfrom facenet_pytorch import MTCNN\nimport matplotlib.image as mpimg\nimport numpy as np\n\n\ndef detect_faces(image_path):\n mtcnn = MTCNN(margin=20, keep_all=True,\n post_process=False, device='cuda:0')\n image = image_path\n image = mpimg.imread(image)\n image = Image.fromarray(image)\n faces = mtcnn(image)\n count = 0\n for face in faces:\n face = face.permute(1, 2, 0).int().numpy()\n # cv2.imwrite(os.path.join(\n # path_folder, \"face\" + str(count) + \".jpg\"),face)\n face = Image.fromarray((face).astype(np.uint8))\n face.save(os.path.join(path_folder, \"face\" + str(count) + \".jpg\"))\n count = count + 1\n\n\nif __name__ == \"__main__\":\n fcount = 0\n while os.path.exists(\"ExtractedFaceFolder\" + str(fcount)) == True:\n fcount = fcount + 1\n if os.path.exists(\"ExtractedFaceFolder\" + str(fcount)) == False:\n break\n else:\n continue\n os.mkdir(\"ExtractedFaceFolder\" + str(fcount))\n path_folder = \"ExtractedFaceFolder\" + str(fcount)\n\n if len(sys.argv) < 2:\n print(\"Usage: python detect_extract_save.py 'image path'\")\n sys.exit()\n\n if os.path.isdir(sys.argv[1]):\n for image in os.listdir(sys.argv[1]):\n try:\n print(\"Processing.....\",os.path.abspath(\n os.path.join(sys.argv[1],image)))\n detect_faces(os.path.abspath(\n os.path.join(sys.argv[1],image)),False)\n except Exception:\n print(\"Could not process \",os.path.abspath(\n os.path.join(sys.argv[1],image)))\n else:\n detect_faces(sys.argv[1])\n"
] | [
[
"matplotlib.image.imread"
]
] |
sailab-code/SAILenv | [
"e202be04de468a58e58ae858693245f5556c3597"
] | [
"example_unity_socket.py"
] | [
"#\n# Copyright (C) 2020 Enrico Meloni, Luca Pasqualini, Matteo Tiezzi\n# University of Siena - Artificial Intelligence Laboratory - SAILab\n#\n#\n# SAILenv is licensed under a MIT license.\n#\n# You should have received a copy of the license along with this\n# work. If not, see <https://en.wikipedia.org/wiki/MIT_License>.\n\n\n# Import packages\n\nimport time\nimport numpy as np\nimport cv2\nimport tkinter as tk\nfrom PIL import Image, ImageTk\n\n# Import src\n\nfrom sailenv.agent import Agent\n\nframes: int = 1000\n\n\ndef decode_image(array: np.ndarray):\n \"\"\"\n Decode the given numpy array with OpenCV.\n\n :param array: the numpy array to decode\n :return: the decoded image that can be displayed\n \"\"\"\n image = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)\n return image\n\n\ndef draw_flow_lines(current_frame, optical_flow, line_step=16, line_color=(0, 255, 0)):\n frame_with_lines = current_frame.copy()\n line_color = (line_color[2], line_color[1], line_color[0])\n\n for y in range(0, optical_flow.shape[0], line_step):\n for x in range(0, optical_flow.shape[1], line_step):\n fx, fy = optical_flow[y, x]\n cv2.line(frame_with_lines, (x, y), (int(x + fx), int(y + fy)), line_color)\n cv2.circle(frame_with_lines, (x, y), 1, line_color, -1)\n\n return frame_with_lines\n\n\ndef draw_flow_map(optical_flow):\n hsv = np.zeros((optical_flow.shape[0], optical_flow.shape[1], 3), dtype=np.uint8)\n hsv[..., 1] = 255\n\n mag, ang = cv2.cartToPolar(optical_flow[..., 0], optical_flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n frame_flow_map = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n return frame_flow_map\n\n\ndef create_windows(agent: Agent):\n windows = {}\n for view, is_active in agent.active_frames.items():\n if is_active:\n window = tk.Tk()\n window.geometry(f\"{agent.width}x{agent.height}\")\n windows[view] = window\n\n\n\n\n\n# host = \"bronte.diism.unisi.it\"\nhost = \"127.0.0.1\"\n# host = \"eliza.diism.unisi.it\"\nif __name__ == '__main__':\n print(\"Generating agent...\")\n agent = Agent(depth_frame_active=True,\n flow_frame_active=True,\n object_frame_active=True,\n main_frame_active=True,\n category_frame_active=True, width=256, height=192, host=host, port=8085, use_gzip=False)\n print(\"Registering agent on server...\")\n agent.register()\n print(f\"Agent registered with ID: {agent.id}\")\n last_unity_time: float = 0.0\n\n print(f\"Available scenes: {agent.scenes}\")\n\n scene = agent.scenes[0]\n print(f\"Changing scene to {scene}\")\n agent.change_scene(scene)\n\n print(f\"Available categories: {agent.categories}\")\n\n # print(agent.get_resolution())\n try:\n print(\"Press ESC to close\")\n while True:\n start_real_time = time.time()\n start_unity_time = last_unity_time\n\n start_get = time.time()\n frame = agent.get_frame()\n step_get = time.time() - start_get\n\n print(f\"get frame in seconds: {step_get}, fps: {1/step_get}\")\n\n if frame[\"main\"] is not None:\n main_img = cv2.cvtColor(frame[\"main\"], cv2.COLOR_RGB2BGR)\n cv2.imshow(\"PBR\", main_img)\n\n if frame[\"category\"] is not None:\n start_get_cat = time.time()\n # cat_img = np.zeros((agent.height * agent.width, 3), dtype=np.uint8)\n # Extract values and keys\n k = np.array(list(agent.cat_colors.keys()))\n v = np.array(list(agent.cat_colors.values()))\n\n mapping_ar = np.zeros((np.maximum(np.max(k)+1, 256), 3), dtype=v.dtype)\n mapping_ar[k] = v\n out = mapping_ar[frame[\"category\"]]\n\n # for idx, sup in enumerate(frame[\"category\"]):\n # try:\n # color 
= agent.cat_colors[sup]\n # cat_img[idx] = color\n # except KeyError:\n # #print(f\"key error on color get: {sup}\")\n # cat_img[idx] = [0,0,0]\n\n cat_img = np.reshape(out, (agent.height, agent.width, 3))\n cat_img = cat_img.astype(np.uint8)\n\n # unity stores the image as left to right, bottom to top\n # while CV2 reads it left to right, top to bottom\n # a flip up-down solves the problem\n # cat_img = np.flipud(cat_img)\n\n step_get_cat = time.time() - start_get_cat\n print(f\"Plot category in : {step_get_cat}\")\n cv2.imshow(\"Category\", cat_img)\n\n if frame[\"object\"] is not None:\n obj_img = decode_image(frame[\"object\"])\n cv2.imshow(\"Object ID\", obj_img)\n\n if frame[\"flow\"] is not None:\n flow = frame[\"flow\"]\n flow_img = draw_flow_map(flow)\n cv2.imshow(\"Optical Flow\", flow_img)\n\n if frame[\"depth\"] is not None:\n depth = frame[\"depth\"]\n cv2.imshow(\"Depth\", depth)\n\n key = cv2.waitKey(1)\n # print(f\"FPS: {1/(time.time() - start_real_time)}\")\n if key == 27: # ESC Pressed\n break\n finally:\n print(f\"Closing agent {agent.id}\")\n agent.delete()\n"
] | [
[
"numpy.max",
"numpy.reshape",
"numpy.zeros"
]
] |
georgeAccnt-GH/Azure2019 | [
"5c9774b644d3ea15590d72d3de9363df72abf7ab"
] | [
"src/AzureFunctions/ComputeGradient/AzureUtilities.py"
] | [
"import numpy as np\nimport segyio\nimport subprocess\nimport os, h5py\nfrom scipy import interpolate\nfrom devito import Eq, Operator\nfrom azure.storage.blob import BlockBlobService, PublicAccess\n\nblob_service = BlockBlobService(account_name='', account_key='')\n\n####################################################################################################\n# array put and get\n\ndef convert_to_string(t):\n if len(t) == 1:\n return str(t[0])\n elif len(t) == 2:\n return str(t[0]) + 'S' + str(t[1])\n else:\n return str(t[0]) + 'S' + str(t[1]) + 'S' + str(t[2])\n\ndef convert_int_from_string(s):\n s_split = s.split('S')\n ndim = len(s_split)\n if ndim==1:\n n = int(s_split[0])\n elif ndim==2:\n n1 = int(s_split[0])\n n2 = int(s_split[1])\n n = (n1, n2)\n else:\n n1 = int(s_split[0])\n n2 = int(s_split[1])\n n3 = int(s_split[2])\n n = (n1, n2, n3)\n return n\n\ndef convert_float_from_string(s):\n s_split = s.split('S')\n ndim = len(s_split)\n d1 = float(s_split[0])\n d2 = float(s_split[1])\n if ndim==2:\n d = (d1, d2)\n else:\n d3 = float(s_split[2])\n d = (d1, d2, d3)\n return d\n\n# write array\ndef array_put(blob, container, blob_name, index=0, count=None, validate_content=False):\n shape_str = convert_to_string(blob.shape)\n meta = {'dtype':str(blob.dtype), 'shape': shape_str}\n blob_service.create_blob_from_bytes(\n container,\n blob_name,\n blob.tostring(), # blob\n index = index, # start index in array of bytes\n count = count, # number of bytes to upload\n metadata = meta, # Name-value pairs\n validate_content = validate_content\n )\n\n# put array\ndef array_get(container, blob_name, start_range=None, end_range=None, validate_content=False):\n binary_blob = blob_service.get_blob_to_bytes(\n container,\n blob_name,\n start_range=start_range,\n end_range=end_range,\n validate_content=validate_content\n )\n try:\n meta = binary_blob.metadata\n shape = convert_int_from_string(meta['shape'])\n x = np.fromstring(binary_blob.content, dtype=meta['dtype'])\n return x.reshape(shape)\n except:\n x = np.fromstring(binary_blob.content, dtype='float32')\n return x\n####################################################################################################\n# model put and get\n\n# write model\ndef model_put(model_blob, origin, spacing, container, blob_name, index=0, count=None, validate_content=False):\n shape_str = convert_to_string(model_blob.shape)\n origin_str = convert_to_string(origin)\n spacing_str = convert_to_string(spacing)\n meta = {'dtype':str(model_blob.dtype), 'shape': shape_str, 'origin': origin_str, 'spacing': spacing_str}\n blob_service.create_blob_from_bytes(\n container,\n blob_name,\n model_blob.tostring(), # blob\n index = index, # start index in array of bytes\n count = count, # number of bytes to upload\n metadata = meta, # Name-value pairs\n validate_content = validate_content\n )\n\n# read model\ndef model_get(container, blob_name, start_range=None, end_range=None, validate_content=False):\n binary_blob = blob_service.get_blob_to_bytes(\n container,\n blob_name,\n start_range=start_range,\n end_range=end_range,\n validate_content=validate_content\n )\n meta = binary_blob.metadata\n shape = convert_int_from_string(meta['shape'])\n origin = convert_float_from_string(meta['origin'])\n spacing = convert_float_from_string(meta['spacing'])\n x = np.fromstring(binary_blob.content, dtype=meta['dtype'])\n return x.reshape(shape), origin, spacing\n\ndef model_read(filename):\n h5f = h5py.File(filename, 'r')\n m = h5f['m'][:]\n o = h5f['origin'][:]\n d = 
h5f['spacing'][:]\n h5f.close()\n return m, o, d\n\ndef model_write(m, origin, spacing, filename):\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('m', data=m)\n h5f.create_dataset('origin', data=origin)\n h5f.create_dataset('spacing', data=spacing)\n h5f.close()\n\n####################################################################################################\n# segy read\n\ndef segy_get(container, path, filename, ndims=2, keepFile=False):\n\n # copy from s3 to local volume\n subprocess.run(['az', 'storage', 'blob', 'download', '--container-name', container, '--name', path + filename,\n '--file', os.getcwd() + '/' + filename, '--output', 'table'])\n argout = segy_read(filename, ndims=ndims)\n\n if keepFile is False:\n subprocess.run(['rm', '-f', filename])\n\n return argout\n\ndef segy_read(filename, ndims=2):\n\n with segyio.open(filename, \"r\", ignore_geometry=True) as segyfile:\n segyfile.mmap()\n\n # Assume input data is for single common shot gather\n sourceX = segyfile.attributes(segyio.TraceField.SourceX)[0]\n sourceY = segyfile.attributes(segyio.TraceField.SourceY)[0]\n sourceZ = segyfile.attributes(segyio.TraceField.SourceSurfaceElevation)[0]\n groupX = segyfile.attributes(segyio.TraceField.GroupX)[:]\n groupY = segyfile.attributes(segyio.TraceField.GroupY)[:]\n groupZ = segyfile.attributes(segyio.TraceField.ReceiverGroupElevation)[:]\n dt = segyio.dt(segyfile)/1e3\n\n # Apply scaling\n elevScalar = segyfile.attributes(segyio.TraceField.ElevationScalar)[0]\n coordScalar = segyfile.attributes(segyio.TraceField.SourceGroupScalar)[0]\n\n if coordScalar < 0.:\n sourceX = sourceX / np.abs(coordScalar)\n sourceY = sourceY / np.abs(coordScalar)\n sourceZ = sourceZ / np.abs(elevScalar)\n groupX = groupX / np.abs(coordScalar)\n groupY = groupY / np.abs(coordScalar)\n elif coordScalar > 0.:\n sourceX = sourceX * np.abs(coordScalar)\n sourceY = sourceY * np.abs(coordScalar)\n sourceZ = sourceZ * np.abs(elevScalar)\n groupX = groupX * np.abs(coordScalar)\n groupY = groupY * np.abs(coordScalar)\n\n if elevScalar < 0.:\n groupZ = groupZ / np.abs(elevScalar)\n elif elevScalar > 0.:\n groupZ = groupZ * np.abs(elevScalar)\n\n nrec = len(groupX)\n nt = len(segyfile.trace[0])\n\n # Extract data\n data = np.zeros(shape=(nt, nrec), dtype='float32')\n for i in range(nrec):\n data[:,i] = segyfile.trace[i]\n tmax = (nt-1)*dt\n\n if ndims == 2:\n return data, sourceX, sourceZ, groupX, groupZ, tmax, dt, nt\n else:\n return data, sourceX, sourceY, sourceZ, groupX, groupY, groupZ, tmax, dt, nt\n\n\ndef segy_model_read(filename):\n\n with segyio.open(filename, \"r\", ignore_geometry=True) as segyfile:\n segyfile.mmap()\n\n # Assume input data is for single common shot gather\n sourceX = segyfile.attributes(segyio.TraceField.SourceX)\n dx = segyio.dt(segyfile)/1e3\n\n # Apply scaling\n coordScalar = segyfile.attributes(segyio.TraceField.SourceGroupScalar)[0]\n\n if coordScalar < 0.:\n sourceX = sourceX / np.abs(coordScalar)\n elif coordScalar > 0.:\n sourceX = sourceX * np.abs(coordScalar)\n\n nx = len(sourceX)\n nz = len(segyfile.trace[0])\n\n # Extract data\n data = np.zeros(shape=(nx, nz), dtype='float32')\n for i in range(nx):\n data[i,:] = segyfile.trace[i]\n\n return data, sourceX, dx\n\n\ndef segy_put(data, sourceX, sourceZ, groupX, groupZ, dt, container, path, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000, keepFile=False):\n\n # Write segy file\n segy_write(data, sourceX, sourceZ, groupX, groupZ, dt, filename, sourceY=None, groupY=None, elevScalar=-1000, 
coordScalar=-1000)\n\n # copy from local volume to s3\n status = subprocess.run(['az', 'storage', 'blob', 'upload', '--container-name', container, '--file', filename, '--name', path+filename])\n\n if keepFile is False:\n subprocess.run(['rm', '-f', filename])\n\n return status\n\n\ndef segy_write(data, sourceX, sourceZ, groupX, groupZ, dt, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000):\n\n nt = data.shape[0]\n nsrc = 1\n nxrec = len(groupX)\n if sourceY is None and groupY is None:\n sourceY = np.zeros(1, dtype='int')\n groupY = np.zeros(nxrec, dtype='int')\n nyrec = len(groupY)\n\n # Create spec object\n spec = segyio.spec()\n spec.ilines = np.arange(nxrec) # dummy trace count\n spec.xlines = np.zeros(1, dtype='int') # assume coordinates are already vectorized for 3D\n spec.samples = range(nt)\n spec.format=1\n spec.sorting=1\n\n with segyio.create(filename, spec) as segyfile:\n for i in range(nxrec):\n segyfile.header[i] = {\n segyio.su.tracl : i+1,\n segyio.su.tracr : i+1,\n segyio.su.fldr : 1,\n segyio.su.tracf : i+1,\n segyio.su.sx : int(np.round(sourceX[0] * np.abs(coordScalar))),\n segyio.su.sy : int(np.round(sourceY[0] * np.abs(coordScalar))),\n segyio.su.selev: int(np.round(sourceZ[0] * np.abs(elevScalar))),\n segyio.su.gx : int(np.round(groupX[i] * np.abs(coordScalar))),\n segyio.su.gy : int(np.round(groupY[i] * np.abs(coordScalar))),\n segyio.su.gelev : int(np.round(groupZ[i] * np.abs(elevScalar))),\n segyio.su.dt : int(dt*1e3),\n segyio.su.scalel : int(elevScalar),\n segyio.su.scalco : int(coordScalar)\n }\n segyfile.trace[i] = data[:, i]\n segyfile.dt=int(dt*1e3)\n\n\n\n####################################################################################################\n# Auxiliary modeling functions\n\n# Add/subtract devito data w/ MPI\ndef add_rec(d1, d2):\n eq = Eq(d1, d1 + d2)\n op = Operator([eq])\n op()\n return d1\n\ndef sub_rec(d1, d2):\n eq = Eq(d1, d1 - d2)\n op = Operator([eq],subs={d2.indices[-1]: d1.indices[-1]})\n op()\n return d1\n\n# Create 3D receiver grid from 1D x and y receiver vectors\ndef create_3D_grid(xrec, yrec, zrec):\n\n nxrec = len(xrec)\n nyrec = len(yrec)\n nrec_total = nxrec * nyrec\n\n rec = np.zeros(shape=(nrec_total, 3), dtype='float32')\n count = 0\n for j in range(nxrec):\n for k in range(nyrec):\n rec[count, 0] = xrec[j]\n rec[count, 1] = yrec[k]\n rec[count, 2] = zrec\n count += 1\n return rec\n\n\ndef restrict_model_to_receiver_grid(sx, gx, m, spacing, origin, sy=None, gy=None, buffer_size=500, numpy_coords=True):\n\n # Model parameters\n shape = m.shape\n ndim = len(shape)\n if ndim == 2:\n domain_size = ((shape[0] - 1) * spacing[0], (shape[1] - 1) * spacing[1])\n else:\n domain_size = ((shape[0] - 1) * spacing[0], (shape[1] - 1) * spacing[1], \\\n (shape[2] - 1) * spacing[2])\n\n # Scan for minimum/maximum source/receiver coordinates\n min_x = np.min([np.min(sx), np.min(gx)])\n max_x = np.max([np.max(sx), np.max(gx)])\n if sy is not None and gy is not None:\n min_y = np.min([np.min(sy), np.min(gy)])\n max_y = np.max([np.max(sy), np.max(gy)])\n\n # Add buffer zone if possible\n min_x = np.max([origin[0], min_x - buffer_size])\n max_x = np.min([origin[0] + domain_size[0], max_x + buffer_size])\n #print(\"min_x: \", min_x)\n #print(\"max_x: \", max_x)\n if ndim == 3:\n min_y = np.max([origin[1], min_y - buffer_size])\n max_y = np.min([origin[1] + domain_size[1], max_y + buffer_size])\n #print(\"min_y: \", min_y)\n #print(\"max_y: \", max_y)\n\n # Extract model part\n nx_min = int(min_x / spacing[0])\n nx_max = 
int(max_x / spacing[0])\n #print(\"nx_min: \", nx_min)\n #print(\"nx_max: \", nx_max)\n ox = nx_min * spacing[0]\n oz = origin[-1]\n if ndim == 3:\n ny_min = int(min_y / spacing[1])\n ny_max = int(max_y / spacing[1])\n #print(\"ny_min: \", ny_min)\n #print(\"ny_max: \", ny_max)\n oy = ny_min * spacing[1]\n\n # Extract relevant part of model\n n_orig = shape\n #print(\"Original shape: \", n_orig)\n if ndim == 2:\n m = m[nx_min:nx_max+1, :]\n origin = (ox, oz)\n else:\n m = m[nx_min:nx_max+1, ny_min:ny_max+1, :]\n origin = (ox, oy, oz)\n shape = m.shape\n #print(\"New shape: \", shape)\n\n return m, shape, origin\n\n\ndef extent_gradient(shape_full, origin_full, shape_sub, origin_sub, spacing, g):\n\n nz = shape_full[-1]\n ndim = len(shape_full)\n\n nx_left = int((origin_sub[0] - origin_full[0]) / spacing[0])\n nx_right = shape_full[0] - shape_sub[0] - nx_left\n\n if ndim == 3:\n ny_left = int((origin_sub[1] - origin_full[1]) / spacing[1])\n ny_right = shape_full[1] - shape_sub[1] - ny_left\n\n if ndim == 2:\n block1 = np.zeros(shape=(nx_left, nz), dtype='float32')\n block2 = np.zeros(shape=(nx_right, nz), dtype='float32')\n g = np.concatenate((block1, g, block2), axis=0)\n else:\n block1 = np.zeros(shape=(nx_left, shape_sub[1], nz), dtype='float32')\n block2 = np.zeros(shape=(nx_right, shape_sub[1], nz), dtype='float32')\n g = np.concatenate((block1, g, block2), axis=0)\n del block1, block2\n block3 = np.zeros(shape=(shape_full[0], ny_left, nz), dtype='float32')\n block4 = np.zeros(shape=(shape_full[0], ny_right, nz), dtype='float32')\n g = np.concatenate((block3, g, block4), axis=1)\n\n return g\n\n\n####################################################################################################\n# Auxiliary AWS functions\n\n\ndef resample(data, t0, tn, nt_prev, nt_new):\n\n time_prev = np.linspace(start=t0, stop=tn, num=nt_prev)\n time_new = np.linspace(start=t0, stop=tn, num=nt_new)\n\n d_resamp = np.zeros(shape=(len(time_new), data.shape[1]), dtype='float32')\n for i in range(data.shape[1]):\n tck = interpolate.splrep(time_prev, data[:, i], k=3)\n d_resamp[:, i] = interpolate.splev(time_new, tck)\n return d_resamp\n\n\n# Get chunk size of gradient\ndef get_chunk_size(g_size, num_chunks):\n\n average_size = int(g_size/num_chunks)\n num_residuals = g_size % num_chunks\n chunk_size = np.ones(num_chunks, dtype='int')*average_size\n if num_residuals > 0:\n for j in range(num_residuals):\n chunk_size[j] += 1\n return chunk_size\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.concatenate",
"numpy.abs",
"numpy.arange",
"scipy.interpolate.splev",
"numpy.max",
"numpy.min",
"scipy.interpolate.splrep",
"numpy.linspace",
"numpy.fromstring"
]
] |
TwinIsland/img2java | [
"6b6788daa0a97acb1e455ead9d7bd09d7d881ab2"
] | [
"treat.py"
] | [
"from matplotlib import pyplot as plt\nimport numpy as np\nimport cv2\nfrom scipy import stats\nimport translate\nfrom skimage import transform\n\n#####################################\nimgData = cv2.imread('van.jpg',0)\ncompressRate = 0.4\n#####################################\n\nimgData = np.array(imgData)\nshape = imgData.shape\npas = p = 'unknown'\n\ndef twoWayTreat():\n global imgData\n imgData = stats.zscore(imgData)\n\n for raw in range(shape[0]):\n for col in range(shape[1]):\n if imgData[raw][col] < 0:\n imgData[raw][col] = 0\n else:\n imgData[raw][col] = 255\n\n\ndef debugImg():\n global imgData\n plt.imshow(imgData)\n plt.show()\n\n\ndef getCode():\n code = ''\n for this_line_index in range(len(imgData)-1):\n lineLib = []\n this_line = imgData[this_line_index]\n newTurn = False\n for this_line_data_index in range(len(this_line)-1):\n if this_line[this_line_data_index] == 255:\n begin_draw = this_line_data_index\n newTurn = True\n\n if this_line[this_line_data_index] == 0 and newTurn:\n end_draw = this_line_data_index\n lineLib.append([begin_draw,end_draw])\n newTurn = False\n\n for i in lineLib:\n code = code + translate.getCode([i[0],this_line_index,i[1],this_line_index]) + '\\n'\n\n return code\n\ndef compressImg():\n global imgData,compressRate\n imgData = transform.rescale(imgData, [compressRate,compressRate])\n\n\ndef passivate():\n count = 0\n global imgData\n shape = imgData.shape\n lineLenght = shape[1]\n for lineIndex in range(shape[0]-1):\n for numberIndex in range(0,lineLenght-6):\n thisFive = list(imgData[lineIndex,numberIndex:numberIndex+5])\n if thisFive == [0,255,255,255,255]:\n count += 1\n thisFive[0] =255\n imgData[lineIndex,numberIndex:numberIndex+5] = thisFive\n return 'passivate rate: ' + str(count/(shape[0]*shape[1])) + '%'\n\n\ntwoWayTreat()\ncompressImg()\npas = passivate()\ndebugImg()\np = getCode()\ntranslate.setSize(imgData.shape)\n\nwith open('draw.java','w') as f:\n f.write(translate.upper_code)\n f.write(p)\n f.write(translate.lower_code)\n\ntry:\n print('==================')\n print('compressRate: ' + str(compressRate))\n print('passivateRate: ' + str(pas))\n print('size: ' + str(imgData.shape))\n print('==================')\nexcept Exception:\n print('cannot print out the post-info!')\n\nf.close()\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"scipy.stats.zscore"
]
] |
MinhTuDo/MD-MOENAS | [
"edd6ec8c3f89cfbe9674873425c5056e72899edb"
] | [
"procedure/problem/efficiency_performance/mo_nats.py"
] | [
"from procedure.problem.base import nats as base\n\nimport numpy as np\n\nclass EfficiencyAccuracyNATS(base.NATS):\n def __init__(self, efficiency, **kwargs):\n super().__init__(n_obj=2, **kwargs)\n self.msg += efficiency + '={:.3f}, ' + 'valid-error' + '={:.3f}'\n self.efficiency = efficiency\n\n def _calc_F(self, genotype, **kwargs):\n accuracy, latency, _, runtime = self.api.simulate_train_eval(\n genotype, self.dataset, iepoch=self.epoch, hp=self.api.full_train_epochs\n )\n\n idx = self.api.query_index_by_arch(genotype)\n cost_info = self.api.get_cost_info(idx, self.dataset, hp=self.api.full_train_epochs)\n params, flops = cost_info['params'], cost_info['flops']\n \n efficiency = eval(self.efficiency)\n error = 100 - accuracy\n\n F = [efficiency, error]\n return F, runtime\n\n\n def _convert_to_pf_space(self, X):\n F = []\n dataset = self.pf_dict['dataset']\n for x in X:\n genotype = self._decode(x)\n idx = self.api.query_index_by_arch(genotype)\n efficiency = self.api.get_cost_info(\n idx, dataset, hp=self.api.full_train_epochs\n )[self.efficiency]\n acc = self.api.get_more_info(\n idx, dataset, hp=self.api.full_train_epochs, is_random=False\n )['test-accuracy']\n err = 100 - acc\n f = [efficiency, err]\n F += [np.column_stack(f)]\n F = np.row_stack(F)\n return F\n\nclass MDEfficiencyAccuracyNATS(base.NATS):\n def __init__(self, efficiency, **kwargs):\n super().__init__(n_obj=2, **kwargs)\n self.msg += 'avg-' + efficiency + '={:.3f}, avg-val-err={:.3f}'\n self.efficiency = efficiency\n \n\n def _calc_F(self, genotype, **kwargs):\n idx = self.api.query_index_by_arch(genotype)\n\n efficiency = []; runtime = []; accuracy = []\n for dts in self.dataset:\n _accuracy, latency, _, _runtime = self.api.simulate_train_eval(\n genotype, dataset=dts, iepoch=self.epoch, hp=self.api.full_train_epochs\n )\n\n idx = self.api.query_index_by_arch(genotype)\n cost_info = self.api.get_cost_info(idx, dts, hp=self.api.full_train_epochs)\n params, flops = cost_info['params'], cost_info['flops']\n \n _efficiency = eval(self.efficiency)\n efficiency += [_efficiency]\n runtime += [_runtime]\n accuracy += [_accuracy]\n\n efficiency = np.mean(efficiency)\n runtime = sum(runtime)\n accuracy = np.mean(accuracy)\n\n err = 100 - accuracy\n\n F = [efficiency, err]\n\n return F, runtime\n\n def _convert_to_pf_space(self, X):\n F = []\n dataset = self.pf_dict['dataset']\n \n for x in X:\n genotype = self._decode(x)\n idx = self.api.query_index_by_arch(genotype)\n efficiency = self.api.get_cost_info(idx, dataset, hp=self.api.full_train_epochs)[self.efficiency]\n acc = \\\n self.api.get_more_info(idx, dataset, hp=self.api.full_train_epochs, is_random=False)['test-accuracy']\n err = 100 - acc\n f = [efficiency, err]\n F += [np.column_stack(f)]\n F = np.row_stack(F)\n return F\n \n \n"
] | [
[
"numpy.column_stack",
"numpy.row_stack",
"numpy.mean"
]
] |
Gaskell-1206/MSI_vs_MSS_Classification | [
"be6fd8a6961624367b2bb0e1299219e940f6f418"
] | [
"Step2_Training_MIL/train_MIL_classification_trained_cnn_models.py"
] | [
"# Run MIL classification use pretrained CNN models\n# Reference: 1.Campanella, G. et al. Clinical-grade computational pathology using weakly supervised\n# deep learning on whole slide images. Nat Med 25, 1301–1309 (2019).\n# doi:10.1038/s41591-019-0508-1. Available from http://www.nature.com/articles/s41591-019-0508-1\n# The source codes of the referenced paper available at https://github.com/MSKCC-Computational-Pathology/MIL-nature-medicine-2019\n# This code was modified by Shengjia Chen for our work.\nimport argparse\nimport os\nimport random\nimport sys\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import Callable, Optional, Union\nfrom urllib.error import HTTPError\nimport glob\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nfrom pytorch_lightning.callbacks import (EarlyStopping, LearningRateMonitor,\n ModelCheckpoint)\nfrom pytorch_lightning.lite import LightningLite\nfrom pytorch_lightning.loops import Loop\nfrom skimage import io\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\nfrom tqdm import tqdm\nsys.path.append('/gpfs/scratch/sc9295/digPath/MSI_vs_MSS_Classification/Step1_Training_MSI_MSS')\nfrom train_tile_level_classification import MSI_MSS_Module\nfrom sklearn.metrics import (auc, confusion_matrix, f1_score, roc_auc_score,\n roc_curve)\n\nbest_acc = 0\n\ndef inference(loader, model):\n model.eval()\n probs = torch.FloatTensor(len(loader.dataset))\n with torch.no_grad():\n for i, input in enumerate(loader):\n # print(\n # 'Inference\\tEpoch: [{}/{}]\\tBatch: [{}/{}]'.format(run+1, args.nepochs, i+1, len(loader)))\n output = F.softmax(model(input), dim=1)\n probs[i*args.batch_size:i*args.batch_size +\n input.size(0)] = output.detach()[:, 1].clone()\n return probs.cpu().numpy()\n\n\ndef train(run, loader, model, criterion, optimizer):\n model.train()\n running_loss = 0.\n for i, (input, target) in enumerate(loader):\n input = input.cuda()\n target = target.cuda()\n output = model(input)\n loss = criterion(output, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()*input.size(0)\n return running_loss/len(loader.dataset)\n\n\ndef calc_err(pred, real):\n pred = np.array(pred)\n real = np.array(real)\n pos = np.equal(pred, real)\n neq = np.not_equal(pred, real)\n acc = float(pos.sum())/pred.shape[0]\n err = float(neq.sum())/pred.shape[0]\n fpr = float(np.logical_and(pred == 1, neq).sum())/(real == 0).sum()\n fnr = float(np.logical_and(pred == 0, neq).sum())/(real == 1).sum()\n return acc, err, fpr, fnr\n\n\ndef group_argtopk(groups, data, k=1):\n # groups in slide, data is prob of each tile\n k = min(k,len(data))\n order = np.lexsort((data, groups))\n groups = groups[order]\n data = data[order]\n index = np.empty(len(groups), 'bool')\n index[-k:] = True\n index[:-k] = groups[k:] != groups[:-k]\n return list(order[index]) # output top prob tile index in each slide\n\n\ndef group_max(groups, data, nmax):\n out = np.empty(nmax)\n out[:] = np.nan\n order = np.lexsort((data, groups))\n groups = groups[order]\n data = data[order]\n index = np.empty(len(groups), 'bool')\n index[-1] = True\n index[:-1] = groups[1:] != groups[:-1]\n out[groups[index]] = data[index]\n return out\n\n\nclass 
MILdataset(Dataset):\n def __init__(self, libraryfile_dir='', root_dir='', dataset_mode='Train', transform=None, subset_rate=None):\n libraryfile_path = os.path.join(\n libraryfile_dir, f'CRC_DX_{dataset_mode}_ALL.csv')\n lib = pd.read_csv(libraryfile_path)\n lib = lib if subset_rate is None else lib.sample(\n frac=subset_rate, random_state=2022)\n lib = lib.sort_values(['subject_id'], ignore_index=True)\n lib.to_csv(os.path.join(libraryfile_dir,\n f'{dataset_mode}_temporary.csv'))\n slides = []\n for i, name in enumerate(lib['subject_id'].unique()):\n # sys.stdout.write(\n # 'Slides: [{}/{}]\\r'.format(i+1, len(lib['subject_id'].unique())))\n # sys.stdout.flush()\n slides.append(name)\n\n # Flatten grid\n grid = []\n slideIDX = []\n for i, g in enumerate(lib['subject_id'].unique()):\n tiles = lib[lib['subject_id'] == g]['slice_id']\n grid.extend(tiles)\n slideIDX.extend([i]*len(tiles))\n\n # print('Number of tiles: {}'.format(len(grid)))\n self.dataframe = self.load_data_and_get_class(lib)\n self.slidenames = list(lib['subject_id'].values)\n self.slides = slides\n self.targets = self.dataframe['Class']\n self.grid = grid\n self.slideIDX = slideIDX\n self.transform = transform\n self.root_dir = root_dir\n self.dset = f\"CRC_DX_{dataset_mode}\"\n\n def setmode(self, mode):\n self.mode = mode\n\n def maketraindata(self, idxs):\n self.t_data = [(self.slideIDX[x], self.grid[x],\n self.targets[x]) for x in idxs]\n\n def shuffletraindata(self):\n self.t_data = random.sample(self.t_data, len(self.t_data))\n\n def load_data_and_get_class(self, df):\n df.loc[df['label'] == 'MSI', 'Class'] = 1\n df.loc[df['label'] == 'MSS', 'Class'] = 0\n return df\n\n def __getitem__(self, index):\n if self.mode == 1:\n slideIDX = self.slideIDX[index]\n tile_id = self.grid[index]\n slide_id = self.slides[slideIDX]\n img_name = \"blk-{}-{}.png\".format(tile_id, slide_id)\n target = self.targets[index]\n label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'\n img_path = os.path.join(self.root_dir, self.dset, label, img_name)\n img = io.imread(img_path)\n if self.transform is not None:\n img = self.transform(img)\n return img\n elif self.mode == 2:\n slideIDX, tile_id, target = self.t_data[index]\n slide_id = self.slides[slideIDX]\n label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'\n img_name = \"blk-{}-{}.png\".format(tile_id, slide_id)\n img_path = os.path.join(self.root_dir, self.dset, label, img_name)\n img = io.imread(img_path)\n\n if self.transform is not None:\n img = self.transform(img)\n return img, target\n\n def __len__(self):\n if self.mode == 1:\n return len(self.grid)\n elif self.mode == 2:\n return len(self.t_data)\n\n\nclass Lite(LightningLite):\n\n def run(self, args):\n global best_acc\n print(args)\n\n self.seed_everything(2022)\n model_name = args.model_name\n sample_rate = args.sample_rate\n ckpt_path = os.path.join(args.model_path, f'{args.model_name}_bs{args.batch_size}_lr{args.learning_rate}')\n ckpt_file_path = glob.glob(os.path.join(ckpt_path,'*.ckpt'))[0]\n model = MSI_MSS_Module.load_from_checkpoint(ckpt_file_path)\n\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=args.learning_rate, weight_decay=1e-4)\n if args.weights == 0.5:\n criterion = nn.CrossEntropyLoss()\n else:\n w = torch.Tensor([1-args.weights, args.weights])\n criterion = nn.CrossEntropyLoss(w)\n # Scale model and optimizers\n model, optimizer = self.setup(model, optimizer, move_to_device=True)\n\n DATA_MEANS = [0.485, 0.456, 0.406]\n DATA_STD = [0.229, 0.224, 0.225]\n\n train_transform = 
transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.RandomHorizontalFlip(),\n transforms.Normalize(DATA_MEANS, DATA_STD)])\n test_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(DATA_MEANS, DATA_STD)])\n\n train_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Train', transform=train_transform, subset_rate=sample_rate)\n val_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Val', transform=test_transform, subset_rate=sample_rate)\n test_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Test', transform=test_transform, subset_rate=sample_rate)\n\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n train_dataloader, val_dataloader, test_dataloader = self.setup_dataloaders(\n train_dataloader, val_dataloader, test_dataloader, move_to_device=True)\n\n # open output file\n version_name = f'MIL_{model_name}_bs{args.batch_size}_lr{args.learning_rate}_w{args.weights}_k{args.k}_output'\n # logger\n output_path = os.path.join(args.output_path,version_name)\n writer = SummaryWriter(output_path)\n\n for epoch in tqdm(range(args.nepochs)):\n train_dataset.setmode(1)\n # print(\"train_set_len:\", len(train_dataloader.dataset))\n probs = inference(train_dataloader, model)\n # return the indices of topk tile(s) in each slides\n topk = group_argtopk(\n np.array(train_dataset.slideIDX), probs, args.k)\n train_dataset.maketraindata(topk)\n train_dataset.shuffletraindata()\n train_dataset.setmode(2)\n\n model.train()\n running_loss = 0.\n for i, (input, target) in enumerate(train_dataloader):\n output = model(input)\n loss = criterion(output, target.long())\n optimizer.zero_grad()\n self.backward(loss)\n optimizer.step()\n running_loss += loss.item()*input.size(0)\n\n train_loss = running_loss/len(train_dataloader.dataset)\n print(\n 'Training\\tEpoch: [{}/{}]\\tLoss: {}'.format(epoch+1, args.nepochs, train_loss))\n writer.add_scalar('train_loss', train_loss, epoch+1)\n\n\n # Validation\n if (epoch+1) % args.test_every == 0:\n val_dataset.setmode(1)\n probs = inference(val_dataloader, model)\n maxs = group_max(np.array(val_dataset.slideIDX),\n probs, len(val_dataset.targets))\n pred = [1 if x >= 0.5 else 0 for x in probs]\n val_acc, err, fpr, fnr = calc_err(pred, val_dataset.targets)\n\n print('Validation\\tEpoch: [{}/{}]\\t ACC: {}\\tError: {}\\tFPR: {}\\tFNR: {}'.format(\n epoch+1, args.nepochs, val_acc, err, fpr, fnr))\n\n writer.add_scalar('val_acc', val_acc, epoch+1)\n writer.add_scalar('fpr', fpr, epoch+1)\n writer.add_scalar('fnr', fnr, epoch+1)\n\n # Save best model\n err = (fpr+fnr)/2.\n if 1-err >= best_acc:\n best_acc = 1-err\n obj = {\n 'epoch': epoch+1,\n 'state_dict': model.state_dict(),\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict()\n }\n torch.save(obj, os.path.join(output_path, 'checkpoint_best.pth'))\n\n # test\n ch = torch.load(os.path.join(output_path,'checkpoint_best.pth'))\n # load params\n model.load_state_dict(ch['state_dict'])\n model = model.cuda()\n cudnn.benchmark = True\n train_dataset.setmode(1)\n val_dataset.setmode(1)\n test_dataset.setmode(1)\n\n # Train\n probs = inference(train_dataloader, model)\n 
maxs = group_max(np.array(train_dataset.slideIDX), probs, len(train_dataset.targets))\n fp = open(os.path.join(output_path, f'Train_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(train_dataset.slidenames, train_dataset.grid, train_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close()\n\n # Val\n probs = inference(val_dataloader, model)\n maxs = group_max(np.array(val_dataset.slideIDX), probs, len(val_dataset.targets))\n fp = open(os.path.join(output_path, f'Val_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(val_dataset.slidenames, val_dataset.grid, val_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close()\n\n # Test\n probs = inference(test_dataloader, model)\n maxs = group_max(np.array(test_dataset.slideIDX), probs, len(test_dataset.targets))\n fp = open(os.path.join(output_path, f'Test_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(test_dataset.slidenames, test_dataset.grid, test_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close() \n\n pred = [1 if x >= 0.5 else 0 for x in probs]\n test_acc, err, fnr, fpr = calc_err(pred, test_dataset.targets)\n test_f1_score = f1_score(test_dataset.targets, pred, average='binary')\n\n try:\n test_auroc_score = roc_auc_score(test_dataset.targets, probs)\n writer.add_scalar(\"test_auroc_score\", test_auroc_score)\n except ValueError:\n writer.add_scalar('test_auroc_score', .0)\n\n writer.add_scalar('test_f1_score', test_f1_score) \n writer.add_scalar('test_acc', test_acc)\n\n\n\ndef main(args):\n Lite(devices=\"auto\", accelerator=\"auto\").run(args)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"--root_dir\",\n type=Path,\n required=True,\n help=\"root directory of dataset\",\n )\n parser.add_argument(\n \"--lib_dir\",\n type=Path,\n required=True,\n help=\"root directory of libraryfile\",\n )\n parser.add_argument(\n \"--model_path\",\n type=Path,\n required=True,\n help=\"root directory of pretrained models\",\n )\n parser.add_argument(\n \"--output_path\",\n type=Path,\n required=True,\n help=\"output directory\",\n )\n parser.add_argument(\n \"--model_name\",\n default='alexnet',\n choices=('resnet18', 'resnet34', 'alexnet', 'vgg',\n 'squeezenet', 'densenet', 'inception'),\n type=str,\n help=\"model use for train\",\n )\n parser.add_argument(\n \"--sample_rate\",\n default=1,\n type=float,\n help=\"undersample rate\",\n )\n parser.add_argument(\n \"--batch_size\",\n default=128,\n type=int,\n help=\"batch size\",\n )\n parser.add_argument(\n \"--learning_rate\",\n default=1e-3,\n type=float,\n help=\"learning rate\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=0,\n type=int,\n required=True,\n help=\"number of workers\",\n )\n parser.add_argument(\n \"--nepochs\",\n default=50,\n type=int,\n help=\"training epoch\",\n )\n parser.add_argument(\n '--test_every',\n default=1,\n type=int,\n help='test on val every (default: 10)')\n\n parser.add_argument(\n \"--weights\",\n default=0.5,\n type=float,\n help=\"unbalanced positive class weight (default: 0.5, balanced classes)\",\n )\n\n 
parser.add_argument(\n \"--k\",\n default=1,\n type=int,\n help=\"top k tiles are assumed to be of the same class as the slide (default: 1, standard MIL)\",\n )\n \n args = parser.parse_args()\n main(args)\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.empty",
"pandas.read_csv",
"numpy.equal",
"numpy.not_equal",
"torch.no_grad",
"numpy.logical_and",
"torch.nn.CrossEntropyLoss",
"numpy.lexsort",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"torch.utils.tensorboard.SummaryWriter",
"numpy.array",
"torch.Tensor"
]
] |
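The row above embeds a MIL training script whose slide-level aggregation hinges on the `group_argtopk` and `group_max` helpers. The snippet below is a minimal, self-contained sketch (not part of the dataset row; the toy slide indices and probabilities are invented for illustration) showing what those two helpers compute: the indices of the top-k highest-probability tiles within each slide, and the per-slide maximum probability.

```python
# Standalone sketch of the two grouping helpers from the MIL script above.
import numpy as np

def group_argtopk(groups, data, k=1):
    # Sort by (group, probability) ascending, then keep the last k entries of
    # every group: these are the k highest-probability tiles per slide.
    k = min(k, len(data))
    order = np.lexsort((data, groups))
    groups = groups[order]
    index = np.empty(len(groups), dtype=bool)
    index[-k:] = True
    index[:-k] = groups[k:] != groups[:-k]
    return list(order[index])

def group_max(groups, data, nmax):
    # Per-slide maximum probability; slides with no tiles stay NaN.
    out = np.full(nmax, np.nan)
    order = np.lexsort((data, groups))
    groups, data = groups[order], data[order]
    index = np.empty(len(groups), dtype=bool)
    index[-1] = True
    index[:-1] = groups[1:] != groups[:-1]
    out[groups[index]] = data[index]
    return out

# Toy example: 2 slides with 3 tiles each (hypothetical values).
slide_idx = np.array([0, 0, 0, 1, 1, 1])
probs     = np.array([0.1, 0.9, 0.4, 0.2, 0.3, 0.8])
print(group_argtopk(slide_idx, probs, k=1))  # -> [1, 5]: best tile per slide
print(group_max(slide_idx, probs, nmax=2))   # -> [0.9, 0.8]
```

In the script above, the indices returned by `group_argtopk` feed `maketraindata`, so each epoch trains only on the tiles the current model ranks as most informative within every slide, while `group_max` is used to derive slide-level predictions from tile probabilities.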
siddhantwahal/scipy | [
"411fbbda0f942fcce3e4b314efb11c4553baaa7c"
] | [
"scipy/stats/_distn_infrastructure.py"
] | [
"#\n# Author: Travis Oliphant 2002-2011 with contributions from\n# SciPy Developers 2004-2011\n#\nfrom scipy._lib._util import getfullargspec_no_self as _getfullargspec\n\nimport sys\nimport keyword\nimport re\nimport types\nimport warnings\nimport inspect\nfrom itertools import zip_longest\n\nfrom scipy._lib import doccer\nfrom ._distr_params import distcont, distdiscrete\nfrom scipy._lib._util import check_random_state\nfrom scipy._lib._util import _valarray as valarray\n\nfrom scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)\n\n# for root finding for continuous distribution ppf, and max likelihood estimation\nfrom scipy import optimize\n\n# for functions of continuous distributions (e.g. moments, entropy, cdf)\nfrom scipy import integrate\n\n# to approximate the pdf of a continuous distribution given its cdf\nfrom scipy.misc import derivative\n\nfrom numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,\n logical_and, log, sqrt, place, argmax, vectorize, asarray,\n nan, inf, isinf, NINF, empty)\n\nimport numpy as np\n\nfrom ._constants import _XMAX\n\n\n# These are the docstring parts used for substitution in specific\n# distribution docstrings\n\ndocheaders = {'methods': \"\"\"\\nMethods\\n-------\\n\"\"\",\n 'notes': \"\"\"\\nNotes\\n-----\\n\"\"\",\n 'examples': \"\"\"\\nExamples\\n--------\\n\"\"\"}\n\n_doc_rvs = \"\"\"\\\nrvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)\n Random variates.\n\"\"\"\n_doc_pdf = \"\"\"\\\npdf(x, %(shapes)s, loc=0, scale=1)\n Probability density function.\n\"\"\"\n_doc_logpdf = \"\"\"\\\nlogpdf(x, %(shapes)s, loc=0, scale=1)\n Log of the probability density function.\n\"\"\"\n_doc_pmf = \"\"\"\\\npmf(k, %(shapes)s, loc=0, scale=1)\n Probability mass function.\n\"\"\"\n_doc_logpmf = \"\"\"\\\nlogpmf(k, %(shapes)s, loc=0, scale=1)\n Log of the probability mass function.\n\"\"\"\n_doc_cdf = \"\"\"\\\ncdf(x, %(shapes)s, loc=0, scale=1)\n Cumulative distribution function.\n\"\"\"\n_doc_logcdf = \"\"\"\\\nlogcdf(x, %(shapes)s, loc=0, scale=1)\n Log of the cumulative distribution function.\n\"\"\"\n_doc_sf = \"\"\"\\\nsf(x, %(shapes)s, loc=0, scale=1)\n Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).\n\"\"\"\n_doc_logsf = \"\"\"\\\nlogsf(x, %(shapes)s, loc=0, scale=1)\n Log of the survival function.\n\"\"\"\n_doc_ppf = \"\"\"\\\nppf(q, %(shapes)s, loc=0, scale=1)\n Percent point function (inverse of ``cdf`` --- percentiles).\n\"\"\"\n_doc_isf = \"\"\"\\\nisf(q, %(shapes)s, loc=0, scale=1)\n Inverse survival function (inverse of ``sf``).\n\"\"\"\n_doc_moment = \"\"\"\\\nmoment(n, %(shapes)s, loc=0, scale=1)\n Non-central moment of order n\n\"\"\"\n_doc_stats = \"\"\"\\\nstats(%(shapes)s, loc=0, scale=1, moments='mv')\n Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').\n\"\"\"\n_doc_entropy = \"\"\"\\\nentropy(%(shapes)s, loc=0, scale=1)\n (Differential) entropy of the RV.\n\"\"\"\n_doc_fit = \"\"\"\\\nfit(data)\n Parameter estimates for generic data.\n See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the\n keyword arguments.\n\"\"\"\n_doc_expect = \"\"\"\\\nexpect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_expect_discrete = \"\"\"\\\nexpect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, 
conditional=False)\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_median = \"\"\"\\\nmedian(%(shapes)s, loc=0, scale=1)\n Median of the distribution.\n\"\"\"\n_doc_mean = \"\"\"\\\nmean(%(shapes)s, loc=0, scale=1)\n Mean of the distribution.\n\"\"\"\n_doc_var = \"\"\"\\\nvar(%(shapes)s, loc=0, scale=1)\n Variance of the distribution.\n\"\"\"\n_doc_std = \"\"\"\\\nstd(%(shapes)s, loc=0, scale=1)\n Standard deviation of the distribution.\n\"\"\"\n_doc_interval = \"\"\"\\\ninterval(alpha, %(shapes)s, loc=0, scale=1)\n Endpoints of the range that contains alpha percent of the distribution\n\"\"\"\n_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,\n _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,\n _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,\n _doc_stats, _doc_entropy, _doc_fit,\n _doc_expect, _doc_median,\n _doc_mean, _doc_var, _doc_std, _doc_interval])\n\n_doc_default_longsummary = \"\"\"\\\nAs an instance of the `rv_continuous` class, `%(name)s` object inherits from it\na collection of generic methods (see below for the full list),\nand completes them with details specific for this particular distribution.\n\"\"\"\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape,\nlocation, and scale parameters returning a \"frozen\" continuous RV object:\n\nrv = %(name)s(%(shapes)s, loc=0, scale=1)\n - Frozen RV object with the same methods but holding the given shape,\n location, and scale fixed.\n\"\"\"\n_doc_default_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability density function (``pdf``):\n\n>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s), 100)\n>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),\n... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape, location and scale parameters. This returns a \"frozen\"\nRV object holding the given parameters fixed.\n\nFreeze the distribution and display the frozen ``pdf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)\n>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\nAnd compare the histogram:\n\n>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\n\"\"\"\n\n_doc_default_locscale = \"\"\"\\\nThe probability density above is defined in the \"standardized\" form. 
To shift\nand/or scale the distribution use the ``loc`` and ``scale`` parameters.\nSpecifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically\nequivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with\n``y = (x - loc) / scale``.\n\"\"\"\n\n_doc_default = ''.join([_doc_default_longsummary,\n _doc_allmethods,\n '\\n',\n _doc_default_example])\n\n_doc_default_before_notes = ''.join([_doc_default_longsummary,\n _doc_allmethods])\n\ndocdict = {\n 'rvs': _doc_rvs,\n 'pdf': _doc_pdf,\n 'logpdf': _doc_logpdf,\n 'cdf': _doc_cdf,\n 'logcdf': _doc_logcdf,\n 'sf': _doc_sf,\n 'logsf': _doc_logsf,\n 'ppf': _doc_ppf,\n 'isf': _doc_isf,\n 'stats': _doc_stats,\n 'entropy': _doc_entropy,\n 'fit': _doc_fit,\n 'moment': _doc_moment,\n 'expect': _doc_expect,\n 'interval': _doc_interval,\n 'mean': _doc_mean,\n 'std': _doc_std,\n 'var': _doc_var,\n 'median': _doc_median,\n 'allmethods': _doc_allmethods,\n 'longsummary': _doc_default_longsummary,\n 'frozennote': _doc_default_frozen_note,\n 'example': _doc_default_example,\n 'default': _doc_default,\n 'before_notes': _doc_default_before_notes,\n 'after_notes': _doc_default_locscale\n}\n\n# Reuse common content between continuous and discrete docs, change some\n# minor bits.\ndocdict_discrete = docdict.copy()\n\ndocdict_discrete['pmf'] = _doc_pmf\ndocdict_discrete['logpmf'] = _doc_logpmf\ndocdict_discrete['expect'] = _doc_expect_discrete\n_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',\n 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',\n 'mean', 'var', 'std', 'interval']\nfor obj in _doc_disc_methods:\n docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')\n\n_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']\nfor obj in _doc_disc_methods_err_varname:\n docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')\n\ndocdict_discrete.pop('pdf')\ndocdict_discrete.pop('logpdf')\n\n_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])\ndocdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods\n\ndocdict_discrete['longsummary'] = _doc_default_longsummary.replace(\n 'rv_continuous', 'rv_discrete')\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape and\nlocation parameters returning a \"frozen\" discrete RV object:\n\nrv = %(name)s(%(shapes)s, loc=0)\n - Frozen RV object with the same methods but holding the given shape and\n location fixed.\n\"\"\"\ndocdict_discrete['frozennote'] = _doc_default_frozen_note\n\n_doc_default_discrete_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability mass function (``pmf``):\n\n>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s))\n>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')\n>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape and location. This returns a \"frozen\" RV object holding\nthe given parameters fixed.\n\nFreeze the distribution and display the frozen ``pmf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,\n... 
label='frozen pmf')\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> prob = %(name)s.cdf(x, %(shapes)s)\n>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\"\"\"\n\n\n_doc_default_discrete_locscale = \"\"\"\\\nThe probability mass function above is defined in the \"standardized\" form.\nTo shift distribution use the ``loc`` parameter.\nSpecifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically\nequivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.\n\"\"\"\n\ndocdict_discrete['example'] = _doc_default_discrete_example\ndocdict_discrete['after_notes'] = _doc_default_discrete_locscale\n\n_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods']])\ndocdict_discrete['before_notes'] = _doc_default_before_notes\n\n_doc_default_disc = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods'],\n docdict_discrete['frozennote'],\n docdict_discrete['example']])\ndocdict_discrete['default'] = _doc_default_disc\n\n# clean up all the separate docstring elements, we do not need them anymore\nfor obj in [s for s in dir() if s.startswith('_doc_')]:\n exec('del ' + obj)\ndel obj\n\n\ndef _moment(data, n, mu=None):\n if mu is None:\n mu = data.mean()\n return ((data - mu)**n).mean()\n\n\ndef _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):\n if (n == 0):\n return 1.0\n elif (n == 1):\n if mu is None:\n val = moment_func(1, *args)\n else:\n val = mu\n elif (n == 2):\n if mu2 is None or mu is None:\n val = moment_func(2, *args)\n else:\n val = mu2 + mu*mu\n elif (n == 3):\n if g1 is None or mu2 is None or mu is None:\n val = moment_func(3, *args)\n else:\n mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment\n val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment\n elif (n == 4):\n if g1 is None or g2 is None or mu2 is None or mu is None:\n val = moment_func(4, *args)\n else:\n mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment\n mu3 = g1*np.power(mu2, 1.5) # 3rd central moment\n val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu\n else:\n val = moment_func(n, *args)\n\n return val\n\n\ndef _skew(data):\n \"\"\"\n skew is third central moment / variance**(1.5)\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m3 = ((data - mu)**3).mean()\n return m3 / np.power(m2, 1.5)\n\n\ndef _kurtosis(data):\n \"\"\"\n kurtosis is fourth central moment / variance**2 - 3\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m4 = ((data - mu)**4).mean()\n return m4 / m2**2 - 3\n\n\n# Frozen RV class\nclass rv_frozen(object):\n\n def __init__(self, dist, *args, **kwds):\n self.args = args\n self.kwds = kwds\n\n # create a new instance\n self.dist = dist.__class__(**dist._updated_ctor_param())\n\n shapes, _, _ = self.dist._parse_args(*args, **kwds)\n self.a, self.b = self.dist._get_support(*shapes)\n\n @property\n def random_state(self):\n return self.dist._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self.dist._random_state = check_random_state(seed)\n\n def pdf(self, x): # raises AttributeError in frozen discrete distribution\n return self.dist.pdf(x, *self.args, **self.kwds)\n\n def logpdf(self, x):\n return self.dist.logpdf(x, *self.args, **self.kwds)\n\n def cdf(self, x):\n return self.dist.cdf(x, *self.args, **self.kwds)\n\n def logcdf(self, x):\n return self.dist.logcdf(x, *self.args, **self.kwds)\n\n def ppf(self, q):\n return 
self.dist.ppf(q, *self.args, **self.kwds)\n\n def isf(self, q):\n return self.dist.isf(q, *self.args, **self.kwds)\n\n def rvs(self, size=None, random_state=None):\n kwds = self.kwds.copy()\n kwds.update({'size': size, 'random_state': random_state})\n return self.dist.rvs(*self.args, **kwds)\n\n def sf(self, x):\n return self.dist.sf(x, *self.args, **self.kwds)\n\n def logsf(self, x):\n return self.dist.logsf(x, *self.args, **self.kwds)\n\n def stats(self, moments='mv'):\n kwds = self.kwds.copy()\n kwds.update({'moments': moments})\n return self.dist.stats(*self.args, **kwds)\n\n def median(self):\n return self.dist.median(*self.args, **self.kwds)\n\n def mean(self):\n return self.dist.mean(*self.args, **self.kwds)\n\n def var(self):\n return self.dist.var(*self.args, **self.kwds)\n\n def std(self):\n return self.dist.std(*self.args, **self.kwds)\n\n def moment(self, n):\n return self.dist.moment(n, *self.args, **self.kwds)\n\n def entropy(self):\n return self.dist.entropy(*self.args, **self.kwds)\n\n def pmf(self, k):\n return self.dist.pmf(k, *self.args, **self.kwds)\n\n def logpmf(self, k):\n return self.dist.logpmf(k, *self.args, **self.kwds)\n\n def interval(self, alpha):\n return self.dist.interval(alpha, *self.args, **self.kwds)\n\n def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):\n # expect method only accepts shape parameters as positional args\n # hence convert self.args, self.kwds, also loc/scale\n # See the .expect method docstrings for the meaning of\n # other parameters.\n a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)\n if isinstance(self.dist, rv_discrete):\n return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)\n else:\n return self.dist.expect(func, a, loc, scale, lb, ub,\n conditional, **kwds)\n\n def support(self):\n return self.dist.support(*self.args, **self.kwds)\n\n\n# This should be rewritten\ndef argsreduce(cond, *args):\n \"\"\"Return the sequence of ravel(args[i]) where ravel(condition) is\n True in 1D.\n\n Examples\n --------\n >>> import numpy as np\n >>> rand = np.random.random_sample\n >>> A = rand((4, 5))\n >>> B = 2\n >>> C = rand((1, 5))\n >>> cond = np.ones(A.shape)\n >>> [A1, B1, C1] = argsreduce(cond, A, B, C)\n >>> B1.shape\n (20,)\n >>> cond[2,:] = 0\n >>> [A2, B2, C2] = argsreduce(cond, A, B, C)\n >>> B2.shape\n (15,)\n\n \"\"\"\n newargs = np.atleast_1d(*args)\n if not isinstance(newargs, list):\n newargs = [newargs, ]\n expand_arr = (cond == cond)\n return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]\n\n\nparse_arg_template = \"\"\"\ndef _parse_args(self, %(shape_arg_str)s %(locscale_in)s):\n return (%(shape_arg_str)s), %(locscale_out)s\n\ndef _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):\n return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)\n\ndef _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):\n return (%(shape_arg_str)s), %(locscale_out)s, moments\n\"\"\"\n\n\n# Both the continuous and discrete distributions depend on ncx2.\n# The function name ncx2 is an abbreviation for noncentral chi squared.\n\ndef _ncx2_log_pdf(x, df, nc):\n # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the\n # factor of exp(-xs*ns) into the ive function to improve numerical\n # stability at large values of xs. 
See also `rice.pdf`.\n df2 = df/2.0 - 1.0\n xs, ns = np.sqrt(x), np.sqrt(nc)\n res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2\n res += np.log(ive(df2, xs*ns) / 2.0)\n return res\n\n\ndef _ncx2_pdf(x, df, nc):\n return np.exp(_ncx2_log_pdf(x, df, nc))\n\n\ndef _ncx2_cdf(x, df, nc):\n return chndtr(x, df, nc)\n\n\nclass rv_generic(object):\n \"\"\"Class which encapsulates common functionality between rv_discrete\n and rv_continuous.\n\n \"\"\"\n def __init__(self, seed=None):\n super(rv_generic, self).__init__()\n\n # figure out if _stats signature has 'moments' keyword\n sig = _getfullargspec(self._stats)\n self._stats_has_moments = ((sig.varkw is not None) or\n ('moments' in sig.args) or\n ('moments' in sig.kwonlyargs))\n self._random_state = check_random_state(seed)\n\n # For historical reasons, `size` was made an attribute that was read\n # inside _rvs(). The code is being changed so that 'size' is an argument\n # to self._rvs(). However some external (non-SciPy) distributions have not\n # been updated. Maintain backwards compatibility by checking if\n # the self._rvs() signature has the 'size' keyword, or a **kwarg,\n # and if not set self._size inside self.rvs() before calling self._rvs().\n argspec = inspect.getfullargspec(self._rvs)\n self._rvs_uses_size_attribute = (argspec.varkw is None and\n 'size' not in argspec.args and\n 'size' not in argspec.kwonlyargs)\n # Warn on first use only\n self._rvs_size_warned = False\n\n @property\n def random_state(self):\n \"\"\" Get or set the RandomState object for generating random variates.\n\n This can be either None, int, a RandomState instance, or a\n np.random.Generator instance.\n\n If None (or np.random), use the RandomState singleton used by np.random.\n If already a RandomState or Generator instance, use it.\n If an int, use a new RandomState instance seeded with seed.\n\n \"\"\"\n return self._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._random_state = check_random_state(seed)\n\n def __getstate__(self):\n return self._updated_ctor_param(), self._random_state\n\n def __setstate__(self, state):\n ctor_param, r = state\n self.__init__(**ctor_param)\n self._random_state = r\n return self\n\n def _construct_argparser(\n self, meths_to_inspect, locscale_in, locscale_out):\n \"\"\"Construct the parser for the shape arguments.\n\n Generates the argument-parsing functions dynamically and attaches\n them to the instance.\n Is supposed to be called in __init__ of a class for each distribution.\n\n If self.shapes is a non-empty string, interprets it as a\n comma-separated list of shape parameters.\n\n Otherwise inspects the call signatures of `meths_to_inspect`\n and constructs the argument-parsing functions from these.\n In this case also sets `shapes` and `numargs`.\n \"\"\"\n\n if self.shapes:\n # sanitize the user-supplied shapes\n if not isinstance(self.shapes, str):\n raise TypeError('shapes must be a string.')\n\n shapes = self.shapes.replace(',', ' ').split()\n\n for field in shapes:\n if keyword.iskeyword(field):\n raise SyntaxError('keywords cannot be used as shapes.')\n if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):\n raise SyntaxError(\n 'shapes must be valid python identifiers')\n else:\n # find out the call signatures (_pdf, _cdf etc), deduce shape\n # arguments. 
Generic methods only have 'self, x', any further args\n # are shapes.\n shapes_list = []\n for meth in meths_to_inspect:\n shapes_args = _getfullargspec(meth) # NB: does not contain self\n args = shapes_args.args[1:] # peel off 'x', too\n\n if args:\n shapes_list.append(args)\n\n # *args or **kwargs are not allowed w/automatic shapes\n if shapes_args.varargs is not None:\n raise TypeError(\n '*args are not allowed w/out explicit shapes')\n if shapes_args.varkw is not None:\n raise TypeError(\n '**kwds are not allowed w/out explicit shapes')\n if shapes_args.kwonlyargs:\n raise TypeError(\n 'kwonly args are not allowed w/out explicit shapes')\n if shapes_args.defaults is not None:\n raise TypeError('defaults are not allowed for shapes')\n\n if shapes_list:\n shapes = shapes_list[0]\n\n # make sure the signatures are consistent\n for item in shapes_list:\n if item != shapes:\n raise TypeError('Shape arguments are inconsistent.')\n else:\n shapes = []\n\n # have the arguments, construct the method from template\n shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None\n dct = dict(shape_arg_str=shapes_str,\n locscale_in=locscale_in,\n locscale_out=locscale_out,\n )\n ns = {}\n exec(parse_arg_template % dct, ns)\n # NB: attach to the instance, not class\n for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:\n setattr(self, name, types.MethodType(ns[name], self))\n\n self.shapes = ', '.join(shapes) if shapes else None\n if not hasattr(self, 'numargs'):\n # allows more general subclassing with *args\n self.numargs = len(shapes)\n\n def _construct_doc(self, docdict, shapes_vals=None):\n \"\"\"Construct the instance docstring with string substitutions.\"\"\"\n tempdict = docdict.copy()\n tempdict['name'] = self.name or 'distname'\n tempdict['shapes'] = self.shapes or ''\n\n if shapes_vals is None:\n shapes_vals = ()\n vals = ', '.join('%.3g' % val for val in shapes_vals)\n tempdict['vals'] = vals\n\n tempdict['shapes_'] = self.shapes or ''\n if self.shapes and self.numargs == 1:\n tempdict['shapes_'] += ','\n\n if self.shapes:\n tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)\n else:\n tempdict['set_vals_stmt'] = ''\n\n if self.shapes is None:\n # remove shapes from call parameters if there are none\n for item in ['default', 'before_notes']:\n tempdict[item] = tempdict[item].replace(\n \"\\n%(shapes)s : array_like\\n shape parameters\", \"\")\n for i in range(2):\n if self.shapes is None:\n # necessary because we use %(shapes)s in two forms (w w/o \", \")\n self.__doc__ = self.__doc__.replace(\"%(shapes)s, \", \"\")\n try:\n self.__doc__ = doccer.docformat(self.__doc__, tempdict)\n except TypeError as e:\n raise Exception(\"Unable to construct docstring for distribution \\\"%s\\\": %s\" % (self.name, repr(e)))\n\n # correct for empty shapes\n self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')\n\n def _construct_default_doc(self, longname=None, extradoc=None,\n docdict=None, discrete='continuous'):\n \"\"\"Construct instance docstring from the default template.\"\"\"\n if longname is None:\n longname = 'A'\n if extradoc is None:\n extradoc = ''\n if extradoc.startswith('\\n\\n'):\n extradoc = extradoc[2:]\n self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),\n '\\n\\n%(before_notes)s\\n', docheaders['notes'],\n extradoc, '\\n%(example)s'])\n self._construct_doc(docdict)\n\n def freeze(self, *args, **kwds):\n \"\"\"Freeze the distribution for the given arguments.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution. Should include all\n the non-optional arguments, may include ``loc`` and ``scale``.\n\n Returns\n -------\n rv_frozen : rv_frozen instance\n The frozen distribution.\n\n \"\"\"\n return rv_frozen(self, *args, **kwds)\n\n def __call__(self, *args, **kwds):\n return self.freeze(*args, **kwds)\n __call__.__doc__ = freeze.__doc__\n\n # The actual calculation functions (no basic checking need be done)\n # If these are defined, the others won't be looked at.\n # Otherwise, the other set can be defined.\n def _stats(self, *args, **kwds):\n return None, None, None, None\n\n # Noncentral moments (also known as the moment about the origin).\n # Expressed in LaTeX, munp would be $\\mu'_{n}$, i.e. \"mu-sub-n-prime\".\n # The primed mu is a widely used notation for the noncentral moment.\n def _munp(self, n, *args):\n # Silence floating point warnings from integration.\n with np.errstate(all='ignore'):\n vals = self.generic_moment(n, *args)\n return vals\n\n def _argcheck_rvs(self, *args, **kwargs):\n # Handle broadcasting and size validation of the rvs method.\n # Subclasses should not have to override this method.\n # The rule is that if `size` is not None, then `size` gives the\n # shape of the result (integer values of `size` are treated as\n # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)\n #\n # `args` is expected to contain the shape parameters (if any), the\n # location and the scale in a flat tuple (e.g. if there are two\n # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).\n # The only keyword argument expected is 'size'.\n size = kwargs.get('size', None)\n all_bcast = np.broadcast_arrays(*args)\n\n def squeeze_left(a):\n while a.ndim > 0 and a.shape[0] == 1:\n a = a[0]\n return a\n\n # Eliminate trivial leading dimensions. In the convention\n # used by numpy's random variate generators, trivial leading\n # dimensions are effectively ignored. In other words, when `size`\n # is given, trivial leading dimensions of the broadcast parameters\n # in excess of the number of dimensions in size are ignored, e.g.\n # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)\n # array([ 1.00104267, 3.00422496, 4.99799278])\n # If `size` is not given, the exact broadcast shape is preserved:\n # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])\n # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])\n #\n all_bcast = [squeeze_left(a) for a in all_bcast]\n bcast_shape = all_bcast[0].shape\n bcast_ndim = all_bcast[0].ndim\n\n if size is None:\n size_ = bcast_shape\n else:\n size_ = tuple(np.atleast_1d(size))\n\n # Check compatibility of size_ with the broadcast shape of all\n # the parameters. This check is intended to be consistent with\n # how the numpy random variate generators (e.g. np.random.normal,\n # np.random.beta) handle their arguments. The rule is that, if size\n # is given, it determines the shape of the output. Broadcasting\n # can't change the output size.\n\n # This is the standard broadcasting convention of extending the\n # shape with fewer dimensions with enough dimensions of length 1\n # so that the two shapes have the same number of dimensions.\n ndiff = bcast_ndim - len(size_)\n if ndiff < 0:\n bcast_shape = (1,)*(-ndiff) + bcast_shape\n elif ndiff > 0:\n size_ = (1,)*ndiff + size_\n\n # This compatibility test is not standard. In \"regular\" broadcasting,\n # two shapes are compatible if for each dimension, the lengths are the\n # same or one of the lengths is 1. 
Here, the length of a dimension in\n # size_ must not be less than the corresponding length in bcast_shape.\n ok = all([bcdim == 1 or bcdim == szdim\n for (bcdim, szdim) in zip(bcast_shape, size_)])\n if not ok:\n raise ValueError(\"size does not match the broadcast shape of \"\n \"the parameters. %s, %s, %s\" % (size, size_, bcast_shape))\n\n param_bcast = all_bcast[:-2]\n loc_bcast = all_bcast[-2]\n scale_bcast = all_bcast[-1]\n\n return param_bcast, loc_bcast, scale_bcast, size_\n\n ## These are the methods you must define (standard form functions)\n ## NB: generic _pdf, _logpdf, _cdf are different for\n ## rv_continuous and rv_discrete hence are defined in there\n def _argcheck(self, *args):\n \"\"\"Default check for correct values on args and keywords.\n\n Returns condition array of 1's where arguments are correct and\n 0's where they are not.\n\n \"\"\"\n cond = 1\n for arg in args:\n cond = logical_and(cond, (asarray(arg) > 0))\n return cond\n\n def _get_support(self, *args, **kwargs):\n \"\"\"Return the support of the (unscaled, unshifted) distribution.\n\n *Must* be overridden by distributions which have support dependent\n upon the shape parameters of the distribution. Any such override\n *must not* set or change any of the class members, as these members\n are shared amongst all instances of the distribution.\n\n Parameters\n ----------\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n Returns\n -------\n a, b : numeric (float, or int or +/-np.inf)\n end-points of the distribution's support for the specified\n shape parameters.\n \"\"\"\n return self.a, self.b\n\n def _support_mask(self, x, *args):\n a, b = self._get_support(*args)\n with np.errstate(invalid='ignore'):\n return (a <= x) & (x <= b)\n\n def _open_support_mask(self, x, *args):\n a, b = self._get_support(*args)\n with np.errstate(invalid='ignore'):\n return (a < x) & (x < b)\n\n def _rvs(self, *args, size=None, random_state=None):\n # This method must handle size being a tuple, and it must\n # properly broadcast *args and size. size might be\n # an empty tuple, which means a scalar random variate is to be\n # generated.\n\n ## Use basic inverse cdf algorithm for RV generation as default.\n U = random_state.uniform(size=size)\n Y = self._ppf(U, *args)\n return Y\n\n def _logcdf(self, x, *args):\n with np.errstate(divide='ignore'):\n return log(self._cdf(x, *args))\n\n def _sf(self, x, *args):\n return 1.0-self._cdf(x, *args)\n\n def _logsf(self, x, *args):\n with np.errstate(divide='ignore'):\n return log(self._sf(x, *args))\n\n def _ppf(self, q, *args):\n return self._ppfvec(q, *args)\n\n def _isf(self, q, *args):\n return self._ppf(1.0-q, *args) # use correct _ppf for subclasses\n\n # These are actually called, and should not be overwritten if you\n # want to keep error checking.\n def rvs(self, *args, **kwds):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional\n Scale parameter (default=1).\n size : int or tuple of ints, optional\n Defining number of random variates (default is 1).\n random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n discrete = kwds.pop('discrete', None)\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n if np.all(scale == 0):\n return loc*ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n random_state = check_random_state(rndm)\n else:\n random_state = self._random_state\n\n # Maintain backwards compatibility by setting self._size\n # for distributions that still need it.\n if self._rvs_uses_size_attribute:\n if not self._rvs_size_warned:\n warnings.warn(\n f'The signature of {self._rvs} does not contain '\n f'a \"size\" keyword. Such signatures are deprecated.',\n np.VisibleDeprecationWarning)\n self._rvs_size_warned = True\n self._size = size\n self._random_state = random_state\n vals = self._rvs(*args)\n else:\n vals = self._rvs(*args, size=size, random_state=random_state)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n # Cast to int if discrete\n if discrete:\n if size == ():\n vals = int(vals)\n else:\n vals = vals.astype(int)\n\n return vals\n\n def stats(self, *args, **kwds):\n \"\"\"\n Some statistics of the given RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional (continuous RVs only)\n scale parameter (default=1)\n moments : str, optional\n composed of letters ['mvsk'] defining which moments to compute:\n 'm' = mean,\n 'v' = variance,\n 's' = (Fisher's) skew,\n 'k' = (Fisher's) kurtosis.\n (default is 'mv')\n\n Returns\n -------\n stats : sequence\n of requested moments.\n\n \"\"\"\n args, loc, scale, moments = self._parse_args_stats(*args, **kwds)\n # scale = 1 by construction for discrete RVs\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = []\n default = valarray(shape(cond), self.badvalue)\n\n # Use only entries that are valid in calculation\n if np.any(cond):\n goodargs = argsreduce(cond, *(args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n\n if self._stats_has_moments:\n mu, mu2, g1, g2 = self._stats(*goodargs,\n **{'moments': moments})\n else:\n mu, mu2, g1, g2 = self._stats(*goodargs)\n if g1 is None:\n mu3 = None\n else:\n if mu2 is None:\n mu2 = self._munp(2, *goodargs)\n if g2 is None:\n # (mu2**1.5) breaks down for nan and inf\n mu3 = g1 * np.power(mu2, 1.5)\n\n if 'm' in moments:\n if mu is None:\n mu = self._munp(1, *goodargs)\n out0 = default.copy()\n place(out0, cond, mu * scale + loc)\n output.append(out0)\n\n if 'v' in moments:\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n # if mean is inf then var is also inf\n with np.errstate(invalid='ignore'):\n mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)\n out0 = default.copy()\n place(out0, cond, mu2 * scale * scale)\n output.append(out0)\n\n if 's' in moments:\n if g1 is None:\n mu3p = self._munp(3, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n with np.errstate(invalid='ignore'):\n mu3 = (-mu*mu - 3*mu2)*mu + mu3p\n g1 = mu3 / np.power(mu2, 1.5)\n out0 = default.copy()\n place(out0, cond, g1)\n output.append(out0)\n\n if 'k' in moments:\n if g2 is None:\n mu4p = self._munp(4, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n if mu3 is None:\n mu3p = self._munp(3, *goodargs)\n with np.errstate(invalid='ignore'):\n mu3 = (-mu * mu - 3 * mu2) * mu + mu3p\n with np.errstate(invalid='ignore'):\n mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p\n g2 = mu4 / mu2**2.0 - 3.0\n out0 = default.copy()\n place(out0, cond, g2)\n output.append(out0)\n else: # no valid args\n output = [default.copy() for _ in moments]\n\n if len(output) == 1:\n return output[0]\n else:\n return tuple(output)\n\n def entropy(self, *args, **kwds):\n \"\"\"\n Differential entropy of the RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional (continuous distributions only).\n Scale parameter (default=1).\n\n Notes\n -----\n Entropy is defined base `e`:\n\n >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))\n >>> np.allclose(drv.entropy(), np.log(2.0))\n True\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n # NB: for discrete distributions scale=1 by construction in _parse_args\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = zeros(shape(cond0), 'd')\n place(output, (1-cond0), self.badvalue)\n goodargs = argsreduce(cond0, scale, *args)\n goodscale = goodargs[0]\n goodargs = goodargs[1:]\n place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))\n return output\n\n def moment(self, n, *args, **kwds):\n \"\"\"\n n-th order non-central moment of distribution.\n\n Parameters\n ----------\n n : int, n >= 1\n Order of moment.\n arg1, arg2, arg3,... : float\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n if not (self._argcheck(*args) and (scale > 0)):\n return nan\n if (floor(n) != n):\n raise ValueError(\"Moment must be an integer.\")\n if (n < 0):\n raise ValueError(\"Moment must be positive.\")\n mu, mu2, g1, g2 = None, None, None, None\n if (n > 0) and (n < 5):\n if self._stats_has_moments:\n mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}\n else:\n mdict = {}\n mu, mu2, g1, g2 = self._stats(*args, **mdict)\n val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)\n\n # Convert to transformed X = L + S*Y\n # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)\n if loc == 0:\n return scale**n * val\n else:\n result = 0\n fac = float(scale) / float(loc)\n for k in range(n):\n valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)\n result += comb(n, k, exact=True)*(fac**k) * valk\n result += fac**n * val\n return result * loc**n\n\n def median(self, *args, **kwds):\n \"\"\"\n Median of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter, Default is 0.\n scale : array_like, optional\n Scale parameter, Default is 1.\n\n Returns\n -------\n median : float\n The median of the distribution.\n\n See Also\n --------\n rv_discrete.ppf\n Inverse of the CDF\n\n \"\"\"\n return self.ppf(0.5, *args, **kwds)\n\n def mean(self, *args, **kwds):\n \"\"\"\n Mean of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n mean : float\n the mean of the distribution\n\n \"\"\"\n kwds['moments'] = 'm'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def var(self, *args, **kwds):\n \"\"\"\n Variance of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n var : float\n the variance of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def std(self, *args, **kwds):\n \"\"\"\n Standard deviation of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n std : float\n standard deviation of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = sqrt(self.stats(*args, **kwds))\n return res\n\n def interval(self, alpha, *args, **kwds):\n \"\"\"\n Confidence interval with equal areas around the median.\n\n Parameters\n ----------\n alpha : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n \"\"\"\n alpha = asarray(alpha)\n if np.any((alpha > 1) | (alpha < 0)):\n raise ValueError(\"alpha must be between 0 and 1 inclusive\")\n q1 = (1.0-alpha)/2\n q2 = (1.0+alpha)/2\n a = self.ppf(q1, *args, **kwds)\n b = self.ppf(q2, *args, **kwds)\n return a, b\n\n def support(self, *args, **kwargs):\n \"\"\"\n Return the support of the distribution.\n\n Parameters\n ----------\n arg1, arg2, ... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n Returns\n -------\n a, b : float\n end-points of the distribution's support.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwargs)\n _a, _b = self._get_support(*args)\n return _a * scale + loc, _b * scale + loc\n\n\ndef _get_fixed_fit_value(kwds, names):\n \"\"\"\n Given names such as `['f0', 'fa', 'fix_a']`, check that there is\n at most one non-None value in `kwds` associaed with those names.\n Return that value, or None if none of the names occur in `kwds`.\n As a side effect, all occurrences of those names in `kwds` are\n removed.\n \"\"\"\n vals = [(name, kwds.pop(name)) for name in names if name in kwds]\n if len(vals) > 1:\n repeated = [name for name, val in vals]\n raise ValueError(\"fit method got multiple keyword arguments to \"\n \"specify the same fixed parameter: \" +\n ', '.join(repeated))\n return vals[0][1] if vals else None\n\n\n## continuous random variables: implement maybe later\n##\n## hf --- Hazard Function (PDF / SF)\n## chf --- Cumulative hazard function (-log(SF))\n## psf --- Probability sparsity function (reciprocal of the pdf) in\n## units of percent-point-function (as a function of q).\n## Also, the derivative of the percent-point function.\n\nclass rv_continuous(rv_generic):\n \"\"\"\n A generic continuous random variable class meant for subclassing.\n\n `rv_continuous` is a base class to construct specific distribution classes\n and instances for continuous random variables. It cannot be used\n directly as a distribution.\n\n Parameters\n ----------\n momtype : int, optional\n The type of generic moment calculation to use: 0 for pdf, 1 (default)\n for ppf.\n a : float, optional\n Lower bound of the support of the distribution, default is minus\n infinity.\n b : float, optional\n Upper bound of the support of the distribution, default is plus\n infinity.\n xtol : float, optional\n The tolerance for fixed point calculation for generic ppf.\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example ``\"m, n\"`` for a\n distribution that takes two integers as the two shape arguments for all\n its methods. If not provided, shape parameters will be inferred from\n the signature of the private methods, ``_pdf`` and ``_cdf`` of the\n instance.\n extradoc : str, optional, deprecated\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. 
Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random variates.\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Methods\n -------\n rvs\n pdf\n logpdf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n fit\n fit_loc_scale\n nnlf\n support\n\n Notes\n -----\n Public methods of an instance of a distribution class (e.g., ``pdf``,\n ``cdf``) check their arguments and pass valid arguments to private,\n computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid\n if it is within the support of the distribution.\n Whether a shape parameter is valid is decided by an ``_argcheck`` method\n (which defaults to checking that its arguments are strictly positive.)\n\n **Subclassing**\n\n New random variables can be defined by subclassing the `rv_continuous` class\n and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized\n to location 0 and scale 1).\n\n If positive argument checking is not correct for your RV\n then you will also need to re-define the ``_argcheck`` method.\n\n For most of the scipy.stats distributions, the support interval doesn't\n depend on the shape parameters. ``x`` being in the support interval is\n equivalent to ``self.a <= x <= self.b``. If either of the endpoints of\n the support do depend on the shape parameters, then\n i) the distribution must implement the ``_get_support`` method; and\n ii) those dependent endpoints must be omitted from the distribution's\n call to the ``rv_continuous`` initializer.\n\n Correct, but potentially slow defaults exist for the remaining\n methods but for speed and/or accuracy you can over-ride::\n\n _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf\n\n The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,\n applied to a uniform random variate. In order to generate random variates\n efficiently, either the default ``_ppf`` needs to be overwritten (e.g.\n if the inverse cdf can expressed in an explicit form) or a sampling\n method needs to be implemented in a custom ``_rvs`` method.\n\n If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.\n The main reason would be to improve numerical accuracy: for example,\n the survival function ``_sf`` is computed as ``1 - _cdf`` which can\n result in loss of precision if ``_cdf(x)`` is close to one.\n\n **Methods that can be overwritten by subclasses**\n ::\n\n _rvs\n _pdf\n _cdf\n _sf\n _ppf\n _isf\n _stats\n _munp\n _entropy\n _argcheck\n _get_support\n\n There are additional (internal and private) generic methods that can\n be useful for cross-checking and for debugging, but might work in all\n cases when directly called.\n\n A note on ``shapes``: subclasses need not specify them explicitly. 
In this\n case, `shapes` will be automatically deduced from the signatures of the\n overridden methods (`pdf`, `cdf` etc).\n If, for some reason, you prefer to avoid relying on introspection, you can\n specify ``shapes`` explicitly as an argument to the instance constructor.\n\n\n **Frozen Distributions**\n\n Normally, you must provide shape parameters (and, optionally, location and\n scale parameters to each call of a method of a distribution.\n\n Alternatively, the object may be called (as a function) to fix the shape,\n location, and scale parameters returning a \"frozen\" continuous RV object:\n\n rv = generic(<shape(s)>, loc=0, scale=1)\n `rv_frozen` object with the same methods but holding the given shape,\n location, and scale fixed\n\n **Statistics**\n\n Statistics are computed using numerical integration by default.\n For speed you can redefine this using ``_stats``:\n\n - take shape parameters and return mu, mu2, g1, g2\n - If you can't compute one of these, return it as None\n - Can also be defined with a keyword argument ``moments``, which is a\n string composed of \"m\", \"v\", \"s\", and/or \"k\".\n Only the components appearing in string should be computed and\n returned in the order \"m\", \"v\", \"s\", or \"k\" with missing values\n returned as None.\n\n Alternatively, you can override ``_munp``, which takes ``n`` and shape\n parameters and returns the n-th non-central moment of the distribution.\n\n Examples\n --------\n To create a new Gaussian distribution, we would do the following:\n\n >>> from scipy.stats import rv_continuous\n >>> class gaussian_gen(rv_continuous):\n ... \"Gaussian distribution\"\n ... def _pdf(self, x):\n ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)\n >>> gaussian = gaussian_gen(name='gaussian')\n\n ``scipy.stats`` distributions are *instances*, so here we subclass\n `rv_continuous` and create an instance. With this, we now have\n a fully functional distribution with all relevant methods automagically\n generated by the framework.\n\n Note that above we defined a standard normal distribution, with zero mean\n and unit variance. 
Shifting and scaling of the distribution can be done\n by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``\n essentially computes ``y = (x - loc) / scale`` and\n ``gaussian._pdf(y) / scale``.\n\n \"\"\"\n def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,\n badvalue=None, name=None, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_continuous, self).__init__(seed)\n\n # save the ctor parameters, cf generic freeze\n self._ctor_param = dict(\n momtype=momtype, a=a, b=b, xtol=xtol,\n badvalue=badvalue, name=name, longname=longname,\n shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.name = name\n self.a = a\n self.b = b\n if a is None:\n self.a = -inf\n if b is None:\n self.b = inf\n self.xtol = xtol\n self.moment_type = momtype\n self.shapes = shapes\n self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],\n locscale_in='loc=0, scale=1',\n locscale_out='loc, scale')\n\n # nin correction\n self._ppfvec = vectorize(self._ppf_single, otypes='d')\n self._ppfvec.nin = self.numargs + 1\n self.vecentropy = vectorize(self._entropy, otypes='d')\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self._cdfvec.nin = self.numargs + 1\n\n self.extradoc = extradoc\n if momtype == 0:\n self.generic_moment = vectorize(self._mom0_sc, otypes='d')\n else:\n self.generic_moment = vectorize(self._mom1_sc, otypes='d')\n # Because of the *args argument of _mom0_sc, vectorize cannot count the\n # number of arguments correctly.\n self.generic_moment.nin = self.numargs + 1\n\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict,\n discrete='continuous')\n else:\n dct = dict(distcont)\n self._construct_doc(docdict, dct.get(self.name))\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['xtol'] = self.xtol\n dct['badvalue'] = self.badvalue\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _ppf_to_solve(self, x, q, *args):\n return self.cdf(*(x, )+args)-q\n\n def _ppf_single(self, q, *args):\n factor = 10.\n left, right = self._get_support(*args)\n\n if np.isinf(left):\n left = min(-factor, right)\n while self._ppf_to_solve(left, q, *args) > 0.:\n left, right = left * factor, left\n # left is now such that cdf(left) <= q\n # if right has changed, then cdf(right) > q\n\n if np.isinf(right):\n right = max(factor, left)\n while self._ppf_to_solve(right, q, *args) < 0.:\n left, right = right, right * factor\n # right is now such that cdf(right) >= q\n\n return optimize.brentq(self._ppf_to_solve,\n left, right, args=(q,)+args, xtol=self.xtol)\n\n # moment from definition\n def _mom_integ0(self, x, m, *args):\n return x**m * self.pdf(x, *args)\n\n def _mom0_sc(self, m, *args):\n _a, _b = self._get_support(*args)\n return integrate.quad(self._mom_integ0, _a, _b,\n args=(m,)+args)[0]\n\n # moment calculated using ppf\n def _mom_integ1(self, q, m, *args):\n return (self.ppf(q, *args))**m\n\n 
def _mom1_sc(self, m, *args):\n return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]\n\n def _pdf(self, x, *args):\n return derivative(self._cdf, x, dx=1e-5, args=args, order=5)\n\n ## Could also define any of these\n def _logpdf(self, x, *args):\n return log(self._pdf(x, *args))\n\n def _cdf_single(self, x, *args):\n _a, _b = self._get_support(*args)\n return integrate.quad(self._pdf, _a, x, args=args)[0]\n\n def _cdf(self, x, *args):\n return self._cdfvec(x, *args)\n\n ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined\n ## in rv_generic\n\n def pdf(self, x, *args, **kwds):\n \"\"\"\n Probability density function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._pdf(*goodargs) / scale)\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpdf(self, x, *args, **kwds):\n \"\"\"\n Log of the probability density function at x of the given RV.\n\n This uses a more numerically accurate calculation if available.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logpdf : array_like\n Log of the probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._logpdf(*goodargs) - log(scale))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, x, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `x`\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = (x >= np.asarray(_b)) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._cdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, x, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = (x >= _b) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, x, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = cond0 & (x <= _a)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._sf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, x, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as (1 - `cdf`),\n evaluated at `x`.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `x`.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = cond0 & (x <= _a)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n lower tail probability\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array_like\n quantile corresponding to the lower tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 0)\n cond3 = cond0 & (q == 1)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = _a * scale + loc\n upper_bound = _b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._ppf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n upper tail probability\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : ndarray or scalar\n Quantile corresponding to the upper tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 1)\n cond3 = cond0 & (q == 0)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = _a * scale + loc\n upper_bound = _b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._isf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def _nnlf(self, x, *args):\n return -np.sum(self._logpdf(x, *args), axis=0)\n\n def _unpack_loc_scale(self, theta):\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n return loc, scale, args\n\n def nnlf(self, theta, x):\n '''Return negative loglikelihood function.\n\n Notes\n -----\n This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the\n parameters (including loc and scale).\n '''\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n n_log_scale = len(x) * log(scale)\n if np.any(~self._support_mask(x, *args)):\n return inf\n return self._nnlf(x, *args) + n_log_scale\n\n def _nnlf_and_penalty(self, x, args):\n cond0 = ~self._support_mask(x, *args)\n n_bad = np.count_nonzero(cond0, axis=0)\n if n_bad > 0:\n x = 
argsreduce(~cond0, x)[0]\n logpdf = self._logpdf(x, *args)\n finite_logpdf = np.isfinite(logpdf)\n n_bad += np.sum(~finite_logpdf, axis=0)\n if n_bad > 0:\n penalty = n_bad * log(_XMAX) * 100\n return -np.sum(logpdf[finite_logpdf], axis=0) + penalty\n return -np.sum(logpdf, axis=0)\n\n def _penalized_nnlf(self, theta, x):\n ''' Return penalized negative loglikelihood function,\n i.e., - sum (log pdf(x, theta), axis=0) + penalty\n where theta are the parameters (including loc and scale)\n '''\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n n_log_scale = len(x) * log(scale)\n return self._nnlf_and_penalty(x, args) + n_log_scale\n\n # return starting point for fit (shape arguments + loc + scale)\n def _fitstart(self, data, args=None):\n if args is None:\n args = (1.0,)*self.numargs\n loc, scale = self._fit_loc_scale_support(data, *args)\n return args + (loc, scale)\n\n def _reduce_func(self, args, kwds):\n \"\"\"\n Return the (possibly reduced) function to optimize in order to find MLE\n estimates for the .fit method.\n \"\"\"\n # Convert fixed shape parameters to the standard numeric form: e.g. for\n # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value\n # for `f0`, `fa` or 'fix_a'. The following converts the latter two\n # into the first (numeric) form.\n if self.shapes:\n shapes = self.shapes.replace(',', ' ').split()\n for j, s in enumerate(shapes):\n key = 'f' + str(j)\n names = [key, 'f' + s, 'fix_' + s]\n val = _get_fixed_fit_value(kwds, names)\n if val is not None:\n kwds[key] = val\n\n args = list(args)\n Nargs = len(args)\n fixedn = []\n names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']\n x0 = []\n for n, key in enumerate(names):\n if key in kwds:\n fixedn.append(n)\n args[n] = kwds.pop(key)\n else:\n x0.append(args[n])\n\n if len(fixedn) == 0:\n func = self._penalized_nnlf\n restore = None\n else:\n if len(fixedn) == Nargs:\n raise ValueError(\n \"All parameters fixed. There is nothing to optimize.\")\n\n def restore(args, theta):\n # Replace with theta for all numbers not in fixedn\n # This allows the non-fixed values to vary, but\n # we still call self.nnlf with all parameters.\n i = 0\n for n in range(Nargs):\n if n not in fixedn:\n args[n] = theta[i]\n i += 1\n return args\n\n def func(theta, x):\n newtheta = restore(args[:], theta)\n return self._penalized_nnlf(newtheta, x)\n\n return x0, func, restore, args\n\n def fit(self, data, *args, **kwds):\n \"\"\"\n Return MLEs for shape (if applicable), location, and scale\n parameters from data.\n\n MLE stands for Maximum Likelihood Estimate. Starting estimates for\n the fit are given by input arguments; for any arguments not provided\n with starting estimates, ``self._fitstart(data)`` is called to generate\n such.\n\n One can hold some parameters fixed to specific values by passing in\n keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)\n and ``floc`` and ``fscale`` (for location and scale parameters,\n respectively).\n\n Parameters\n ----------\n data : array_like\n Data to use in calculating the MLEs.\n arg1, arg2, arg3,... 
: floats, optional\n Starting value(s) for any shape-characterizing arguments (those not\n provided will be determined by a call to ``_fitstart(data)``).\n No default value.\n kwds : floats, optional\n - `loc`: initial guess of the distribution's location parameter.\n - `scale`: initial guess of the distribution's scale parameter.\n\n Special keyword arguments are recognized as holding certain\n parameters fixed:\n\n - f0...fn : hold respective shape parameters fixed.\n Alternatively, shape parameters to fix can be specified by name.\n For example, if ``self.shapes == \"a, b\"``, ``fa`` and ``fix_a``\n are equivalent to ``f0``, and ``fb`` and ``fix_b`` are\n equivalent to ``f1``.\n\n - floc : hold location parameter fixed to specified value.\n\n - fscale : hold scale parameter fixed to specified value.\n\n - optimizer : The optimizer to use. The optimizer must take ``func``,\n and starting position as the first two arguments,\n plus ``args`` (for extra arguments to pass to the\n function to be optimized) and ``disp=0`` to suppress\n output as keyword arguments.\n\n Returns\n -------\n mle_tuple : tuple of floats\n MLEs for any shape parameters (if applicable), followed by those\n for location and scale. For most random variables, shape statistics\n will be returned, but there are exceptions (e.g. ``norm``).\n\n Notes\n -----\n This fit is computed by maximizing a log-likelihood function, with\n penalty applied for samples outside of range of the distribution. The\n returned answer is not guaranteed to be the globally optimal MLE, it\n may only be locally optimal, or the optimization may fail altogether.\n If the data contain any of np.nan, np.inf, or -np.inf, the fit routine\n will throw a RuntimeError.\n\n Examples\n --------\n\n Generate some data to fit: draw random variates from the `beta`\n distribution\n\n >>> from scipy.stats import beta\n >>> a, b = 1., 2.\n >>> x = beta.rvs(a, b, size=1000)\n\n Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):\n\n >>> a1, b1, loc1, scale1 = beta.fit(x)\n\n We can also use some prior knowledge about the dataset: let's keep\n ``loc`` and ``scale`` fixed:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)\n >>> loc1, scale1\n (0, 1)\n\n We can also keep shape parameters fixed by using ``f``-keywords. 
To\n keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,\n equivalently, ``fa=1``:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)\n >>> a1\n 1\n\n Not all distributions return estimates for the shape parameters.\n ``norm`` for example just returns estimates for location and scale:\n\n >>> from scipy.stats import norm\n >>> x = norm.rvs(a, b, size=1000, random_state=123)\n >>> loc1, scale1 = norm.fit(x)\n >>> loc1, scale1\n (0.92087172783841631, 2.0015750750324668)\n \"\"\"\n Narg = len(args)\n if Narg > self.numargs:\n raise TypeError(\"Too many input arguments.\")\n\n if not np.isfinite(data).all():\n raise RuntimeError(\"The data contains non-finite values.\")\n\n start = [None]*2\n if (Narg < self.numargs) or not ('loc' in kwds and\n 'scale' in kwds):\n # get distribution specific starting locations\n start = self._fitstart(data)\n args += start[Narg:-2]\n loc = kwds.pop('loc', start[-2])\n scale = kwds.pop('scale', start[-1])\n args += (loc, scale)\n x0, func, restore, args = self._reduce_func(args, kwds)\n\n optimizer = kwds.pop('optimizer', optimize.fmin)\n # convert string to function in scipy.optimize\n if not callable(optimizer) and isinstance(optimizer, str):\n if not optimizer.startswith('fmin_'):\n optimizer = \"fmin_\"+optimizer\n if optimizer == 'fmin_':\n optimizer = 'fmin'\n try:\n optimizer = getattr(optimize, optimizer)\n except AttributeError:\n raise ValueError(\"%s is not a valid optimizer\" % optimizer)\n\n # by now kwds must be empty, since everybody took what they needed\n if kwds:\n raise TypeError(\"Unknown arguments: %s.\" % kwds)\n\n vals = optimizer(func, x0, args=(ravel(data),), disp=0)\n if restore is not None:\n vals = restore(args, vals)\n vals = tuple(vals)\n return vals\n\n def _fit_loc_scale_support(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data accounting for support.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n data = np.asarray(data)\n\n # Estimate location and scale according to the method of moments.\n loc_hat, scale_hat = self.fit_loc_scale(data, *args)\n\n # Compute the support according to the shape parameters.\n self._argcheck(*args)\n _a, _b = self._get_support(*args)\n a, b = _a, _b\n support_width = b - a\n\n # If the support is empty then return the moment-based estimates.\n if support_width <= 0:\n return loc_hat, scale_hat\n\n # Compute the proposed support according to the loc and scale\n # estimates.\n a_hat = loc_hat + a * scale_hat\n b_hat = loc_hat + b * scale_hat\n\n # Use the moment-based estimates if they are compatible with the data.\n data_a = np.min(data)\n data_b = np.max(data)\n if a_hat < data_a and data_b < b_hat:\n return loc_hat, scale_hat\n\n # Otherwise find other estimates that are compatible with the data.\n data_width = data_b - data_a\n rel_margin = 0.1\n margin = data_width * rel_margin\n\n # For a finite interval, both the location and scale\n # should have interesting values.\n if support_width < np.inf:\n loc_hat = (data_a - a) - margin\n scale_hat = (data_width + 2 * margin) / support_width\n return loc_hat, scale_hat\n\n # For a one-sided interval, use only an interesting location parameter.\n if a > -np.inf:\n return (data_a - a) - margin, 1\n elif b < np.inf:\n return (data_b - b) + margin, 1\n else:\n raise RuntimeError\n\n def fit_loc_scale(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data using 1st and 2nd moments.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n mu, mu2 = self.stats(*args, **{'moments': 'mv'})\n tmp = asarray(data)\n muhat = tmp.mean()\n mu2hat = tmp.var()\n Shat = sqrt(mu2hat / mu2)\n Lhat = muhat - Shat*mu\n if not np.isfinite(Lhat):\n Lhat = 0\n if not (np.isfinite(Shat) and (0 < Shat)):\n Shat = 1\n return Lhat, Shat\n\n def _entropy(self, *args):\n def integ(x):\n val = self._pdf(x, *args)\n return entr(val)\n\n # upper limit is often inf, so suppress warnings when integrating\n _a, _b = self._get_support(*args)\n with np.errstate(over='ignore'):\n h = integrate.quad(integ, _a, _b)[0]\n\n if not np.isnan(h):\n return h\n else:\n # try with different limits if integration problems\n low, upp = self.ppf([1e-10, 1. - 1e-10], *args)\n if np.isinf(_b):\n upper = upp\n else:\n upper = _b\n if np.isinf(_a):\n lower = low\n else:\n lower = _a\n return integrate.quad(integ, lower, upper)[0]\n\n def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,\n conditional=False, **kwds):\n \"\"\"Calculate expected value of a function with respect to the\n distribution by numerical integration.\n\n The expected value of a function ``f(x)`` with respect to a\n distribution ``dist`` is defined as::\n\n ub\n E[f(x)] = Integral(f(x) * dist.pdf(x)),\n lb\n\n where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``\n distribution. If the bounds ``lb`` and ``ub`` correspond to the\n support of the distribution, e.g. 
``[-inf, inf]`` in the default\n case, then the integral is the unrestricted expectation of ``f(x)``.\n Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``\n outside a finite interval in which case the expectation is\n calculated within the finite range ``[lb, ub]``.\n\n Parameters\n ----------\n func : callable, optional\n Function for which integral is calculated. Takes only one argument.\n The default is the identity mapping f(x) = x.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter (default=0).\n scale : float, optional\n Scale parameter (default=1).\n lb, ub : scalar, optional\n Lower and upper bound for integration. Default is set to the\n support of the distribution.\n conditional : bool, optional\n If True, the integral is corrected by the conditional probability\n of the integration interval. The return value is the expectation\n of the function, conditional on being in the given interval.\n Default is False.\n\n Additional keyword arguments are passed to the integration routine.\n\n Returns\n -------\n expect : float\n The calculated expected value.\n\n Notes\n -----\n The integration behavior of this function is inherited from\n `scipy.integrate.quad`. Neither this function nor\n `scipy.integrate.quad` can verify whether the integral exists or is\n finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and\n ``cauchy(0).expect()`` returns ``0.0``.\n\n The function is not vectorized.\n\n Examples\n --------\n\n To understand the effect of the bounds of integration consider\n \n >>> from scipy.stats import expon\n >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)\n 0.6321205588285578\n\n This is close to\n\n >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)\n 0.6321205588285577\n\n If ``conditional=True``\n\n >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)\n 1.0000000000000002\n\n The slight deviation from 1 is due to numerical integration.\n \"\"\"\n lockwds = {'loc': loc,\n 'scale': scale}\n self._argcheck(*args)\n _a, _b = self._get_support(*args)\n if func is None:\n def fun(x, *args):\n return x * self.pdf(x, *args, **lockwds)\n else:\n def fun(x, *args):\n return func(x) * self.pdf(x, *args, **lockwds)\n if lb is None:\n lb = loc + _a * scale\n if ub is None:\n ub = loc + _b * scale\n if conditional:\n invfac = (self.sf(lb, *args, **lockwds)\n - self.sf(ub, *args, **lockwds))\n else:\n invfac = 1.0\n kwds['args'] = args\n # Silence floating point warnings from integration.\n with np.errstate(all='ignore'):\n vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac\n return vals\n\n\n# Helpers for the discrete distributions\ndef _drv2_moment(self, n, *args):\n \"\"\"Non-central moment of discrete distribution.\"\"\"\n def fun(x):\n return np.power(x, n) * self._pmf(x, *args)\n\n _a, _b = self._get_support(*args)\n return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)\n\n\ndef _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm\n _a, _b = self._get_support(*args)\n b = _b\n a = _a\n if isinf(b): # Be sure ending point is > q\n b = int(max(100*q, 10))\n while 1:\n if b >= _b:\n qb = 1.0\n break\n qb = self._cdf(b, *args)\n if (qb < q):\n b += 10\n else:\n break\n else:\n qb = 1.0\n if isinf(a): # be sure starting point < q\n a = int(min(-100*q, -10))\n while 1:\n if a <= _a:\n qb = 0.0\n break\n qa = self._cdf(a, *args)\n if (qa > q):\n a -= 10\n else:\n break\n else:\n qa = self._cdf(a, *args)\n\n while 1:\n if (qa == q):\n return a\n if (qb == q):\n return b\n if b 
<= a+1:\n if qa > q:\n return a\n else:\n return b\n c = int((a+b)/2.0)\n qc = self._cdf(c, *args)\n if (qc < q):\n if a != c:\n a = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qa = qc\n elif (qc > q):\n if b != c:\n b = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qb = qc\n else:\n return c\n\n\ndef entropy(pk, qk=None, base=None, axis=0):\n \"\"\"Calculate the entropy of a distribution for given probability values.\n\n If only probabilities `pk` are given, the entropy is calculated as\n ``S = -sum(pk * log(pk), axis=axis)``.\n\n If `qk` is not None, then compute the Kullback-Leibler divergence\n ``S = sum(pk * log(pk / qk), axis=axis)``.\n\n This routine will normalize `pk` and `qk` if they don't sum to 1.\n\n Parameters\n ----------\n pk : sequence\n Defines the (discrete) distribution. ``pk[i]`` is the (possibly\n unnormalized) probability of event ``i``.\n qk : sequence, optional\n Sequence against which the relative entropy is computed. Should be in\n the same format as `pk`.\n base : float, optional\n The logarithmic base to use, defaults to ``e`` (natural logarithm).\n axis: int, optional\n The axis along which the entropy is calculated. Default is 0.\n\n Returns\n -------\n S : float\n The calculated entropy.\n\n Examples\n --------\n\n >>> from scipy.stats import entropy\n\n Bernoulli trial with different p.\n The outcome of a fair coin is the most uncertain:\n\n >>> entropy([1/2, 1/2], base=2)\n 1.0\n\n The outcome of a biased coin is less uncertain:\n\n >>> entropy([9/10, 1/10], base=2)\n 0.46899559358928117\n\n Relative entropy:\n\n >>> entropy([1/2, 1/2], qk=[9/10, 1/10])\n 0.5108256237659907\n\n \"\"\"\n pk = asarray(pk)\n pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)\n if qk is None:\n vec = entr(pk)\n else:\n qk = asarray(qk)\n if qk.shape != pk.shape:\n raise ValueError(\"qk and pk must have same shape.\")\n qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)\n vec = rel_entr(pk, qk)\n S = np.sum(vec, axis=axis)\n if base is not None:\n S /= log(base)\n return S\n\n\n# Must over-ride one of _pmf or _cdf or pass in\n# x_k, p(x_k) lists in initialization\n\nclass rv_discrete(rv_generic):\n \"\"\"\n A generic discrete random variable class meant for subclassing.\n\n `rv_discrete` is a base class to construct specific distribution classes\n and instances for discrete random variables. It can also be used\n to construct an arbitrary distribution defined by a list of support\n points and corresponding probabilities.\n\n Parameters\n ----------\n a : float, optional\n Lower bound of the support of the distribution, default: 0\n b : float, optional\n Upper bound of the support of the distribution, default: plus infinity\n moment_tol : float, optional\n The tolerance for the generic calculation of moments.\n values : tuple of two array_like, optional\n ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero\n probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``\n and ``pk`` must have the same shape.\n inc : integer, optional\n Increment for the support of the distribution.\n Default is 1. (other values have not been tested)\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. 
This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example \"m, n\" for a distribution\n that takes two integers as the two shape arguments for all its methods\n If not provided, shape parameters will be inferred from\n the signatures of the private methods, ``_pmf`` and ``_cdf`` of\n the instance.\n extradoc : str, optional\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random variates.\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Methods\n -------\n rvs\n pmf\n logpmf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n support\n\n\n Notes\n -----\n\n This class is similar to `rv_continuous`. Whether a shape parameter is\n valid is decided by an ``_argcheck`` method (which defaults to checking\n that its arguments are strictly positive.)\n The main differences are:\n\n - the support of the distribution is a set of integers\n - instead of the probability density function, ``pdf`` (and the\n corresponding private ``_pdf``), this class defines the\n *probability mass function*, `pmf` (and the corresponding\n private ``_pmf``.)\n - scale parameter is not defined.\n\n To create a new discrete distribution, we would do the following:\n\n >>> from scipy.stats import rv_discrete\n >>> class poisson_gen(rv_discrete):\n ... \"Poisson distribution\"\n ... def _pmf(self, k, mu):\n ... return exp(-mu) * mu**k / factorial(k)\n\n and create an instance::\n\n >>> poisson = poisson_gen(name=\"poisson\")\n\n Note that above we defined the Poisson distribution in the standard form.\n Shifting the distribution can be done by providing the ``loc`` parameter\n to the methods of the instance. 
For example, ``poisson.pmf(x, mu, loc)``\n delegates the work to ``poisson._pmf(x-loc, mu)``.\n\n **Discrete distributions from a list of probabilities**\n\n Alternatively, you can construct an arbitrary discrete rv defined\n on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the\n ``values`` keyword argument to the `rv_discrete` constructor.\n\n Examples\n --------\n\n Custom made discrete distribution:\n\n >>> from scipy import stats\n >>> xk = np.arange(7)\n >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)\n >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))\n >>>\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(1, 1)\n >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')\n >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)\n >>> plt.show()\n\n Random number generation:\n\n >>> R = custm.rvs(size=100)\n\n \"\"\"\n def __new__(cls, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n if values is not None:\n # dispatch to a subclass\n return super(rv_discrete, cls).__new__(rv_sample)\n else:\n # business as usual\n return super(rv_discrete, cls).__new__(cls)\n\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n self.badvalue = badvalue\n self.a = a\n self.b = b\n self.moment_tol = moment_tol\n self.inc = inc\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self.vecentropy = vectorize(self._entropy)\n self.shapes = shapes\n\n if values is not None:\n raise ValueError(\"rv_discrete.__init__(..., values != None, ...)\")\n\n self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n # nin correction needs to be after we know numargs\n # correct nin for generic moment vectorization\n _vec_generic_moment = vectorize(_drv2_moment, otypes='d')\n _vec_generic_moment.nin = self.numargs + 2\n self.generic_moment = types.MethodType(_vec_generic_moment, self)\n\n # correct nin for ppf vectorization\n _vppf = vectorize(_drv2_ppfsingle, otypes='d')\n _vppf.nin = self.numargs + 2\n self._ppfvec = types.MethodType(_vppf, self)\n\n # now that self.numargs is defined, we can adjust nin\n self._cdfvec.nin = self.numargs + 1\n\n self._construct_docstrings(name, longname, extradoc)\n\n def _construct_docstrings(self, name, longname, extradoc):\n if name is None:\n name = 'Distribution'\n self.name = name\n self.extradoc = extradoc\n\n # generate docstring for subclass instances\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict_discrete,\n discrete='discrete')\n else:\n dct = dict(distdiscrete)\n self._construct_doc(docdict_discrete, dct.get(self.name))\n\n # discrete RV do not have the scale parameter, remove it\n self.__doc__ = self.__doc__.replace(\n '\\n scale : array_like, '\n 'optional\\n scale parameter (default=1)', 
'')\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['badvalue'] = self.badvalue\n dct['moment_tol'] = self.moment_tol\n dct['inc'] = self.inc\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _nonzero(self, k, *args):\n return floor(k) == k\n\n def _pmf(self, k, *args):\n return self._cdf(k, *args) - self._cdf(k-1, *args)\n\n def _logpmf(self, k, *args):\n return log(self._pmf(k, *args))\n\n def _cdf_single(self, k, *args):\n _a, _b = self._get_support(*args)\n m = arange(int(_a), k+1)\n return np.sum(self._pmf(m, *args), axis=0)\n\n def _cdf(self, x, *args):\n k = floor(x)\n return self._cdfvec(k, *args)\n\n # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic\n\n def rvs(self, *args, **kwargs):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n size : int or tuple of ints, optional\n Defining number of random variates (Default is 1). Note that `size`\n has to be given as keyword, not as positional argument.\n random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random\n variates.\n If `random_state` is `None` the `~np.random.RandomState` singleton\n is used.\n If `random_state` is an int, a new ``RandomState`` instance is used,\n seeded with random_state.\n If `random_state` is already a ``RandomState`` or ``Generator``\n instance, then that object is used.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n kwargs['discrete'] = True\n return super(rv_discrete, self).rvs(*args, **kwargs)\n\n def pmf(self, k, *args, **kwds):\n \"\"\"\n Probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n pmf : array_like\n Probability mass function evaluated at k\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpmf(self, k, *args, **kwds):\n \"\"\"\n Log of the probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter. 
Default is 0.\n\n Returns\n -------\n logpmf : array_like\n Log of the probability mass function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logpmf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, k, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k >= _b)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 1.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, k, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at k of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k >= _b)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 0.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, k, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k < _a) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._sf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, k, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as 1 - `cdf`,\n evaluated at `k`.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k < _a) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Lower tail probability.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : array_like\n Quantile corresponding to the lower tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), _a-1 + loc)\n place(output, cond2, _b + loc)\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._ppf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Upper tail probability.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : ndarray or scalar\n Quantile corresponding to the upper tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n\n # same problem as with ppf; copied from ppf and changed\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), _b)\n place(output, cond2, _a-1)\n\n # call place only if at least 1 valid argument\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n # PB same as ticket 766\n place(output, cond, self._isf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def _entropy(self, *args):\n if hasattr(self, 'pk'):\n return entropy(self.pk)\n else:\n _a, _b = self._get_support(*args)\n return _expect(lambda x: entr(self.pmf(x, *args)),\n _a, _b, self.ppf(0.5, *args), self.inc)\n\n def expect(self, func=None, args=(), loc=0, lb=None, ub=None,\n conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):\n \"\"\"\n Calculate expected value of a function with respect to the distribution\n for discrete distribution by numerical summation.\n\n Parameters\n ----------\n func : callable, optional\n Function for which the expectation value is calculated.\n Takes only one argument.\n The default is the identity mapping f(k) = k.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter.\n Default is 0.\n lb, ub : int, optional\n Lower and upper bound for the summation, default is set to the\n support of the distribution, inclusive (``ul <= k <= ub``).\n conditional : bool, optional\n If true then the expectation is corrected by the conditional\n probability of the summation interval. The return value is the\n expectation of the function, `func`, conditional on being in\n the given interval (k such that ``ul <= k <= ub``).\n Default is False.\n maxcount : int, optional\n Maximal number of terms to evaluate (to avoid an endless loop for\n an infinite sum). Default is 1000.\n tolerance : float, optional\n Absolute tolerance for the summation. Default is 1e-10.\n chunksize : int, optional\n Iterate over the support of a distributions in chunks of this size.\n Default is 32.\n\n Returns\n -------\n expect : float\n Expected value.\n\n Notes\n -----\n For heavy-tailed distributions, the expected value may or may not exist,\n depending on the function, `func`. If it does exist, but the sum converges\n slowly, the accuracy of the result may be rather low. For instance, for\n ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.\n increasing `maxcount` and/or `chunksize` may improve the result, but may\n also make zipf very slow.\n\n The function is not vectorized.\n\n \"\"\"\n if func is None:\n def fun(x):\n # loc and args from outer scope\n return (x+loc)*self._pmf(x, *args)\n else:\n def fun(x):\n # loc and args from outer scope\n return func(x+loc)*self._pmf(x, *args)\n # used pmf because _pmf does not check support in randint and there\n # might be problems(?) 
with correct self.a, self.b at this stage maybe\n # not anymore, seems to work now with _pmf\n\n self._argcheck(*args) # (re)generate scalar self.a and self.b\n _a, _b = self._get_support(*args)\n if lb is None:\n lb = _a\n else:\n lb = lb - loc # convert bound for standardized distribution\n if ub is None:\n ub = _b\n else:\n ub = ub - loc # convert bound for standardized distribution\n if conditional:\n invfac = self.sf(lb-1, *args) - self.sf(ub, *args)\n else:\n invfac = 1.0\n\n # iterate over the support, starting from the median\n x0 = self.ppf(0.5, *args)\n res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)\n return res / invfac\n\n\ndef _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,\n chunksize=32):\n \"\"\"Helper for computing the expectation value of `fun`.\"\"\"\n\n # short-circuit if the support size is small enough\n if (ub - lb) <= chunksize:\n supp = np.arange(lb, ub+1, inc)\n vals = fun(supp)\n return np.sum(vals)\n\n # otherwise, iterate starting from x0\n if x0 < lb:\n x0 = lb\n if x0 > ub:\n x0 = ub\n\n count, tot = 0, 0.\n # iterate over [x0, ub] inclusive\n for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n return tot\n\n # iterate over [lb, x0)\n for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n break\n\n return tot\n\n\ndef _iter_chunked(x0, x1, chunksize=4, inc=1):\n \"\"\"Iterate from x0 to x1 in chunks of chunksize and steps inc.\n\n x0 must be finite, x1 need not be. In the latter case, the iterator is\n infinite.\n Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards\n (make sure to set inc < 0.)\n\n >>> [x for x in _iter_chunked(2, 5, inc=2)]\n [array([2, 4])]\n >>> [x for x in _iter_chunked(2, 11, inc=2)]\n [array([2, 4, 6, 8]), array([10])]\n >>> [x for x in _iter_chunked(2, -5, inc=-2)]\n [array([ 2, 0, -2, -4])]\n >>> [x for x in _iter_chunked(2, -9, inc=-2)]\n [array([ 2, 0, -2, -4]), array([-6, -8])]\n\n \"\"\"\n if inc == 0:\n raise ValueError('Cannot increment by zero.')\n if chunksize <= 0:\n raise ValueError('Chunk size must be positive; got %s.' 
% chunksize)\n\n s = 1 if inc > 0 else -1\n stepsize = abs(chunksize * inc)\n\n x = x0\n while (x - x1) * inc < 0:\n delta = min(stepsize, abs(x - x1))\n step = delta * s\n supp = np.arange(x, x + step, inc)\n x += step\n yield supp\n\n\nclass rv_sample(rv_discrete):\n \"\"\"A 'sample' discrete distribution defined by the support and values.\n\n The ctor ignores most of the arguments, only needs the `values` argument.\n \"\"\"\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n if values is None:\n raise ValueError(\"rv_sample.__init__(..., values=None,...)\")\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n self.badvalue = badvalue\n self.moment_tol = moment_tol\n self.inc = inc\n self.shapes = shapes\n self.vecentropy = self._entropy\n\n xk, pk = values\n\n if np.shape(xk) != np.shape(pk):\n raise ValueError(\"xk and pk must have the same shape.\")\n if np.less(pk, 0.0).any():\n raise ValueError(\"All elements of pk must be non-negative.\")\n if not np.allclose(np.sum(pk), 1):\n raise ValueError(\"The sum of provided pk is not 1.\")\n\n indx = np.argsort(np.ravel(xk))\n self.xk = np.take(np.ravel(xk), indx, 0)\n self.pk = np.take(np.ravel(pk), indx, 0)\n self.a = self.xk[0]\n self.b = self.xk[-1]\n\n self.qvals = np.cumsum(self.pk, axis=0)\n\n self.shapes = ' ' # bypass inspection\n self._construct_argparser(meths_to_inspect=[self._pmf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n self._construct_docstrings(name, longname, extradoc)\n\n def _get_support(self, *args):\n \"\"\"Return the support of the (unscaled, unshifted) distribution.\n\n Parameters\n ----------\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n Returns\n -------\n a, b : numeric (float, or int or +/-np.inf)\n end-points of the distribution's support.\n \"\"\"\n return self.a, self.b\n\n def _pmf(self, x):\n return np.select([x == k for k in self.xk],\n [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)\n\n def _cdf(self, x):\n xx, xxk = np.broadcast_arrays(x[:, None], self.xk)\n indx = np.argmax(xxk > xx, axis=-1) - 1\n return self.qvals[indx]\n\n def _ppf(self, q):\n qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)\n indx = argmax(sqq >= qq, axis=-1)\n return self.xk[indx]\n\n def _rvs(self, size=None, random_state=None):\n # Need to define it explicitly, otherwise .rvs() with size=None\n # fails due to explicit broadcasting in _ppf\n U = random_state.uniform(size=size)\n if size is None:\n U = np.array(U, ndmin=1)\n Y = self._ppf(U)[0]\n else:\n Y = self._ppf(U)\n return Y\n\n def _entropy(self):\n return entropy(self.pk)\n\n def generic_moment(self, n):\n n = asarray(n)\n return np.sum(self.xk**n[np.newaxis, ...] 
* self.pk, axis=0)\n\n\ndef _check_shape(argshape, size):\n \"\"\"\n This is a utility function used by `_rvs()` in the class geninvgauss_gen.\n It compares the tuple argshape to the tuple size.\n\n Parameters\n ----------\n argshape : tuple of integers\n Shape of the arguments.\n size : tuple of integers or integer\n Size argument of rvs().\n\n Returns\n -------\n The function returns two tuples, scalar_shape and bc.\n\n scalar_shape : tuple\n Shape to which the 1-d array of random variates returned by\n _rvs_scalar() is converted when it is copied into the\n output array of _rvs().\n\n bc : tuple of booleans\n bc is an tuple the same length as size. bc[j] is True if the data\n associated with that index is generated in one call of _rvs_scalar().\n\n \"\"\"\n scalar_shape = []\n bc = []\n for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],\n fillvalue=1):\n if sizedim > argdim or (argdim == sizedim == 1):\n scalar_shape.append(sizedim)\n bc.append(True)\n else:\n bc.append(False)\n return tuple(scalar_shape[::-1]), tuple(bc[::-1])\n\n\ndef get_distribution_names(namespace_pairs, rv_base_class):\n \"\"\"\n Collect names of statistical distributions and their generators.\n\n Parameters\n ----------\n namespace_pairs : sequence\n A snapshot of (name, value) pairs in the namespace of a module.\n rv_base_class : class\n The base class of random variable generator classes in a module.\n\n Returns\n -------\n distn_names : list of strings\n Names of the statistical distributions.\n distn_gen_names : list of strings\n Names of the generators of the statistical distributions.\n Note that these are not simply the names of the statistical\n distributions, with a _gen suffix added.\n\n \"\"\"\n distn_names = []\n distn_gen_names = []\n for name, value in namespace_pairs:\n if name.startswith('_'):\n continue\n if name.endswith('_gen') and issubclass(value, rv_base_class):\n distn_gen_names.append(name)\n if isinstance(value, rv_base_class):\n distn_names.append(name)\n return distn_names, distn_gen_names\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.vectorize",
"numpy.less",
"numpy.any",
"numpy.asarray",
"scipy.special.rel_entr",
"numpy.log",
"scipy.misc.derivative",
"numpy.isfinite",
"numpy.extract",
"numpy.isnan",
"scipy._lib.doccer.docformat",
"scipy._lib._util.getfullargspec_no_self",
"numpy.broadcast_arrays",
"scipy.special.entr",
"scipy.special.xlogy",
"scipy._lib._util.check_random_state",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.arange",
"numpy.power",
"numpy.all",
"numpy.min",
"numpy.max",
"scipy.special.comb",
"numpy.array",
"scipy.integrate.quad",
"numpy.cumsum",
"numpy.isinf",
"numpy.floor",
"numpy.atleast_1d",
"numpy.ravel",
"scipy.optimize.brentq",
"numpy.place",
"numpy.errstate",
"numpy.shape",
"scipy.special.chndtr",
"numpy.sqrt",
"numpy.find_common_type",
"scipy.special.ive"
]
] |
saikrishna-pallerla/efficientdet-pytorch | [
"dc7b790f537d28476a26af6f793acc4757becd0d"
] | [
"effdet/data/transforms.py"
] | [
"\"\"\" COCO transforms (quick and dirty)\n\nHacked together by Ross Wightman\n\"\"\"\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport random\nimport math\n\nIMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)\nIMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)\nIMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)\nIMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)\n\n\nclass ImageToNumpy:\n\n def __call__(self, pil_img, annotations: dict):\n np_img = np.array(pil_img, dtype=np.uint8)\n if np_img.ndim < 3:\n np_img = np.expand_dims(np_img, axis=-1)\n np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW\n return np_img, annotations\n\n\nclass ImageToTensor:\n\n def __init__(self, dtype=torch.float32):\n self.dtype = dtype\n\n def __call__(self, pil_img, annotations: dict):\n np_img = np.array(pil_img, dtype=np.uint8)\n if np_img.ndim < 3:\n np_img = np.expand_dims(np_img, axis=-1)\n np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW\n return torch.from_numpy(np_img).to(dtype=self.dtype), annotations\n\n\ndef _pil_interp(method):\n if method == 'bicubic':\n return Image.BICUBIC\n elif method == 'lanczos':\n return Image.LANCZOS\n elif method == 'hamming':\n return Image.HAMMING\n else:\n # default bilinear, do we want to allow nearest?\n return Image.BILINEAR\n\n\n_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)\n\n\ndef clip_boxes_(boxes, img_size):\n height, width = img_size\n clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)\n np.clip(boxes, 0, clip_upper, out=boxes)\n\n\ndef clip_boxes(boxes, img_size):\n clipped_boxes = boxes.copy()\n clip_boxes_(clipped_boxes, img_size)\n return clipped_boxes\n\n\ndef _size_tuple(size):\n if isinstance(size, int):\n return size, size\n else:\n assert len(size) == 2\n return size\n\n\nclass ResizePad:\n\n def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):\n self.target_size = _size_tuple(target_size)\n self.interpolation = interpolation\n self.fill_color = fill_color\n\n def __call__(self, img, anno: dict):\n width, height = img.size\n\n img_scale_y = self.target_size[0] / height\n img_scale_x = self.target_size[1] / width\n img_scale = min(img_scale_y, img_scale_x)\n scaled_h = int(height * img_scale)\n scaled_w = int(width * img_scale)\n\n new_img = Image.new(\"RGB\", (self.target_size[1], self.target_size[0]), color=self.fill_color)\n interp_method = _pil_interp(self.interpolation)\n img = img.resize((scaled_w, scaled_h), interp_method)\n new_img.paste(img)\n\n if 'bbox' in anno:\n # FIXME haven't tested this path since not currently using dataset annotations for train/eval\n bbox = anno['bbox']\n bbox[:, :4] *= img_scale\n clip_boxes_(bbox, (scaled_h, scaled_w))\n valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)\n anno['bbox'] = bbox[valid_indices, :]\n anno['cls'] = anno['cls'][valid_indices]\n\n anno['img_scale'] = 1. 
/ img_scale # back to original\n\n return new_img, anno\n\n\nclass RandomResizePad:\n\n def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'random',\n fill_color: tuple = (0, 0, 0)):\n self.target_size = _size_tuple(target_size)\n self.scale = scale\n if interpolation == 'random':\n self.interpolation = _RANDOM_INTERPOLATION\n else:\n self.interpolation = _pil_interp(interpolation)\n self.fill_color = fill_color\n\n def _get_params(self, img):\n # Select a random scale factor.\n scale_factor = random.uniform(*self.scale)\n scaled_target_height = scale_factor * self.target_size[0]\n scaled_target_width = scale_factor * self.target_size[1]\n\n # Recompute the accurate scale_factor using rounded scaled image size.\n width, height = img.size\n img_scale_y = scaled_target_height / height\n img_scale_x = scaled_target_width / width\n img_scale = min(img_scale_y, img_scale_x)\n\n # Select non-zero random offset (x, y) if scaled image is larger than target size\n scaled_h = int(height * img_scale)\n scaled_w = int(width * img_scale)\n offset_y = scaled_h - self.target_size[0]\n offset_x = scaled_w - self.target_size[1]\n offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))\n offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))\n return scaled_h, scaled_w, offset_y, offset_x, img_scale\n\n def __call__(self, img, anno: dict):\n scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)\n\n if isinstance(self.interpolation, (tuple, list)):\n interpolation = random.choice(self.interpolation)\n else:\n interpolation = self.interpolation\n img = img.resize((scaled_w, scaled_h), interpolation)\n right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])\n img = img.crop((offset_x, offset_y, right, lower))\n new_img = Image.new(\"RGB\", (self.target_size[1], self.target_size[0]), color=self.fill_color)\n new_img.paste(img)\n\n if 'bbox' in anno:\n # FIXME not fully tested\n bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace\n bbox[:, :4] *= img_scale\n box_offset = np.stack([offset_y, offset_x] * 2)\n bbox -= box_offset\n clip_boxes_(bbox, (scaled_h, scaled_w))\n valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)\n anno['bbox'] = bbox[valid_indices, :]\n anno['cls'] = anno['cls'][valid_indices]\n\n anno['img_scale'] = 1. 
/ img_scale # back to original\n\n return new_img, anno\n\n\nclass RandomFlip:\n\n def __init__(self, horizontal=True, vertical=False, prob=0.5):\n self.horizontal = horizontal\n self.vertical = vertical\n self.prob = prob\n\n def _get_params(self):\n do_horizontal = random.random() < self.prob if self.horizontal else False\n do_vertical = random.random() < self.prob if self.vertical else False\n return do_horizontal, do_vertical\n\n def __call__(self, img, annotations: dict):\n do_horizontal, do_vertical = self._get_params()\n width, height = img.size\n\n def _fliph(bbox):\n x_max = width - bbox[:, 1]\n x_min = width - bbox[:, 3]\n bbox[:, 1] = x_min\n bbox[:, 3] = x_max\n\n def _flipv(bbox):\n y_max = height - bbox[:, 0]\n y_min = height - bbox[:, 2]\n bbox[:, 0] = y_min\n bbox[:, 2] = y_max\n\n if do_horizontal and do_vertical:\n img = img.transpose(Image.ROTATE_180)\n if 'bbox' in annotations:\n _fliph(annotations['bbox'])\n _flipv(annotations['bbox'])\n elif do_horizontal:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if 'bbox' in annotations:\n _fliph(annotations['bbox'])\n elif do_vertical:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n if 'bbox' in annotations:\n _flipv(annotations['bbox'])\n\n return img, annotations\n\n\ndef resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):\n if isinstance(fill_color, tuple):\n assert len(fill_color) == 3\n fill_color = fill_color\n else:\n try:\n int_color = int(fill_color)\n fill_color = (int_color,) * 3\n except ValueError:\n assert fill_color == 'mean'\n fill_color = tuple([int(round(255 * x)) for x in img_mean])\n return fill_color\n\n\nclass Compose:\n\n def __init__(self, transforms: list):\n self.transforms = transforms\n\n def __call__(self, img, annotations: dict):\n for t in self.transforms:\n img, annotations = t(img, annotations)\n return img, annotations\n\n\ndef transforms_coco_eval(\n img_size=224,\n interpolation='bilinear',\n use_prefetcher=False,\n fill_color='mean',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD):\n\n fill_color = resolve_fill_color(fill_color, mean)\n\n image_tfl = [\n ResizePad(\n target_size=img_size, interpolation=interpolation, fill_color=fill_color),\n ImageToNumpy(),\n ]\n\n assert use_prefetcher, \"Only supporting prefetcher usage right now\"\n\n image_tf = Compose(image_tfl)\n return image_tf\n\n\ndef transforms_coco_train(\n img_size=224,\n interpolation='random',\n use_prefetcher=False,\n fill_color='mean',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD):\n\n fill_color = resolve_fill_color(fill_color, mean)\n\n image_tfl = [\n RandomFlip(horizontal=True, prob=0.5),\n RandomResizePad(\n target_size=img_size, interpolation=interpolation, fill_color=fill_color),\n ImageToNumpy(),\n ]\n\n assert use_prefetcher, \"Only supporting prefetcher usage right now\"\n\n image_tf = Compose(image_tfl)\n return image_tf\n"
] | [
[
"numpy.stack",
"numpy.moveaxis",
"numpy.clip",
"numpy.expand_dims",
"torch.from_numpy",
"numpy.array"
]
] |
jtwhite79/MetPy | [
"8f1880be1ee98c17cd00ae556324386d2a6301ac"
] | [
"metpy/calc/tests/test_basic.py"
] | [
"# Copyright (c) 2008-2015 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom metpy.units import units\nfrom metpy.testing import assert_almost_equal, assert_array_almost_equal\nfrom metpy.calc.basic import * # noqa\n\n\ndef test_wind_comps_basic():\n 'Test the basic wind component calculation.'\n speed = np.array([4, 4, 4, 4, 25, 25, 25, 25, 10.]) * units.mph\n dirs = np.array([0, 45, 90, 135, 180, 225, 270, 315, 360]) * units.deg\n s2 = np.sqrt(2.)\n\n u, v = get_wind_components(speed, dirs)\n\n true_u = np.array([0, -4 / s2, -4, -4 / s2, 0, 25 / s2, 25, 25 / s2, 0]) * units.mph\n true_v = np.array([-4, -4 / s2, 0, 4 / s2, 25, 25 / s2, 0, -25 / s2, -10]) * units.mph\n\n assert_array_almost_equal(true_u, u, 4)\n assert_array_almost_equal(true_v, v, 4)\n\n\ndef test_wind_comps_scalar():\n 'Test scalar wind components'\n u, v = get_wind_components(8 * units('m/s'), 150 * units.deg)\n assert_almost_equal(u, -4 * units('m/s'), 3)\n assert_almost_equal(v, 6.9282 * units('m/s'), 3)\n\n\ndef test_speed():\n 'Basic test of wind speed calculation'\n u = np.array([4., 2., 0., 0.]) * units('m/s')\n v = np.array([0., 2., 4., 0.]) * units('m/s')\n\n speed = get_wind_speed(u, v)\n\n s2 = np.sqrt(2.)\n true_speed = np.array([4., 2 * s2, 4., 0.]) * units('m/s')\n\n assert_array_almost_equal(true_speed, speed, 4)\n\n\ndef test_dir():\n 'Basic test of wind direction calculation'\n u = np.array([4., 2., 0., 0.]) * units('m/s')\n v = np.array([0., 2., 4., 0.]) * units('m/s')\n\n direc = get_wind_dir(u, v)\n\n true_dir = np.array([270., 225., 180., 270.]) * units.deg\n\n assert_array_almost_equal(true_dir, direc, 4)\n\n\ndef test_speed_dir_roundtrip():\n 'Convert from wind speed and direction to u,v and back'\n # Test each quadrant of the whole circle\n wspd = np.array([15., 5., 2., 10.]) * units.meters / units.seconds\n wdir = np.array([160., 30., 225., 350.]) * units.degrees\n\n u, v = get_wind_components(wspd, wdir)\n\n wdir_out = get_wind_dir(u, v)\n wspd_out = get_wind_speed(u, v)\n\n assert_array_almost_equal(wspd, wspd_out, 4)\n assert_array_almost_equal(wdir, wdir_out, 4)\n\n\ndef test_scalar_speed():\n 'Test wind speed with scalars'\n s = get_wind_speed(-3. * units('m/s'), -4. * units('m/s'))\n assert_almost_equal(s, 5. * units('m/s'), 3)\n\n\ndef test_scalar_dir():\n 'Test wind direction with scalars'\n d = get_wind_dir(3. * units('m/s'), 4. 
* units('m/s'))\n assert_almost_equal(d, 216.870 * units.deg, 3)\n\n\ndef test_windchill_scalar():\n 'Test wind chill with scalars'\n wc = windchill(-5 * units.degC, 35 * units('m/s'))\n assert_almost_equal(wc, -18.9357 * units.degC, 0)\n\n\ndef test_windchill_basic():\n 'Test the basic wind chill calculation.'\n temp = np.array([40, -10, -45, 20]) * units.degF\n speed = np.array([5, 55, 25, 15]) * units.mph\n\n wc = windchill(temp, speed)\n values = np.array([36, -46, -84, 6]) * units.degF\n assert_array_almost_equal(wc, values, 0)\n\n\ndef test_windchill_invalid():\n 'Test for values that should be masked.'\n temp = np.array([10, 51, 49, 60, 80, 81]) * units.degF\n speed = np.array([4, 4, 3, 1, 10, 39]) * units.mph\n\n wc = windchill(temp, speed)\n mask = np.array([False, True, True, True, True, True])\n assert_array_equal(wc.mask, mask)\n\n\ndef test_windchill_undefined_flag():\n 'Tests whether masking values can be disabled.'\n temp = units.Quantity(np.ma.array([49, 50, 49, 60, 80, 81]), units.degF)\n speed = units.Quantity(([4, 4, 3, 1, 10, 39]), units.mph)\n\n wc = windchill(temp, speed, mask_undefined=False)\n mask = np.array([False] * 6)\n assert_array_equal(wc.mask, mask)\n\n\ndef test_windchill_face_level():\n 'Tests using the face_level flag'\n temp = np.array([20, 0, -20, -40]) * units.degF\n speed = np.array([15, 30, 45, 60]) * units.mph\n\n wc = windchill(temp, speed, face_level_winds=True)\n values = np.array([3, -30, -64, -98]) * units.degF\n assert_array_almost_equal(wc, values, 0)\n\n\ndef test_heat_index_basic():\n 'Test the basic heat index calculation.'\n temp = np.array([80, 88, 92, 110]) * units.degF\n rh = np.array([40, 100, 70, 40]) * units.percent\n\n hi = heat_index(temp, rh)\n values = np.array([80, 121, 112, 136]) * units.degF\n assert_array_almost_equal(hi, values, 0)\n\n\ndef test_heat_index_scalar():\n 'Test heat index using scalars'\n hi = heat_index(96 * units.degF, 65 * units.percent)\n assert_almost_equal(hi, 121 * units.degF, 0)\n\n\ndef test_heat_index_invalid():\n 'Test for values that should be masked.'\n temp = np.array([80, 88, 92, 79, 30, 81]) * units.degF\n rh = np.array([40, 39, 2, 70, 50, 39]) * units.percent\n\n hi = heat_index(temp, rh)\n mask = np.array([False, True, True, True, True, True])\n assert_array_equal(hi.mask, mask)\n\n\ndef test_heat_index_undefined_flag():\n 'Tests whether masking values can be disabled.'\n temp = units.Quantity(np.ma.array([80, 88, 92, 79, 30, 81]), units.degF)\n rh = np.ma.array([40, 39, 2, 70, 50, 39]) * units.percent\n\n hi = heat_index(temp, rh, mask_undefined=False)\n mask = np.array([False] * 6)\n assert_array_equal(hi.mask, mask)\n\n\ndef test_heat_index_units():\n 'Test units coming out of heat index'\n temp = units.Quantity([35., 20.], units.degC)\n rh = 70 * units.percent\n hi = heat_index(temp, rh)\n assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)\n\n\ndef test_heat_index_ratio():\n 'Test giving humidity as number [0, 1]'\n temp = units.Quantity([35., 20.], units.degC)\n rh = 0.7\n hi = heat_index(temp, rh)\n assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)\n\n# class TestIrrad(object):\n# def test_basic(self):\n# 'Test the basic solar irradiance calculation.'\n# from datetime import date\n\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hours = np.linspace(6,18,10)\n\n# s = solar_irradiance(lat, d, hours)\n# values = np.array([0., 344.1, 682.6, 933.9, 1067.6, 1067.6, 933.9,\n# 682.6, 344.1, 0.])\n# assert_array_almost_equal(s, 
values, 1)\n\n# def test_scalar(self):\n# from datetime import date\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hour = 9.5\n# s = solar_irradiance(lat, d, hour)\n# assert_almost_equal(s, 852.1, 1)\n\n# def test_invalid(self):\n# 'Test for values that should be masked.'\n# from datetime import date\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hours = np.linspace(0,22,12)\n# s = solar_irradiance(lat, d, hours)\n\n# mask = np.array([ True, True, True, True, False, False, False,\n# False, False, True, True, True])\n# assert_array_equal(s.mask, mask)\n\n\ndef test_pressure_to_heights_basic():\n 'Tests basic pressure to height calculation.'\n pressures = np.array([975.2, 987.5, 956., 943.]) * units.mbar\n heights = pressure_to_height_std(pressures)\n values = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter\n assert_almost_equal(heights, values, 1)\n"
] | [
[
"numpy.sqrt",
"numpy.ma.array",
"numpy.array",
"numpy.testing.assert_array_equal"
]
] |
LBJ-Wade/GALLUMI_public | [
"4529ab32ccfc281e5976f482fe556b672b8f464f"
] | [
"Scripts/Plotting/Posteriors_cosmo_model1/Posteriors_cosmo_model1_alternative_dust.py"
] | [
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport glob\nfrom matplotlib import patches as mpatches\nimport scipy.ndimage\nfrom scipy.interpolate import PchipInterpolator\nplt.style.use(\"../template.mplstyle\")\n\n# purple - green - darkgoldenrod - blue - red\ncolors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']\nlinestyles = [(0, (1,1.05)), (0, (3, 1, 1, 1)), (0, (1,3)), (0, (3,3.65)), (0, (3,2.772)), (0, (3, 1, 1, 1, 1, 1))]\n\n#########################################################################################\n\ndef ctr_level2d(histogram2d, lvl, infinite=False):\n hist = histogram2d.flatten()*1.\n hist.sort()\n cum_hist = np.cumsum(hist[::-1])\n cum_hist /= cum_hist[-1]\n\n alvl = np.searchsorted(cum_hist, lvl)[::-1]\n clist = [0]+[hist[-i] for i in alvl]+[hist.max()]\n if not infinite:\n return clist[1:]\n return clist\n\ndef get_hist2d(datax, datay, num_bins=40, weights=[None]):\n if not any(weights):\n weights = np.ones(len(datax))\n hist, bin_edgesx, bin_edgesy = np.histogram2d(datax, datay, bins=num_bins, weights=weights)\n bin_centresx = 0.5*(bin_edgesx[1:]+bin_edgesx[:-1])\n bin_centresy = 0.5*(bin_edgesy[1:]+bin_edgesy[:-1])\n return hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy\n\ndef adjust_lightness(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])\n\ndef plot_hist2d(datax, datay, ax, num_bins=30, weights=[None], color=None, zorder=0):\n if not any(weights):\n weights = np.ones(len(datax))\n if color == None:\n color=\"black\"\n\n hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy = get_hist2d(datax, datay, num_bins=num_bins, weights=weights)\n\n interpolation_smoothing = 3.\n gaussian_smoothing = 0.5\n sigma = interpolation_smoothing * gaussian_smoothing\n\n interp_y_centers = scipy.ndimage.zoom(bin_centresy, interpolation_smoothing, mode='reflect')\n interp_x_centers = scipy.ndimage.zoom(bin_centresx,interpolation_smoothing, mode='reflect')\n interp_hist = scipy.ndimage.zoom(hist, interpolation_smoothing, mode='reflect')\n interp_smoothed_hist = scipy.ndimage.filters.gaussian_filter(interp_hist, [sigma,sigma], mode='reflect')\n\n ax.contourf(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist), colors=[adjust_lightness(color,1.4), adjust_lightness(color,0.8)], levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]), zorder=zorder, alpha=0.45)\n ax.contour(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist), colors=[color, adjust_lightness(color,0.8)], linewidths=2., levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]), zorder=zorder)\n\n\n##################################################################################################\n\nUVLF_Overzier = []\nUVLF_Bouwens = []\nUVLF_Casey = []\n\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Overzier.append(data)\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Bouwens2016/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Bouwens.append(data)\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Casey2014/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Casey.append(data)\n\nUVLF_Overzier = np.vstack(np.array(UVLF_Overzier))\nUVLF_Bouwens = np.vstack(np.array(UVLF_Bouwens))\nUVLF_Casey = np.vstack(np.array(UVLF_Casey))\n\nbetadata = 
np.loadtxt(\"Beta_parameters.txt\", unpack=True)\nbetainterp = PchipInterpolator(betadata[0], betadata[1])\ndbetadMUVinterp = PchipInterpolator(betadata[0], betadata[2])\n\ndef betaAverage(z, MUV):\n if MUV < -19.5:\n return dbetadMUVinterp(z) * (MUV + 19.5) + betainterp(z)\n return (betainterp(z) + 2.33) * np.exp((dbetadMUVinterp(z) * (MUV + 19.5)) / (betainterp(z) + 2.33)) - 2.33\n\[email protected]\ndef AUV(z, MUV, index):\n if z < 2.5 or z > 8:\n return 0.\n sigmabeta = 0.34\n if index==0:\n return max(0., 4.54 + 0.2 * np.log(10) * (2.07**2) * (sigmabeta**2) + 2.07 * betaAverage(z, MUV)) # Overzier 2011\n if index==1:\n return max(0., 3.36 + 0.2 * np.log(10) * (2.04**2) * (sigmabeta**2) + 2.04 * betaAverage(z, MUV)) # Casey 2014\n if index==2:\n return max(0., 2.45 + 0.2 * np.log(10) * (1.1**2) * (sigmabeta**2) + 1.1 * betaAverage(z, MUV)) # Bouwens 2016\n\n\nplt.figure(figsize=(24.,6.))\nax1 = plt.subplot(131)\nax2 = plt.subplot(132)\nax3 = plt.subplot(133)\nax1.tick_params(axis='x', which='major', pad=6)\nax2.tick_params(axis='x', which='major', pad=6)\nax3.tick_params(axis='x', which='major', pad=6)\nax1.tick_params(axis='both', which='major', labelsize=26)\nax1.tick_params(axis='both', which='minor', labelsize=26)\nax2.tick_params(axis='both', which='major', labelsize=26)\nax2.tick_params(axis='both', which='minor', labelsize=26)\nax3.tick_params(axis='both', which='major', labelsize=26)\nax3.tick_params(axis='both', which='minor', labelsize=26)\n\nfor axis in ['top','bottom','left','right']:\n ax1.spines[axis].set_linewidth(2.2)\n ax2.spines[axis].set_linewidth(2.2)\n ax3.spines[axis].set_linewidth(2.2)\n\n###############\n\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 0), color=colors[3], lw=2.5)\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 1), linestyle=linestyles[2], color=colors[1], lw=3.)\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 2), linestyle=linestyles[3], color=colors[-1], lw=2.5)\nax1.set_xlabel(r'$M_\\mathrm{UV}$', labelpad=10, fontsize=30)\nax1.set_ylabel(r'$A_\\mathrm{UV}$', labelpad=12, fontsize=30)\nax1.set_xlim(-23, -16)\nax1.set_ylim(0., 1.3)\n\npatch_blue = mpatches.Patch(color=colors[3], lw=1.5, label=r\"$\\mathrm{Overzier\\ 2011}$\")\npatch_green = mpatches.Patch(color=colors[1], lw=1.5, label=r\"$\\mathrm{Casey\\ 2014}$\")\npatch_yellow = mpatches.Patch(color=colors[-1], lw=1.5, label=r\"$\\mathrm{Bouwens\\ 2016}$\")\nleg = ax1.legend(handles=[patch_blue, patch_green,patch_yellow], loc=\"upper right\", frameon=False, markerfirst=False, prop={'size': 21}, handlelength=1.9, handletextpad=0.5)\n\n###############\n\nplot_hist2d(datax=UVLF_Overzier[:,-7], datay=UVLF_Overzier[:,2], ax=ax2, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)\nplot_hist2d(datax=UVLF_Bouwens[:,-7], datay=UVLF_Bouwens[:,2], ax=ax2, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)\nplot_hist2d(datax=UVLF_Casey[:,-7], datay=UVLF_Casey[:,2], ax=ax2, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)\nax2.set_xlabel(r'$\\Omega_\\mathrm{m}$', labelpad=10, fontsize=30)\nax2.set_ylabel(r'$\\sigma_8$', labelpad=8, fontsize=30)\nax2.set_xlim(0.2, 0.4)\nax2.set_ylim(0.3, 1.3)\n\n###############\n\nplot_hist2d(datax=UVLF_Overzier[:,5], datay=UVLF_Overzier[:,2], ax=ax3, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)\nplot_hist2d(datax=UVLF_Bouwens[:,5], datay=UVLF_Bouwens[:,2], ax=ax3, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)\nplot_hist2d(datax=UVLF_Casey[:,5], 
datay=UVLF_Casey[:,2], ax=ax3, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)\nax3.set_ylabel(r'$\\sigma_8$', labelpad=8, fontsize=30)\nax3.set_xlabel(r'$n_\\mathrm{s}$', labelpad=10, fontsize=30)\nax3.set_xlim(0.7, 1.3)\nax3.set_ylim(0.3, 1.3)\n\nplt.savefig(\"Posteriors_cosmo_model1_alternative_dust.pdf\")\n"
] | [
[
"matplotlib.pyplot.style.use",
"numpy.cumsum",
"numpy.transpose",
"numpy.searchsorted",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"numpy.histogram2d",
"numpy.log",
"numpy.array",
"matplotlib.colors.to_rgb",
"scipy.interpolate.PchipInterpolator",
"numpy.linspace",
"matplotlib.patches.Patch",
"numpy.loadtxt"
]
] |
Yugeeth/chat-bot | [
"3198fb160f743c7be1f377d2febb889423da8c06"
] | [
"train.py"
] | [
"import numpy as np\r\nimport random\r\nimport json\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\nfrom nltk_utils import bag_of_words, tokenize, stem\r\nfrom model import NeuralNet\r\n\r\nwith open('intents.json', 'r') as f:\r\n intents = json.load(f)\r\n\r\nall_words = []\r\ntags = []\r\nxy = []\r\n# loop through each sentence in our intents patterns\r\nfor intent in intents['intents']:\r\n tag = intent['tag']\r\n # add to tag list\r\n tags.append(tag)\r\n for pattern in intent['patterns']:\r\n # tokenize each word in the sentence\r\n w = tokenize(pattern)\r\n # add to our words list\r\n all_words.extend(w)\r\n # add to xy pair\r\n xy.append((w, tag))\r\n\r\n# stem and lower each word\r\nignore_words = ['?', '.', '!']\r\nall_words = [stem(w) for w in all_words if w not in ignore_words]\r\n# remove duplicates and sort\r\nall_words = sorted(set(all_words))\r\ntags = sorted(set(tags))\r\n\r\nprint(len(xy), \"patterns\")\r\nprint(len(tags), \"tags:\", tags)\r\nprint(len(all_words), \"unique stemmed words:\", all_words)\r\n\r\n# create training data\r\nX_train = []\r\ny_train = []\r\nfor (pattern_sentence, tag) in xy:\r\n # X: bag of words for each pattern_sentence\r\n bag = bag_of_words(pattern_sentence, all_words)\r\n X_train.append(bag)\r\n # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot\r\n label = tags.index(tag)\r\n y_train.append(label)\r\n\r\nX_train = np.array(X_train)\r\ny_train = np.array(y_train)\r\n\r\n# Hyper-parameters \r\nnum_epochs = 1000\r\nbatch_size = 8\r\nlearning_rate = 0.001\r\ninput_size = len(X_train[0])\r\nhidden_size = 8\r\noutput_size = len(tags)\r\nprint(input_size, output_size)\r\n\r\nclass ChatDataset(Dataset):\r\n\r\n def __init__(self):\r\n self.n_samples = len(X_train)\r\n self.x_data = X_train\r\n self.y_data = y_train\r\n\r\n # support indexing such that dataset[i] can be used to get i-th sample\r\n def __getitem__(self, index):\r\n return self.x_data[index], self.y_data[index]\r\n\r\n # we can call len(dataset) to return the size\r\n def __len__(self):\r\n return self.n_samples\r\n\r\ndataset = ChatDataset()\r\ntrain_loader = DataLoader(dataset=dataset,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n num_workers=0)\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\nmodel = NeuralNet(input_size, hidden_size, output_size).to(device)\r\n\r\n# Loss and optimizer\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n\r\n# Train the model\r\nfor epoch in range(num_epochs):\r\n for (words, labels) in train_loader:\r\n words = words.to(device)\r\n labels = labels.to(dtype=torch.long).to(device)\r\n \r\n # Forward pass\r\n outputs = model(words)\r\n # if y would be one-hot, we must apply\r\n # labels = torch.max(labels, 1)[1]\r\n loss = criterion(outputs, labels)\r\n \r\n # Backward and optimize\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n if (epoch+1) % 100 == 0:\r\n print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')\r\n\r\n\r\nprint(f'final loss: {loss.item():.4f}')\r\n\r\ndata = {\r\n\"model_state\": model.state_dict(),\r\n\"input_size\": input_size,\r\n\"hidden_size\": hidden_size,\r\n\"output_size\": output_size,\r\n\"all_words\": all_words,\r\n\"tags\": tags\r\n}\r\n\r\nFILE = \"data.pth\"\r\ntorch.save(data, FILE)\r\n\r\nprint(f'training complete. file saved to {FILE}')"
] | [
[
"torch.utils.data.DataLoader",
"torch.save",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"numpy.array"
]
] |
TurkuNLP/paraphrase-classification | [
"625f0cf5223ecff9d25c2a4f558ca39fa5ecc794"
] | [
"para_averaging.py"
] | [
"import torch.nn.functional as F\nimport torch\nimport para_model\n\nclass ParaAvgModel(para_model.PARAModel):\n\n def __init__(self, **args):\n super().__init__(**args)\n # self.drop_layer=torch.nn.Dropout(p=0.2)\n self.cls_layer=torch.nn.Linear(self.bert.config.hidden_size*5, args['num_classes'])\n\n def forward(self, batch):\n input_ids = batch['input_ids']\n token_type_ids = batch['token_type_ids']\n attention_mask = batch['attention_mask']\n cls_mask = batch['cls_mask']\n sep1_mask = batch['sep1_mask']\n sep2_mask = batch['sep2_mask']\n left_mask = batch['left_mask']\n right_mask = batch['right_mask']\n enc = self.bert(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)[0] #BxS_LENxSIZE; BxSIZE\n cls = (enc*cls_mask.unsqueeze(-1)).sum(1) # enc.pooler_output\n sep1 = (enc*sep1_mask.unsqueeze(-1)).sum(1)\n sep2 = (enc*sep2_mask.unsqueeze(-1)).sum(1)\n left = (enc*left_mask.unsqueeze(-1)).sum(1) / left_mask.sum(-1).unsqueeze(-1)\n right = (enc*right_mask.unsqueeze(-1)).sum(1) / right_mask.sum(-1).unsqueeze(-1)\n catenated = torch.cat((cls, sep1, sep2, left, right), -1)\n # dropped = self.drop_layer(catenated)\n\n return self.cls_layer(catenated)"
] | [
[
"torch.nn.Linear",
"torch.cat"
]
] |
joewalter/mne-python | [
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc",
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc"
] | [
"mne/viz/circle.py",
"mne/io/meas_info.py"
] | [
"\"\"\"Functions to plot on circle as for connectivity\n\"\"\"\nfrom __future__ import print_function\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: Simplified BSD\n\n\nfrom itertools import cycle\nfrom functools import partial\n\nimport numpy as np\n\nfrom .utils import plt_show\nfrom ..externals.six import string_types\n\n\ndef circular_layout(node_names, node_order, start_pos=90, start_between=True,\n group_boundaries=None, group_sep=10):\n \"\"\"Create layout arranging nodes on a circle.\n\n Parameters\n ----------\n node_names : list of str\n Node names.\n node_order : list of str\n List with node names defining the order in which the nodes are\n arranged. Must have the elements as node_names but the order can be\n different. The nodes are arranged clockwise starting at \"start_pos\"\n degrees.\n start_pos : float\n Angle in degrees that defines where the first node is plotted.\n start_between : bool\n If True, the layout starts with the position between the nodes. This is\n the same as adding \"180. / len(node_names)\" to start_pos.\n group_boundaries : None | array-like\n List of of boundaries between groups at which point a \"group_sep\" will\n be inserted. E.g. \"[0, len(node_names) / 2]\" will create two groups.\n group_sep : float\n Group separation angle in degrees. See \"group_boundaries\".\n\n Returns\n -------\n node_angles : array, shape=(len(node_names,))\n Node angles in degrees.\n \"\"\"\n n_nodes = len(node_names)\n\n if len(node_order) != n_nodes:\n raise ValueError('node_order has to be the same length as node_names')\n\n if group_boundaries is not None:\n boundaries = np.array(group_boundaries, dtype=np.int)\n if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):\n raise ValueError('\"group_boundaries\" has to be between 0 and '\n 'n_nodes - 1.')\n if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):\n raise ValueError('\"group_boundaries\" must have non-decreasing '\n 'values.')\n n_group_sep = len(group_boundaries)\n else:\n n_group_sep = 0\n boundaries = None\n\n # convert it to a list with indices\n node_order = [node_order.index(name) for name in node_names]\n node_order = np.array(node_order)\n if len(np.unique(node_order)) != n_nodes:\n raise ValueError('node_order has repeated entries')\n\n node_sep = (360. 
- n_group_sep * group_sep) / n_nodes\n\n if start_between:\n start_pos += node_sep / 2\n\n if boundaries is not None and boundaries[0] == 0:\n # special case when a group separator is at the start\n start_pos += group_sep / 2\n boundaries = boundaries[1:] if n_group_sep > 1 else None\n\n node_angles = np.ones(n_nodes, dtype=np.float) * node_sep\n node_angles[0] = start_pos\n if boundaries is not None:\n node_angles[boundaries] += group_sep\n\n node_angles = np.cumsum(node_angles)[node_order]\n\n return node_angles\n\n\ndef _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,\n n_nodes=0, node_angles=None,\n ylim=[9, 10]):\n \"\"\"Isolates connections around a single node when user left clicks a node.\n\n On right click, resets all connections.\"\"\"\n if event.inaxes != axes:\n return\n\n if event.button == 1: # left click\n # click must be near node radius\n if not ylim[0] <= event.ydata <= ylim[1]:\n return\n\n # all angles in range [0, 2*pi]\n node_angles = node_angles % (np.pi * 2)\n node = np.argmin(np.abs(event.xdata - node_angles))\n\n patches = event.inaxes.patches\n for ii, (x, y) in enumerate(zip(indices[0], indices[1])):\n patches[ii].set_visible(node in [x, y])\n fig.canvas.draw()\n elif event.button == 3: # right click\n patches = event.inaxes.patches\n for ii in range(np.size(indices, axis=1)):\n patches[ii].set_visible(True)\n fig.canvas.draw()\n\n\ndef plot_connectivity_circle(con, node_names, indices=None, n_lines=None,\n node_angles=None, node_width=None,\n node_colors=None, facecolor='black',\n textcolor='white', node_edgecolor='black',\n linewidth=1.5, colormap='hot', vmin=None,\n vmax=None, colorbar=True, title=None,\n colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),\n fontsize_title=12, fontsize_names=8,\n fontsize_colorbar=8, padding=6.,\n fig=None, subplot=111, interactive=True,\n node_linewidth=2., show=True):\n \"\"\"Visualize connectivity as a circular graph.\n\n Note: This code is based on the circle graph example by Nicolas P. Rougier\n http://www.labri.fr/perso/nrougier/coding/.\n\n Parameters\n ----------\n con : array\n Connectivity scores. Can be a square matrix, or a 1D array. If a 1D\n array is provided, \"indices\" has to be used to define the connection\n indices.\n node_names : list of str\n Node names. The order corresponds to the order in con.\n indices : tuple of arrays | None\n Two arrays with indices of connections for which the connections\n strenghts are defined in con. Only needed if con is a 1D array.\n n_lines : int | None\n If not None, only the n_lines strongest connections (strength=abs(con))\n are drawn.\n node_angles : array, shape=(len(node_names,)) | None\n Array with node positions in degrees. If None, the nodes are equally\n spaced on the circle. See mne.viz.circular_layout.\n node_width : float | None\n Width of each node in degrees. If None, the minimum angle between any\n two nodes is used as the width.\n node_colors : list of tuples | list of str\n List with the color to use for each node. If fewer colors than nodes\n are provided, the colors will be repeated. Any color supported by\n matplotlib can be used, e.g., RGBA tuples, named colors.\n facecolor : str\n Color to use for background. See matplotlib.colors.\n textcolor : str\n Color to use for text. See matplotlib.colors.\n node_edgecolor : str\n Color to use for lines around nodes. 
See matplotlib.colors.\n linewidth : float\n Line width to use for connections.\n colormap : str\n Colormap to use for coloring the connections.\n vmin : float | None\n Minimum value for colormap. If None, it is determined automatically.\n vmax : float | None\n Maximum value for colormap. If None, it is determined automatically.\n colorbar : bool\n Display a colorbar or not.\n title : str\n The figure title.\n colorbar_size : float\n Size of the colorbar.\n colorbar_pos : 2-tuple\n Position of the colorbar.\n fontsize_title : int\n Font size to use for title.\n fontsize_names : int\n Font size to use for node names.\n fontsize_colorbar : int\n Font size to use for colorbar.\n padding : float\n Space to add around figure to accommodate long labels.\n fig : None | instance of matplotlib.pyplot.Figure\n The figure to use. If None, a new figure with the specified background\n color will be created.\n subplot : int | 3-tuple\n Location of the subplot when creating figures with multiple plots. E.g.\n 121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See\n matplotlib.pyplot.subplot.\n interactive : bool\n When enabled, left-click on a node to show only connections to that\n node. Right-click shows all connections.\n node_linewidth : float\n Line with for nodes.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of matplotlib.pyplot.Figure\n The figure handle.\n axes : instance of matplotlib.axes.PolarAxesSubplot\n The subplot handle.\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.path as m_path\n import matplotlib.patches as m_patches\n\n n_nodes = len(node_names)\n\n if node_angles is not None:\n if len(node_angles) != n_nodes:\n raise ValueError('node_angles has to be the same length '\n 'as node_names')\n # convert it to radians\n node_angles = node_angles * np.pi / 180\n else:\n # uniform layout on unit circle\n node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)\n\n if node_width is None:\n # widths correspond to the minimum angle between two nodes\n dist_mat = node_angles[None, :] - node_angles[:, None]\n dist_mat[np.diag_indices(n_nodes)] = 1e9\n node_width = np.min(np.abs(dist_mat))\n else:\n node_width = node_width * np.pi / 180\n\n if node_colors is not None:\n if len(node_colors) < n_nodes:\n node_colors = cycle(node_colors)\n else:\n # assign colors using colormap\n node_colors = [plt.cm.spectral(i / float(n_nodes))\n for i in range(n_nodes)]\n\n # handle 1D and 2D connectivity information\n if con.ndim == 1:\n if indices is None:\n raise ValueError('indices has to be provided if con.ndim == 1')\n elif con.ndim == 2:\n if con.shape[0] != n_nodes or con.shape[1] != n_nodes:\n raise ValueError('con has to be 1D or a square matrix')\n # we use the lower-triangular part\n indices = np.tril_indices(n_nodes, -1)\n con = con[indices]\n else:\n raise ValueError('con has to be 1D or a square matrix')\n\n # get the colormap\n if isinstance(colormap, string_types):\n colormap = plt.get_cmap(colormap)\n\n # Make figure background the same colors as axes\n if fig is None:\n fig = plt.figure(figsize=(8, 8), facecolor=facecolor)\n\n # Use a polar axes\n if not isinstance(subplot, tuple):\n subplot = (subplot,)\n axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)\n\n # No ticks, we'll put our own\n plt.xticks([])\n plt.yticks([])\n\n # Set y axes limit, add additional space if requested\n plt.ylim(0, 10 + padding)\n\n # Remove the black axes border which may obscure the labels\n axes.spines['polar'].set_visible(False)\n\n # Draw lines between 
connected nodes, only draw the strongest connections\n if n_lines is not None and len(con) > n_lines:\n con_thresh = np.sort(np.abs(con).ravel())[-n_lines]\n else:\n con_thresh = 0.\n\n # get the connections which we are drawing and sort by connection strength\n # this will allow us to draw the strongest connections first\n con_abs = np.abs(con)\n con_draw_idx = np.where(con_abs >= con_thresh)[0]\n\n con = con[con_draw_idx]\n con_abs = con_abs[con_draw_idx]\n indices = [ind[con_draw_idx] for ind in indices]\n\n # now sort them\n sort_idx = np.argsort(con_abs)\n con_abs = con_abs[sort_idx]\n con = con[sort_idx]\n indices = [ind[sort_idx] for ind in indices]\n\n # Get vmin vmax for color scaling\n if vmin is None:\n vmin = np.min(con[np.abs(con) >= con_thresh])\n if vmax is None:\n vmax = np.max(con)\n vrange = vmax - vmin\n\n # We want to add some \"noise\" to the start and end position of the\n # edges: We modulate the noise with the number of connections of the\n # node and the connection strength, such that the strongest connections\n # are closer to the node center\n nodes_n_con = np.zeros((n_nodes), dtype=np.int)\n for i, j in zip(indices[0], indices[1]):\n nodes_n_con[i] += 1\n nodes_n_con[j] += 1\n\n # initialize random number generator so plot is reproducible\n rng = np.random.mtrand.RandomState(seed=0)\n\n n_con = len(indices[0])\n noise_max = 0.25 * node_width\n start_noise = rng.uniform(-noise_max, noise_max, n_con)\n end_noise = rng.uniform(-noise_max, noise_max, n_con)\n\n nodes_n_con_seen = np.zeros_like(nodes_n_con)\n for i, (start, end) in enumerate(zip(indices[0], indices[1])):\n nodes_n_con_seen[start] += 1\n nodes_n_con_seen[end] += 1\n\n start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /\n float(nodes_n_con[start]))\n end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /\n float(nodes_n_con[end]))\n\n # scale connectivity for colormap (vmin<=>0, vmax<=>1)\n con_val_scaled = (con - vmin) / vrange\n\n # Finally, we draw the connections\n for pos, (i, j) in enumerate(zip(indices[0], indices[1])):\n # Start point\n t0, r0 = node_angles[i], 10\n\n # End point\n t1, r1 = node_angles[j], 10\n\n # Some noise in start and end point\n t0 += start_noise[pos]\n t1 += end_noise[pos]\n\n verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]\n codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,\n m_path.Path.LINETO]\n path = m_path.Path(verts, codes)\n\n color = colormap(con_val_scaled[pos])\n\n # Actual line\n patch = m_patches.PathPatch(path, fill=False, edgecolor=color,\n linewidth=linewidth, alpha=1.)\n axes.add_patch(patch)\n\n # Draw ring with colored nodes\n height = np.ones(n_nodes) * 1.0\n bars = axes.bar(node_angles, height, width=node_width, bottom=9,\n edgecolor=node_edgecolor, lw=node_linewidth,\n facecolor='.9', align='center')\n\n for bar, color in zip(bars, node_colors):\n bar.set_facecolor(color)\n\n # Draw node labels\n angles_deg = 180 * node_angles / np.pi\n for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):\n if angle_deg >= 270:\n ha = 'left'\n else:\n # Flip the label, so text is always upright\n angle_deg += 180\n ha = 'right'\n\n axes.text(angle_rad, 10.4, name, size=fontsize_names,\n rotation=angle_deg, rotation_mode='anchor',\n horizontalalignment=ha, verticalalignment='center',\n color=textcolor)\n\n if title is not None:\n plt.title(title, color=textcolor, fontsize=fontsize_title,\n axes=axes)\n\n if colorbar:\n sm = plt.cm.ScalarMappable(cmap=colormap,\n norm=plt.Normalize(vmin, vmax))\n 
sm.set_array(np.linspace(vmin, vmax))\n cb = plt.colorbar(sm, ax=axes, use_gridspec=False,\n shrink=colorbar_size,\n anchor=colorbar_pos)\n cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')\n cb.ax.tick_params(labelsize=fontsize_colorbar)\n plt.setp(cb_yticks, color=textcolor)\n\n # Add callback for interaction\n if interactive:\n callback = partial(_plot_connectivity_circle_onpick, fig=fig,\n axes=axes, indices=indices, n_nodes=n_nodes,\n node_angles=node_angles)\n\n fig.canvas.mpl_connect('button_press_event', callback)\n\n plt_show(show)\n return fig, axes\n",
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom collections import Counter\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nimport os.path as op\nimport re\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .pick import channel_type\nfrom .constants import FIFF\nfrom .open import fiff_open\nfrom .tree import dir_tree_find\nfrom .tag import read_tag, find_tag\nfrom .proj import _read_proj, _write_proj, _uniquify_projs, _normalize_proj\nfrom .ctf_comp import read_ctf_comp, write_ctf_comp\nfrom .write import (start_file, end_file, start_block, end_block,\n write_string, write_dig_point, write_float, write_int,\n write_coord_trans, write_ch_info, write_name_list,\n write_julian, write_float_matrix)\nfrom .proc_history import _read_proc_history, _write_proc_history\nfrom ..utils import logger, verbose, warn\nfrom .. import __version__\nfrom ..externals.six import b, BytesIO, string_types, text_type\n\n\n_kind_dict = dict(\n eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),\n grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),\n ref_meg=(FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3,\n FIFF.FIFF_UNIT_T),\n misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),\n stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n bio=(FIFF.FIFFV_BIO_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL),\n hbr=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFF_UNIT_MOL)\n)\n\n\ndef _summarize_str(st):\n \"\"\"Aux function\"\"\"\n return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'\n\n\nclass Info(dict):\n \"\"\"Information about the recording.\n\n This data structure behaves like a dictionary. It contains all meta-data\n that is available for a recording.\n\n The attributes listed below are the possible dictionary entries:\n\n Attributes\n ----------\n bads : list of str\n List of bad (noisy/broken) channels, by name. These channels will by\n default be ignored by many processing steps.\n ch_names : list-like of str (read-only)\n The names of the channels.\n This object behaves like a read-only Python list. Behind the scenes\n it iterates over the channels dictionaries in `info['chs']`:\n `info['ch_names'][x] == info['chs'][x]['ch_name']`\n chs : list of dict\n A list of channel information structures.\n See: :ref:`faq` for details.\n comps : list of dict\n CTF software gradient compensation data.\n See: :ref:`faq` for details.\n custom_ref_applied : bool\n Whether a custom (=other than average) reference has been applied to\n the EEG data. This flag is checked by some algorithms that require an\n average reference to be set.\n events : list of dict\n Event list, usually extracted from the stim channels.\n See: :ref:`faq` for details.\n hpi_results : list of dict\n Head position indicator (HPI) digitization points and fit information\n (e.g., the resulting transform). 
See: :ref:`faq` for details.\n meas_date : list of int\n The first element of this list is a POSIX timestamp (milliseconds since\n 1970-01-01 00:00:00) denoting the date and time at which the\n measurement was taken. The second element is the number of\n microseconds.\n nchan : int\n Number of channels.\n projs : list of dict\n List of SSP operators that operate on the data.\n See: :ref:`faq` for details.\n sfreq : float\n Sampling frequency in Hertz.\n See: :ref:`faq` for details.\n acq_pars : str | None\n MEG system acquition parameters.\n acq_stim : str | None\n MEG system stimulus parameters.\n buffer_size_sec : float | None\n Buffer size (in seconds) when reading the raw data in chunks.\n ctf_head_t : dict | None\n The transformation from 4D/CTF head coordinates to Neuromag head\n coordinates. This is only present in 4D/CTF data.\n See: :ref:`faq` for details.\n description : str | None\n String description of the recording.\n dev_ctf_t : dict | None\n The transformation from device coordinates to 4D/CTF head coordinates.\n This is only present in 4D/CTF data.\n See: :ref:`faq` for details.\n dev_head_t : dict | None\n The device to head transformation.\n See: :ref:`faq` for details.\n dig : list of dict | None\n The Polhemus digitization data in head coordinates.\n See: :ref:`faq` for details.\n experimentor : str | None\n Name of the person that ran the experiment.\n file_id : dict | None\n The fif ID datastructure of the measurement file.\n See: :ref:`faq` for details.\n filename : str | None\n The name of the file that provided the raw data.\n highpass : float | None\n Highpass corner frequency in Hertz. Zero indicates a DC recording.\n hpi_meas : list of dict | None\n HPI measurements that were taken at the start of the recording\n (e.g. coil frequencies).\n hpi_subsystem : dict | None\n Information about the HPI subsystem that was used (e.g., event\n channel used for cHPI measurements).\n line_freq : float | None\n Frequency of the power line in Hertz.\n lowpass : float | None\n Lowpass corner frequency in Hertz.\n meas_id : dict | None\n The ID assigned to this measurement by the acquisition system or during\n file conversion.\n See: :ref:`faq` for details.\n proj_id : int | None\n ID number of the project the experiment belongs to.\n proj_name : str | None\n Name of the project the experiment belongs to.\n subject_info : dict | None\n Information about the subject.\n proc_history : list of dict | None | not present in dict\n The SSS info, the CTC correction and the calibaraions from the SSS\n processing logs inside of a raw file.\n See: :ref:`faq` for details.\n \"\"\"\n\n def copy(self):\n \"\"\"Copy the instance\n\n Returns\n -------\n info : instance of Info\n The copied info.\n \"\"\"\n return Info(deepcopy(self))\n\n def normalize_proj(self):\n \"\"\"(Re-)Normalize projection vectors after subselection\n\n Applying projection after sub-selecting a set of channels that\n were originally used to compute the original projection vectors\n can be dangerous (e.g., if few channels remain, most power was\n in channels that are no longer picked, etc.). By default, mne\n will emit a warning when this is done.\n\n This function will re-normalize projectors to use only the\n remaining channels, thus avoiding that warning. 
Only use this\n function if you're confident that the projection vectors still\n adequately capture the original signal of interest.\n \"\"\"\n _normalize_proj(self)\n\n def __repr__(self):\n \"\"\"Summarize info instead of printing all\"\"\"\n strs = ['<Info | %s non-empty fields']\n non_empty = 0\n for k, v in self.items():\n if k in ['bads', 'ch_names']:\n entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)\n if v else '0 items')\n if len(entr) >= 56:\n # get rid of of half printed ch names\n entr = _summarize_str(entr)\n elif k == 'filename' and v:\n path, fname = op.split(v)\n entr = path[:10] + '.../' + fname\n elif k == 'projs' and v:\n entr = ', '.join(p['desc'] + ': o%s' %\n {0: 'ff', 1: 'n'}[p['active']] for p in v)\n if len(entr) >= 56:\n entr = _summarize_str(entr)\n elif k == 'meas_date' and np.iterable(v):\n # first entry in meas_date is meaningful\n entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')\n elif k == 'kit_system_id' and v is not None:\n from .kit.constants import SYSNAMES as KIT_SYSNAMES\n entr = '%i (%s)' % (v, KIT_SYSNAMES.get(v, 'unknown'))\n else:\n this_len = (len(v) if hasattr(v, '__len__') else\n ('%s' % v if v is not None else None))\n entr = (('%d items' % this_len) if isinstance(this_len, int)\n else ('%s' % this_len if this_len else ''))\n if entr:\n non_empty += 1\n entr = ' | ' + entr\n if k == 'chs':\n ch_types = [channel_type(self, idx) for idx in range(len(v))]\n ch_counts = Counter(ch_types)\n entr += \" (%s)\" % ', '.join(\"%s: %d\" % (ch_type.upper(), count)\n for ch_type, count\n in ch_counts.items())\n strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))\n if k in ['sfreq', 'lowpass', 'highpass']:\n strs[-1] += ' Hz'\n strs_non_empty = sorted(s for s in strs if '|' in s)\n strs_empty = sorted(s for s in strs if '|' not in s)\n st = '\\n '.join(strs_non_empty + strs_empty)\n st += '\\n>'\n st %= non_empty\n return st\n\n def _check_consistency(self):\n \"\"\"Do some self-consistency checks and datatype tweaks\"\"\"\n missing = [bad for bad in self['bads'] if bad not in self['ch_names']]\n if len(missing) > 0:\n raise RuntimeError('bad channel(s) %s marked do not exist in info'\n % (missing,))\n\n chs = [ch['ch_name'] for ch in self['chs']]\n if len(self['ch_names']) != len(chs) or any(\n ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \\\n self['nchan'] != len(chs):\n raise RuntimeError('info channel name inconsistency detected, '\n 'please notify mne-python developers')\n\n # make sure we have the proper datatypes\n for key in ('sfreq', 'highpass', 'lowpass'):\n if self.get(key) is not None:\n self[key] = float(self[key])\n\n # make sure channel names are unique\n unique_ids = np.unique(self['ch_names'], return_index=True)[1]\n if len(unique_ids) != self['nchan']:\n dups = set(self['ch_names'][x]\n for x in np.setdiff1d(range(self['nchan']), unique_ids))\n raise RuntimeError('Channel names are not unique, found '\n 'duplicates for: %s' % dups)\n\n def _update_redundant(self):\n \"\"\"Update the redundant entries\"\"\"\n self['ch_names'] = [ch['ch_name'] for ch in self['chs']]\n self['nchan'] = len(self['chs'])\n\n\ndef read_fiducials(fname):\n \"\"\"Read fiducials from a fiff file\n\n Parameters\n ----------\n fname : str\n The filename to read.\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...)\n \"\"\"\n fid, tree, _ = fiff_open(fname)\n with fid:\n isotrak = 
dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)\n isotrak = isotrak[0]\n pts = []\n coord_frame = FIFF.FIFFV_COORD_UNKNOWN\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n pts.append(tag.data)\n elif kind == FIFF.FIFF_MNE_COORD_FRAME:\n tag = read_tag(fid, pos)\n coord_frame = tag.data[0]\n\n if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:\n err = (\"No coordinate frame was found in the file %r, it is probably \"\n \"not a valid fiducials file.\" % fname)\n raise ValueError(err)\n\n # coord_frame is not stored in the tag\n for pt in pts:\n pt['coord_frame'] = coord_frame\n\n return pts, coord_frame\n\n\ndef write_fiducials(fname, pts, coord_frame=0):\n \"\"\"Write fiducials to a fiff file\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...)\n \"\"\"\n pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))\n bad_frames = pts_frames - set((coord_frame,))\n if len(bad_frames) > 0:\n err = (\"Points have coord_frame entries that are incompatible with \"\n \"coord_frame=%i: %s.\" % (coord_frame, str(tuple(bad_frames))))\n raise ValueError(err)\n\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)\n for pt in pts:\n write_dig_point(fid, pt)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n end_file(fid)\n\n\ndef _read_dig_fif(fid, meas_info):\n \"\"\"Helper to read digitizer data from a FIFF file\"\"\"\n isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)\n dig = None\n if len(isotrak) == 0:\n logger.info('Isotrak not found')\n elif len(isotrak) > 1:\n warn('Multiple Isotrak found')\n else:\n isotrak = isotrak[0]\n dig = []\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n dig.append(tag.data)\n dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n return dig\n\n\ndef _read_dig_points(fname, comments='%', unit='auto'):\n \"\"\"Read digitizer data from a text file.\n\n If fname ends in .hsp or .esp, the function assumes digitizer files in [m],\n otherwise it assumes space-delimited text files in [mm].\n\n Parameters\n ----------\n fname : str\n The filepath of space delimited file with points.\n comments : str\n The character used to indicate the start of a comment;\n Default: '%'.\n unit : 'auto' | 'm' | 'cm' | 'mm'\n Unit of the digitizer files (hsp and elp). If not 'm', coordinates will\n be rescaled to 'm'. 
Default is 'auto', which assumes 'm' for *.hsp and\n *.elp files and 'mm' for *.txt files, corresponding to the known\n Polhemus export formats.\n\n Returns\n -------\n dig_points : np.ndarray, shape (n_points, 3)\n Array of dig points in [m].\n \"\"\"\n if unit not in ('auto', 'm', 'mm', 'cm'):\n raise ValueError('unit must be one of \"auto\", \"m\", \"mm\", or \"cm\"')\n\n _, ext = op.splitext(fname)\n if ext == '.elp' or ext == '.hsp':\n with open(fname) as fid:\n file_str = fid.read()\n value_pattern = \"\\-?\\d+\\.?\\d*e?\\-?\\d*\"\n coord_pattern = \"({0})\\s+({0})\\s+({0})\\s*$\".format(value_pattern)\n if ext == '.hsp':\n coord_pattern = '^' + coord_pattern\n points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,\n re.MULTILINE)]\n dig_points = np.array(points_str, dtype=float)\n else:\n dig_points = np.loadtxt(fname, comments=comments, ndmin=2)\n if unit == 'auto':\n unit = 'mm'\n\n if dig_points.shape[-1] != 3:\n err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)\n raise ValueError(err)\n\n if unit == 'mm':\n dig_points /= 1000.\n elif unit == 'cm':\n dig_points /= 100.\n\n return dig_points\n\n\ndef _write_dig_points(fname, dig_points):\n \"\"\"Write points to text file\n\n Parameters\n ----------\n fname : str\n Path to the file to write. The kind of file to write is determined\n based on the extension: '.txt' for tab separated text file.\n dig_points : numpy.ndarray, shape (n_points, 3)\n Points.\n \"\"\"\n _, ext = op.splitext(fname)\n dig_points = np.asarray(dig_points)\n if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):\n err = (\"Points must be of shape (n_points, 3), \"\n \"not %s\" % (dig_points.shape,))\n raise ValueError(err)\n\n if ext == '.txt':\n with open(fname, 'wb') as fid:\n version = __version__\n now = dt.now().strftime(\"%I:%M%p on %B %d, %Y\")\n fid.write(b(\"% Ascii 3D points file created by mne-python version \"\n \"{version} at {now}\\n\".format(version=version,\n now=now)))\n fid.write(b(\"% {N} 3D points, \"\n \"x y z per line\\n\".format(N=len(dig_points))))\n np.savetxt(fid, dig_points, delimiter='\\t', newline='\\n')\n else:\n msg = \"Unrecognized extension: %r. 
Need '.txt'.\" % ext\n raise ValueError(msg)\n\n\ndef _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,\n dig_points=None, dig_ch_pos=None):\n \"\"\"Constructs digitizer info for the info.\n\n Parameters\n ----------\n nasion : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the nasion point.\n lpa : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the left auricular point.\n rpa : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the right auricular point.\n hpi : array-like | numpy.ndarray, shape (n_points, 3) | None\n Points designated as head position indicator points.\n dig_points : array-like | numpy.ndarray, shape (n_points, 3)\n Points designed as the headshape points.\n dig_ch_pos : dict\n Dict of EEG channel positions.\n\n Returns\n -------\n dig : list\n List of digitizer points to be added to the info['dig'].\n \"\"\"\n dig = []\n if lpa is not None:\n lpa = np.asarray(lpa)\n if lpa.shape == (3,):\n dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('LPA should have the shape (3,) instead of %s'\n % (lpa.shape,))\n raise ValueError(msg)\n if nasion is not None:\n nasion = np.asarray(nasion)\n if nasion.shape == (3,):\n dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('Nasion should have the shape (3,) instead of %s'\n % (nasion.shape,))\n raise ValueError(msg)\n if rpa is not None:\n rpa = np.asarray(rpa)\n if rpa.shape == (3,):\n dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('RPA should have the shape (3,) instead of %s'\n % (rpa.shape,))\n raise ValueError(msg)\n if hpi is not None:\n hpi = np.asarray(hpi)\n if hpi.shape[1] == 3:\n for idx, point in enumerate(hpi):\n dig.append({'r': point, 'ident': idx + 1,\n 'kind': FIFF.FIFFV_POINT_HPI,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('HPI should have the shape (n_points, 3) instead of '\n '%s' % (hpi.shape,))\n raise ValueError(msg)\n if dig_points is not None:\n dig_points = np.asarray(dig_points)\n if dig_points.shape[1] == 3:\n for idx, point in enumerate(dig_points):\n dig.append({'r': point, 'ident': idx + 1,\n 'kind': FIFF.FIFFV_POINT_EXTRA,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('Points should have the shape (n_points, 3) instead of '\n '%s' % (dig_points.shape,))\n raise ValueError(msg)\n if dig_ch_pos is not None:\n keys = sorted(dig_ch_pos.keys())\n for key in keys:\n dig.append({'r': dig_ch_pos[key], 'ident': int(key[-3:]),\n 'kind': FIFF.FIFFV_POINT_EEG,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n return dig\n\n\n@verbose\ndef read_info(fname, verbose=None):\n \"\"\"Read measurement info from a file\n\n Parameters\n ----------\n fname : str\n File name.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n Measurement information for the dataset.\n \"\"\"\n f, tree, _ = fiff_open(fname)\n with f as fid:\n info = read_meas_info(fid, tree)[0]\n return info\n\n\ndef read_bad_channels(fid, node):\n \"\"\"Read bad channels\n\n Parameters\n ----------\n fid : file\n The file descriptor.\n\n node : dict\n The node of the FIF tree that contains info on the bad channels.\n\n Returns\n -------\n bads : list\n A list of bad channel's 
names.\n \"\"\"\n nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n bads = []\n if len(nodes) > 0:\n for node in nodes:\n tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)\n if tag is not None and tag.data is not None:\n bads = tag.data.split(':')\n return bads\n\n\n@verbose\ndef read_meas_info(fid, tree, clean_bads=False, verbose=None):\n \"\"\"Read the measurement info\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n clean_bads : bool\n If True, clean info['bads'] before running consistency check.\n Should only be needed for old files where we did not check bads\n before saving.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n Info on dataset.\n meas : dict\n Node in tree that contains the info.\n \"\"\"\n\n # Find the desired blocks\n meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)\n if len(meas) == 0:\n raise ValueError('Could not find measurement data')\n if len(meas) > 1:\n raise ValueError('Cannot read more that 1 measurement data')\n meas = meas[0]\n\n meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)\n if len(meas_info) == 0:\n raise ValueError('Could not find measurement info')\n if len(meas_info) > 1:\n raise ValueError('Cannot read more that 1 measurement info')\n meas_info = meas_info[0]\n\n # Read measurement info\n dev_head_t = None\n ctf_head_t = None\n dev_ctf_t = None\n meas_date = None\n highpass = None\n lowpass = None\n nchan = None\n sfreq = None\n chs = []\n experimenter = None\n description = None\n proj_id = None\n proj_name = None\n line_freq = None\n custom_ref_applied = False\n xplotter_layout = None\n kit_system_id = None\n for k in range(meas_info['nent']):\n kind = meas_info['directory'][k].kind\n pos = meas_info['directory'][k].pos\n if kind == FIFF.FIFF_NCHAN:\n tag = read_tag(fid, pos)\n nchan = int(tag.data)\n elif kind == FIFF.FIFF_SFREQ:\n tag = read_tag(fid, pos)\n sfreq = float(tag.data)\n elif kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n elif kind == FIFF.FIFF_LOWPASS:\n tag = read_tag(fid, pos)\n lowpass = float(tag.data)\n elif kind == FIFF.FIFF_HIGHPASS:\n tag = read_tag(fid, pos)\n highpass = float(tag.data)\n elif kind == FIFF.FIFF_MEAS_DATE:\n tag = read_tag(fid, pos)\n meas_date = tag.data\n elif kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n\n if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n dev_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n ctf_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \\\n cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:\n dev_ctf_t = cand\n elif kind == FIFF.FIFF_EXPERIMENTER:\n tag = read_tag(fid, pos)\n experimenter = tag.data\n elif kind == FIFF.FIFF_DESCRIPTION:\n tag = read_tag(fid, pos)\n description = tag.data\n elif kind == FIFF.FIFF_PROJ_ID:\n tag = read_tag(fid, pos)\n proj_id = tag.data\n elif kind == FIFF.FIFF_PROJ_NAME:\n tag = read_tag(fid, pos)\n proj_name = tag.data\n elif kind == FIFF.FIFF_LINE_FREQ:\n tag = read_tag(fid, pos)\n line_freq = float(tag.data)\n elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11\n tag = read_tag(fid, pos)\n custom_ref_applied = bool(tag.data)\n elif kind == FIFF.FIFF_XPLOTTER_LAYOUT:\n tag = read_tag(fid, pos)\n xplotter_layout = str(tag.data)\n elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID:\n tag 
= read_tag(fid, pos)\n kit_system_id = int(tag.data)\n\n # Check that we have everything we need\n if nchan is None:\n raise ValueError('Number of channels is not defined')\n\n if sfreq is None:\n raise ValueError('Sampling frequency is not defined')\n\n if len(chs) == 0:\n raise ValueError('Channel information not defined')\n\n if len(chs) != nchan:\n raise ValueError('Incorrect number of channel definitions found')\n\n if dev_head_t is None or ctf_head_t is None:\n hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n if len(hpi_result) == 1:\n hpi_result = hpi_result[0]\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n dev_head_t is None):\n dev_head_t = cand\n elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n ctf_head_t is None):\n ctf_head_t = cand\n\n # Locate the Polhemus data\n dig = _read_dig_fif(fid, meas_info)\n\n # Locate the acquisition information\n acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)\n acq_pars = None\n acq_stim = None\n if len(acqpars) == 1:\n acqpars = acqpars[0]\n for k in range(acqpars['nent']):\n kind = acqpars['directory'][k].kind\n pos = acqpars['directory'][k].pos\n if kind == FIFF.FIFF_DACQ_PARS:\n tag = read_tag(fid, pos)\n acq_pars = tag.data\n elif kind == FIFF.FIFF_DACQ_STIM:\n tag = read_tag(fid, pos)\n acq_stim = tag.data\n\n # Load the SSP data\n projs = _read_proj(fid, meas_info)\n\n # Load the CTF compensation data\n comps = read_ctf_comp(fid, meas_info, chs)\n\n # Load the bad channel list\n bads = read_bad_channels(fid, meas_info)\n\n #\n # Put the data together\n #\n if tree['id'] is not None:\n info = Info(file_id=tree['id'])\n else:\n info = Info(file_id=None)\n\n # Locate events list\n events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)\n evs = list()\n for event in events:\n ev = dict()\n for k in range(event['nent']):\n kind = event['directory'][k].kind\n pos = event['directory'][k].pos\n if kind == FIFF.FIFF_EVENT_CHANNELS:\n ev['channels'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_EVENT_LIST:\n ev['list'] = read_tag(fid, pos).data\n evs.append(ev)\n info['events'] = evs\n\n # Locate HPI result\n hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n hrs = list()\n for hpi_result in hpi_results:\n hr = dict()\n hr['dig_points'] = []\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n hr['dig_points'].append(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:\n hr['order'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COILS_USED:\n hr['used'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:\n hr['moments'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:\n hr['goodness'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:\n hr['good_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:\n hr['dist_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:\n hr['accept'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_COORD_TRANS:\n hr['coord_trans'] = read_tag(fid, pos).data\n hrs.append(hr)\n info['hpi_results'] = hrs\n\n # Locate HPI Measurement\n hpi_meass = 
dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)\n hms = list()\n for hpi_meas in hpi_meass:\n hm = dict()\n for k in range(hpi_meas['nent']):\n kind = hpi_meas['directory'][k].kind\n pos = hpi_meas['directory'][k].pos\n if kind == FIFF.FIFF_CREATOR:\n hm['creator'] = text_type(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_SFREQ:\n hm['sfreq'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NCHAN:\n hm['nchan'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NAVE:\n hm['nave'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_NCOIL:\n hm['ncoil'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_FIRST_SAMPLE:\n hm['first_samp'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_LAST_SAMPLE:\n hm['last_samp'] = int(read_tag(fid, pos).data)\n hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)\n hcs = []\n for hpi_coil in hpi_coils:\n hc = dict()\n for k in range(hpi_coil['nent']):\n kind = hpi_coil['directory'][k].kind\n pos = hpi_coil['directory'][k].pos\n if kind == FIFF.FIFF_HPI_COIL_NO:\n hc['number'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_EPOCH:\n hc['epoch'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_SLOPES:\n hc['slopes'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_CORR_COEFF:\n hc['corr_coeff'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COIL_FREQ:\n hc['coil_freq'] = read_tag(fid, pos).data\n hcs.append(hc)\n hm['hpi_coils'] = hcs\n hms.append(hm)\n info['hpi_meas'] = hms\n\n subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)\n si = None\n if len(subject_info) == 1:\n subject_info = subject_info[0]\n si = dict()\n for k in range(subject_info['nent']):\n kind = subject_info['directory'][k].kind\n pos = subject_info['directory'][k].pos\n if kind == FIFF.FIFF_SUBJ_ID:\n tag = read_tag(fid, pos)\n si['id'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HIS_ID:\n tag = read_tag(fid, pos)\n si['his_id'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_LAST_NAME:\n tag = read_tag(fid, pos)\n si['last_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:\n tag = read_tag(fid, pos)\n si['first_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:\n tag = read_tag(fid, pos)\n si['middle_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:\n tag = read_tag(fid, pos)\n si['birthday'] = tag.data\n elif kind == FIFF.FIFF_SUBJ_SEX:\n tag = read_tag(fid, pos)\n si['sex'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HAND:\n tag = read_tag(fid, pos)\n si['hand'] = int(tag.data)\n info['subject_info'] = si\n\n hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)\n hs = None\n if len(hpi_subsystem) == 1:\n hpi_subsystem = hpi_subsystem[0]\n hs = dict()\n for k in range(hpi_subsystem['nent']):\n kind = hpi_subsystem['directory'][k].kind\n pos = hpi_subsystem['directory'][k].pos\n if kind == FIFF.FIFF_HPI_NCOIL:\n tag = read_tag(fid, pos)\n hs['ncoil'] = int(tag.data)\n elif kind == FIFF.FIFF_EVENT_CHANNEL:\n tag = read_tag(fid, pos)\n hs['event_channel'] = text_type(tag.data)\n hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)\n hc = []\n for coil in hpi_coils:\n this_coil = dict()\n for j in range(coil['nent']):\n kind = coil['directory'][j].kind\n pos = coil['directory'][j].pos\n if kind == FIFF.FIFF_EVENT_BITS:\n tag = read_tag(fid, pos)\n this_coil['event_bits'] = np.array(tag.data)\n hc.append(this_coil)\n hs['hpi_coils'] = hc\n info['hpi_subsystem'] = hs\n\n # Read processing history\n _read_proc_history(fid, 
tree, info)\n\n # Make the most appropriate selection for the measurement id\n if meas_info['parent_id'] is None:\n if meas_info['id'] is None:\n if meas['id'] is None:\n if meas['parent_id'] is None:\n info['meas_id'] = info['file_id']\n else:\n info['meas_id'] = meas['parent_id']\n else:\n info['meas_id'] = meas['id']\n else:\n info['meas_id'] = meas_info['id']\n else:\n info['meas_id'] = meas_info['parent_id']\n\n info['experimenter'] = experimenter\n info['description'] = description\n info['proj_id'] = proj_id\n info['proj_name'] = proj_name\n\n if meas_date is None:\n meas_date = [info['meas_id']['secs'], info['meas_id']['usecs']]\n info['meas_date'] = meas_date\n\n info['sfreq'] = sfreq\n info['highpass'] = highpass if highpass is not None else 0.\n info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0\n info['line_freq'] = line_freq\n\n # Add the channel information and make a list of channel names\n # for convenience\n info['chs'] = chs\n\n #\n # Add the coordinate transformations\n #\n info['dev_head_t'] = dev_head_t\n info['ctf_head_t'] = ctf_head_t\n info['dev_ctf_t'] = dev_ctf_t\n if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:\n from ..transforms import Transform\n head_ctf_trans = linalg.inv(ctf_head_t['trans'])\n dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])\n info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)\n\n # All kinds of auxliary stuff\n info['dig'] = dig\n info['bads'] = bads\n info._update_redundant()\n if clean_bads:\n info['bads'] = [b for b in bads if b in info['ch_names']]\n info['projs'] = projs\n info['comps'] = comps\n info['acq_pars'] = acq_pars\n info['acq_stim'] = acq_stim\n info['custom_ref_applied'] = custom_ref_applied\n info['xplotter_layout'] = xplotter_layout\n info['kit_system_id'] = kit_system_id\n info._check_consistency()\n return info, meas\n\n\ndef write_meas_info(fid, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info into a file id (from a fif file)\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n info : instance of Info\n The measurement info structure.\n data_type : int\n The data_type in case it is necessary. 
Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Notes\n -----\n Tags are written in a particular order for compatibility with maxfilter.\n \"\"\"\n info._check_consistency()\n\n # Measurement info\n start_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n for event in info['events']:\n start_block(fid, FIFF.FIFFB_EVENTS)\n if event.get('channels') is not None:\n write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])\n if event.get('list') is not None:\n write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])\n end_block(fid, FIFF.FIFFB_EVENTS)\n\n # HPI Result\n for hpi_result in info['hpi_results']:\n start_block(fid, FIFF.FIFFB_HPI_RESULT)\n for d in hpi_result['dig_points']:\n write_dig_point(fid, d)\n if 'order' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,\n hpi_result['order'])\n if 'used' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])\n if 'moments' in hpi_result:\n write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,\n hpi_result['moments'])\n if 'goodness' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,\n hpi_result['goodness'])\n if 'good_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,\n hpi_result['good_limit'])\n if 'dist_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,\n hpi_result['dist_limit'])\n if 'accept' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])\n if 'coord_trans' in hpi_result:\n write_coord_trans(fid, hpi_result['coord_trans'])\n end_block(fid, FIFF.FIFFB_HPI_RESULT)\n\n # HPI Measurement\n for hpi_meas in info['hpi_meas']:\n start_block(fid, FIFF.FIFFB_HPI_MEAS)\n if hpi_meas.get('creator') is not None:\n write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])\n if hpi_meas.get('sfreq') is not None:\n write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])\n if hpi_meas.get('nchan') is not None:\n write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])\n if hpi_meas.get('nave') is not None:\n write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])\n if hpi_meas.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])\n if hpi_meas.get('first_samp') is not None:\n write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])\n if hpi_meas.get('last_samp') is not None:\n write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])\n for hpi_coil in hpi_meas['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if hpi_coil.get('number') is not None:\n write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])\n if hpi_coil.get('epoch') is not None:\n write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])\n if hpi_coil.get('slopes') is not None:\n write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])\n if hpi_coil.get('corr_coeff') is not None:\n write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,\n hpi_coil['corr_coeff'])\n if hpi_coil.get('coil_freq') is not None:\n write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,\n hpi_coil['coil_freq'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_MEAS)\n\n # Polhemus data\n if info['dig'] is not None:\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n for d in info['dig']:\n write_dig_point(fid, d)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n\n # megacq parameters\n if info['acq_pars'] is not None or info['acq_stim'] is not None:\n start_block(fid, FIFF.FIFFB_DACQ_PARS)\n if info['acq_pars'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_PARS, 
info['acq_pars'])\n\n if info['acq_stim'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])\n\n end_block(fid, FIFF.FIFFB_DACQ_PARS)\n\n # Coordinate transformations if the HPI result block was not there\n if info['dev_head_t'] is not None:\n write_coord_trans(fid, info['dev_head_t'])\n\n if info['ctf_head_t'] is not None:\n write_coord_trans(fid, info['ctf_head_t'])\n\n if info['dev_ctf_t'] is not None:\n write_coord_trans(fid, info['dev_ctf_t'])\n\n # Projectors\n _write_proj(fid, info['projs'])\n\n # Bad channels\n if len(info['bads']) > 0:\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n # General\n if info.get('experimenter') is not None:\n write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])\n if info.get('description') is not None:\n write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])\n if info.get('proj_id') is not None:\n write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])\n if info.get('proj_name') is not None:\n write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])\n if info.get('meas_date') is not None:\n write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])\n write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])\n write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])\n if info['lowpass'] is not None:\n write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])\n if info['highpass'] is not None:\n write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])\n if info.get('line_freq') is not None:\n write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])\n if data_type is not None:\n write_int(fid, FIFF.FIFF_DATA_PACK, data_type)\n if info.get('custom_ref_applied'):\n write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])\n if info.get('xplotter_layout'):\n write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])\n\n # Channel information\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n # for float/double, the \"range\" param is unnecessary\n if reset_range is True:\n c['range'] = 1.0\n write_ch_info(fid, c)\n\n # Subject information\n if info.get('subject_info') is not None:\n start_block(fid, FIFF.FIFFB_SUBJECT)\n si = info['subject_info']\n if si.get('id') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])\n if si.get('his_id') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])\n if si.get('last_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])\n if si.get('first_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])\n if si.get('middle_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])\n if si.get('birthday') is not None:\n write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])\n if si.get('sex') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])\n if si.get('hand') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])\n end_block(fid, FIFF.FIFFB_SUBJECT)\n\n if info.get('hpi_subsystem') is not None:\n hs = info['hpi_subsystem']\n start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n if hs.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])\n if hs.get('event_channel') is not None:\n write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])\n if hs.get('hpi_coils') is not None:\n for coil in hs['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if 
coil.get('event_bits') is not None:\n write_int(fid, FIFF.FIFF_EVENT_BITS,\n coil['event_bits'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n\n # CTF compensation info\n write_ctf_comp(fid, info['comps'])\n\n # KIT system ID\n if info.get('kit_system_id') is not None:\n write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])\n\n end_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n # Processing history\n _write_proc_history(fid, info)\n\n\ndef write_info(fname, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info in fif file.\n\n Parameters\n ----------\n fname : str\n The name of the file. Should end by -info.fif.\n info : instance of Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n \"\"\"\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MEAS)\n write_meas_info(fid, info, data_type, reset_range)\n end_block(fid, FIFF.FIFFB_MEAS)\n end_file(fid)\n\n\ndef _is_equal_dict(dicts):\n \"\"\"Aux function\"\"\"\n tests = zip(*[d.items() for d in dicts])\n is_equal = []\n for d in tests:\n k0, v0 = d[0]\n if (isinstance(v0, (list, np.ndarray)) and len(v0) > 0 and\n isinstance(v0[0], dict)):\n for k, v in d:\n is_equal.append((k0 == k) and _is_equal_dict(v))\n else:\n is_equal.append(all(np.all(k == k0) and\n (np.array_equal(v, v0) if isinstance(v, np.ndarray)\n else np.all(v == v0)) for k, v in d))\n return all(is_equal)\n\n\n@verbose\ndef _merge_dict_values(dicts, key, verbose=None):\n \"\"\"Merge things together\n\n Fork for {'dict', 'list', 'array', 'other'}\n and consider cases where one or all are of the same type.\n \"\"\"\n values = [d[key] for d in dicts]\n msg = (\"Don't know how to merge '%s'. Make sure values are \"\n \"compatible.\" % key)\n\n def _flatten(lists):\n return [item for sublist in lists for item in sublist]\n\n def _check_isinstance(values, kind, func):\n return func([isinstance(v, kind) for v in values])\n\n def _where_isinstance(values, kind):\n \"\"\"Aux function\"\"\"\n return np.where([isinstance(v, type) for v in values])[0]\n\n # list\n if _check_isinstance(values, list, all):\n lists = (d[key] for d in dicts)\n return (_uniquify_projs(_flatten(lists)) if key == 'projs'\n else _flatten(lists))\n elif _check_isinstance(values, list, any):\n idx = _where_isinstance(values, list)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n lists = (d[key] for d in dicts if isinstance(d[key], list))\n return _flatten(lists)\n # dict\n elif _check_isinstance(values, dict, all):\n is_qual = _is_equal_dict(values)\n if is_qual:\n return values[0]\n else:\n RuntimeError(msg)\n elif _check_isinstance(values, dict, any):\n idx = _where_isinstance(values, dict)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # ndarray\n elif _check_isinstance(values, np.ndarray, all):\n is_qual = all(np.all(values[0] == x) for x in values[1:])\n if is_qual:\n return values[0]\n elif key == 'meas_date':\n logger.info('Found multiple entries for %s. 
'\n 'Setting value to `None`' % key)\n return None\n else:\n raise RuntimeError(msg)\n elif _check_isinstance(values, np.ndarray, any):\n idx = _where_isinstance(values, np.ndarray)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # other\n else:\n unique_values = set(values)\n if len(unique_values) == 1:\n return list(values)[0]\n elif isinstance(list(unique_values)[0], BytesIO):\n logger.info('Found multiple StringIO instances. '\n 'Setting value to `None`')\n return None\n elif isinstance(list(unique_values)[0], string_types):\n logger.info('Found multiple filenames. '\n 'Setting value to `None`')\n return None\n else:\n raise RuntimeError(msg)\n\n\n@verbose\ndef _merge_info(infos, force_update_to_first=False, verbose=None):\n \"\"\"Merge multiple measurement info dictionaries.\n\n - Fields that are present in only one info object will be used in the\n merged info.\n - Fields that are present in multiple info objects and are the same\n will be used in the merged info.\n - Fields that are present in multiple info objects and are different\n will result in a None value in the merged info.\n - Channels will be concatenated. If multiple info objects contain\n channels with the same name, an exception is raised.\n\n Parameters\n ----------\n infos | list of instance of Info\n Info objects to merge into one info object.\n force_update_to_first : bool\n If True, force the fields for objects in `info` will be updated\n to match those in the first item. Use at your own risk, as this\n may overwrite important metadata.\n verbose : bool, str, int, or NonIe\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n The merged info object.\n \"\"\"\n for info in infos:\n info._check_consistency()\n if force_update_to_first is True:\n infos = deepcopy(infos)\n _force_update_info(infos[0], infos[1:])\n info = Info()\n info['chs'] = []\n for this_info in infos:\n info['chs'].extend(this_info['chs'])\n info._update_redundant()\n duplicates = set([ch for ch in info['ch_names']\n if info['ch_names'].count(ch) > 1])\n if len(duplicates) > 0:\n msg = (\"The following channels are present in more than one input \"\n \"measurement info objects: %s\" % list(duplicates))\n raise ValueError(msg)\n\n transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']\n for trans_name in transforms:\n trans = [i[trans_name] for i in infos if i[trans_name]]\n if len(trans) == 0:\n info[trans_name] = None\n elif len(trans) == 1:\n info[trans_name] = trans[0]\n elif all(np.all(trans[0]['trans'] == x['trans']) and\n trans[0]['from'] == x['from'] and\n trans[0]['to'] == x['to']\n for x in trans[1:]):\n info[trans_name] = trans[0]\n else:\n msg = (\"Measurement infos provide mutually inconsistent %s\" %\n trans_name)\n raise ValueError(msg)\n\n # KIT system-IDs\n kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']]\n if len(kit_sys_ids) == 0:\n info['kit_system_id'] = None\n elif len(set(kit_sys_ids)) == 1:\n info['kit_system_id'] = kit_sys_ids[0]\n else:\n raise ValueError(\"Trying to merge channels from different KIT systems\")\n\n # other fields\n other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',\n 'comps', 'custom_ref_applied', 'description', 'dig',\n 'experimenter', 'file_id', 'filename', 'highpass',\n 'hpi_results', 'hpi_meas', 'hpi_subsystem', 'events',\n 'line_freq', 'lowpass', 'meas_date', 'meas_id',\n 'proj_id', 'proj_name', 'projs', 'sfreq',\n 'subject_info', 'sfreq', 
'xplotter_layout']\n for k in other_fields:\n info[k] = _merge_dict_values(infos, k)\n\n info._check_consistency()\n return info\n\n\ndef create_info(ch_names, sfreq, ch_types=None, montage=None):\n \"\"\"Create a basic Info instance suitable for use with create_raw\n\n Parameters\n ----------\n ch_names : list of str | int\n Channel names. If an int, a list of channel names will be created\n from :func:`range(ch_names) <range>`.\n sfreq : float\n Sample rate of the data.\n ch_types : list of str | str\n Channel types. If None, data are assumed to be misc.\n Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',\n 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'hbr' or 'hbo'.\n If str, then all channels are assumed to be of the same type.\n montage : None | str | Montage | DigMontage | list\n A montage containing channel positions. If str or Montage is\n specified, the channel info will be updated with the channel\n positions. Default is None. If DigMontage is specified, the\n digitizer information will be updated. A list of unique montages,\n can be specifed and applied to the info. See also the documentation of\n :func:`mne.channels.read_montage` for more information.\n\n Returns\n -------\n info : instance of Info\n The measurement info.\n\n Notes\n -----\n The info dictionary will be sparsely populated to enable functionality\n within the rest of the package. Advanced functionality such as source\n localization can only be obtained through substantial, proper\n modifications of the info structure (not recommended).\n\n Note that the MEG device-to-head transform ``info['dev_head_t']`` will\n be initialized to the identity transform.\n \"\"\"\n if isinstance(ch_names, int):\n ch_names = list(np.arange(ch_names).astype(str))\n if not isinstance(ch_names, (list, tuple)):\n raise TypeError('ch_names must be a list, tuple, or int')\n sfreq = float(sfreq)\n if sfreq <= 0:\n raise ValueError('sfreq must be positive')\n nchan = len(ch_names)\n if ch_types is None:\n ch_types = ['misc'] * nchan\n if isinstance(ch_types, string_types):\n ch_types = [ch_types] * nchan\n if len(ch_types) != nchan:\n raise ValueError('ch_types and ch_names must be the same length '\n '(%s != %s)' % (len(ch_types), nchan))\n info = _empty_info(sfreq)\n info['meas_date'] = np.array([0, 0], np.int32)\n loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)\n for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):\n if not isinstance(name, string_types):\n raise TypeError('each entry in ch_names must be a string')\n if not isinstance(kind, string_types):\n raise TypeError('each entry in ch_types must be a string')\n if kind not in _kind_dict:\n raise KeyError('kind must be one of %s, not %s'\n % (list(_kind_dict.keys()), kind))\n kind = _kind_dict[kind]\n chan_info = dict(loc=loc.copy(), unit_mul=0, range=1., cal=1.,\n kind=kind[0], coil_type=kind[1],\n unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n ch_name=name, scanno=ci + 1, logno=ci + 1)\n info['chs'].append(chan_info)\n info._update_redundant()\n if montage is not None:\n from ..channels.montage import (Montage, DigMontage, _set_montage,\n read_montage)\n if not isinstance(montage, list):\n montage = [montage]\n for montage_ in montage:\n if isinstance(montage_, (Montage, DigMontage)):\n _set_montage(info, montage_)\n elif isinstance(montage_, string_types):\n montage_ = read_montage(montage_)\n _set_montage(info, montage_)\n else:\n raise TypeError('Montage must be an instance of Montage, '\n 'DigMontage, a list of 
montages, or filepath, '\n 'not %s.' % type(montage))\n info._check_consistency()\n return info\n\n\nRAW_INFO_FIELDS = (\n 'acq_pars', 'acq_stim', 'bads', 'buffer_size_sec', 'ch_names', 'chs',\n 'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',\n 'dev_head_t', 'dig', 'experimenter', 'events',\n 'file_id', 'filename', 'highpass', 'hpi_meas', 'hpi_results',\n 'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date',\n 'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq',\n 'subject_info', 'xplotter_layout',\n)\n\n\ndef _empty_info(sfreq):\n \"\"\"Create an empty info dictionary\"\"\"\n from ..transforms import Transform\n _none_keys = (\n 'acq_pars', 'acq_stim', 'buffer_size_sec', 'ctf_head_t', 'description',\n 'dev_ctf_t', 'dig', 'experimenter',\n 'file_id', 'filename', 'highpass', 'hpi_subsystem', 'kit_system_id',\n 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',\n 'subject_info', 'xplotter_layout',\n )\n _list_keys = ('bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results',\n 'projs')\n info = Info()\n for k in _none_keys:\n info[k] = None\n for k in _list_keys:\n info[k] = list()\n info['custom_ref_applied'] = False\n info['dev_head_t'] = Transform('meg', 'head', np.eye(4))\n info['highpass'] = 0.\n info['sfreq'] = float(sfreq)\n info['lowpass'] = info['sfreq'] / 2.\n info._update_redundant()\n info._check_consistency()\n return info\n\n\ndef _force_update_info(info_base, info_target):\n \"\"\"Update target info objects with values from info base.\n\n Note that values in info_target will be overwritten by those in info_base.\n This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'.\n\n Parameters\n ----------\n info_base : mne.Info\n The Info object you want to use for overwriting values\n in target Info objects.\n info_target : mne.Info | list of mne.Info\n The Info object(s) you wish to overwrite using info_base. These objects\n will be modified in-place.\n \"\"\"\n exclude_keys = ['chs', 'ch_names', 'nchan']\n info_target = np.atleast_1d(info_target).ravel()\n all_infos = np.hstack([info_base, info_target])\n for ii in all_infos:\n if not isinstance(ii, Info):\n raise ValueError('Inputs must be of type Info. '\n 'Found type %s' % type(ii))\n for key, val in info_base.items():\n if key in exclude_keys:\n continue\n for i_targ in info_target:\n i_targ[key] = val\n\n\ndef anonymize_info(info):\n \"\"\"Anonymize measurement information in place.\n\n Reset 'subject_info', 'meas_date', 'file_id', and 'meas_id' keys if they\n exist in ``info``.\n\n Parameters\n ----------\n info : dict, instance of Info\n Measurement information for the dataset.\n\n Returns\n -------\n info : instance of Info\n Measurement information for the dataset.\n\n Notes\n -----\n Operates in place.\n \"\"\"\n if not isinstance(info, Info):\n raise ValueError('self must be an Info instance.')\n if info.get('subject_info') is not None:\n del info['subject_info']\n info['meas_date'] = [0, 0]\n for key_1 in ('file_id', 'meas_id'):\n for key_2 in ('secs', 'msecs', 'usecs'):\n info[key_1][key_2] = 0\n return info\n"
] | [
[
"numpy.ones",
"numpy.diff",
"numpy.any",
"numpy.argsort",
"numpy.size",
"numpy.random.mtrand.RandomState",
"matplotlib.path.Path",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"matplotlib.patches.PathPatch",
"numpy.where",
"numpy.linspace",
"numpy.unique",
"numpy.diag_indices",
"numpy.zeros",
"numpy.max",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.colorbar",
"numpy.zeros_like",
"numpy.cumsum",
"matplotlib.pyplot.getp",
"matplotlib.pyplot.Normalize",
"numpy.tril_indices",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.yticks"
],
[
"numpy.eye",
"numpy.zeros",
"numpy.savetxt",
"numpy.iterable",
"numpy.asarray",
"scipy.linalg.inv",
"numpy.atleast_1d",
"numpy.arange",
"numpy.hstack",
"numpy.all",
"numpy.array_equal",
"numpy.array",
"numpy.dot",
"numpy.unique",
"numpy.loadtxt"
]
] |
jsaez8/qtt | [
"fa6497ace86a255f33a2192ba01d063d07d6895e"
] | [
"src/qtt/instrument_drivers/virtual_awg.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 31 13:04:09 2016\n\n@author: diepencjv\n\"\"\"\n\n# %%\nimport numpy as np\nimport scipy.signal\nimport logging\nimport warnings\n\nimport qcodes\nfrom qcodes import Instrument\nfrom qcodes.plots.pyqtgraph import QtPlot\nfrom qcodes.data.data_array import DataArray\nimport qtt\nimport qtt.utilities.tools\n\nlogger = logging.getLogger(__name__)\n# %%\n\n\nclass virtual_awg(Instrument):\n \"\"\"\n\n Attributes:\n _awgs (list): handles to instruments\n awg_map (dict)\n hardware (Instrument): contains AWG to plunger values\n corr (float): unknown\n delay_FPGA (float): time delay of signals going through fridge\n\n \"\"\"\n\n def __init__(self, name, instruments=[], awg_map=None, hardware=None, verbose=1, **kwargs):\n super().__init__(name, **kwargs)\n logger.info('initialize virtual_awg %s' % name)\n self._awgs = instruments\n self.awg_map = awg_map\n self.hardware = hardware\n self.verbose = verbose\n self.delay_FPGA = 2.0e-6 # should depend on filterboxes\n self.corr = .0 # legacy code, specific for FPGA board not used any more\n self.maxdatapts = 16e6 # This used to be set to the fpga maximum, but that maximum should not be handled here\n\n self.awg_seq = None\n if len(self._awgs) == 0 and self.verbose:\n print('no physical AWGs connected')\n elif len(self._awgs) == 1:\n self.awg_cont = self._awgs[0]\n self.awg_cont.set('run_mode', 'CONT')\n elif len(self._awgs) == 2 and 'awg_mk' in self.awg_map:\n self.awg_cont = self._awgs[self.awg_map['awg_mk'][0]]\n self.awg_cont.set('run_mode', 'CONT')\n self.awg_seq = self._awgs[(self.awg_map['awg_mk'][0] + 1) % 2]\n\n self._set_seq_mode(self.awg_seq)\n self.delay_AWG = self.hardware.parameters['delay_AWG'].get()\n else:\n raise Exception(\n 'Configuration of AWGs not supported by virtual_awg instrument')\n\n self.AWG_clock = 1e8\n self.ch_amp = 4.0\n for awg in self._awgs:\n awg.set('clock_freq', self.AWG_clock)\n awg.delete_all_waveforms_from_list()\n for i in range(1, 5):\n awg.set('ch%s_amp' % i, self.ch_amp)\n\n def _set_seq_mode(self, a):\n a.set('run_mode', 'SEQ')\n a.sequence_length.set(1)\n a.set_sqel_trigger_wait(1, 0)\n\n def get_idn(self):\n ''' Overrule because the default VISA command does not work '''\n IDN = {'vendor': 'QuTech', 'model': 'virtual_awg',\n 'serial': None, 'firmware': None}\n return IDN\n\n def awg_gate(self, gate):\n \"\"\" Return true of the gate can be controlled by the awg\n\n Args:\n gate ()\n \"\"\"\n if gate is None:\n return False\n\n if isinstance(gate, dict):\n # vector scan, assume we can do it fast if all components are fast\n return np.all([self.awg_gate(g) for g in gate])\n if self.awg_map is None:\n return False\n\n if gate in self.awg_map:\n return True\n else:\n return False\n\n def stop(self, verbose=0):\n ''' Stops all AWGs and turns of all channels '''\n for awg in self._awgs:\n awg.stop()\n for i in range(1, 5):\n awg.set('ch%d_state' % i, 0)\n\n if verbose:\n print('Stopped AWGs')\n\n def sweep_init(self, waveforms, period=1e-3, delete=True, samp_freq=None):\n ''' Send waveform(s) to gate(s)\n\n Arguments:\n waveforms (dict): the waveforms with the gates as keys\n period (float): period of the waveform in seconds\n\n Returns:\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n\n Example:\n --------\n >> sweep_info = sweep_init(waveforms)\n '''\n sweepgates = [g for g in waveforms]\n\n if delete:\n for awg in self._awgs:\n awg.delete_all_waveforms_from_list()\n\n awgs = [self._awgs[self.awg_map[g][0]] for g in 
sweepgates]\n if 'fpga_mk' in self.awg_map:\n marker_info = self.awg_map['fpga_mk']\n marker_delay = self.delay_FPGA\n marker_name = 'fpga_mk'\n elif 'm4i_mk' in self.awg_map:\n marker_info = self.awg_map['m4i_mk']\n if samp_freq is not None:\n pretrigger_period = 16 / samp_freq\n else:\n pretrigger_period = 0\n marker_delay = self.delay_FPGA + pretrigger_period\n marker_name = 'm4i_mk'\n\n awgs.append(self._awgs[marker_info[0]])\n\n sweep_info = dict()\n wave_len = len(waveforms[sweepgates[0]]['wave'])\n for g in sweepgates:\n sweep_info[self.awg_map[g]] = dict()\n sweep_info[self.awg_map[g]]['waveform'] = waveforms[g]['wave']\n sweep_info[self.awg_map[g]]['marker1'] = np.zeros(wave_len)\n sweep_info[self.awg_map[g]]['marker2'] = np.zeros(wave_len)\n if 'name' in waveforms[g]:\n sweep_info[self.awg_map[g]]['name'] = waveforms[g]['name']\n else:\n sweep_info[self.awg_map[g]]['name'] = 'waveform_%s' % g\n if marker_info[:2] == self.awg_map[g]:\n sweep_info[marker_info[:2]]['delay'] = marker_delay\n\n # marker points\n marker_points = np.zeros(wave_len)\n marker_points[int(marker_delay * self.AWG_clock):(int(marker_delay * self.AWG_clock) + wave_len // 20)] = 1.0\n\n if marker_info[:2] not in sweep_info:\n sweep_info[marker_info[:2]] = dict()\n sweep_info[marker_info[:2]]['waveform'] = np.zeros(wave_len)\n sweep_info[marker_info[:2]]['marker1'] = np.zeros(wave_len)\n sweep_info[marker_info[:2]]['marker2'] = np.zeros(wave_len)\n for g in sweepgates:\n marker_name += '_%s' % g\n sweep_info[marker_info[:2]]['name'] = marker_name\n sweep_info[marker_info[:2]]['delay'] = marker_delay\n\n sweep_info[marker_info[:2]]['marker%d' % marker_info[2]] = marker_points\n self._awgs[marker_info[0]].set(\n 'ch%i_m%i_low' % (marker_info[1], marker_info[2]), 0)\n self._awgs[marker_info[0]].set(\n 'ch%i_m%i_high' % (marker_info[1], marker_info[2]), 2.6)\n\n # awg marker\n if getattr(self, 'awg_seq', None) is not None:\n awg_info = self.awg_map['awg_mk']\n if awg_info[:2] not in sweep_info:\n awgs.append(self._awgs[awg_info[0]])\n sweep_info[awg_info[:2]] = dict()\n sweep_info[awg_info[:2]]['waveform'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['marker1'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['marker2'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['name'] = 'awg_mk'\n\n awg_marker = np.zeros(wave_len)\n awg_marker[0:wave_len // 20] = 1\n awg_marker = np.roll(\n awg_marker, wave_len - int(self.delay_AWG * self.AWG_clock))\n sweep_info[awg_info[:2]]['marker%d' %\n self.awg_map['awg_mk'][2]] = awg_marker\n self._awgs[awg_info[0]].set(\n 'ch%i_m%i_low' % (awg_info[1], awg_info[2]), 0)\n self._awgs[awg_info[0]].set(\n 'ch%i_m%i_high' % (awg_info[1], awg_info[2]), 2.6)\n\n # send waveforms\n if delete:\n for sweep in sweep_info:\n try:\n self._awgs[sweep[0]].send_waveform_to_list(sweep_info[sweep]['waveform'], sweep_info[\n sweep]['marker1'], sweep_info[sweep]['marker2'], sweep_info[sweep]['name'])\n except Exception as ex:\n print(ex)\n print('sweep_info[sweep][waveform] %s' % (sweep_info[sweep]['waveform'].shape,))\n print('sweep_info[sweep][marker1] %s' % (sweep_info[sweep]['marker1'].shape,))\n print('sweep_info[sweep][marker2] %s' % (sweep_info[sweep]['marker2'].shape,))\n\n return sweep_info\n\n def sweep_run(self, sweep_info):\n ''' Activate AWG(s) and channel(s) for the sweep(s).\n\n Arguments:\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n for sweep in sweep_info:\n if hasattr(self, 'awg_seq') and self._awgs[sweep[0]] == self.awg_seq:\n 
self._awgs[sweep[0]].set_sqel_waveform(\n sweep_info[sweep]['name'], sweep[1], 1)\n self._awgs[sweep[0]].set_sqel_loopcnt_to_inf(1)\n self._awgs[sweep[0]].set_sqel_event_jump_target_index(\n sweep[1], 1)\n self._awgs[sweep[0]].set_sqel_event_jump_type(1, 'IND')\n else:\n self._awgs[sweep[0]].set(\n 'ch%i_waveform' % sweep[1], sweep_info[sweep]['name'])\n\n for sweep in sweep_info:\n self._awgs[sweep[0]].set('ch%i_state' % sweep[1], 1)\n\n awgnrs = set([sweep[0] for sweep in sweep_info])\n for nr in awgnrs:\n self._awgs[nr].run()\n\n def make_sawtooth(self, sweeprange, period, width=.95, repetitionnr=1, start_zero=False):\n '''Make a sawtooth with a decline width determined by width. Not yet scaled with\n awg_to_plunger value.\n\n Arguments:\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n wave_raw (array): raw data which represents the waveform\n '''\n samplerate = 1. / self.AWG_clock\n tt = np.arange(0, period * repetitionnr + samplerate, samplerate)\n v_wave = float(sweeprange / ((self.ch_amp / 2.0)))\n wave_raw = (v_wave / 2) * scipy.signal.sawtooth(2 * np.pi * tt / period, width=width)\n# idx_zero = np.argmin(np.abs(wave_raw))\n# wave_raw = np.roll(wave_raw, wave_raw.size-idx_zero)\n if start_zero:\n o = int((wave_raw.size) * (1 - width) / 2)\n wave_raw = np.roll(wave_raw, o)\n\n return wave_raw\n\n def make_pulses(self, voltages, waittimes, reps=1, filtercutoff=None, mvrange=None):\n \"\"\"Make a pulse sequence with custom voltage levels and wait times at each level.\n\n Arguments:\n voltages (list of floats): voltage levels to be applied in the sequence\n waittimes (list of floats): duration of each pulse in the sequence\n reps (int): number of times to repeat the pulse sequence in the waveform\n filtercutoff (float): cutoff frequency of a 1st order butterworth filter to make the pulse steps smoother\n\n Returns:\n wave_raw (array): raw data which represents the waveform\n \"\"\"\n if len(waittimes) != len(voltages):\n raise Exception('Number of voltage levels must be equal to the number of wait times')\n samples = [int(x * self.AWG_clock) for x in waittimes]\n if mvrange is None:\n mvrange = [max(voltages), min(voltages)]\n v_wave = float((mvrange[0] - mvrange[1]) / self.ch_amp)\n v_prop = [2 * ((x - mvrange[1]) / (mvrange[0] - mvrange[1])) - 1 for x in voltages]\n wave_raw = np.concatenate([x * v_wave * np.ones(y) for x, y in zip(v_prop, samples)])\n if filtercutoff is not None:\n b, a = scipy.signal.butter(1, 0.5 * filtercutoff / self.AWG_clock, btype='low', analog=False, output='ba')\n wave_raw = scipy.signal.filtfilt(b, a, wave_raw)\n wave_raw = np.tile(wave_raw, reps)\n\n return wave_raw\n\n def check_frequency_waveform(self, period, width):\n \"\"\" Check whether a sawtooth waveform with specified period can be generated \"\"\"\n old_sr = self.AWG_clock\n new_sr = 5 / (period * (1 - width))\n if (new_sr) > old_sr:\n warnings.warn('awg sampling frequency %.1f MHz is too low for signal requested (sr %.1f [MHz], period %.1f [ms])' % (\n old_sr / 1e6, new_sr / 1e6, 1e3 * period), UserWarning)\n return new_sr\n\n def sweep_gate(self, gate, sweeprange, period, width=.95, wave_name=None, delete=True):\n ''' Send a sawtooth signal with the AWG to a gate to sweep. 
Also\n send a marker to the measurement instrument.\n\n Args:\n gate (string): the name of the gate to sweep\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n\n Example:\n >>> waveform, sweep_info = sweep_gate('P1',sweeprange=60,period=1e-3)\n '''\n\n self.check_frequency_waveform(period, width)\n self.check_amplitude(gate, sweeprange)\n\n start_zero = True\n waveform = dict()\n wave_raw = self.make_sawtooth(sweeprange, period, width, start_zero=start_zero)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % gate].get()\n wave = wave_raw / awg_to_plunger\n waveform[gate] = dict()\n waveform[gate]['wave'] = wave\n if wave_name is None:\n waveform[gate]['name'] = 'sweep_%s' % gate\n else:\n waveform[gate]['name'] = wave_name\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['start_zero'] = start_zero\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_gate_virt(self, gate_comb, sweeprange, period, width=.95, delete=True):\n ''' Send a sawtooth signal with the AWG to a linear combination of\n gates to sweep. Also send a marker to the measurement instrument.\n\n Arguments:\n gate_comb (dict): the gates to sweep and the coefficients as values\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n self.check_frequency_waveform(period, width)\n\n waveform = dict()\n for g in gate_comb:\n self.check_amplitude(g, gate_comb[g] * sweeprange)\n for g in gate_comb:\n wave_raw = self.make_sawtooth(sweeprange, period, width)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gate_comb[g] / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_%s' % g\n\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweepandpulse_gate(self, sweepdata, pulsedata, wave_name=None, delete=True, shift_zero=True):\n ''' Makes and outputs a waveform which overlays a sawtooth signal to sweep\n a gate, with a pulse sequence. A marker is sent to the measurement instrument\n at the start of the waveform.\n IMPORTANT: The function offsets the voltages values so that the last point is 0 V on all gates (i.e. 
it centers the pulse sequence on the last point)\n\n Args:\n sweepdata (dict): inputs for the sawtooth (gate, sweeprange, period, width).\n See sweep_gate for more info.\n pulsedata (dict): inputs for the pulse sequence (gate_voltages, waittimes).\n See pulse_gates for more info.\n\n Returns:\n waveform (dict): The waveform being sent with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n sweepgate = sweepdata['gate']\n sweeprange = sweepdata['sweeprange']\n period = sweepdata['period']\n width = sweepdata.get('width', 0.95)\n\n gate_voltages = pulsedata['gate_voltages'].copy()\n if shift_zero:\n for g in gate_voltages:\n gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]\n waittimes = pulsedata['waittimes']\n filtercutoff = pulsedata.get('filtercutoff', None)\n\n pulsesamp = [int(round(x * self.AWG_clock)) for x in waittimes]\n sawsamp = int(round(period * width * self.AWG_clock))\n pulsereps = int(np.ceil(self.AWG_clock * period * width / sum(pulsesamp)))\n allvoltages = np.concatenate([v for v in gate_voltages.values()])\n mvrange = [max(allvoltages), min(allvoltages)]\n\n self.check_frequency_waveform(period, width)\n\n waveform = dict()\n wave_sweep = self.make_sawtooth(sweeprange, period, width)\n for g in gate_voltages:\n self.check_amplitude(g, sweeprange + (mvrange[0] - mvrange[1]))\n for g in gate_voltages:\n wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=pulsereps,\n filtercutoff=filtercutoff, mvrange=mvrange)\n wave_raw = wave_raw[:sawsamp]\n wave_raw = np.pad(wave_raw, (0, len(wave_sweep) - len(wave_raw)), 'edge')\n if sweepgate == g:\n wave_raw += wave_sweep\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n if wave_name is None:\n waveform[g]['name'] = 'sweepandpulse_%s' % g\n else:\n waveform[g]['name'] = wave_name\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n waveform['pulse_voltages'] = gate_voltages\n waveform['pulse_waittimes'] = waittimes\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_process(self, data, waveform, Naverage=1, direction='forwards', start_offset=1):\n \"\"\" Process the data returned by reading out based on the shape of\n the sawtooth send with the AWG.\n\n Args:\n data (list or Nxk array): the data (N is the number of samples)\n waveform (dict): contains the wave and the sawtooth width\n Naverage (int): number of times the signal was averaged\n direction (string): option to use backwards signal i.o. 
forwards\n\n Returns:\n data_processed (array): The data after dropping part of it.\n\n Example:\n >> data_processed = sweep_process(data, waveform, 25)\n \"\"\"\n width = waveform['width']\n\n if isinstance(data, list):\n data = np.array(data)\n\n if direction == 'forwards':\n end = int(np.floor(width * data.shape[0] - 1))\n data_processed = data[start_offset:end]\n elif direction == 'backwards':\n begin = int(np.ceil(width * data.shape[0] + 1))\n data_processed = data[begin:]\n data_processed = data_processed[::-1]\n\n data_processed = np.array(data_processed) / Naverage\n\n return data_processed\n\n def sweep_2D(self, samp_freq, sweepgates, sweepranges, resolution, width=.95, comp=None, delete=True):\n ''' Send sawtooth signals to the sweepgates which effectively do a 2D\n scan.\n\n The first sweepgate is the fast changing gate (on the horizontal axis).\n\n Arguments:\n samp_freq (float): sampling frequency of the measurement instrument in Hertz.\n sweepgates (list): two strings with names of gates to sweep\n sweepranges (list): two floats for sweepranges in milliVolts\n\n Returns:\n waveform (dict): The waveforms being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n# JP: I think FPGA exceptions should not be handled by awg\n# if resolution[0] * resolution[1] > self.maxdatapts:\n# raise Exception('resolution is set higher than FPGA memory allows')\n\n if self.corr != 0:\n raise Exception('please do not use the .corr setting any more')\n error_corr = resolution[0] * self.corr\n period_horz = resolution[0] / samp_freq + error_corr\n period_vert = resolution[1] * period_horz\n\n self.check_frequency_waveform(period_horz, width)\n for g, r in zip(sweepgates, sweepranges):\n self.check_amplitude(g, r)\n\n waveform = dict()\n # horizontal waveform\n wave_horz_raw = self.make_sawtooth(\n sweepranges[0], period_horz, repetitionnr=resolution[1])\n awg_to_plunger_horz = self.hardware.parameters[\n 'awg_to_%s' % sweepgates[0]].get()\n wave_horz = wave_horz_raw / awg_to_plunger_horz\n waveform[sweepgates[0]] = dict()\n waveform[sweepgates[0]]['wave'] = wave_horz\n waveform[sweepgates[0]]['name'] = 'sweep_2D_horz_%s' % sweepgates[0]\n\n # vertical waveform\n wave_vert_raw = self.make_sawtooth(sweepranges[1], period_vert)\n awg_to_plunger_vert = self.hardware.parameters[\n 'awg_to_%s' % sweepgates[1]].get()\n wave_vert = wave_vert_raw / awg_to_plunger_vert\n waveform[sweepgates[1]] = dict()\n waveform[sweepgates[1]]['wave'] = wave_vert\n waveform[sweepgates[1]]['name'] = 'sweep_2D_vert_%s' % sweepgates[1]\n\n if comp is not None:\n for g in comp:\n if g not in sweepgates:\n waveform[g] = dict()\n waveform[g]['wave'] = comp[g]['vert'] * \\\n wave_vert + comp[g]['horz'] * wave_horz\n waveform[g]['name'] = 'sweep_2D_comp_%s' % g\n else:\n raise Exception('Can not compensate a sweepgate')\n\n sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)\n self.sweep_run(sweep_info)\n\n waveform['width_horz'] = width\n waveform['sweeprange_horz'] = sweepranges[0]\n waveform['width_vert'] = width\n waveform['sweeprange_vert'] = sweepranges[1]\n waveform['resolution'] = resolution\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period_vert\n waveform['period_horz'] = period_horz\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_2D_virt(self, samp_freq, gates_horz, gates_vert, 
sweepranges, resolution, width=.95, delete=True):\n ''' Send sawtooth signals to the linear combinations of gates set by\n gates_horz and gates_vert which effectively do a 2D scan of two virtual\n gates.\n\n The horizontal direction is the direction where the AWG signal is changing fastest. It is the first element in the resolution and sweepranges.\n\n Arguments:\n samp_freq (float): sampling frequency of the measurement instrument in Hertz.\n gates_horz (dict): the gates for the horizontal direction and their coefficients\n gates_vert (dict): the gates for the vertical direction and their coefficients\n sweepranges (list): two floats for sweepranges in milliVolts\n resolution (list): two ints for numbers of pixels\n\n Returns:\n waveform (dict): The waveforms being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n# JP: I think FPGA exceptions should not be handled by awg\n# if resolution[0] * resolution[1] > self.maxdatapts:\n# raise Exception('resolution is set higher than memory allows')\n\n error_corr = resolution[0] * self.corr\n period_horz = resolution[0] / samp_freq + error_corr\n period_vert = resolution[1] * period_horz\n\n new_sr = self.check_frequency_waveform(period_horz, width)\n # self.reset_AWG(new_sr)\n\n waveform = dict()\n # horizontal virtual gate\n for g in gates_horz:\n self.check_amplitude(g, sweepranges[0] * gates_horz[g])\n for g in gates_horz:\n wave_raw = self.make_sawtooth(sweepranges[0], period_horz, repetitionnr=resolution[1])\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gates_horz[g] / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_2D_virt_%s' % g\n\n # vertical virtual gate\n for g in gates_vert:\n self.check_amplitude(g, sweepranges[1] * gates_vert[g])\n for g in gates_vert:\n wave_raw = self.make_sawtooth(sweepranges[1], period_vert)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gates_vert[g] / awg_to_plunger\n if g in waveform:\n waveform[g]['wave'] = waveform[g]['wave'] + wave\n else:\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_2D_virt_%s' % g\n\n # TODO: Implement compensation of sensing dot plunger\n\n sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)\n self.sweep_run(sweep_info)\n\n waveform['width_horz'] = width\n waveform['sweeprange_horz'] = sweepranges[0]\n waveform['width_vert'] = width\n waveform['sweeprange_vert'] = sweepranges[1]\n waveform['resolution'] = resolution\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period_vert\n waveform['period_horz'] = period_horz\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_2D_process(self, data, waveform, diff_dir=None):\n ''' Process data from sweep_2D\n\n Arguments:\n data (list): the raw measured data\n waveform (dict): The waveforms that was sent with the AWG.\n\n Returns:\n data_processed (list): the processed data\n '''\n width_horz = waveform['width_horz']\n width_vert = waveform['width_vert']\n resolution = waveform['resolution']\n\n # split up the fpga data in chunks of horizontal sweeps\n chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]\n chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]\n 
data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]\n\n if diff_dir is not None:\n data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)\n\n return data_processed\n\n def pulse_gates(self, gate_voltages, waittimes, reps=1, filtercutoff=None, reset_to_zero=False, delete=True):\n ''' Send a pulse sequence with the AWG that can span over any gate space.\n Sends a marker to measurement instrument at the start of the sequence.\n Only works with physical gates.\n\n Arguments:\n gate_voltages (dict): keys are gates to apply the sequence to, and values\n are arrays with the voltage levels to be applied in the sequence\n waittimes (list of floats): duration of each pulse in the sequence\n reset_to_zero (bool): if True, the function offsets the voltages values so that the last point is 0V\n on all gates (i.e. it centers the pulse sequence on the last point).\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n period = sum(waittimes)\n if reset_to_zero:\n for g in gate_voltages:\n gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]\n allvoltages = np.concatenate([v for v in gate_voltages.values()])\n mvrange = [max(allvoltages), min(allvoltages)]\n waveform = dict()\n for g in gate_voltages:\n wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=reps,\n filtercutoff=filtercutoff, mvrange=mvrange)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'pulses_%s' % g\n\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['voltages'] = gate_voltages\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['waittimes'] = waittimes\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def reset_AWG(self, clock=1e8):\n \"\"\" Reset AWG to videomode and scanfast \"\"\"\n self.AWG_clock = clock\n for a in self._awgs:\n a.clock_freq.set(clock)\n a.trigger_mode.set('CONT')\n a.trigger_source.set('INT')\n\n for ii in range(1, 5):\n f = getattr(a, 'ch%d_amp' % ii)\n val = f()\n if val != 4.0:\n warnings.warn('AWG channel %d output not at 4.0 V' % ii)\n if self.awg_seq is not None:\n self._set_seq_mode(self.awg_seq)\n\n def set_amplitude(self, amplitude):\n \"\"\" Set the AWG peak-to-peak amplitude for all channels\n\n Args:\n amplitude (float): peak-to-peak amplitude (V)\n\n \"\"\"\n if amplitude < 0.02:\n warnings.warn('Trying to set AWG amplitude too low, setting it to minimum (20mV)')\n amplitude = 0.02\n elif amplitude > 4.5:\n warnings.warn('Trying to set AWG amplitude too high, setting it to maximum (4.5V)')\n amplitude = 4.5\n\n # tektronics 5014 has precision of 1mV\n self.ch_amp = round(amplitude, 3)\n for awg in self._awgs:\n for i in range(1, 5):\n awg.set('ch%s_amp' % i, self.ch_amp)\n\n def check_amplitude(self, gate, mvrange):\n \"\"\" Calculates the lowest allowable AWG peak-to-peak amplitude based on the\n ranges to be applied to the gates. 
If the AWG amplitude is too low, it gives\n a warning and increases the amplitude.\n\n Args:\n gate (str): name of the gate to check\n mvrange (float): voltage range, in mV, that the gate needs to reach\n \"\"\"\n min_amp = mvrange / self.hardware.parameters['awg_to_%s' % gate].get()\n if min_amp > 4:\n raise(Exception('Sweep range of gate %s is larger than maximum allowed by the AWG' % gate))\n if self.ch_amp < min_amp:\n min_amp = np.ceil(min_amp * 10) / 10\n self.set_amplitude(min_amp)\n warnings.warn('AWG amplitude too low for this range, setting to %.1f' % min_amp)\n\n# %%\n\n\ndef plot_wave_raw(wave_raw, samplerate=None, station=None):\n ''' Plot the raw wave\n\n Arguments:\n wave_raw (array): raw data which represents the waveform\n\n Returns:\n plot (QtPlot): the plot showing the data\n '''\n if samplerate is None:\n if station is None:\n raise Exception('There is no station')\n samplerate = 1 / station.awg.getattr('AWG_clock')\n else:\n samplerate = samplerate\n horz_var = np.arange(0, len(wave_raw) * samplerate, samplerate)\n x = DataArray(name='time(s)', label='time (s)',\n preset_data=horz_var, is_setpoint=True)\n y = DataArray(\n label='sweep value (mV)', preset_data=wave_raw, set_arrays=(x,))\n plot = QtPlot(x, y)\n\n return plot\n\n\ndef sweep_2D_process(data, waveform, diff_dir=None):\n ''' Process data from sweep_2D\n\n Arguments:\n data (list): the raw measured data\n waveform (dict): The waveforms that was sent with the AWG.\n\n Returns:\n data_processed (list): the processed data\n '''\n width_horz = waveform['width_horz']\n width_vert = waveform['width_vert']\n resolution = waveform['resolution']\n\n # split up the fpga data in chunks of horizontal sweeps\n chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]\n chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]\n data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]\n\n if diff_dir is not None:\n data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)\n\n return data_processed\n"
] | [
[
"numpy.tile",
"numpy.roll",
"numpy.ones",
"numpy.ceil",
"numpy.zeros",
"numpy.floor",
"numpy.arange",
"numpy.array"
]
] |
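
The sweep_gate and sweep_gate_virt methods in the record above scale a raw sawtooth by an awg_to_<gate> attenuation factor and ramp for a fraction `width` of the period. The make_sawtooth implementation is not part of this excerpt, so the sketch below only assumes its rough behaviour; the name sawtooth_sweep and the sample_rate default are illustrative, not taken from the qtt source.

```python
import numpy as np

def sawtooth_sweep(sweeprange, period, width=0.95, sample_rate=1e8, start_zero=True):
    """Rough sketch of a sawtooth sweep: ramp up over `width` of the period, then fly back."""
    n = int(round(period * sample_rate))
    n_rise = int(round(width * n))
    rise = np.linspace(-0.5, 0.5, n_rise)         # linear ramp covering the sweep range
    fall = np.linspace(0.5, -0.5, n - n_rise)     # fast return to the starting value
    wave = sweeprange * np.concatenate([rise, fall])
    if start_zero:
        wave = np.roll(wave, -n_rise // 2)        # start the waveform near 0 instead of -sweeprange/2
    return wave

# Same call shape as the sweep_gate docstring example: 60 mV range, 1 ms period.
wave = sawtooth_sweep(sweeprange=60, period=1e-3)
print(wave.shape, wave.min(), wave.max())
```

sweep_gate then divides such a raw wave by the awg_to_<gate> factor so that the requested range is reached at the gate rather than at the AWG output.
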
MarkusHaak/fieldbioinformatics | [
"3d291477a3d84968816c8e57e6078fc80135f422"
] | [
"artic/deprecated/plot_amplicon_depth.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nPlot the mean read depth per amplicon.\n\nThis has been written for use in the ARTIC pipeline so there are no file checks - it assumes the following:\n * the primer scheme is in ARTIC format\n * the input depth files are in the format: `chrom\\treadgroup\\tposition\\tdepth\n * readgroup equates to primer pool\n * the primer pairs in the scheme are sorted by amplicon number (i.e. readgroups are interleaved)\n * depth values are provided for all positions (see output of make_depth_mask.py for expected format)\n\n\"\"\"\nfrom .vcftagprimersites import read_bed_file\nimport sys\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport os\n\n\nos.environ['QT_QPA_PLATFORM'] = 'offscreen'\nimport seaborn as sns\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef go(args):\n\n # get the primer scheme\n primerScheme = read_bed_file(args.primerScheme)\n\n # number the amplicons in the scheme and link them to primer start site\n ampliconCounter = 1\n\n # store the amplicon number and starts by read group dict\n rgAmplicons = {}\n rgStarts = {}\n\n # process the primers by readgroup\n for primer in primerScheme:\n poolName = primer['PoolName']\n if poolName not in rgAmplicons:\n rgAmplicons[poolName] = []\n rgStarts[poolName] = []\n if primer['direction'] == '+':\n rgAmplicons[poolName].append(ampliconCounter)\n rgStarts[poolName].append(primer['start'])\n ampliconCounter += 1\n\n # for pandas cut func to create bins, we need to add an extra value to the starts (just use inf)\n for startList in rgStarts.values():\n startList.append(np.inf)\n\n # process the depth files\n dfs = {}\n for depthFile in args.depthFiles:\n\n # read in the depth file\n df = pd.read_csv(depthFile, sep='\\t', header=None,\n names=['refName', 'readGroup',\n 'position', 'depth'],\n dtype={'refName': str, 'readGroup': str,\n 'position': int, 'depth': int},\n usecols=(0, 1, 2, 3),)\n\n # check that there aren't too many positions in the depth data for plotting\n # assert len(df.index) < 30000, \"error: too many data points to plot\"\n\n # check all ref positions have a depth value\n startPos = df[\"position\"][0]\n endPos = df[\"position\"][df.index[-1]]\n assert len(df.index) == ((endPos - startPos) +\n 1), \"error: depth needs to be reported at all positions\"\n\n # check the primer scheme contains the readgroup\n rgList = df.readGroup.unique()\n assert len(rgList) == 1, \"error: depth file has %d readgroups, need 1 (%s)\" % (\n len(rgList), depthFile)\n rg = rgList[0]\n assert rg in rgAmplicons, \"error: readgroup not found in provided primer scheme (%s)\" % (\n rg)\n\n # get the amplicon starts for this readgroup\n amplicons = sorted(rgAmplicons[rg])\n starts = sorted(rgStarts[rg])\n\n # bin read depths by amplicon for this readgroup\n df['amplicon'] = pd.cut(\n x=df['position'], bins=starts, labels=amplicons)\n\n # store the mean of each bin\n bins = (df.groupby(['amplicon'])[\n 'depth'].mean()).rename(depthFile.name)\n\n # add to the pile\n assert rg not in dfs, \"error: readgroup present in multiple files (%s)\" % (\n rg)\n dfs[rg] = bins\n\n # combine the series data from each input file\n newDF = pd.concat(dfs, axis=1)\n newDF.sort_index(axis=0, inplace=True)\n newDF.reset_index(inplace=True)\n\n # melt the DF for seaborn\n newDF = newDF.melt(\"amplicon\", var_name=\"read group\",\n value_name=\"mean amplicon read depth\")\n newDF = newDF.dropna()\n\n # plot the bar\n g = sns.catplot(data=newDF,\n x=\"amplicon\",\n y=\"mean amplicon read depth\",\n 
hue=\"read group\",\n height=4,\n aspect=3,\n kind=\"bar\",\n dodge=False,\n legend=False)\n g.set(yscale=\"log\")\n g.fig.suptitle(args.sampleID)\n plt.legend(loc='upper right')\n plt.xticks(rotation=45, size=6)\n plt.savefig(args.outFilePrefix + \"-barplot.png\")\n plt.close()\n\n # plot the box\n g = sns.catplot(data=newDF,\n x=\"read group\",\n y=\"mean amplicon read depth\",\n kind=\"box\")\n g.fig.suptitle(args.sampleID)\n plt.savefig(args.outFilePrefix + \"-boxplot.png\")\n plt.close()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--primerScheme', required=True,\n help='the ARTIC primer scheme')\n parser.add_argument('--sampleID', required=True,\n help='the sample ID for the provided depth files')\n parser.add_argument('--outFilePrefix', default=\"./amplicon-depth\",\n help='the prefix to give the output plot file')\n parser.add_argument(\n \"depthFiles\", type=argparse.FileType('r'), nargs='+', help='the depth files produced by make_depth_mask.py')\n args = parser.parse_args()\n go(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"pandas.cut",
"pandas.concat",
"matplotlib.pyplot.close",
"matplotlib.use"
]
] |
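
plot_amplicon_depth.py assigns every reference position to an amplicon by cutting positions at the primer start sites and then averages depth per bin. Below is a minimal standalone sketch of that binning step, using hypothetical toy positions and depths instead of a real depth file; the start coordinates are made up, only the np.inf sentinel mirrors go() above.

```python
import numpy as np
import pandas as pd

# Hypothetical depth table in the shape the script expects: one readgroup, one depth per position.
rng = np.random.default_rng(0)
df = pd.DataFrame({"position": np.arange(1, 1001),
                   "depth": rng.integers(0, 500, size=1000)})

# Primer start sites for this pool; np.inf closes the last bin, as in go() above.
starts = [0, 250, 500, 750, np.inf]
amplicons = [1, 2, 3, 4]

df["amplicon"] = pd.cut(x=df["position"], bins=starts, labels=amplicons)
mean_depth = df.groupby("amplicon", observed=True)["depth"].mean()
print(mean_depth)
```
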
cperales/pygsom | [
"ac4d4818f441d862cb5183e1d2ea814e3f805759"
] | [
"gsom.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015 Philipp Ludwig <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF\nOR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\"\"\"@package GSOM\n\nThis is an implementation of the growing self-organizing map.\n\nDifferent possible approaches for the GSOM have been presented in the past\nby various researchers. To make things clear, this implementation is based\non the one described in the work of:\n\nAlahakoon, Damminda, S. Halgamuge, and Bala Srinivasan:\n\"Dynamic self-organizing maps with controlled growth for knowledge discovery.\"\nNeural Networks, IEEE Transactions on 11.3 (2000): 601-614.\n\nSadly, this article is not as comprehensive as desirable. Therefore this\nimplementation should not be taken as a reference, but as a best-effort\nversion. Some details of the algorithm have been assembled based on the\nwork of Mengxue Cao et. al, who described their approach within their work:\n\n\"Growing Self-Organizing Map Approach for Semantic Acquisition Modeling\nway within their work\"\n\nRefer to both papers for further details.\n\nAdditionally, this algorithm picks up some of the aspects proposed in the\nwork of:\n\nAndreas Nürnberger and Marcin Detyniecki:\n\"Externally growing self-organizing maps and its application to e-mail\n database visualization and exploration\"\n\"\"\"\nfrom math import log, exp\nimport itertools\nimport math\nimport random\nimport scipy\n\n\nclass GSOMNode:\n \"\"\" Represents one node in a growing SOM. \"\"\"\n R = random.Random()\n\n def __init__(self, dim, x, y, data):\n \"\"\" Initialize this node. \"\"\"\n # Create a weight vector of the given dimension:\n # Initialize the weight vector with random values between 0 and 1.\n self.weights = scipy.array([self.R.random() for _ in range(dim)])\n\n # Remember the error occuring at this particular node\n self.error = 0.0\n\n # Holds the number of the iteration during the node has been inserted.\n self.it = 0\n\n # Holds the number of the last iteration where the node has won.\n self.last_it = 0\n\n # Holds the best-matching data.\n self.data = data\n self.last_changed = 0\n\n # This node has no neighbours yet.\n self.right = None\n self.left = None\n self.up = None\n self.down = None\n\n # Copy the given coordinates.\n self.x, self.y = x, y\n\n def adjust_weights(self, target, learn_rate):\n \"\"\" Adjust the weights of this node. 
\"\"\"\n for w in range(0, len(target)):\n self.weights[w] += learn_rate * (target[w] - self.weights[w])\n\n def is_boundary(self):\n \"\"\" Check if this node is at the boundary of the map. \"\"\"\n if not self.right: return True\n if not self.left: return True\n if not self.up: return True\n if not self.down: return True\n return False\n\n\nclass GSOM:\n \"\"\" Represents a growing self-organizing map. \"\"\"\n\n @staticmethod\n def _distance(v1, v2):\n \"\"\" Calculate the euclidean distance between two scipy arrays.\"\"\"\n dist = 0.0\n for v, w in zip(v1, v2):\n dist += pow(v - w, 2)\n return dist\n\n def _find_bmu(self, vec):\n \"\"\" Find the best matching unit within the map for the given input_\n vector. \"\"\"\n dist=float(\"inf\")\n winner = False\n for node in self.nodes:\n d = self._distance(vec, node.weights)\n if d < dist:\n dist = d\n winner = node\n return winner\n\n def _find_similar_boundary(self, node):\n \"\"\" Find the most similar boundary node to the given node. \"\"\"\n dist = float(\"inf\")\n winner = False\n for boundary in self.nodes:\n if not boundary.is_boundary(): continue\n if boundary == node: continue\n\n d = self._distance(node.weights, boundary.weights)\n if d < dist:\n dist = d\n winner = node\n\n return winner\n\n def __init__(self, X, y, spread_factor=0.5):\n \"\"\" Initializes this GSOM using the given data. \"\"\"\n # Assign the data\n self.data = []\n for fn, t in zip(X, y):\n arr = scipy.array([t])\n self.data.append([fn, arr])\n\n # Determine the dimension of the data.\n self.dim = len(self.data[0][0])\n\n # Calculate the growing threshold:\n self._GT = -self.dim * math.log(spread_factor, 2)\n\n # Create the 4 starting Nodes.\n self.nodes = []\n n00 = GSOMNode(dim=self.dim, x=0, y=0, data=self.data)\n n01 = GSOMNode(self.dim, 0, 1, self.data)\n n10 = GSOMNode(self.dim, 1, 0, self.data)\n n11 = GSOMNode(self.dim, 1, 1, self.data)\n self.nodes.extend([n00, n01, n10, n11])\n\n # Create starting topology\n n00.right = n10\n n00.up = n01\n n01.right = n11\n n01.down = n00\n n10.up = n11\n n10.left = n00\n n11.left = n01\n n11.down = n10\n\n # Set properties\n self.it = 0 # Current iteration\n self.max_it = len(self.data)\n self.num_it = 1000 # Total iterations\n self.init_lr = 0.1 # Initial value of the learning rate\n self.alpha = 0.1\n self.output = open(\"gsom.csv\", \"w\")\n\n def train(self):\n # Select the next input_.\n input_ = random.choice(self.data)[1]\n input_ = random.choice(self.data)[0]\n\n # Calculate the learn rate.\n # Note that the learning rate, according to the original paper,\n # is reseated for every new input_.\n learn_rate = self.init_lr * self.alpha * (1 - 1.5/len(self.nodes))\n\n # We now present the input_ several times to the network.\n # It is unclear what's a good number here, since no publication\n # took the effort to name a value. 
However, the implementation\n # provided by Arkadi Kagan presents the input_ 20 times, so we\n # will copy that here.\n recalc_nodes = []\n for _ in range(20):\n # Find the best matching unit\n BMU = self._find_bmu(input_)\n BMU.last_it = self.it\n\n # Adapt the weights of the direct topological neighbours\n neighbours = []\n neighbours.append(BMU)\n if BMU.left: neighbours.append(BMU.left)\n if BMU.right: neighbours.append(BMU.right)\n if BMU.up: neighbours.append(BMU.up)\n if BMU.down: neighbours.append(BMU.down)\n\n if BMU not in recalc_nodes: recalc_nodes.append(BMU)\n\n for node in neighbours:\n node.adjust_weights(input_, learn_rate)\n if node not in recalc_nodes: recalc_nodes.append(node)\n\n # Calculate the error.\n err = self._distance(BMU.weights, input_)\n\n # Add the error to the node.\n growing, nodes = self._node_add_error(BMU, err)\n if growing: recalc_nodes.extend(nodes)\n\n # Count the iteration\n self.it += 1\n\n # Re-Calc representative data elements for changed nodes.\n used_data = []\n for node in self.nodes:\n used_data.append(node.data)\n\n for node in recalc_nodes:\n dist = float(\"inf\")\n winner = False\n winner_fn = False\n\n for fn, point in self.data:\n # if fn in used_data: continue\n\n d = self._distance(point, node.weights)\n if(d < dist):\n dist = d\n winner = point\n winner_fn = fn\n\n if node.data != winner_fn:\n node.data = winner_fn\n node.last_changed = self.it\n self.output.write(str(node.x) + \",\" + str(node.y)\\\n + \",change\\n\")\n used_data.append(winner_fn)\n\n # Remove unused nodes.\n self._remove_unused_nodes()\n\n def _node_add_error(self, node, error):\n \"\"\" Add the given error to the error value of the given node.\n\n This will also take care of growing the map (if necessary) and\n distributing the error along the neighbours (if necessary) \"\"\"\n node.error += error\n\n # Consider growing\n if node.error > self._GT:\n if not node.is_boundary():\n # Find the boundary node which is most similar to this node.\n node = self._find_similar_boundary(node)\n if not node:\n print(\"GSOM: Error: No free boundary node found!\")\n\n \"\"\" Old method:\n # Distribute the error along the neighbours.\n # Since this is not a boundary node, this node must have\n # 4 neighbours.\n node.error = 0.5 * self._GT\n node.left.error += 0.25 * node.left.error\n node.right.error += 0.25 * node.right.error\n node.up.error += 0.25 * node.up.error\n node.down.error += 0.25 * node.down.error\n \"\"\"\n nodes = self._grow(node)\n return True, nodes\n\n return False, 0\n\n def _grow(self, node):\n \"\"\" Grow this GSOM. 
\"\"\"\n # We grow this GSOM at every possible direction.\n nodes = []\n if node.left is None:\n nn = self._insert(node.x - 1, node.y, node)\n nodes.append(nn)\n print(\"Growing left at: (\" + str(node.x) + \",\" + str(node.y)\\\n + \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.right is None:\n nn = self._insert(node.x + 1, node.y, node)\n nodes.append(nn)\n print(\"Growing right at: (\" + str(node.x) + \",\" + str(node.y)\\\n + \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.up is None:\n nn = self._insert(node.x, node.y + 1, node)\n nodes.append(nn)\n print(\"Growing up at: (\" + str(node.x) + \",\" + str(node.y) +\\\n \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.down is None:\n nn = self._insert(node.x, node.y - 1, node)\n nodes.append(nn)\n print(\"Growing down at: (\" + str(node.x) + \",\" + str(node.y) +\\\n \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n return nodes\n\n def _insert(self, x, y, init_node):\n # Create new node\n new_node = GSOMNode(self.dim, x, y, self.data)\n self.nodes.append(new_node)\n\n # Save the number of the current iteration. We need this to prune\n # this node later (if neccessary).\n new_node.it = new_node.last_it = self.it\n\n # Create the connections to possible neighbouring nodes.\n for node in self.nodes:\n # Left, Right, Up, Down\n if node.x == x - 1 and node.y == y:\n new_node.left = node\n node.right = new_node\n if node.x == x + 1 and node.y == y:\n new_node.right = node\n node.left = new_node\n if node.x == x and node.y == y + 1:\n new_node.up = node\n node.down = new_node\n if node.x == x and node.y == y - 1:\n new_node.down = node\n node.up = new_node\n\n # Calculate new weights, look for a neighbour.\n neigh = new_node.left\n if neigh is None: neigh = new_node.right\n if neigh is None: neigh = new_node.up\n if neigh is None: neigh = new_node.down\n if neigh is None: print(\"_insert: No neighbour found!\")\n\n for i in range(0, len(new_node.weights)):\n new_node.weights[i] = 2 * init_node.weights[i] - neigh.weights[i]\n\n return new_node\n\n\n def _remove_unused_nodes(self):\n \"\"\" Remove all nodes from the GSOM that have not been used. \"\"\"\n to_remove = []\n\n # Iterate over all nodes.\n for node in self.nodes:\n # Different rules for nodes that have been used or not.\n iterations_not_won = self.it - node.last_it\n\n # If we have 50 nodes, every node is allowed not to win 50 times\n # in a row. This means every node must be picked at least once.\n if iterations_not_won < len(self.nodes) * 4.0 * (1 + self.it/len(self.data)) : continue\n\n\n # First, remove the connections to the neighbouring nodes.\n if node.left: node.left.right = None\n if node.up: node.up.down = None\n if node.down: node.down.up = None\n if node.right: node.right.left = None\n\n # Save this node for removing.\n to_remove.append(node)\n\n # Now remove all marked nodes.\n for node in to_remove:\n print(\"Removing node @ \" + str(node.x) + \", \" + str(node.y) + \\\n \" - Current it: \" + str(self.it) + \" - Last time won: \" +\\\n str(node.last_it))\n if node.data:\n self.output.write(node.data + \",\" + str(node.x)+\",\"+str(node.y)\\\n + \",remove\\n\")\n self.nodes.remove(node)\n\n"
] | [
[
"scipy.array"
]
] |
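
The GSOM in the record above grows whenever a node's accumulated error exceeds the growing threshold GT = -D * log2(spread_factor), and it locates winners by (squared) Euclidean distance. A small numpy sketch of those two pieces follows; growing_threshold and find_bmu are illustrative names, and the vectorised distance replaces the per-node Python loop in _find_bmu.

```python
import numpy as np

def growing_threshold(dim, spread_factor=0.5):
    # GT = -D * log2(SF), the same formula as self._GT in GSOM.__init__
    return -dim * np.log2(spread_factor)

def find_bmu(weights, x):
    """Index of the node closest to x (squared Euclidean, matching _distance, which omits the sqrt)."""
    return int(np.argmin(np.sum((weights - x) ** 2, axis=1)))

rng = np.random.default_rng(0)
weights = rng.random((4, 3))   # four starting nodes, three-dimensional data
x = rng.random(3)
print(growing_threshold(dim=3), find_bmu(weights, x))
```
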
eEcoLiDAR/lcMacroPipeline | [
"91709f93ef53a3e453f0ce967e1094688688f684"
] | [
"tests/test_grid.py"
] | [
"from pathlib import Path\nimport unittest\nimport numpy as np\nimport pylas\n\nfrom laserfarm.grid import Grid\n\ntry:\n import matplotlib\n matplotlib_available = True\nexcept ModuleNotFoundError:\n matplotlib_available = False\n\nif matplotlib_available:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n\n\nclass TestValidGridSetup(unittest.TestCase):\n def setUp(self):\n self.grid = Grid()\n self.grid.setup(0., 0., 20., 20., 5)\n\n def test_gridMins(self):\n np.testing.assert_allclose(self.grid.grid_mins, [0., 0.])\n\n def test_gridMaxs(self):\n np.testing.assert_allclose(self.grid.grid_maxs, [20., 20.])\n\n def test_gridWidth(self):\n np.testing.assert_allclose(self.grid.grid_width, 20.)\n\n def test_tileWidth(self):\n np.testing.assert_allclose(self.grid.tile_width, 4.)\n\n def test_tileIndexForPoint(self):\n np.testing.assert_array_equal(self.grid.get_tile_index(0.1, 0.2),\n (0, 0))\n\n def test_tileIndexForArray(self):\n np.testing.assert_array_equal(self.grid.get_tile_index((0.1, 19.9),\n (0.2, 19.8)),\n ((0, 0), (4, 4)))\n\n def test_tileBoundsForPoint(self):\n np.testing.assert_array_equal(self.grid.get_tile_bounds(0, 0),\n ((0., 0.), (4., 4.)))\n\n def test_tileBoundsForArray(self):\n np.testing.assert_array_equal(self.grid.get_tile_bounds((0, 0),\n (0, 1)),\n (((0., 0.), (0., 4.)),\n ((4., 4.), (4., 8.))))\n\n\nclass TestInvalidGridSetup(unittest.TestCase):\n def test_fractionalNumberOfTilesGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 20., 20., 0.1)\n\n def test_zeroNumberOfTilesGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 20., 20., 0)\n\n def test_zeroWidthGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 0., 20., 5)\n\n def test_rectangularGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 10., 20., 5)\n\n\nclass TestRealGridValid(unittest.TestCase):\n _test_dir = 'test_tmp_dir'\n _test_data_dir = 'testdata'\n _test_tile_idx = [101, 101]\n\n _test_file_name = 'C_43FN1_1_1.LAZ'\n _min_x = -113107.8100\n _min_y = 214783.8700\n _max_x = 398892.1900\n _max_y = 726783.87\n _n_tiles_sides = 256\n\n plot = False\n\n def setUp(self):\n self.grid = Grid()\n self.grid.setup(min_x=self._min_x,\n min_y=self._min_y,\n max_x=self._max_x,\n max_y=self._max_y,\n n_tiles_side=self._n_tiles_sides)\n self._test_data_path = Path(self._test_data_dir).joinpath(self._test_file_name)\n self.points = _read_points_from_file(str(self._test_data_path))\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n self.assertTrue(np.all(mask_valid_points))\n\n\nclass TestRealGridLowPrecision(TestRealGridValid):\n \"\"\"\n The following tile has been obtained by using large scale parameters (0.1)\n in the PDAL LAS writer. 
Some points thus fall outside the tile boundary\n when read from the file.\n \"\"\"\n _test_file_name = 'C_43FN1_1.LAZ'\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n if self.plot and matplotlib_available:\n _plot_points_and_tile(self.grid,\n self.points[~mask_valid_points],\n self._test_tile_idx,\n self._test_data_path.with_suffix('.png').name)\n self.assertFalse(np.all(mask_valid_points))\n\n def test_isPointInTileWithPrecision(self):\n x_pts, y_pts = self.points.T\n precision = np.abs(np.rint(self._max_y) - self._max_y)\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx,\n precision=precision)\n self.assertTrue(np.all(mask_valid_points))\n\n\nclass TestRealGridLowPrecisionRoundedOrigin(TestRealGridValid):\n \"\"\"\n The following tile has been obtained by rounding off the coordinates\n of the origin and by using the default scale parameters (0.01) in the PDAL\n LAS writer.\n \"\"\"\n _test_file_name = 'C_43FN1_1.LAZ'\n _test_tile_idx = [101, 101]\n\n _min_x = -113108.00\n _min_y = 214784.00\n _max_x = 398892.00\n _max_y = 726784.00\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n if self.plot and matplotlib_available:\n _plot_points_and_tile(self.grid,\n self.points[~mask_valid_points],\n self._test_tile_idx,\n self._test_data_path.with_suffix('.png').name)\n self.assertFalse(np.all(mask_valid_points))\n\n def test_isPointInTileWithPrecision(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx,\n precision=0.01)\n self.assertTrue(np.all(mask_valid_points))\n\n\ndef _read_points_from_file(filename):\n file = pylas.read(filename)\n return np.column_stack((file.x, file.y))\n\n\ndef _plot_points_and_tile(grid, points, tile_indices, filename=None):\n \"\"\"\n Plot points\n\n :param grid: grid object\n :param points: (Nx2) array containing X,Y coordinates of the points\n :param tile_indices: [N_x, N_y], where N_i is the integer tile index along\n dimension i\n :param filename: optional, path where to save plot\n \"\"\"\n # plot points\n x_pts, y_pts = points.T\n plt.scatter(x_pts, y_pts, color='r')\n # plot tile\n tile_mins, tile_maxs = grid.get_tile_bounds(*tile_indices)\n line = np.array((tile_mins,\n [tile_mins[0], tile_maxs[1]],\n tile_maxs,\n [tile_maxs[0], tile_mins[1]],\n tile_mins))\n x, y = line.T\n plt.plot(x, y, color='k')\n # add tile label\n x_cntr, y_cntr = (tile_mins + tile_maxs) / 2.\n plt.text(x_cntr, y_cntr, '({}, {})'.format(*tile_indices),\n horizontalalignment='center',\n verticalalignment='center')\n if filename is not None:\n plt.savefig(filename, dpi=300)\n else:\n plt.show()\n plt.close(plt.figure())\n"
] | [
[
"numpy.rint",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"numpy.column_stack",
"matplotlib.pyplot.show",
"numpy.all",
"numpy.testing.assert_allclose",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter"
]
] |
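
The tests above exercise Grid.get_tile_index and is_point_in_tile, but the Grid class itself is not included in this file, so the mapping below is only an assumed reconstruction: shift by the grid origin and floor-divide by the tile width. tile_index is a hypothetical helper, not laserfarm API.

```python
def tile_index(x, y, min_x, min_y, tile_width):
    """Assumed coordinate-to-tile mapping consistent with TestValidGridSetup above."""
    return int((x - min_x) // tile_width), int((y - min_y) // tile_width)

# 20 x 20 grid split into 5 tiles per side -> tile_width = 4
print(tile_index(0.1, 0.2, min_x=0., min_y=0., tile_width=4.))    # expected (0, 0)
print(tile_index(19.9, 19.8, min_x=0., min_y=0., tile_width=4.))  # expected (4, 4)
```

The precision argument tested in TestRealGridLowPrecision presumably relaxes the tile-boundary comparison by a small tolerance to absorb the coarse LAS scale factors; that logic is not reproduced here.
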
madhawav/plan2scene | [
"cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5"
] | [
"code/src/plan2scene/texture_gen/custom_ops/noise.py"
] | [
"# Code adapted from https://github.com/henzler/neuraltexture/blob/master/code/custom_ops/noise/noise.py\n\nfrom torch import nn\nfrom torch.autograd import Function\nimport plan2scene.texture_gen.utils.neural_texture_helper as utils_nt\nimport noise_cuda\nimport torch\nimport numpy as np\nfrom torch.autograd import gradcheck\n\n\nclass NoiseFunction(Function):\n @staticmethod\n def forward(ctx, position, seed):\n ctx.save_for_backward(position, seed)\n noise = noise_cuda.forward(position, seed)\n return noise\n\n @staticmethod\n def backward(ctx, grad_noise):\n position, seed = ctx.saved_tensors\n d_position_bilinear = noise_cuda.backward(position, seed)\n\n d_position = torch.stack([torch.zeros_like(d_position_bilinear), d_position_bilinear], dim=0)\n\n return grad_noise.unsqueeze(2) * d_position, None\n\n\nclass Noise(nn.Module):\n def __init__(self):\n super(Noise, self).__init__()\n\n def forward(self, position, seed):\n noise = NoiseFunction.apply(position.contiguous(), seed.contiguous())\n return noise\n"
] | [
[
"torch.zeros_like"
]
] |
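
NoiseFunction wraps a CUDA kernel, so it cannot run without noise_cuda, but the autograd pattern it follows is generic: stash what backward needs with save_for_backward and return one gradient per forward input. Below is a CPU-only toy Function using a simple square in place of the noise kernel; Square is an illustrative name, not part of plan2scene.

```python
import torch
from torch.autograd import Function

class Square(Function):
    """Toy Function following the same structure as NoiseFunction, without the CUDA kernel."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)          # keep the input for the backward pass
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return grad_output * 2 * x        # chain rule: d(x^2)/dx = 2x

x = torch.randn(3, requires_grad=True)
Square.apply(x).sum().backward()
print(torch.allclose(x.grad, 2 * x.detach()))
```
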
liloganle/Reinforcement-Learning | [
"29ffb74a1c8e506c544245c9aff37e958e503f26"
] | [
"Chapter9/Figure9-1.py"
] | [
"# -*- coding:utf-8 -*-\n\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\nclass RandomWalk(object):\n def __init__(self, num_states=1000, groups=10, alpha=2e-5):\n self.num_states = num_states # the number of states\n self.groups = groups # the number of groups\n self.alpha = alpha # the step size\n\n self.group_value = np.zeros(groups) # the value of each group\n self.group_size = int(num_states / groups) # the size of each group\n\n self.states = np.arange(1, num_states+1) # all states except terminal state\n self.start_state = int(num_states / 2) # the start state\n self.end_state = [0, num_states + 1] # the terminal states\n self.action = [-1, 1] # right:1, left:-1\n self.neighbors = 100 # the neighboring states\n\n def select_action(self):\n \"\"\"to select randomly an action\"\"\"\n if np.random.binomial(1, 0.5):\n return self.action[1] # select right action\n else:\n return self.action[0] # select left action\n\n def find_next_state(self, state, action):\n \"\"\"to get the next state and reward\"\"\"\n move_step = np.random.randint(1, self.neighbors+1) # the step size of moving\n move_step *= action\n next_state = state + move_step # the next state\n next_state = max(min(next_state, self.end_state[1]), 0)\n\n if next_state == self.end_state[0]: # terminating on the left\n reward = -1\n elif next_state == self.end_state[1]: # terminating on the right\n reward = 1\n else:\n reward = 0\n return next_state, reward\n\n def get_state_value(self, state):\n \"\"\"to get the state value except for terminal states\"\"\"\n group_idx = (state - 1) // self.group_size\n return self.group_value[group_idx]\n \n def update_group_value(self, state, delta):\n \"\"\"to update the group_value\"\"\"\n group_idx = (state - 1) // self.group_size\n self.group_value[group_idx] += delta\n\n def gradient_monte_carlo(self, state_distribution):\n \"\"\" the gradient-descent version of Monte Carlo state-value prediction\"\"\"\n state = self.start_state # initialize the state\n trajectory = [state] # track the transition state\n\n while state not in self.end_state:\n action = self.select_action() # select an action\n next_state, reward = self.find_next_state(state, action) # get the next state and reward\n trajectory.append(next_state) # record the transition state\n state = next_state\n\n for stat in trajectory[:-1]:\n delta = self.alpha * (reward - self.get_state_value(stat))\n self.update_group_value(stat, delta)\n state_distribution[stat] += 1\n\n\ndef dp_compute_value(test_class):\n \"\"\"using Dynamic programming to find the true state values\"\"\"\n value = np.arange(-test_class.end_state[1], test_class.end_state[1] + 1, 2) / test_class.end_state[1]\n print(\"Starting computing......\")\n while True:\n value_temp = value.copy()\n for state in test_class.states:\n value[state] = 0\n for act in test_class.action:\n for step in range(1, test_class.neighbors + 1):\n step *= act\n next_state = state + step\n next_state = max(min(next_state, test_class.end_state[1]), 0)\n # update the value\n value[state] += 1/(2*test_class.neighbors)*value[next_state]\n if np.linalg.norm(value - value_temp) < 0.001:\n break\n print(\"Completed!!!\")\n return value\n\n\nif __name__ == \"__main__\":\n episodes = 100000\n test_exam = RandomWalk()\n\n true_value = dp_compute_value(test_class=test_exam)\n distribution = np.zeros(test_exam.num_states + len(test_exam.end_state))\n for itr in tqdm(range(episodes)):\n test_exam.gradient_monte_carlo(distribution)\n\n distribution /= np.sum(distribution)\n 
state_value = [test_exam.get_state_value(stat) for stat in test_exam.states]\n\n plt.figure(1)\n plt.plot(test_exam.states, true_value[1:-1], label=\"True value\")\n plt.plot(test_exam.states, state_value, label=\"Approximate MC value\")\n plt.xlabel(\"State\")\n plt.ylabel(\"Value\")\n plt.legend()\n plt.savefig(\"./images/Figure9-1-1.png\")\n plt.show()\n\n plt.figure(2)\n plt.plot(test_exam.states, distribution[1:-1], label=\"State Distribution\")\n plt.xlabel(\"State\")\n plt.ylabel(\"Distribution\")\n plt.legend()\n plt.savefig(\"./images/Figure9-1-2.png\")\n plt.show()\n\n plt.close()\n print(\"Completed!!!You can check it in 'images' directory\")\n\n\n"
] | [
[
"numpy.random.binomial",
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.plot",
"numpy.random.randint",
"numpy.linalg.norm",
"matplotlib.pyplot.xlabel"
]
] |
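
The RandomWalk class approximates the 1000-state value function by state aggregation: ten group weights, a one-hot gradient, and gradient Monte Carlo updates of alpha * (G - v_hat(S)). Stripped to that core (gradient_mc_update and the toy trajectory are illustrative, not from the script):

```python
import numpy as np

num_states, groups = 1000, 10
group_size = num_states // groups
w = np.zeros(groups)                         # one weight per group of 100 states

def value(state):
    return w[(state - 1) // group_size]      # v_hat(S) = weight of S's group

def gradient_mc_update(trajectory, ret, alpha=2e-5):
    # w <- w + alpha * (G - v_hat(S)) * gradient; the gradient is 1 for S's group, 0 elsewhere
    for state in trajectory:
        w[(state - 1) // group_size] += alpha * (ret - value(state))

gradient_mc_update(trajectory=[500, 620, 710], ret=1.0)   # a made-up episode ending on the right
print(value(500), value(999))
```
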
timelyportfolio/bokeh | [
"6cecb7211277b9d838039d0eb15e50a10f9ac3d1",
"6cecb7211277b9d838039d0eb15e50a10f9ac3d1"
] | [
"sphinx/source/tutorial/solutions/les_mis.py",
"examples/glyphs/prim_server.py"
] | [
"import numpy as np\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models import HoverTool, ColumnDataSource\nfrom bokeh.sampledata.les_mis import data\n\n# EXERCISE: try out different sort orders for the names\nnodes = data['nodes']\nnames = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]\n\n# store the links information in numpy\nN = len(nodes)\ncounts = np.empty((N, N))\nfor link in data['links']:\n counts[link['source'], link['target']] = link['value']\n counts[link['target'], link['source']] = link['value']\n\n# We will use these colors to color each group by a different color\ncolormap = [\n \"#444444\", \"#a6cee3\", \"#1f78b4\", \"#b2df8a\", \"#33a02c\", \"#fb9a99\",\n \"#e31a1c\", \"#fdbf6f\", \"#ff7f00\", \"#cab2d6\", \"#6a3d9a\"\n]\n\n# set up some data to plot! We will need to have values for every pair of names. The\n# co-occurrence count for a given pair of names is in `count[i,j]`. The strategy is\n# to color each rect by the group, and set its alpha based on the count.\nxname = []\nyname = []\ncolor = []\nalpha = []\nfor i, n1 in enumerate(nodes):\n for j, n2 in enumerate(nodes):\n xname.append(n1['name'])\n yname.append(n2['name'])\n\n a = min(counts[i,j]/4.0, 0.9) + 0.1\n alpha.append(a)\n\n if n1['group'] == n2['group']:\n color.append(colormap[n1['group']])\n else:\n color.append('lightgrey')\n\n# EXERCISE: output static HTML file\noutput_file(\"les_mis.html\")\n\n# EXERCISE: create a ColumnDataSource to hold the xnames, ynames, colors, alphas,\n# and counts. NOTE: the counts array is 2D and will need to be flattened\nsource = ColumnDataSource(\n data=dict(\n xname=xname,\n yname=yname,\n colors=color,\n alphas=alpha,\n count=counts.flatten(),\n )\n)\n\n# create a new figure\np = figure(title=\"Les Mis Occurrences (one at a time)\",\n x_axis_location=\"above\", tools=\"resize,hover\",\n x_range=list(reversed(names)), y_range=names,\n plot_width=800, plot_height=800)\n\n# EXERCISE: use the `p.rect` renderer to render a categorical heatmap of all the\n# data. Experiment with the widths and heights (use categorical percentage\n# unite) as well as colors and alphas.\np.rect('xname', 'yname', 0.9, 0.9, source=source,\n color='colors', alpha='alphas', line_color=None)\n\n# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:\n# - remove the axis and grid lines\n# - remove the major ticks\n# - make the tick labels smaller\n# - set the x-axis orientation to vertical, or angled\np.grid.grid_line_color = None\np.axis.axis_line_color = None\np.axis.major_tick_line_color = None\np.axis.major_label_text_font_size = \"5pt\"\np.axis.major_label_standoff = 0\np.xaxis.major_label_orientation = np.pi/3\n\n# EXERCISE: configure the hover tool to display both names as well as\n# the count value as tooltips\nhover = p.select(dict(type=HoverTool))\nhover.tooltips = [\n ('names', '@yname, @xname'),\n ('count', '@count'),\n]\n\n# EXERCISE: show the plot\nshow(p)\n",
"from __future__ import print_function\n\nimport numpy as np\n\nfrom bokeh.browserlib import view\nfrom bokeh.document import Document\nfrom bokeh.models.glyphs import *\nfrom bokeh.models import (\n Plot, Range1d, LinearAxis, Grid, ColumnDataSource, PanTool, WheelZoomTool\n)\nfrom bokeh.session import Session\n\ndocument = Document()\nsession = Session()\nsession.use_doc('prim_server')\nsession.load_document(document)\n\nx = np.arange(1,6)\ny = np.arange(5, 0, -1)\n\nsource = ColumnDataSource(data=dict(x=x,y=y))\n\nxdr = Range1d(start=0, end=10)\nydr = Range1d(start=0, end=10)\n\ndef make_plot(name, glyph):\n plot = Plot(x_range=xdr, y_range=ydr, min_border=80)\n\n plot.add_glyph(source, glyph)\n\n xaxis = LinearAxis()\n plot.add_layout(xaxis, 'below')\n\n yaxis = LinearAxis()\n plot.add_layout(yaxis, 'left')\n\n plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\n plot.add_tools(PanTool(), WheelZoomTool())\n\n document.add(plot)\n session.store_document(document)\n\nmake_plot('annular_wedge', AnnularWedge(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5, start_angle=0.8, end_angle=3.8))\nmake_plot('annulus', Annulus(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5))\nmake_plot('arc', Arc(x=\"x\", y=\"y\", radius=0.4, start_angle=0.8, end_angle=3.8))\nmake_plot('circle', Circle(x=\"x\", y=\"y\", radius=1))\nmake_plot('oval', Oval(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('ray', Ray(x=\"x\", y=\"y\", length=25, angle=0.6))\nmake_plot('rect', Rect(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('text', Text(x=\"x\", y=\"y\", text={\"value\":\"foo\"}, angle=0.6))\nmake_plot('wedge', Wedge(x=\"x\", y=\"y\", radius=0.5, start_angle=0.9, end_angle=3.2))\n\nlink = session.object_link(document.context)\nprint(\"please visit %s to see plots\" % link)\nview(link)\n"
] | [
[
"numpy.empty"
],
[
"numpy.arange"
]
] |
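
les_mis.py builds a symmetric co-occurrence matrix from data['links'] and derives a per-cell alpha of min(count/4, 0.9) + 0.1 for the heatmap. Those two steps are sketched below on hypothetical toy links rather than the Les Misérables data; np.zeros is used where the script uses np.empty, so unseen pairs default to 0.

```python
import numpy as np

# Hypothetical toy links in the same shape as data['links']: (source, target, value).
links = [(0, 1, 3), (1, 2, 5), (0, 2, 1)]
n = 3

counts = np.zeros((n, n))
for src, tgt, value in links:
    counts[src, tgt] = value
    counts[tgt, src] = value                 # symmetric co-occurrence matrix

alpha = np.minimum(counts / 4.0, 0.9) + 0.1  # per-cell alpha, same capping rule as the loop above
print(counts)
print(alpha)
```
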
aristoteleo/scribe-py | [
"ea28d2b588f8648b9ce1679fe18c3142aee2aa58"
] | [
"Scribe/other_estimators.py"
] | [
"import pandas\nimport numpy as np\nfrom multiprocessing import Pool\n\n\ndef __individual_corr(id1, id2, x, y):\n return (id1, id2, corr(x, y)[0])\n\n\ndef __individual_mi(id1, id2, x, y):\n return (id1, id2, mi(x, y))\n\n\ndef corr(self, number_of_processes=1):\n \"\"\"Calculate pairwise correlation over the data\n\n Arguments\n ---------\n self: 'class causal_model object'\n An instance of a causal_model class object. This object can be converted from an AnnData object through\n load_anndata function.\n number_of_processes: `int` (Default: 1)\n Number of processes to use.\n\n Returns\n ---------\n corr_results: 'pd.core.frame.DataFrame'\n The correlation network inferred.\n \"\"\"\n self.corr_results = pandas.DataFrame({node_id: [np.nan for i in self.node_ids] for node_id in self.node_ids}, index=self.node_ids)\n if number_of_processes > 1: temp_input = []\n\n for id1 in self.node_ids:\n for id2 in self.node_ids:\n\n if id1 == id2: continue\n\n if number_of_processes == 1:\n self.corr_results.loc[id1, id2] = __individual_corr((id1, id2, self.expression_concatenated.loc[id1], self.expression_concatenated.loc[id2]))[2]\n else:\n temp_input.append((id1, id2, self.expression_concatenated.loc[id1], self.expression_concatenated.loc[id2]))\n\n if number_of_processes > 1:\n tmp_results = Pool(number_of_processes).map(__individual_corr, temp_input)\n for t in tmp_results: self.corr_results.loc[t[0], t[1]] = t[2]\n\n return self.corr_results\n\n\ndef mi(self, number_of_processes=1):\n \"\"\"Calculate pairwise mutual information over the data\n\n Arguments\n ---------\n self: 'class causal_model object'\n An instance of a causal_model class object. This object can be converted from an AnnData object through\n load_anndata function.\n number_of_processes: `int` (Default: 1)\n Number of processes to use.\n\n Returns\n ---------\n mi_results: 'pd.core.frame.DataFrame'\n The mutual information network inferred.\n \"\"\"\n self.mi_results = pandas.DataFrame({node_id: [np.nan for i in self.node_ids] for node_id in self.node_ids}, index=self.node_ids)\n if number_of_processes > 1: temp_input = []\n\n for id1 in self.node_ids:\n for id2 in self.node_ids:\n\n if id1 == id2: continue\n\n if number_of_processes == 1:\n self.mi_results.loc[id1, id2] = __individual_mi((id1, id2,[[i] for i in self.expression_concatenated.loc[id1]],[[j] for j in self.expression_concatenated.loc[id2]] ))[2]\n else:\n temp_input.append((id1, id2,[[i] for i in self.expression_concatenated.loc[id1]],[[j] for j in self.expression_concatenated.loc[id2]] ))\n\n if number_of_processes > 1:\n tmp_results = Pool(number_of_processes).map(__individual_mi, temp_input)\n for t in tmp_results: self.mi_results.loc[t[0], t[1]] = t[2]\n\n return self.mi_results\n"
] | [
[
"pandas.DataFrame"
]
] |
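
corr() above fills a genes-by-genes DataFrame by looping over all ordered pairs and calling scipy's correlation, optionally through a multiprocessing Pool. For the Pearson case the same table can be sketched directly with pandas, as below; the toy expression matrix is hypothetical, and the diagonal is blanked to mirror the `if id1 == id2: continue` skip.

```python
import numpy as np
import pandas as pd

# Hypothetical expression matrix: rows are node_ids (genes), columns are cells.
rng = np.random.default_rng(0)
expr = pd.DataFrame(rng.normal(size=(4, 50)), index=["g1", "g2", "g3", "g4"])

# Pearson correlation between every pair of rows; transpose because DataFrame.corr works column-wise.
corr_table = expr.T.corr(method="pearson")
corr_table = corr_table.mask(np.eye(len(corr_table), dtype=bool))  # blank the id1 == id2 diagonal
print(corr_table)
```
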
frankilepro/LiTeFlow | [
"d07105ea00ad29b701e1b100d9cda2297eef19de"
] | [
"liteflow/input.py"
] | [
"\"\"\"Utilities for input pipelines.\"\"\"\n\nimport tensorflow as tf\n\n\ndef shuffle(tensors,\n capacity=32,\n min_after_dequeue=16,\n num_threads=1,\n dtypes=None,\n shapes=None,\n seed=None,\n shared_name=None,\n name='shuffle'):\n \"\"\"Wrapper around a `tf.RandomShuffleQueue` creation.\n\n Return a dequeue op that dequeues elements from `tensors` in a\n random order, through a `tf.RandomShuffleQueue` -- see for further\n documentation.\n\n Arguments:\n tensors: an iterable of tensors.\n capacity: (Optional) the capacity of the queue; default value set to 32.\n num_threads: (Optional) the number of threads to be used fo the queue runner;\n default value set to 1.\n min_after_dequeue: (Optional) minimum number of elements to remain in the\n queue after a `dequeue` or `dequeu_many` has been performend,\n in order to ensure better mixing of elements; default value set to 16.\n dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;\n if not provided, will be inferred from `tensors`.\n shapes: (Optional) list of shapes, one for each tensor in `tensors`.\n seed: (Optional) seed for random shuffling.\n shared_name: (Optional) If non-empty, this queue will be shared under\n the given name across multiple sessions.\n name: Optional name scope for the ops.\n\n Returns:\n The tuple of tensors that was randomly dequeued from `tensors`.\n \"\"\"\n\n tensors = list(tensors)\n with tf.name_scope(name, values=tensors):\n dtypes = dtypes or list([t.dtype for t in tensors])\n queue = tf.RandomShuffleQueue(\n seed=seed,\n shared_name=shared_name,\n name='random_shuffle_queue',\n dtypes=dtypes,\n shapes=shapes,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue)\n enqueue = queue.enqueue(tensors)\n runner = tf.train.QueueRunner(queue, [enqueue] * num_threads)\n tf.train.add_queue_runner(runner)\n dequeue = queue.dequeue()\n return dequeue\n\n\ndef shuffle_batch(tensors,\n batch_size,\n capacity=32,\n num_threads=1,\n min_after_dequeue=16,\n dtypes=None,\n shapes=None,\n seed=None,\n enqueue_many=False,\n dynamic_pad=True,\n allow_smaller_final_batch=False,\n shared_name=None,\n name='shuffle_batch'):\n \"\"\"Create shuffled and padded batches of tensors in `tensors`.\n\n Dequeue elements from `tensors` shuffling, batching and dynamically\n padding them. First a `tf.RandomShuffleQueue` is created and fed with\n `tensors` (using the `dket.input.shuffle` function); the dequeued tensors\n shapes are then set and fed into a `tf.train.batch` function that provides\n batching and dynamic padding.\n\n\n Arguments:\n tensors: an iterable of tensors.\n batch_size: an `int` representing th batch size.\n capacity: (Optional) the capacity of the queues; default value set to 32.\n num_threads: (Optional) the number of threads to be used fo the queue runner;\n default value set to 1.\n min_after_dequeue: (Optional) minimum number of elements to remain in the\n shuffling queue after a `dequeue` or `dequeu_many` has been performend,\n in order to ensure better mixing of elements; default value set to 16.\n dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;\n if not provided, will be inferred from `tensors`.\n shapes: (Optional) list of shapes, one for each tensor in `tensors`.\n seed: (Optional) seed for random shuffling.\n enqueue_many: Whether each tensor in tensors is a single example.\n dynamic_pad: Boolean. 
Allow variable dimensions in input shapes.\n The given dimensions are padded upon dequeue so that tensors within\n a batch have the same shapes.\n allow_smaller_final_batch: (Optional) Boolean. If True, allow the final\n batch to be smaller if there are insufficient items left in the queue.\n shared_name: if set, the queues will be shared under the given name\n across different sessions.\n name: scope name for the given ops.\n\n Returns:\n A batch of tensors from `tensors`, shuffled and padded.\n \"\"\"\n\n tensors = list(tensors)\n with tf.name_scope(name, values=tensors):\n dtypes = dtypes or list([t.dtype for t in tensors])\n shapes = shapes or list([t.get_shape() for t in tensors])\n inputs = shuffle(tensors,\n seed=seed,\n dtypes=dtypes,\n capacity=capacity,\n num_threads=num_threads,\n min_after_dequeue=min_after_dequeue,\n shared_name=shared_name,\n name='shuffle')\n\n # fix the shapes\n for tensor, shape in zip(inputs, shapes):\n tensor.set_shape(shape)\n\n minibatch = tf.train.batch(\n tensors=inputs,\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=capacity,\n dynamic_pad=dynamic_pad,\n allow_smaller_final_batch=allow_smaller_final_batch,\n shared_name=shared_name,\n enqueue_many=enqueue_many,\n name='batch')\n return minibatch\n"
] | [
[
"tensorflow.train.add_queue_runner",
"tensorflow.train.QueueRunner",
"tensorflow.RandomShuffleQueue",
"tensorflow.name_scope",
"tensorflow.train.batch"
]
] |
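
shuffle and shuffle_batch above are written against the TF1 queue runners (tf.RandomShuffleQueue, tf.train.batch with dynamic_pad), which no longer exist in TF2. As a hedged sketch only, not the LiTeFlow API, similar shuffle-then-pad behaviour can be approximated with tf.data; the toy sequences and buffer size are illustrative.

```python
import tensorflow as tf

# Hypothetical variable-length sequences to shuffle and pad into batches.
sequences = [[1, 2], [3, 4, 5], [6], [7, 8, 9, 10]]

dataset = (
    tf.data.Dataset.from_generator(
        lambda: iter(sequences),
        output_signature=tf.TensorSpec(shape=(None,), dtype=tf.int32))
    .shuffle(buffer_size=16, seed=0)            # plays the role of the RandomShuffleQueue
    .padded_batch(2, padded_shapes=(None,))     # dynamic padding, like dynamic_pad=True above
)

for batch in dataset:
    print(batch.numpy())
```
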
Nickwangpeng/tsfresh | [
"48118627d9d4644906613e25b077ce2ec82ca2f9"
] | [
"tsfresh/feature_selection/relevance.py"
] | [
"# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\"\"\"\nContains a feature selection method that evaluates the importance of the different extracted features. To do so,\nfor every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.\nThe methods that calculate the p-values are called feature selectors.\n\nAfterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and\nwhich to cut off (solely based on the p-values).\n\"\"\"\n\nfrom multiprocessing import Pool\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom functools import partial, reduce\n\nfrom tsfresh import defaults\nfrom tsfresh.feature_selection.benjamini_hochberg_test import benjamini_hochberg_test\nfrom tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \\\n target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test\nfrom tsfresh.utilities.distribution import initialize_warnings_in_workers\n\n\ndef calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,\n show_warnings=defaults.SHOW_WARNINGS, chunksize=defaults.CHUNKSIZE,\n test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,\n test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,\n fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):\n \"\"\"\n Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.\n The relevance table is calculated for the intended machine learning task `ml_task`.\n\n To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test\n is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to\n decide which features to keep and which to delete.\n\n We are testing\n\n :math:`H_0` = the Feature is not relevant and should not be added\n\n against\n\n :math:`H_1` = the Feature is relevant and should be kept\n\n or in other words\n\n :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target\n\n :math:`H_1` = Target and Feature are associated / dependent\n\n When the target is binary this becomes\n\n :math:`H_0 = \\\\left( F_{\\\\text{target}=1} = F_{\\\\text{target}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( F_{\\\\text{target}=1} \\\\neq F_{\\\\text{target}=0} \\\\right)`\n\n Where :math:`F` is the distribution of the target.\n\n In the same way we can state the hypothesis when the feature is binary\n\n :math:`H_0 = \\\\left( T_{\\\\text{feature}=1} = T_{\\\\text{feature}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( T_{\\\\text{feature}=1} \\\\neq T_{\\\\text{feature}=0} \\\\right)`\n\n Here :math:`T` is the distribution of the target.\n\n TODO: And for real valued?\n\n :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.\n It can contain both binary or real-valued features at the same time.\n :type X: pandas.DataFrame\n\n :param y: Target vector which is needed to test which features are relevant. 
Can be binary or real-valued.\n :type y: pandas.Series or numpy.ndarray\n\n :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.\n Defaults to `'auto'`, meaning the intended task is inferred from `y`.\n If `y` has a boolean, integer or object dtype, the task is assumend to be classification,\n else regression.\n :type ml_task: str\n\n :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature\n (currently unused)\n :type test_for_binary_target_binary_feature: str\n\n :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature\n :type test_for_binary_target_real_feature: str\n\n :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)\n :type test_for_real_target_binary_feature: str\n\n :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)\n :type test_for_real_target_real_feature: str\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. mean and median)\n :type hypotheses_independent: bool\n\n :param n_jobs: Number of processes to use during the p-value calculation\n :type n_jobs: int\n\n :param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).\n :type show_warnings: bool\n\n :param chunksize: The size of one chunk that is submitted to the worker\n process for the parallelisation. Where one chunk is defined as a\n singular time series for one id and one kind. If you set the chunksize\n to 10, then it means that one task is to calculate all features for 10\n time series. If it is set it to None, depending on distributor,\n heuristics are used to find the optimal chunksize. If you get out of\n memory exceptions, you can try it with the dask distributor and a\n smaller chunksize.\n :type chunksize: None or int\n\n :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance\n of this particular feature. 
The DataFrame has the columns\n \"Feature\",\n \"type\" (binary, real or const),\n \"p_value\" (the significance of this feature as a p-value, lower means more significant)\n \"relevant\" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is\n not relevant] for this feature)\n :rtype: pandas.DataFrame\n \"\"\"\n if ml_task not in ['auto', 'classification', 'regression']:\n raise ValueError('ml_task must be one of: \\'auto\\', \\'classification\\', \\'regression\\'')\n elif ml_task == 'auto':\n ml_task = infer_ml_task(y)\n\n with warnings.catch_warnings():\n if not show_warnings:\n warnings.simplefilter(\"ignore\")\n else:\n warnings.simplefilter(\"default\")\n\n if n_jobs == 0:\n map_function = map\n else:\n pool = Pool(processes=n_jobs, initializer=initialize_warnings_in_workers, initargs=(show_warnings,))\n map_function = partial(pool.map, chunksize=chunksize)\n\n relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))\n relevance_table['feature'] = relevance_table.index\n relevance_table['type'] = pd.Series(\n map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),\n index=relevance_table.index\n )\n table_real = relevance_table[relevance_table.type == 'real'].copy()\n table_binary = relevance_table[relevance_table.type == 'binary'].copy()\n\n table_const = relevance_table[relevance_table.type == 'constant'].copy()\n table_const['p_value'] = np.NaN\n table_const['relevant'] = False\n\n if not table_const.empty:\n warnings.warn(\"[test_feature_significance] Constant features: {}\"\n .format(\", \".join(table_const.feature)), RuntimeWarning)\n\n if len(table_const) == len(relevance_table):\n if n_jobs != 0:\n pool.close()\n pool.terminate()\n pool.join()\n return table_const\n\n if ml_task == 'classification':\n tables = []\n for label in y.unique():\n _test_real_feature = partial(target_binary_feature_real_test, y=(y == label),\n test=test_for_binary_target_real_feature)\n _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))\n tmp = _calculate_relevance_table_for_implicit_target(\n table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,\n fdr_level, map_function\n )\n tables.append(tmp)\n relevance_table = combine_relevance_tables(tables)\n elif ml_task == 'regression':\n _test_real_feature = partial(target_real_feature_real_test, y=y)\n _test_binary_feature = partial(target_real_feature_binary_test, y=y)\n relevance_table = _calculate_relevance_table_for_implicit_target(\n table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,\n fdr_level, map_function\n )\n\n if n_jobs != 0:\n pool.close()\n pool.terminate()\n pool.join()\n\n relevance_table = pd.concat([relevance_table, table_const], axis=0)\n\n if sum(relevance_table['relevant']) == 0:\n warnings.warn(\n \"No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal percentage \"\n \"of irrelevant features, consider using an higher fdr level or add other features.\"\n .format(ml_task, fdr_level), RuntimeWarning)\n\n return relevance_table\n\n\ndef _calculate_relevance_table_for_implicit_target(table_real, table_binary, X, test_real_feature, test_binary_feature,\n hypotheses_independent, fdr_level, map_function):\n table_real['p_value'] = pd.Series(\n map_function(test_real_feature, [X[feature] for feature in table_real.index]),\n index=table_real.index\n )\n table_binary['p_value'] = pd.Series(\n 
map_function(test_binary_feature, [X[feature] for feature in table_binary.index]),\n index=table_binary.index\n )\n relevance_table = pd.concat([table_real, table_binary])\n return benjamini_hochberg_test(relevance_table, hypotheses_independent, fdr_level)\n\n\ndef infer_ml_task(y):\n \"\"\"\n Infer the machine learning task to select for.\n The result will be either `'regression'` or `'classification'`.\n If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`.\n Else `'regression'`.\n\n :param y: The target vector y.\n :type y: pandas.Series\n :return: 'classification' or 'regression'\n :rtype: str\n \"\"\"\n if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == np.object:\n ml_task = 'classification'\n else:\n ml_task = 'regression'\n\n return ml_task\n\n\ndef combine_relevance_tables(relevance_tables):\n \"\"\"\n Create a combined relevance table out of a list of relevance tables,\n aggregating the p-values and the relevances.\n\n :param relevance_tables: A list of relevance tables\n :type relevance_tables: List[pd.DataFrame]\n :return: The combined relevance table\n :rtype: pandas.DataFrame\n \"\"\"\n def _combine(a, b):\n a.relevant |= b.relevant\n a.p_value = a.p_value.combine(b.p_value, min, 1)\n return a\n\n return reduce(_combine, relevance_tables)\n\n\ndef get_feature_type(feature_column):\n \"\"\"\n For a given feature, determine if it is real, binary or constant.\n Here binary means that only two unique values occur in the feature.\n\n :param feature_column: The feature column\n :type feature_column: pandas.Series\n :return: 'constant', 'binary' or 'real'\n \"\"\"\n n_unique_values = len(set(feature_column.values))\n if n_unique_values == 1:\n return 'constant'\n elif n_unique_values == 2:\n return 'binary'\n else:\n return 'real'\n"
] | [
[
"pandas.Series",
"pandas.concat"
]
] |
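The entry above carries tsfresh's feature_selection/relevance.py, whose main entry point is calculate_relevance_table(X, y). As a hedged illustration only: the sketch below drives it on synthetic data (the feature names, the random values, and the binary target are all invented for the example), with n_jobs=0 so the call stays in-process without a worker pool.

```python
# Illustrative only: synthetic features and a synthetic binary target.
import numpy as np
import pandas as pd
from tsfresh.feature_selection.relevance import calculate_relevance_table

rng = np.random.RandomState(0)
y = pd.Series(rng.randint(0, 2, size=200))  # integer dtype -> ml_task inferred as 'classification'
X = pd.DataFrame({
    "informative": y * 2.0 + rng.normal(scale=0.1, size=200),  # depends on the target
    "noise": rng.normal(size=200),                             # independent of the target
    "constant": np.ones(200),                                  # reported with type 'constant'
})

# n_jobs=0 keeps the p-value computation in the current process (no multiprocessing.Pool)
table = calculate_relevance_table(X, y, n_jobs=0)
print(table[["feature", "type", "p_value", "relevant"]])
```

The returned DataFrame has one row per input column with its inferred type, p-value, and the Benjamini-Hochberg relevance decision, matching the docstring in the entry above.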
moonieann/welib | [
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52"
] | [
"welib/FEM/reduction.py"
] | [
"import numpy as np\n\nfrom welib.system.eva import eig\n\n\ndef CraigBampton(MM, KK, Ileader, nModesCB=None, Ifollow=None, F=None, DD=None, fullModesOut=False): \n \"\"\"\n Performs the CraigBampton (CB) reduction of a system given some input master dofs index\n and a number of modes. Reduced matrices, and Guyan and Craig-Bampton modes are returned.\n \n INPUTS\n Ileader : index of leader DOFs\n nModesCB: number of CB modes to keep\n MM, KK : Maff and stiffness matrix\n \n INPUTS (Optional)\n nModesCB: number of CB modes to keep. Default: all\n Ifollow: indices of follower DOFs. Default: complementary set to Ileader\n fullModesOut: if true, the Guyan and CB modes\n \n OUTPUTS\n fc: critical frequency\n Mr,Kr,Fr,Dr: reduced mass, stiffness, force and damping matrices\n \n AUTHOR: E. Branlard\n \"\"\"\n \n # --- Input cleanup\n Ileader = np.asarray(Ileader).ravel()\n # --- Optional arguments\n if Ifollow is None:\n # Then we take the complementary to Ileader\n Iall = np.arange(len(MM))\n Ifollow = [i for i in Iall if i not in Ileader]\n else:\n Ifollow = np.asarray(Ifollow).ravel()\n if nModesCB is None:\n nModesCB=len(Ifollow)\n\n # Partitioning - NOTE: leaders will be first in reduced matrix Mr and Kr\n Mll= MM[np.ix_(Ileader, Ileader)]\n Kll= KK[np.ix_(Ileader, Ileader)]\n Mff= MM[np.ix_(Ifollow, Ifollow)]\n Kff= KK[np.ix_(Ifollow, Ifollow)]\n Mlf= MM[np.ix_(Ileader, Ifollow)]\n Klf= KK[np.ix_(Ileader, Ifollow)]\n \n # --- Solve for Guyan modes\n Kff1Kfl = np.linalg.solve(Kff,(np.transpose(Klf))) # Kss1Ksm=Kss\\(Kms');\n #Kff1Kfl = np.linalg.inv(Kff).dot(Klf.T)\n Kff1Kfl = np.linalg.lstsq(Kff,Klf.T, rcond=None)[0]\n Phi_G = - Kff1Kfl;\n\n # --- Solve EVP for constrained system\n Phi_CB, Lambda_CB = eig(Kff,Mff)\n Omega2 = np.diag(Lambda_CB).copy()\n Omega2[Omega2<0]=0.0\n f_CB = np.sqrt(Omega2)/(2*np.pi)\n # --- Taking only thefirst few modes\n Phi_CB = Phi_CB[:,:nModesCB]\n Lambda_CB = Lambda_CB[:,:nModesCB]\n f_CB = f_CB[:nModesCB]\n # --- Using the T matrix:\n # # T=[eye(nm) zeros(nm,nModesCB); -Kff1Kfl Phi_CB];\n # # MM=[Mll Mlf; Mlf' Mff];\n # # KK=[Kll Klf; Klf' Kff];\n # # Mr=T' * MM * T;\n # # Kr=T' * KK * T;\n\n # --- Building reduced matrices\n #Mr11=Mmm-(Kss1Ksm')*Mms' - Mms*Kss1Ksm + (Kss1Ksm')*Mss*Kss1Ksm;\n #Kr11=Kmm-Kms*Kss1Ksm;\n #Mr12=(Mms-(Kss1Ksm')*Mss)*Psic;\n Mr11 = Mll - (np.transpose(Kff1Kfl)).dot(np.transpose(Mlf)) - Mlf.dot(Kff1Kfl) + (np.transpose(Kff1Kfl)).dot(Mff).dot(Kff1Kfl)\n Kr11 = Kll - Klf.dot(Kff1Kfl)\n Mr12 = (Mlf - (np.transpose(Kff1Kfl)).dot(Mff)).dot(Phi_CB)\n ZZ = np.zeros((len(Ileader),nModesCB))\n\n # --- Guyan frequencies\n Phi_G2, Lambda_G = eig(Kr11,Mr11)\n Omega2 = np.diag(Lambda_G).copy()\n Omega2[Omega2<0]=0.0\n f_G = np.sqrt(Omega2)/(2*np.pi)\n\n # Building reduced matrix \n Mr = np.block( [ [Mr11 , Mr12 ], [ Mr12.T, np.eye(nModesCB) ] ])\n Kr = np.block( [ [Kr11 , ZZ ], [ ZZ.T , Lambda_CB[:nModesCB,:]] ])\n\n # --- Augmenting modes so that they have the same dimension as MM\n # Add \"1\" for Guyan modes, and \"0\" for CB modes\n if fullModesOut:\n Phi_G, Phi_CB = augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=Ifollow)\n\n if DD is not None:\n raise NotImplementedError('Not done')\n if F is not None:\n raise NotImplementedError('Not done')\n\n return Mr, Kr, Phi_G, Phi_CB, f_G, f_CB\n\n\ndef augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=None):\n \"\"\" \n Augment Guyan and Craig Bampton modes, so as to return full DOF vectors\n going back to the original size\n \"\"\"\n # --- Augment modes so that they go back to same size after BC\n nl = 
len(Ileader)\n nall = nl+Phi_G.shape[0]\n nf = nall-nl\n if Ifollow is None:\n Iall = np.arange(nall)\n Ifollow = list(np.setdiff1d(Iall, Ileader))\n # Guyan\n Phi_G_aug = np.zeros((nall, nl))\n Phi_G_aug[Ileader,:] = np.eye(nl)\n Phi_G_aug[Ifollow,:] = Phi_G\n # \n Phi_CB_aug = np.zeros((nall, Phi_CB.shape[1]))\n Phi_CB_aug[Ileader,:] = 0\n Phi_CB_aug[Ifollow,:] = Phi_CB\n\n return Phi_G_aug, Phi_CB_aug\n\n\n\nif __name__=='__main__':\n np.set_printoptions(linewidth=500)\n L = 100\n EI = 1868211939147.334\n Maff = L * 8828.201296825122\n KK = EI / (L ** 3) * np.array([[12,6 * L,- 12,6 * L],[6 * L,4 * L ** 2,- 6 * L,2 * L ** 2],[- 12,- 6 * L,12,- 6 * L],[6 * L,2 * L ** 2,- 6 * L,4 * L ** 2]])\n MM = Maff / 420 * np.array([[156,22 * L,54,- 13 * L],[22 * L,4 * L ** 2,13 * L,- 3 * L ** 2],[54,13 * L,156,- 22 * L],[- 13 * L,- 3 * L ** 2,- 22 * L,4 * L ** 2]])\n print(MM)\n Mr,Kr,Phi_G,Phi_CB,f_CB,f_G = CraigBampton(MM,KK,[2], nModesCB=2)\n print(Mr)\n print(Kr)\n print(Phi_G)\n print(Phi_CB)\n print(f_CB)\n ## --- Solve EVA\n __,Lambda = eig(Kr,Mr)\n f= np.sqrt(np.sort(np.diag(Lambda)))/(2*np.pi)\n print(f)\n# f = np.sqrt(Omega2) / (2 * pi)\n# for i in np.arange(1,np.amin(8,Mr.shape[1-1])+1).reshape(-1):\n# print('f%d=%8.3f Rayleigh Ratio=%.5f\\n' % (i,f(i),(f(i) / fc) ** 2))\n\n\n"
] | [
[
"numpy.eye",
"numpy.ix_",
"numpy.transpose",
"numpy.zeros",
"numpy.diag",
"numpy.setdiff1d",
"numpy.set_printoptions",
"numpy.asarray",
"numpy.arange",
"numpy.linalg.lstsq",
"numpy.sqrt",
"numpy.array",
"numpy.block"
]
] |
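The welib/FEM/reduction.py entry above defines CraigBampton(MM, KK, Ileader, ...). The sketch below reuses the 4-DOF beam-element matrices from the module's own __main__ block to show a call; note the function returns (Mr, Kr, Phi_G, Phi_CB, f_G, f_CB) in that order, which the sketch unpacks accordingly (the __main__ block in the entry swaps the last two names).

```python
# Craig-Bampton reduction of a single 4-DOF beam element (matrices taken from the
# module's __main__ example); DOF 2 is kept as the leader/interface DOF.
import numpy as np
from welib.FEM.reduction import CraigBampton

L = 100.0
EI = 1868211939147.334
Maff = L * 8828.201296825122
KK = EI / L**3 * np.array([[ 12,    6*L,    -12,    6*L   ],
                           [ 6*L,   4*L**2, -6*L,   2*L**2],
                           [-12,   -6*L,     12,   -6*L   ],
                           [ 6*L,   2*L**2, -6*L,   4*L**2]])
MM = Maff / 420 * np.array([[ 156,   22*L,    54,   -13*L  ],
                            [ 22*L,  4*L**2,  13*L, -3*L**2],
                            [ 54,    13*L,    156,  -22*L  ],
                            [-13*L, -3*L**2, -22*L,  4*L**2]])

Mr, Kr, Phi_G, Phi_CB, f_G, f_CB = CraigBampton(MM, KK, Ileader=[2], nModesCB=2)
print("Guyan frequencies [Hz]:", f_G)
print("Craig-Bampton frequencies [Hz]:", f_CB)
```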
c4dt/mlbench-core | [
"8a5cf6e00ff4535b2aea23b213241858a5ee5f00"
] | [
"mlbench_core/optim/pytorch/fp_optimizers.py"
] | [
"# import ctypes\nimport logging\nimport math\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom mlbench_core.utils.pytorch.distributed import (\n AllReduceAggregation,\n AllReduceAggregationHVD,\n)\n\ntry:\n from apex.optimizers import FusedAdam\n from apex import amp\nexcept ImportError as e:\n pass\n\nlogger = logging.getLogger(\"mlbench\")\n\n\nclass FP16Optimizer:\n \"\"\"\n Mixed precision optimizer with dynamic loss scaling and backoff.\n https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\n\n Args:\n fp16_model (`obj`:torch.nn.Module): model (previously casted to half)\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n use_horovod (bool): Use Horovod for aggregation\n by_layer (bool): Aggregate by layer\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n loss_scale (int): initial loss scale\n dls_downscale (int): loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients\n dls_upscale (int): loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully\n dls_upscale_interval (int): interval for loss scale upscaling\n average_models (bool): Average the models\n \"\"\"\n\n def __init__(\n self,\n fp16_model,\n world_size,\n use_cuda=False,\n use_horovod=False,\n by_layer=False,\n grad_clip=float(\"inf\"),\n loss_scale=1024,\n dls_downscale=2,\n dls_upscale=2,\n dls_upscale_interval=128,\n average_models=True,\n ):\n self.use_cuda = use_cuda\n\n self.fp16_model = fp16_model\n self.fp16_params, self.fp32_params = self.initialize_flat_fp32_weight()\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.grad_clip = grad_clip\n self.world_size = dist.get_world_size()\n\n self.optimizer = None\n\n if use_horovod:\n self.agg = AllReduceAggregationHVD(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n else:\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n\n if average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n # Flattening master weight\n def initialize_flat_fp32_weight(self):\n \"\"\" Initializes the model's parameters in fp32 and fp16\n\n Returns:\n (torch.Tensor, torch.Tensor): The Parametrs in fp16 and fp32\n \"\"\"\n # Set all gradients to None\n for p in self.fp16_model.parameters():\n p.grad = None\n\n # Count number of parameters per layer\n nelem = 0\n for p in self.fp16_model.parameters():\n nelem += p.numel()\n fp32_params = torch.empty(\n nelem,\n dtype=torch.float32,\n device=torch.device(\"cuda\" if self.use_cuda else \"cpu\"),\n )\n fp16_params = torch.empty(\n nelem,\n dtype=torch.float16,\n device=torch.device(\"cuda\" if self.use_cuda else \"cpu\"),\n )\n\n pointer = 0\n for p in self.fp16_model.parameters():\n nelem = p.numel()\n fp32_params[pointer : pointer + nelem].copy_(p.data.view(-1))\n fp16_params[pointer : pointer + nelem].copy_(p.data.view(-1))\n pointer += nelem\n\n fp32_params = torch.nn.Parameter(fp32_params, requires_grad=True)\n fp32_params.grad = torch.autograd.Variable(\n fp32_params.data.new(*fp32_params.size())\n )\n\n 
fp16_params = torch.nn.Parameter(fp16_params, requires_grad=True)\n fp16_params.grad = torch.autograd.Variable(\n fp16_params.data.new(*fp16_params.size())\n )\n\n return fp16_params, fp32_params\n\n @staticmethod\n def fp16_to_fp32_flat_grad(fp32_params, fp16_model):\n \"\"\" Copies the parameters in `fp16_model` into `fp32_params` in-place\n\n Args:\n fp32_params (torch.Tensor): Parameters in fp32\n fp16_model (torch.nn.Module): Model in fp16\n\n \"\"\"\n pointer = 0\n for p in fp16_model.parameters():\n nelem = p.numel()\n fp32_params.grad.data[pointer : pointer + nelem].copy_(p.grad.data.view(-1))\n pointer += nelem\n\n @staticmethod\n def fp32_to_fp16_grads(fp16_model, fp32_params):\n \"\"\" Copies the parameters in `fp32_params` into `fp16_model` in-place\n\n Args:\n fp16_model (torch.nn.Module): Model in fp16\n fp32_params (torch.Tensor): Parameters in fp32\n\n \"\"\"\n pointer = 0\n for p in fp16_model.parameters():\n nelem = p.numel()\n p.data.view(-1).copy_(fp32_params.data[pointer : pointer + nelem])\n pointer += nelem\n\n def backward_loss(self, loss):\n \"\"\" Scales and performs backward on the given loss\n\n Args:\n loss (torch.nn.Module): The loss\n\n \"\"\"\n loss *= self.loss_scale\n loss.backward()\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n Applies loss scaling, computes gradients in fp16, converts gradients to\n fp32, inverts scaling and applies optional gradient norm clipping.\n If gradients are finite, it applies update to fp32 master weights and\n copies updated parameters to fp16 model for the next iteration. If\n gradients are not finite, it skips the batch and adjusts scaling factor\n for the next iteration.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n\n scaling_factor = self.loss_scale\n\n # Aggregate gradients\n self.agg(self.fp16_model, self.agg_mode)\n # Cast fp16 params to fp32 for optimizer\n self.fp16_to_fp32_flat_grad(self.fp32_params, self.fp16_model)\n\n if scaling_factor != 1.0:\n self.fp32_params.grad.data /= scaling_factor\n norm = clip_grad_norm_([self.fp32_params], self.grad_clip)\n\n updated = False\n if math.isfinite(norm):\n self.optimizer.step(closure=closure)\n self.fp32_to_fp16_grads(self.fp16_model, self.fp32_params)\n self.since_last_invalid += 1\n updated = True\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logger.info(f\"Skipped batch, new scale: {self.loss_scale}\")\n\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n self.since_last_invalid = 0\n\n for p in self.fp16_model.parameters():\n p.grad = None\n\n return updated\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n\nclass FP32Optimizer:\n \"\"\"\n Standard optimizer, computes backward and applies weight update.\n\n Args:\n model (`obj`:torch.nn.Module): model\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n by_layer (bool): Aggregate by layer\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n average_models (bool): Average the models\n \"\"\"\n\n def __init__(\n self,\n model,\n world_size,\n use_cuda=False,\n by_layer=False,\n grad_clip=None,\n average_models=True,\n ):\n self.model = model\n self.grad_clip = grad_clip\n self.optimizer = None\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n if 
average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n \"\"\"\n if self.grad_clip != float(\"inf\"):\n clip_grad_norm_(self.model.parameters(), self.grad_clip)\n\n self.agg(self.model, self.agg_mode)\n self.optimizer.step(closure=closure)\n return True\n\n def backward_loss(self, loss):\n loss.backward()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n\nclass AMPOptimizer:\n \"\"\"\n Optimizer compatible with AMP.\n Uses AMP to apply loss scaling, computes backward and applies weight\n update.\n\n Args:\n model (`obj`:torch.nn.Module): model\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n loss_scale (int): initial loss scale\n dls_upscale_interval (int): interval for loss scale upscaling\n average_models (bool): Average the models\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n by_layer (bool): Aggregate by layer\n use_horovod (bool): Use Horovod for aggregation\n \"\"\"\n\n def __init__(\n self,\n model,\n grad_clip=None,\n loss_scale=8192,\n dls_upscale_interval=128,\n average_models=True,\n world_size=1,\n use_cuda=False,\n by_layer=False,\n use_horovod=False,\n ):\n self.model = model\n self.grad_clip = grad_clip\n self.optimizer = None\n loss_scaler = amp._amp_state.loss_scalers[0]\n loss_scaler._loss_scale = loss_scale\n loss_scaler._scale_seq_len = dls_upscale_interval\n\n if average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n if use_horovod:\n self.agg = AllReduceAggregationHVD(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n else:\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def backward_loss(self, loss):\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n \"\"\"\n if self.grad_clip != float(\"inf\"):\n clip_grad_norm_(amp.master_params(self.optimizer), self.grad_clip)\n\n self.agg(self.model, self.agg_mode)\n self.optimizer.step(closure=closure)\n return True\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n"
] | [
[
"torch.device",
"torch.distributed.get_world_size",
"torch.nn.Parameter",
"torch.nn.utils.clip_grad_norm_"
]
] |
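The mlbench-core entry above wires all-reduce gradient aggregation into optimizer wrappers. One plausible way to drive the simpler FP32Optimizer is sketched below under stated assumptions: a trivial single-process "gloo" group stands in for a real distributed job, the address/port, model, and data are placeholders, and grad_clip=float("inf") is passed because step() treats inf as "no clipping".

```python
# Single-process sketch of the FP32Optimizer wrapper (placeholder model and data).
import torch
import torch.distributed as dist
from mlbench_core.optim.pytorch.fp_optimizers import FP32Optimizer

if not dist.is_initialized():
    # Minimal one-rank process group so the all-reduce aggregation has a group to use
    dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500",
                            rank=0, world_size=1)

model = torch.nn.Linear(8, 1)
fp_optimizer = FP32Optimizer(model, world_size=dist.get_world_size(),
                             grad_clip=float("inf"))   # inf => step() skips clip_grad_norm_
fp_optimizer.set_optimizer(torch.optim.SGD(model.parameters(), lr=0.01))

x, y = torch.randn(32, 8), torch.randn(32, 1)
fp_optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
fp_optimizer.backward_loss(loss)   # plain loss.backward() on the FP32 path
fp_optimizer.step()                # aggregate gradients across ranks, then apply SGD update
```

The FP16Optimizer in the same entry follows the identical set_optimizer / backward_loss / step cycle, but additionally scales the loss and copies gradients between the fp16 model and the flat fp32 master weights.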
yj1990/sec_mmf | [
"72a8c0d5a6aadb4362c07a5606c70e51b08a53cd"
] | [
"secmmf/mmf_data_loader/form_parsers.py"
] | [
"import pandas as pd\nimport bs4 as bs\nimport untangle as ut\nimport requests\nimport urllib.request as rq\nfrom collections import OrderedDict\n\nfrom secmmf.mmf_data_loader.utils import get_edgar_url\n\nclass N_MFP2:\n\n def __init__(self):\n self.select_cols()\n\n def born(self, tag):\n # if tag is a single-node tag contains a navigable string, return a list with that string\n # if tag has multiple element, needs to further born them\n childs = []\n for x in tag:\n if (x != '\\n') & (type(x) != bs.element.Comment):\n childs.append(x)\n return childs\n\n def dive(self, root, surname=''):\n name = surname + root.name\n sons = []\n for son in self.born(root):\n if type(son) == bs.element.NavigableString:\n text = ': '.join([name, son])\n sons.append(text)\n elif type(son) == bs.element.Tag:\n sons.extend(self.dive(son, surname=name + '_'))\n return sons\n\n def teach(self, root):\n sons = []\n for son in self.born(root):\n if len(self.born(son)) == 1:\n sons.append((son.name, son.get_text().replace('\\n', '')))\n elif len(self.born(son)) > 1:\n for grandson in self.born(son):\n sons.append((son.name + '_' + grandson.name,\n grandson.get_text().replace('\\n', '')))\n return sons\n\n def teach_rec(self, root):\n sons = []\n for son in self.born(root):\n if len(self.born(son)) == 1:\n sons.append((son.name, son.get_text().replace('\\n', '')))\n elif len(self.born(son)) > 1:\n sons.append(teach_rec(son))\n return sons\n\n def parse(self, url='https://www.sec.gov/Archives/edgar/data/759667/000070217219000020/primary_doc.xml'):\n\n stubs = self.stubs\n #_tonum = self._tonum\n #series_level_names = self.series_level_names\n #class_level_names = self.class_level_names\n\n source = rq.urlopen(url).read()\n soup = bs.BeautifulSoup(source, 'xml')\n\n # parse XML info into a list of dictionaries\n mmf = []\n for tag in self.born(soup.formData):\n if tag.name in ['classLevelInfo', 'generalInfo', 'seriesLevelInfo']:\n mmf.append((tag.name, self.teach(tag)))\n\n general_series_class = []\n general_series = mmf[0][1] + mmf[1][1]\n\n for i, x in enumerate(general_series):\n if x[0] == 'numberOfSharesOutstanding':\n y = list(x)\n y[0] = 'series_numberOfSharesOutstanding'\n general_series[i] = tuple(y)\n\n for x in mmf[2:]:\n general_series_class.append(OrderedDict(general_series + x[1]))\n\n df = pd.DataFrame(general_series_class)\n if 'nameOfPersonDescExpensePay' in df.columns:\n df.drop(columns='nameOfPersonDescExpensePay', inplace=True)\n\n # rename those columns that have reversed patterns\n namemap = []\n for x in ['weeklyGrossRedemptions', 'weeklyGrossSubscriptions']:\n namemap.append(dict([('fridayWeek' + str(i + 1) + '_' + x,\n x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))\n for x in ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets']:\n namemap.append(dict([(x + '_' + 'fridayDay' + str(i + 1),\n x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))\n\n for i in range(4):\n df = df.rename(columns=namemap[i])\n\n # make data wide to long on weekly holding statistics\n df = pd.wide_to_long(df, stubnames=self.stubs,\n i='classesId', j='week', sep='_', suffix='\\w+')\n df.reset_index(inplace=True)\n df['week'] = df['week'].apply(\n lambda x: int(x.replace('fridayWeek', '')))\n\n #df = df[['week']+series_level_names+class_level_names]\n\n # change the type of numeric data to float\n #df[_tonum] = df[_tonum].astype(dtype = float)\n\n return df\n\n def parse_csv(self, url):\n source = get_edgar_url(url).content\n soup = bs.BeautifulSoup(source, 'xml')\n return 
self.dive(soup.formData)\n\n def select_cols(self):\n\n self.stubs = ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets',\n 'totalValueWeeklyLiquidAssets', 'percentageWeeklyLiquidAssets',\n 'netAssetValue', 'netAssetPerShare',\n 'weeklyGrossRedemptions', 'weeklyGrossSubscriptions']\n\n self._tonum = ['totalShareClassesInSeries',\n 'averagePortfolioMaturity',\n 'averageLifeMaturity',\n 'cash',\n 'totalValuePortfolioSecurities',\n 'amortizedCostPortfolioSecurities',\n 'totalValueOtherAssets',\n 'totalValueLiabilities',\n 'netAssetOfSeries',\n 'numberOfSharesOutstanding',\n 'stablePricePerShare',\n 'sevenDayGrossYield',\n 'minInitialInvestment',\n 'netAssetsOfClass',\n 'totalForTheMonthReported_weeklyGrossSubscriptions',\n 'totalForTheMonthReported_weeklyGrossRedemptions',\n 'sevenDayNetYield'] + self.stubs\n\n self.series_level_names = ['reportDate',\n 'cik',\n 'seriesId',\n 'totalShareClassesInSeries',\n 'finalFilingFlag',\n 'fundAcqrdOrMrgdWthAnthrFlag',\n 'securitiesActFileNumber',\n 'adviser_adviserName',\n 'adviser_adviserFileNumber',\n 'indpPubAccountant_name',\n 'indpPubAccountant_city',\n 'indpPubAccountant_stateCountry',\n 'administrator',\n 'transferAgent_name',\n 'transferAgent_cik',\n 'transferAgent_fileNumber',\n 'feederFundFlag',\n 'masterFundFlag',\n 'seriesFundInsuCmpnySepAccntFlag',\n 'moneyMarketFundCategory',\n 'fundExemptRetailFlag',\n 'averagePortfolioMaturity',\n 'averageLifeMaturity',\n 'totalValueDailyLiquidAssets',\n 'totalValueWeeklyLiquidAssets',\n 'percentageDailyLiquidAssets',\n 'percentageWeeklyLiquidAssets',\n 'cash',\n 'totalValuePortfolioSecurities',\n 'amortizedCostPortfolioSecurities',\n 'totalValueOtherAssets',\n 'totalValueLiabilities',\n 'netAssetOfSeries',\n 'series_numberOfSharesOutstanding',\n 'stablePricePerShare',\n 'sevenDayGrossYield',\n 'netAssetValue']\n self.class_level_names = ['classesId',\n 'minInitialInvestment',\n 'netAssetsOfClass',\n 'numberOfSharesOutstanding',\n 'netAssetPerShare',\n 'weeklyGrossSubscriptions',\n 'weeklyGrossRedemptions',\n 'totalForTheMonthReported_weeklyGrossSubscriptions',\n 'totalForTheMonthReported_weeklyGrossRedemptions',\n 'sevenDayNetYield',\n 'personPayForFundFlag']\n"
] | [
[
"pandas.wide_to_long",
"pandas.DataFrame"
]
] |
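The secmmf entry above implements an N-MFP2 XML parser. A hedged usage sketch: feed N_MFP2.parse() the EDGAR primary_doc.xml URL that the method itself uses as its default and inspect a few of the long-format columns it produces (one row per share class per reporting week). Whether the download succeeds may depend on SEC's current access policies (e.g. required User-Agent headers), so treat this purely as an illustration.

```python
# Parse one N-MFP2 filing into a class-by-week DataFrame (URL is the method's default).
from secmmf.mmf_data_loader.form_parsers import N_MFP2

parser = N_MFP2()
url = ("https://www.sec.gov/Archives/edgar/data/759667/"
       "000070217219000020/primary_doc.xml")
df = parser.parse(url=url)

# 'classesId' and 'week' come from the wide-to-long reshape; stub columns such as
# 'netAssetPerShare' and 'weeklyGrossRedemptions' hold the weekly figures.
print(df[["classesId", "week", "netAssetPerShare", "weeklyGrossRedemptions"]].head())
```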
wjwainwright/Capstone | [
"a2ea661079ece6ff5008f4399b3f0f6d32c598d3"
] | [
"IsoFit.py"
] | [
"try:\n runCount += 1\nexcept:\n isoIn = False\n clIn = False\n cataIn = False\n closePlots = False\n resultsIn = False\n clusterList = []\n clusters=[]\n isochrones = []\n isoList = []\n catalogue = []\n runCount = 1\n\nclass resultClusterObj:\n def __init__(self,cl):\n import numpy as np\n \n #Automatically populates variables based on those from the cluster it was given, except the data arrays\n global properties\n \n #List of all of the variables defined for the cluster cl, strips out the __functions__\n properties = [a for a in dir(cl) if not a.startswith('_')]\n for prop in properties:\n #Saves all 'number' type variables to the memory of the result cluster object\n if eval(f\"type(cl.{prop})\") == float or eval(f\"type(cl.{prop})\") == np.float64 or eval(f\"type(cl.{prop})\") == int:\n exec(f\"self.{prop} = float(cl.{prop})\")\n elif eval(f\"type(cl.{prop})\") == str:\n exec(f\"self.{prop} = cl.{prop}\")\n \n #Manually defined properties\n self.name = cl.name\n self.clType = cl.clType\n\nclass clusterObj:\n def __init__(self,name='genericCluster',basedir='clusters/',brightThreshold=15):\n #Declare instance variables\n self.basedir = basedir\n self.dataPath = self.basedir + f\"{name}/data/\"\n self.imgPath = self.basedir + f\"{name}/plots/\"\n self.unfilteredWide = []\n self.unfilteredNarrow = []\n self.filtered = []\n self.mag = []\n self.iso = []\n self.condensed = []\n self.condensed0 = []\n self.condensedInit=[]\n self.unfilteredBright = []\n self.filteredBright = []\n self.brightmag = []\n self.distFiltered = []\n self.binaries = []\n self.stars = []\n self.brightThreshold = brightThreshold\n self.mean_par = 0\n self.stdev_par = 0\n self.mean_ra = 0\n self.mean_dec = 0\n self.stdev_ra = 0\n self.stdev_dec = 0\n self.mean_pmra = 0\n self.stdev_pmra = 0\n self.mean_pmdec = 0\n self.stdev_pmdec = 0\n self.mean_a_g = 0\n self.stdev_a_g = 0\n self.mean_e_bp_rp = 0\n self.stdev_e_bp_rp = 0\n self.mean_par_over_ra = 0\n self.stdev_par_over_ra = 0\n self.dist_mod = 0\n self.turnPoint = 0\n self.reddening = 0\n self.radDist = 0\n self.massLoaded = False\n \n #Catalogued properties\n self.name = name\n self.clType = \"None\"\n self.pmra_min = -99\n self.pmra_max = -99\n self.pmdec_min = -99\n self.pmdec_max = -99\n self.par_min = -99\n self.par_max = -99\n self.cltpx = -99\n self.cltpy = -99\n self.noise_cutoff = -99\n \n #Check directory locations\n import os\n if not os.path.isdir(self.dataPath):\n os.mkdir(self.dataPath)\n if not os.path.isdir(self.imgPath):\n os.mkdir(self.imgPath)\n if not os.path.isdir(f\"{self.imgPath}/png\"):\n os.mkdir(f\"{self.imgPath}/png\")\n\n\n#Gaia DR2 Implementation\n# class starObj:\n# def __init__(self,name,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_match_obs,astro_sigma5d,match_obs,g_mag,b_mag,r_mag,b_r,b_g,g_r,radvel,radvel_err,variable,teff,a_g,e_bp_rp,lum):\n# #Declare instance variables\n# self.name = name\n# self.ra = float(ra)\n# self.ra_err = float(ra_err)\n# self.dec = float(dec)\n# self.dec_err = float(dec_err)\n# self.par = float(par)\n# self.par_err = float(par_err)\n# self.par_over_err = float(par_over_err)\n# self.pmra = float(pmra)\n# self.pmra_err = float(pmra_err)\n# self.pmdec = float(pmdec)\n# self.pmdec_err = float(pmdec_err)\n# self.ra_dec_corr = 
float(ra_dec_corr)\n# self.ra_par_corr = float(ra_par_corr)\n# self.ra_pmra_corr = float(ra_pmra_corr)\n# self.ra_pmdec_corr = float(ra_pmdec_corr)\n# self.dec_par_corr = float(dec_par_corr)\n# self.dec_pmra_corr = float(dec_pmra_corr)\n# self.dec_pmdec_corr = float(dec_pmdec_corr)\n# self.par_pmra_corr = float(par_pmra_corr)\n# self.par_pmdec_corr = float(par_pmdec_corr)\n# self.pmra_pmdec_corr = float(pmra_pmdec_corr)\n# self.astro_n_obs = float(astro_n_obs)\n# self.astro_n_good_obs = float(astro_n_good_obs)\n# self.astro_n_bad_obs = float(astro_n_bad_obs)\n# self.astro_gof = float(astro_gof)\n# self.astro_chi2 = float(astro_chi2)\n# self.astro_noise = float(astro_noise)\n# self.astro_noise_sig = float(astro_noise_sig)\n# self.astro_match_obs = float(astro_match_obs)\n# self.astro_sigma5d = float(astro_sigma5d)\n# self.match_obs = float(match_obs)\n# self.g_mag = float(g_mag)\n# self.b_mag = float(b_mag)\n# self.r_mag = float(r_mag)\n# self.b_r = float(b_r)\n# self.b_g = float(b_g)\n# self.g_r = float(g_r)\n# self.radvel = float(radvel)\n# self.radvel_err = float(radvel_err)\n# self.variable = variable\n# self.teff = float(teff)\n# self.a_g = float(a_g)\n# self.e_bp_rp = float(e_bp_rp)\n# self.lum = float(lum)\n# self.member = 0\n# self.binary = 0\n# self.radDist = 0\n \n# self.par_over_ra = float(par)/float(ra)\n# self.par_over_dec = float(par)/float(dec)\n# self.par_over_pmra = float(par)/float(pmra)\n# self.par_over_pmdec = float(par)/float(pmdec)\n \n# self.vosaPoints = []\n# self.excess = 0\n\n#Gaia DR3 implementation\nclass starObj:\n def __init__(self,name,source_id,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err, #Basic astrometrics\n ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr, #Correlations\n astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_nu_eff, #Assorted astrometric properties\n pseudocolor,pseudocolor_err,ra_pseudocolor_corr,dec_pseudocolor_corr,par_pseudocolor_corr,pmra_pseudoclor_corr,pmdec_pseudocolor_corr, #Pseudocolor\n astro_sigma5d,duplicated_source, #More assorted properties\n g_flux,g_flux_err,g_mag, #Gaia_G\n b_flux,b_flux_err,b_mag, #Gaia_BP\n r_flux,r_flux_err,r_mag, #Gaia_RP\n b_over_r_excess,b_r,b_g,g_r, #Color indices and excess\n radvel,radvel_err,radvel_num_transits,radvel_teff,radvel_feh, #Template Teff and Fe/H used to calculate the radvel\n l,b,long,lat): #Galactic l and b, ecliptic long and lat\n import numpy as np\n #Declare instance variables\n self.name = name\n self.source_id = source_id\n self.ra = float(ra)\n self.ra_err = float(ra_err)\n self.dec = float(dec)\n self.dec_err = float(dec_err)\n self.par = float(par)\n self.par_err = float(par_err)\n self.par_over_err = float(par_over_err)\n self.pmra = float(pmra)\n self.pmra_err = float(pmra_err)\n self.pmdec = float(pmdec)\n self.pmdec_err = float(pmdec_err)\n \n self.ra_dec_corr = float(ra_dec_corr)\n self.ra_par_corr = float(ra_par_corr)\n self.ra_pmra_corr = float(ra_pmra_corr)\n self.ra_pmdec_corr = float(ra_pmdec_corr)\n self.dec_par_corr = float(dec_par_corr)\n self.dec_pmra_corr = float(dec_pmra_corr)\n self.dec_pmdec_corr = float(dec_pmdec_corr)\n self.par_pmra_corr = float(par_pmra_corr)\n self.par_pmdec_corr = float(par_pmdec_corr)\n self.pmra_pmdec_corr = float(pmra_pmdec_corr)\n \n self.astro_n_obs = float(astro_n_obs)\n self.astro_n_good_obs = float(astro_n_good_obs)\n self.astro_n_bad_obs = 
float(astro_n_bad_obs)\n self.astro_gof = float(astro_gof)\n self.astro_chi2 = float(astro_chi2)\n self.astro_noise = float(astro_noise)\n self.astro_noise_sig = float(astro_noise_sig)\n self.astro_nu_eff = float(astro_nu_eff)\n \n self.astro_sigma5d = float(astro_sigma5d)\n self.duplicated_source = bool(duplicated_source)\n \n self.g_flux = float(g_flux)\n self.g_flux_err = float(g_flux_err)\n self.g_mag = float(g_mag)\n \n self.b_flux = float(b_flux)\n self.b_flux_err = float(b_flux_err)\n self.b_mag = float(b_mag)\n \n self.r_flux = float(r_flux)\n self.r_flux_err = float(r_flux_err)\n self.r_mag = float(r_mag)\n \n self.b_over_r_excess = float(b_over_r_excess)\n self.b_r = float(b_r)\n self.b_g = float(b_g)\n self.g_r = float(g_r)\n \n self.radvel = float(radvel)\n self.radvel_err = float(radvel_err)\n self.radvel_num_transits=float(radvel_num_transits)\n self.radvel_teff = float(radvel_teff)\n self.radvel_feh = float(radvel_feh)\n \n self.l = float(l)\n self.b = float(b)\n self.long = float(long)\n self.lat = float(lat)\n \n self.member = 0\n self.binary = 0\n self.radDist = 0\n \n self.par_over_ra = float(par)/float(ra)\n self.par_over_dec = float(par)/float(dec)\n self.par_over_pmra = float(par)/float(pmra)\n self.par_over_pmdec = float(par)/float(pmdec)\n \n self.normRA = self.ra*np.cos(self.dec*np.pi/180)\n \n self.vosaPoints = []\n self.excess = 0\n\n\n\nclass isochroneObj:\n def __init__(self,age=404,feh=404,afe=404,y=404,basedir='isochrones/',subdir='processed',isodir=''):\n #Declare instance variables\n self.basedir = basedir\n self.subdir = subdir\n self.isodir = isodir\n self.starList = []\n self.age = age\n self.feh = feh\n self.afe = afe\n self.y = y\n self.name = f\"feh_{feh}_afe_{afe}_age_{age}_y_{y}\"\n self.distance = 0\n self.coeff = []\n self.g = []\n self.br = []\n\n\nclass fakeStarObj:\n def __init__(self,g_mag,b_mag,r_mag):\n #Declare instance variables\n self.g_mag = g_mag\n self.b_mag = b_mag\n self.r_mag = r_mag\n self.b_r = self.b_mag-self.r_mag\n self.b_g = self.b_mag-self.g_mag\n self.g_r = self.g_mag-self.r_mag\n self.score = 0\n\nclass mistStar:\n def __init__(self,properties):\n #Declare instance variables\n \n for prop,val in properties:\n if \"inf\" in str(val):\n val = 50\n exec(f\"self.{prop} = {val}\")\n\n\nclass condensedPoint:\n def __init__(self,b_r,g_mag,weight):\n self.b_r = b_r\n self.g_mag = g_mag\n self.weight = weight\n\n\nclass vosaPoint:\n def __init__(self,filterID,wavelength,obs_flux,obs_error,flux,flux_error,excess):\n self.filterID = filterID\n self.wavelength = wavelength\n self.obs_flux = obs_flux\n self.obs_error = obs_error\n self.flux = flux\n self.flux_error = flux_error\n self.excess = excess\n\n\nclass cataloguedCluster():\n def __init__(self,name,clType,pmra_min,pmra_max,pmdec_min,pmdec_max,par_min,par_max,cltpx,cltpy,noise_cutoff):\n #Catalogued properties\n self.name = str(name)\n self.clType = str(clType)\n self.pmra_min = float(pmra_min)\n self.pmra_max = float(pmra_max)\n self.pmdec_min = float(pmdec_min)\n self.pmdec_max = float(pmdec_max)\n self.par_min = float(par_min)\n self.par_max = float(par_max)\n self.cltpx = float(cltpx)\n self.cltpy = float(cltpy)\n self.noise_cutoff = float(noise_cutoff)\n\n\n\n\n\n\nclass Datum:\n from matplotlib import colors as mcolors\n colorin = mcolors.to_rgba(\"red\")\n colorout = mcolors.to_rgba(\"blue\")\n\n def __init__(self, x, y, include=False):\n self.x = x\n self.y = y\n if include:\n self.color = self.colorin\n else:\n self.color = self.colorout\n\n\nclass LassoManager:\n \n\n 
def __init__(self, ax, data, cluster):\n from matplotlib.collections import RegularPolyCollection\n \n self.axes = ax\n self.canvas = ax.figure.canvas\n self.data = data\n self.cluster = cluster\n\n self.Nxy = len(data)\n\n facecolors = [d.color for d in data]\n self.xys = [(d.x, d.y) for d in data]\n self.collection = RegularPolyCollection(\n 6, sizes=(5,),\n facecolors=facecolors,\n offsets=self.xys,\n transOffset=ax.transData)\n\n ax.add_collection(self.collection)\n\n self.cid = self.canvas.mpl_connect('button_press_event', self.on_press)\n\n def callback(self, verts):\n from matplotlib import path\n global coords\n global clusters\n \n cluster = clusters[self.cluster.name]\n \n facecolors = self.collection.get_facecolors()\n p = path.Path(verts)\n ind = p.contains_points(self.xys)\n \n cluster.binaries = []\n \n for i in range(len(self.xys)):\n if ind[i]:\n facecolors[i] = Datum.colorin\n star = cluster.filtered[[a.b_r for a in cluster.filtered].index(self.xys[i][0])]\n cluster.binaries.append(star)\n else:\n facecolors[i] = Datum.colorout\n self.canvas.draw_idle()\n self.canvas.widgetlock.release(self.lasso)\n del self.lasso\n\n def on_press(self, event):\n from matplotlib.widgets import Lasso\n \n if self.canvas.widgetlock.locked():\n return\n if event.inaxes is None:\n return\n self.lasso = Lasso(event.inaxes,\n (event.xdata, event.ydata),\n self.callback)\n # acquire a lock on the widget drawing\n self.canvas.widgetlock(self.lasso)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef clusterCatalogue(types='all'):\n import numpy as np\n import pandas as pd\n global data\n global catalogue\n global cataIn\n \n data = pd.read_csv(\"catalogue.csv\",sep=',',dtype=str)\n data = data.to_numpy(dtype=str)\n cata = []\n for row in data:\n cata.append(cataloguedCluster(*row))\n \n if types == 'all':\n catalogue = cata\n \n cataIn = True\n return\n \n\n\ndef readClusters(cList=[\"M67\"],basedir=\"clusters/\",smRad=0.35):\n #Imports\n import numpy as np\n import pandas as pd\n global clusterList\n global clusters\n global stars\n global clIn\n global catalogue\n \n try:\n if clIn and len(clusterList) > 0:\n for clname in cList:\n if clname in clusters:\n unloadClusters([clname])\n except:\n clusterList=[]\n \n #Check the cluster catalogue to load the catalogued properties\n if not cataIn:\n clusterCatalogue()\n \n #Loop through clusters\n for clname in cList:\n #Create cluster objects\n cluster = clusterObj(name=clname,basedir=basedir)\n \n reference = None\n \n for cl in catalogue:\n if str(cl.name) == str(clname):\n reference = cl\n print(f\"Catalogue match for {clname} found\")\n break\n if reference == None:\n print(f\"Catalogue match for {clname} was not found, please create one\")\n continue\n\n #Filter all of the methods out of the properties list\n properties = [a for a in dir(reference) if not a.startswith('_')]\n print(properties)\n #exec(f\"print(reference.{properties[1]})\")\n #print(properties)\n \n #Now we have a list of all the attributes assigned to the catalogue (the self.variables)\n for p in properties:\n prop = getattr(reference,p)\n #print(prop)\n exec(f\"cluster.{p} = prop\")\n try:\n if prop <= -98:\n print(f\"{clname} does not have a specified catalogue value for {p}\")\n except:\n continue\n \n\n # if cluster.name == 'NGC752' or cluster.name == 'NGC188':\n # cluster.brightThreshold=18\n \n # if \"M67\" in clname:\n # cluster.type = \"open\"\n # if \"M35\" in clname:\n # cluster.type = \"open\"\n # if \"NGC188\" in clname:\n # cluster.type = \"open\"\n # if \"NGC752\" in 
clname:\n # cluster.type = \"open\"\n # if \"IC4651\" in clname:\n # cluster.type = \"open\"\n # if \"NGC2451\" in clname:\n # cluster.type = \"open\"\n # if \"AlphaPer\" in clname:\n # cluster.type = \"open\"\n # if \"M12\" in clname:\n # cluster.type = \"globular\"\n # if \"M3\" in clname:\n # cluster.type = \"globular\"\n # if \"M5\" in clname:\n # cluster.type = \"globular\"\n # if \"M15\" in clname:\n # cluster.type = \"globular\"\n # if \"M53\" in clname:\n # cluster.type = \"globular\"\n # if \"NGC6426\" in clname:\n # cluster.type = \"globular\"\n # if \"NGC6934\" in clname:\n # cluster.type = \"globular\"\n \n \"\"\"\n #Generate wide-field star list\n starlist = np.genfromtxt(cluster.dataPath+\"narrow.csv\", delimiter=\",\", skip_header=1, usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17))\n starlist = preFilter(starlist)\n for s in starlist:\n star = starObj(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],s[8],s[9],s[10],s[11],s[12],s[13],s[14],s[15],s[16],s[17])\n cluster.unfilteredNarrow.append(star) \n \"\"\"\n \n #Generate narrow-field star list\n starlist = pd.read_csv(cluster.dataPath+\"wide.csv\",sep=',',dtype=str)\n stars = pd.read_csv(cluster.dataPath+\"wide.csv\",sep=',',dtype=str)\n starlist = starlist.to_numpy(dtype=str)\n #starlist = np.genfromtxt(cluster.dataPath+\"wide.csv\", delimiter=\",\", skip_header=1)\n print(f\"{clname} initial length: {len(starlist)}\")\n starlist = preFilter(starlist)\n print(f\"{clname} post-prefiltered length: {len(starlist)}\")\n \n ramean = np.mean([float(x) for x in starlist[:,1]])\n decmean = np.mean([float(x) for x in starlist[:,3]])\n \n \n for s in starlist:\n star = starObj(*s)\n cluster.unfilteredWide.append(star)\n \n if np.less_equal(star.g_mag,cluster.brightThreshold):\n cluster.unfilteredBright.append(star)\n \n # if np.less_equal(np.sqrt(((star.ra-ramean)*np.cos(np.pi/180*star.dec))**2+(star.dec-decmean)**2),smRad):\n # cluster.unfilteredNarrow.append(star)\n clusterList.append(cluster)\n calcStats(cluster,mode='narrow')\n \n if not 'YSO' in clname:\n rmOutliers()\n clIn = True\n toDict()\n\n\ndef pad(string, pads):\n spl = string.split(',')\n return '\\n'.join([','.join(spl[i:i+pads]) for i in range(0,len(spl),pads)])\n\n\ndef readIso(basedir='isochrones/',subdir='MIST_raw/'):\n #Important note: The ages are rounded to a few decimal places in the Gyr range\n #This has the effect of making it such that a few dozen isochrones in the kyr range \n #are overwritten because they all round to the same value. 
I found this to be an issue\n #worth overlooking given that a cluster of that age hasn't been identified yet\n \n \n #Imports\n import os\n import re\n \n global isochrone_headers\n global isoList\n global isoIn\n \n path = basedir + subdir\n \n isoList = []\n \n for fn in os.listdir(path):\n \n #Read in file\n main = open(path+fn).read()\n main = main.split(\"\\n\")\n \n #Relevant variables from headers\n N_iso = int(main[7].split(\"=\")[1])\n index = 13\n \n varList = re.sub(\"\\s+\", \",\", main[5].strip()).split(\",\")\n afe = varList[4]\n feh = varList[3]\n y = varList[1]\n z = varList[2]\n v_vcrit = varList[5]\n \n #Column labels\n #Replace any number of spaces with a single comma, then replace a few problematic phrases and split the list by commas\n isochrone_headers = re.sub(\"\\s+\", \",\", main[12].replace(\"2MASS\",\"TwoMASS\").replace(\"[Fe/H]\",\"feh\").strip()).split(\",\")[1:]\n \n for idx in range(0,N_iso):\n N_stars = int(re.sub(\"\\s+\", \",\" , main[index-3].split(\"=\")[1]).split(\",\")[1])\n \n #print(f\"Iso = {idx} N_stars = {N_stars}\")\n \n #Populate a single isochrone\n stars = []\n for i in range(index,index+N_stars):\n #Send the header and values to the mistStar object\n #print(f\"i = {i}\")\n values = [float(a) for a in re.sub(\"\\s+\", \",\" , main[i].strip()).split(\",\")]\n properties = zip(isochrone_headers,values)\n stars.append(mistStar(properties))\n #Create the isochrone from the list of stars\n age = round(10**values[1]/1e9,3)\n iso = isochroneObj(age,feh,afe,y)\n iso.starList = stars\n iso.br = [star.Gaia_BP_EDR3-star.Gaia_RP_EDR3 for star in stars]\n iso.g = [star.Gaia_G_EDR3 for star in stars]\n isoList.append(iso)\n \n index += N_stars + 5\n \n isoIn = True\n toDict()\n \n\n\ndef checkIsoDupes():\n global isochrones\n global isoList\n \n names = []\n for iso in isoList:\n if iso.name in names:\n print(iso.name)\n else:\n names.append(iso.name)\n\n\ndef processIso(basedir='isochrones/',subdir='raw/'):\n #Imports\n import os\n import re\n \n path = basedir + subdir\n \n for fn in os.listdir(path):\n main = open(path+fn).read()\n part = main.split('\\n\\n\\n')\n part[0] = part[0].split('#----------------------------------------------------')[3].split('\\n',1)[1]\n \n for a in range(len(part)):\n temp = part[a].split('#AGE=')[1].split(' EEPS=')[0]\n age = temp.strip()\n \n out = part[a].split('\\n',2)[2]\n out = re.sub(\"\\s+\", \",\", out.strip())\n out = pad(out,8)\n \n filename = f\"{basedir}processed/\"+fn.split('.')[0]+'/'+age+\".csv\"\n \n os.makedirs(os.path.dirname(filename), exist_ok=True) \n with open(filename,\"w\") as f:\n f.write(out)\n\n\ndef readIsochrones(basedir='isochrones/',subdir='processed/'):\n #Imports\n import os\n import numpy as np\n global isoList\n global isoIn\n \n isoList=[]\n \n for folder in os.listdir(basedir+subdir):\n for fn in os.listdir(basedir+subdir+folder):\n \n #Get the age and metallicities of the isochrones\n ageStr = fn.split('.csv')[0]\n fehStr = folder.split('feh')[1].split('afe')[0]\n afeStr = folder.split('afe')[1].split('y')[0]\n if 'y' in folder:\n yStr = folder.split('y')[1]\n else:\n yStr = '0'\n \n feh = float(fehStr[1]+fehStr[2])/10\n afe = float(afeStr[1])/10\n age = float(ageStr)\n y = int(yStr)\n \n if fehStr[0] == 'm':\n feh = feh*-1\n if afeStr[0] == 'm':\n afe = afe*-1\n \n #Debug\n #print(f\"folder:{folder} fn:{fn} fehStr:{fehStr} feh:{feh} afeStr:{afeStr} afe:{afe} ageStr:{ageStr} age:{age}\")\n \n #Create isochone object\n iso = 
isochroneObj(age=age,feh=feh,afe=afe,y=y,basedir=basedir,subdir=subdir,isodir=folder+'/')\n \n isoArr = np.genfromtxt(basedir+subdir+folder+\"/\"+fn, delimiter=\",\")\n for s in isoArr:\n star = fakeStarObj(s[5],s[6],s[7])\n iso.starList.append(star)\n iso.br.append(s[6]-s[7])\n iso.g.append(s[5])\n \n isoList.append(iso)\n isoIn = True\n toDict()\n\ndef preFilter(starList):\n #Imports\n import numpy as np\n \n final = []\n #Columns to be checked for NaN values. If an NaN is present in this column, the entry(star) is discarded from the \"unfiltered\" list\n #2-12 is the astrometry\n #42,45,48 are the g,bp,rp magnitudes\n #50-52 are the color indices\n cols = list(range(2,13))+[42]+[45]+[48]+list(range(50,53))\n \n #Filters out NaN values except for the last two columns\n for n,s in enumerate(starList):\n dump = False\n for c in cols:\n if np.isnan(float(s[c])):\n dump = True\n if not dump:\n final.append(starList[n])\n \n #Reshapes array \n final = np.array(final)\n \n return final\n\ndef rmOutliers():\n #Imports\n global clusterList\n import numpy as np\n \n for cluster in clusterList:\n \n if cluster.clType.lower() == \"globular\":\n scale = 4\n else:\n scale = 1.5\n \n #Variables\n pmthreshold = 5\n pmpthreshold = 50\n parthreshold = 5\n posthreshold = 5\n toRemove=[]\n \n #print(cluster.mean_pmra,cluster.mean_pmdec,cluster.stdev_pmra,cluster.stdev_pmdec)\n #print(len(cluster.unfilteredWide))\n \n #Classifies outliers\n for star in cluster.unfilteredWide:\n if cluster.name == \"NGC188\":\n if star.ra > 100:\n toRemove.append(star)\n #print(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),star.pmra,star.pmdec)\n if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt(((star.ra-cluster.mean_ra)*np.cos(np.pi/180*star.dec))**2+(star.dec-cluster.mean_dec)**2),posthreshold) or np.greater(abs(star.par),parthreshold):\n #if np.greater(np.sqrt((star.pmra-cluster.mean_pmra)**2+(star.pmdec-cluster.mean_pmdec)**2),threshold):\n toRemove.append(star)\n \n #Removes the outliers from the array\n for rm in toRemove:\n cluster.unfilteredWide.remove(rm)\n try:\n cluster.unfilteredNarrow.remove(rm)\n except ValueError:\n pass\n \n #print(len(cluster.unfilteredWide))\n\ndef calcStats(cluster,mode='filtered'):\n #Imports\n import numpy as np\n \n #Reads in all the values for a cluster\n par=[]\n par_err=[]\n ra=[]\n dec=[]\n pmra=[]\n pmdec=[]\n gmag = []\n br = []\n # a_g=[]\n # e_bp_rp=[]\n \n loopList=[]\n \n checkLoaded([cluster])\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n \n if mode == 'bright':\n loopList = cluster.filteredBright\n elif mode == 'narrow':\n loopList = cluster.unfilteredNarrow\n elif mode == 'filtered':\n loopList = cluster.filtered\n \n for star in loopList:\n par.append(star.par)\n par_err.append(star.par_err)\n pmra.append(star.pmra)\n pmdec.append(star.pmdec)\n ra.append(star.ra)\n dec.append(star.dec)\n gmag.append(star.g_mag)\n br.append(star.b_r)\n \n # if not np.isnan(star.a_g) and not star.a_g == 0:\n # a_g.append(star.a_g)\n # if not np.isnan(star.e_bp_rp) and not star.e_bp_rp == 0:\n # e_bp_rp.append(star.e_bp_rp)\n \n #Calculate the statistics\n cluster.mean_par = np.mean(par[:])\n cluster.mean_ra = np.mean(ra[:])\n cluster.mean_dec = np.mean(dec[:])\n cluster.stdev_ra = np.std(ra[:])\n cluster.stdev_dec = np.std(dec[:])\n cluster.stdev_par = np.std(par[:])\n cluster.mean_pmra = np.mean(pmra[:])\n 
cluster.stdev_pmra = np.std(pmra[:])\n cluster.mean_pmdec = np.mean(pmdec[:])\n cluster.stdev_pmdec = np.std(pmdec[:])\n # cluster.mean_a_g = np.mean(a_g[:])\n # cluster.stdev_a_g = np.std(a_g[:])\n # cluster.mean_e_bp_rp = np.mean(e_bp_rp[:])\n # cluster.stdev_e_bp_rp = np.std(e_bp_rp[:])\n cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])\n cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])\n cluster.mean_par_err = np.mean(par_err[:])\n \n cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5\n \n for star in loopList:\n star.radDist = np.sqrt((star.ra-cluster.mean_ra)**2+(star.dec-cluster.mean_dec)**2)\n star.normRadDist = np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-cluster.mean_ra*np.cos(cluster.mean_dec*np.pi/180))**2+(star.dec-cluster.mean_dec)**2)\n\n\ndef saveClusters(cList):\n #Imports\n import dill\n \n saveResults(cList)\n #Creates a pickle file with all of the saved instances\n for cl in cList:\n cluster = clusters[cl]\n #print(cluster.name,id(cluster))\n with open(f\"{cluster.dataPath}filtered.pk1\", 'wb') as output:\n dill.dump(cluster, output)\n\n\ndef saveIsochrones():\n #Imports\n import dill\n global clusterList\n \n #Creates a pickle file with all of the saved instances\n for iso in isoList:\n with open(f\"{iso.basedir}pickled/{iso.name}.pk1\", 'wb') as output:\n dill.dump(iso, output)\n\n \ndef loadClusters(clusterNames=[\"M67\"],basedir='clusters/'):\n #Imports\n import dill\n global clusterList\n global clusters\n global clIn\n \n for clusterName in clusterNames:\n if clusterName in clusters:\n unloadClusters([clusterName])\n #Reads in instances from the saved pickle file\n with open(f\"{basedir}{clusterName}/data/filtered.pk1\",'rb') as input:\n cluster = dill.load(input)\n clusterList.append(cluster)\n clIn = True\n toDict()\n\n\ndef loadIsochrones(basedir='isochrones/'):\n #Imports\n import dill\n import os\n global isoList\n global isoIn\n \n isoList=[]\n \n for fn in os.listdir(basedir+\"pickled/\"):\n #Reads in instances from the saved pickle file\n with open(f\"{basedir}pickled/{fn}\",'rb') as input:\n iso = dill.load(input)\n isoList.append(iso)\n isoIn = True\n toDict()\n\n\ndef unloadClusters(cList=['all']):\n #Imports\n global clusterList\n global clusters\n \n if 'all' in cList:\n cList = [cluster.name for cluster in clusterList]\n \n for cl in cList:\n cluster = clusters[cl]\n \n clusterList.remove(cluster)\n clusters.pop(cl)\n del cluster\n \n\ndef dataProcess(cList,load=False,fit=True,unload=True,plotting=True,member=True,save=True,close=True):\n #This method is largely intended for re-processing a bulk batch of clusters that have already been processed before,\n #meaning they already have condensed point lists or you are already aware of their fitting quality\n \n #Imports\n import matplotlib.pyplot as plt\n global clusterList\n global clusters\n global closePlots\n \n if not isoIn:\n loadIsochrones()\n \n \n loadList = [\"M15\",\"M12\",\"M39\",\"M46\",\"M67\",\"NGC188\",\"NGC2355\",\"NGC2158\",\"IC4651\",\"NGC6791\",\"NGC2360\",\"NGC2204\"]\n \n for cl in cList:\n \n if cl in loadList:\n condensing = \"load\"\n else:\n condensing = \"auto\"\n \n if load:\n loadClusters([cl])\n else:\n readClusters([cl])\n turboFilter([cl])\n \n if close:\n plt.close('all') \n \n \n if fit:\n turboFit([cl],condensing=condensing)\n if plotting:\n plot([cl],['pos','pm','cmd','quiver','iso'])\n if close:\n plt.close('all') \n \n if member:\n proxyMatch([cl])\n boundedStats([cl],saveCl=False,unloadCl=False)\n membership(cl,mode='filtered')\n 
membership(cl,mode='bounded',N=75)\n plt.close('all')\n \n if save:\n saveClusters([cl])\n saveResults([cl])\n if unload:\n unloadClusters([cl])\n \n\n\n\ndef turboFilter(cl=[\"all\"]):\n #Imports\n global clusterList\n \n cList = checkLoaded(cl)\n \n for clus in cList:\n cluster = clusters[clus]\n \n cluster.filteredBright,cluster.brightmag = pmFilter(cluster.unfilteredBright,cluster.name)\n print(f\"==========================={cluster.name}===========================\")\n print(f\"bright unf/pm fil: {len(cluster.unfilteredBright)} / {len(cluster.filteredBright)}\")\n calcStats(cluster,mode='bright')\n distFilter(cluster)\n print(f\"dist(all): {len(cluster.distFiltered)}\")\n cluster.filtered,cluster.mag = pmFilter(cluster.distFiltered,cluster.name)\n \n \n #Manual filtering of extraneous points\n cluster.filtered,cluster.mag = manualFilter(cluster)\n \n \n print(f\"pm(all): {len(cluster.filtered)}\")\n \n customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')\n \n magnitude = cutNoise(cluster)\n print(f\"noise cutoff: mag {magnitude} length {len(cluster.filtered)}\")\n \n customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')\n \n \"\"\"\n for i in range(10):\n print(f\"{cluster.filtered[i].b_r} {cluster.mag[i,0]}\")\n \"\"\"\n \n calcStats(cluster,mode='filtered')\n setFlag()\n\n\ndef manualFilter(cluster):\n #This exists to remove any points that may or may not be relevant to the cluster but are prohibiting the fit from happening\n \n if \"M35\" in cluster.name:\n filtered = [star for star in cluster.filtered if star.g_mag > 9 or star.b_r < 1]\n return filtered,magList(filtered)\n else:\n return cluster.filtered,cluster.mag\n\ndef magList(filtered):\n import numpy as np\n \n mag = np.empty((0,2))\n \n for star in filtered:\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n\n\ndef pmFilter(starList,name):\n #Imports\n import numpy as np\n \n filtered = []\n mag = np.empty((0,2))\n cluster = clusters[name]\n assert cluster.name == name\n \n #Apply an elliptical filter to the proper motion space \n pmra_width = (cluster.pmra_max-cluster.pmra_min)/2\n pmdec_width = (cluster.pmdec_max-cluster.pmdec_min)/2\n pmra_center = cluster.pmra_min+pmra_width\n pmdec_center = cluster.pmdec_min+pmdec_width\n \n print(pmra_center,pmdec_center)\n \n for star in starList:\n if (star.pmra-pmra_center)**2/pmra_width**2 + (star.pmdec-pmdec_center)**2/pmdec_width**2 <= 1:\n filtered.append(star)\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n \n assert len(filtered) > 1\n print(len(filtered))\n \n return filtered,mag\n\n\ndef distFilter(cluster):\n #Imports\n import numpy as np\n \n \n if cluster.par_min == 0 or cluster.par_max == 0:\n threshold = 1.5*cluster.mean_par\n \n print(f\"{cluster.name} filtered using mean parallax\")\n for star in cluster.unfilteredWide:\n if not np.greater(np.abs(star.par-cluster.mean_par),threshold*cluster.stdev_par):\n cluster.distFiltered.append(star)\n else:\n print(f\"{cluster.name} filtered using min & max parallax values\")\n for star in cluster.unfilteredWide:\n if star.par > cluster.par_min and star.par < cluster.par_max:\n cluster.distFiltered.append(star)\n\n\n\ndef cutNoise(cluster):\n #Imports\n import numpy as np\n \n stars = sorted(cluster.filtered,key=lambda x: x.g_mag)\n new = []\n newMag = np.empty((0,2))\n \n if cluster.noise_cutoff <= -98:\n threshold = 1\n print(f\"{cluster.name} noise cutoff undefined, using default\")\n else:\n threshold = cluster.noise_cutoff\n \n bad = 0\n badCut = 5\n 
for i,s in enumerate(stars):\n if s.astro_sigma5d > threshold:\n bad += 1\n if bad >= badCut:\n break\n else:\n new.append(s)\n newMag = np.r_[newMag,[[s.b_r,s.g_mag]]]\n \n cluster.filtered = new\n cluster.mag = newMag\n return s.g_mag\n\n\ndef turboFit(cl=[\"all\"],condensing='auto',weighting='pos',tp=\"catalogue\",minScore=0.001):\n #Typical use cases are auto, pos, catalogue --OR-- manual, equal, catalogue\n #Imports\n import time\n global clusterList\n \n cList = checkLoaded(cl)\n \n print(\"=========================Fitting=========================\")\n t0 = time.time()\n \n status = condense(cList,condensing,weighting,tp,minScore)\n if status == \"Suspended\":\n return\n \n for cluster in cList:\n redFitting(cluster,minScore,weighting)\n \n \n t1 = time.time()\n \n print(f\"Total {cluster.name} fit runtime: {t1-t0} seconds\")\n \n\n\ndef redFitting(cluster,minScore,weighting):\n #Imports\n import numpy as np\n import math\n from sys import stdout\n from time import sleep\n global clusterList\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n \n cluster.iso = []\n \n redMin = 0\n redMax = 0.7\n step = 0.05\n \n redList = [round(x,2) for x in np.arange(redMin,redMax+step,step)]\n \n for reddening in redList:\n stdout.write(f\"\\rCurrent reddening value for {cluster.name}: {reddening:.2f} / ({redList[0]:.2f}->{redList[-1]:.2f})\")\n shapeFit(cluster,reddening,minScore,weighting)\n stdout.flush()\n sleep(0.1)\n \n cluster.iso = sorted(cluster.iso,key=lambda x: x[1])\n best = float(cluster.iso[0][2])\n \n print(f\"\\nCoarse-step reddening for {cluster.name}: {best}\")\n \n subMin = best - 0.05\n subMax = best + 0.05\n substep = 0.01\n \n if subMin < 0:\n subMin = 0\n \n subList = [round(x,2) for x in np.arange(subMin,subMax+substep,substep) if not round(x,2) in redList and round(x,2) > subMin and round(x,2) < subMax]\n \n for reddening in subList:\n stdout.write(f\"\\rCurrent fine-step reddening value for {cluster.name}: {reddening:.2f} / ({subList[0]:.2f}->{subList[-1]:.2f})\")\n shapeFit(cluster,reddening,minScore,weighting)\n stdout.flush()\n sleep(0.1)\n \n cluster.iso = sorted(cluster.iso,key=lambda x: x[1])\n \n cluster.reddening = float(cluster.iso[0][2])\n cluster.fit_age = float(isochrones[cluster.iso[0][0]].age)\n cluster.fit_feh = float(isochrones[cluster.iso[0][0]].feh)\n cluster.fit_afe = float(isochrones[cluster.iso[0][0]].afe)\n cluster.fit_y = float(isochrones[cluster.iso[0][0]].y)\n \n #Unrelated properties but I needed somewhere to assign them\n setattr(cluster,'meanDist',1000/cluster.mean_par)\n \n meanL = np.mean([a.l*np.pi/180 for a in cluster.filtered])\n galDist = 8000 #pc\n gd = cluster.meanDist**2 + galDist**2 - 2*cluster.meanDist*galDist*np.cos(meanL)\n setattr(cluster,'meanGalacticDist',gd**0.5)\n \n print(f\"\\nReddening for {cluster.name}: {best}\")\n\n\ndef shapeFit(cluster,reddening,minScore,weighting):\n #Imports\n import numpy as np\n import shapely.geometry as geom\n global isoList\n \n \n conversion = 2.1\n \n isoFitList = np.empty((0,3))\n for iso in isoList:\n isoLine = geom.LineString(tuple(zip([x+reddening for x in iso.br],[x+cluster.dist_mod+conversion*reddening for x in iso.g])))\n dist = []\n for star in cluster.condensed:\n starPt = geom.Point(star.b_r,star.g_mag)\n #print(starPt.distance(isoLine))\n pointDist = np.abs(starPt.distance(isoLine))*star.weight\n if pointDist < minScore*star.weight:\n pointDist = minScore*star.weight\n dist.append(pointDist**2)\n isoScore = np.sum(dist[:])\n #print(isoScore,dist)\n 
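        # isoScore is the sum over condensed points of (weight*distance)^2, where distance is the
        # perpendicular CMD-space separation from the point to the reddened isochrone polyline
        # (via shapely); each weighted distance is floored at minScore*weight so a point lying
        # essentially on the curve still contributes a small, fixed amount. The factor
        # conversion = 2.1 is the adopted ratio between G-band extinction and E(BP-RP) reddening
        # used to shift the isochrone in magnitude as well as colour.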
#print(list(geom.shape(isoLine).coords))\n isoFitList = np.r_[isoFitList,[[iso.name,float(isoScore),float(reddening)]]]\n #compareInstances(iso,cluster.iso[-1][0])\n #print(isoScore)\n cluster.iso.extend(isoFitList)\n #best = cluster.iso[1][0]\n #specificPlot(cluster.name,best.name,reddening)\n #print(f\"\\nFirst point of best fit: {best.br[0]+reddening},{best.g[0]+conversion*reddening+cluster.dist_mod}\")\n\n \ndef onclick(x,y,fig,ax,cluster,minScore,weighting,newList):\n def func(event):\n import matplotlib.pyplot as plt\n global coords\n \n ix, iy = event.xdata, event.ydata\n \n if str(event.button) == \"MouseButton.RIGHT\":\n for i,(cx,cy) in enumerate(coords):\n if abs(ix-cx) <= 0.075 and abs(iy-cy) <= 0.25:\n coords.pop(i)\n ax.clear()\n ax.scatter(x,y,s=0.5,color='dimgray')\n ax.invert_yaxis()\n ax.scatter([a[0] for a in coords],[a[1] for a in coords],c='red',s=10)\n plt.gcf().canvas.draw_idle()\n \n if str(event.button) == \"MouseButton.LEFT\":\n coords.append((ix, iy))\n ax.scatter(ix,iy,c='red',s=10)\n plt.gcf().canvas.draw_idle()\n \n if str(event.button) == \"MouseButton.MIDDLE\":\n fig.canvas.mpl_disconnect(cid)\n plt.close(fig)\n updateCondensed(cluster,minScore,weighting,newList)\n \n if len(coords) >= 100:\n fig.canvas.mpl_disconnect(cid)\n plt.close(fig)\n updateCondensed(cluster,minScore,weighting,newList)\n \n \n return\n return func\n\n\ndef updateCondensed(cluster,minScore,weighting,newList):\n #Imports\n import numpy as np\n global coords\n \n condensed = []\n for point in coords:\n if cluster.clType.lower() == \"globular\" or weighting.lower() == \"equal\":\n weight = 1\n else:\n #Automatic weighting scheme currently unsupported for manual condensed point definition,\n #but the framework is here to be able to insert it without having to worry about it being\n #passed around from function to function\n weight = 1\n condensed.append(condensedPoint(point[0],point[1],weight))\n \n if cluster.reddening == 0:\n cluster.condensed0 = condensed\n cluster.condensed = condensed\n \n np.savetxt(f\"{cluster.dataPath}condensed.csv\",coords,delimiter=',')\n \n redFitting(cluster,minScore,weighting)\n if len(newList) > 0:\n turboFit(newList,'manual',weighting,'catalogue',minScore)\n return\n\n\ndef find_nearest(array, value):\n #Imports\n import numpy as np\n \n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n\n\ndef testCluster(name='feh_0.00_afe_0.00_age_0.141_y_0.2703'):\n #Imports\n import numpy as np\n global clusterList\n global clIn\n \n iso = isochrones[name]\n test = clusterObj('test')\n filtered = [starObj('fake',0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,a.Gaia_G_EDR3,0,0,0,0,0,0,0,a.Gaia_BP_EDR3-a.Gaia_RP_EDR3,0,0,0,0,0,0,0,0,0,0,0) for a in iso.starList]\n test.filtered = filtered\n \n mag = np.empty((0,2))\n for star in test.filtered:\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n test.mag = mag\n \n if not 'test' in clusters:\n clusterList.append(test)\n else:\n idx = clusterList.index(clusters['test'])\n clusterList.pop(idx)\n clusterList.append(test)\n clIn = True\n toDict()\n\ndef condense(cList,condensing,weighting,tp,minScore=0.001):\n #Imports\n import numpy as np\n global isoList\n global mag\n \n \n for cluster in cList:\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n cList[cList.index(cluster.name)] = cluster\n \n \n #Creates mag arrays to be used in place of the filtered star objects\n mag = cluster.mag[:,:]\n mag[mag[:,1].argsort()]\n gmag = list(mag[:,1])\n gmin = 
mag[0,1]
        gmax = mag[-1,1]
        div = 50
        seg = (gmax-gmin)/div
        minpoints = 1
        
        #The array that will become the condensed points list
        condensed = np.empty((0,3))
        turnPoints = []
        
        if condensing.lower() == "load":
            global pts
            pts = np.genfromtxt(f"{cluster.dataPath}condensed.csv",delimiter=',')
            condensed = []
            for point in pts:
                #Missing alternate weighting schemes, but they can be implemented *here*
                condensed.append(condensedPoint(point[0],point[1],1))
            cluster.condensed = condensed
            cluster.condensed0 = condensed
            continue
        
        #Manual point definition
        if condensing.lower() == "manual":
            import matplotlib.pyplot as plt
            global cid
            global coords
            coords = []
            
            if len(cList) == 1:
                newList = []
            else:
                newList = cList[cList.index(cluster)+1:]
            
            x,y = mag[:,0],mag[:,1]
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(x,y,s=0.25,color='dimgray')
            ax.invert_yaxis()
            
            hook = onclick(x,y,fig,ax,cluster,minScore,weighting,newList)
            cid = fig.canvas.mpl_connect('button_press_event', hook)
            
            return "Suspended"
        
        #Vertically stacked slices in brightness
        for i in range(div):
            sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
            #print(np.array(sliced).shape)
            
            #Skip foreseen problems with empty arrays
            if len(sliced) < minpoints:
                continue
            condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
        
        condensed = condensed[::-1]
        
        #Uses defined turning points in the cluster catalogue,
        #falling back to automatic detection when the catalogue values are undefined
        if tp.lower() == "catalogue":
            if cluster.cltpx <= -98 and cluster.cltpy <= -98:
                tp = "auto"
        
        #If no turning point is found, or auto is specified, then this section of code
        #attempts to find the turning point through steep gradient changes in the main sequence
        if tp.lower() == "auto":
            #Criteria for the line that forms the basis of the gradient change method
            start = 4
            end = 11
            theta_crit = 5
            
            #Creates a slope-intercept fit for the lower main sequence
            basex = [a[0] for a in condensed[start:end]]
            basey = [a[1] for a in condensed[start:end]]
            base = np.polyfit(basex,basey,1)
            
            #Travels up the main sequence
            for i,point in enumerate(condensed):
                if i == start:
                    continue
                #Creates a fit line between the start point and the current point
                x = [point[0],condensed[start,0]]
                y = [point[1],condensed[start,1]]
                lin = np.polyfit(x,y,1)
                
                #Calculates the angle between the new line and the lower main sequence,
                #using tan(theta) = |(m1 - m2)/(1 + m1*m2)| for two lines with slopes m1 and m2
                point[2] = 180/np.pi*np.arctan(abs( (base[0]-lin[0])/(1+base[0]*lin[0]) ))
                
                #If the angle between the two lines is large enough, the point is considered
                #to be a candidate turning point, and is appended to the list of candidates
                if point[2] > theta_crit and i > end:
                    turnPoints.append(point)
            
            #Analysis plot showing the theta value for each condensed point
            import matplotlib.pyplot as plt
            plt.figure()
            plt.scatter(condensed[:,0],condensed[:,1],c=condensed[:,2])
            plt.set_cmap('brg')
            plt.gca().invert_yaxis()
            clb = plt.colorbar()
            clb.ax.set_title("Theta")
            plt.savefig(f'condensed_{cluster.name}')
            
            #If no automatic turning point is found, ends the method here
            if len(turnPoints) == 0:
                print(f"No turning point identified for {cluster.name}")
                return
            else:
                #Identifies the working turning point as the dimmest candidate, shifted blueward by 5% in colour
                turnPoints = sorted(turnPoints,key=lambda x: x[1])
                tp = turnPoints[-1]
                tp[0] = tp[0] - 0.05*np.abs(tp[0])
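        # Downstream, stars redder than this offset turnoff colour populate the main-sequence
        # slices while bluer points are collected separately (condensed_giant); the 5% blueward
        # shift presumably leaves a margin so genuine turnoff stars are not discarded along with
        # the blue stragglers.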
cluster.turnPoint = tp\n \n #Stores the condensed point list\n cl = []\n for point in condensed:\n cl.append(condensedPoint(point[0],point[1],point[2]))\n \n cluster.condensedInit = cl\n # [ B-R , G , Theta ]\n print(f\"{cluster.name} Turning Point: {cluster.turnPoint}\")\n \n \n \n \n \n \n \n #Assuming the undefined catch for manual would be caught the first time around\n if tp.lower() == \"catalogue\":\n cluster.turnPoint = [cluster.cltpx,cluster.cltpy]\n \n if cluster.clType.lower() == \"open\":\n #Recalc with the turnPoint limit enforced - Ignore blue stragglers\n condensed = np.empty((0,3))\n condensed_giant = np.empty((0,3))\n yList = []\n \n #Vertically stacked slices in brightness\n for i in range(div):\n rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]\n \n sliced = np.empty((0,2))\n sliced_giant = np.empty((0,2))\n for point in rawSliced:\n #print(point)\n if point[0] >= cluster.turnPoint[0]:\n sliced = np.r_[sliced,[[point[0],point[1]]]]\n else:\n sliced_giant = np.r_[sliced_giant,[[point[0],point[1]]]]\n \n #Skip forseen problems with empty arrays\n if len(sliced) > 0:\n x = np.median(sliced[:,0])\n y = np.median(sliced[:,1])\n yList.append(y)\n condensed = np.r_[condensed,[[x,y,1]]]\n if len(sliced_giant) > 3:\n xg = np.median(sliced_giant[:,0])\n yg = np.median(sliced_giant[:,1])\n condensed_giant = np.r_[condensed_giant,[[xg,yg,1]]]\n \n \n #New turning point found from the reduced data set\n newTP = find_nearest(yList,cluster.turnPoint[1])\n \n index = 0\n \n for i,point in enumerate(condensed):\n if newTP == point[1]:\n index = i\n #print(f\"{point} found to be TP\")\n break\n assert not index == 0\n \n \n #Binary star list\n tpcut = index + 3\n \n xset = condensed[tpcut:-1,0]\n yset = condensed[tpcut:-1,1]\n #print(cluster.name,yset)\n fit = np.polyfit(xset,yset,1)\n \n #Distance from the main sequence linear fit\n for star in cluster.filtered: \n x0 = star.b_r\n y0 = star.g_mag\n dist = abs( y0 - fit[0]*x0 - fit[1] ) / np.sqrt(fit[0]**2 + 1)\n star.distance_MS = dist\n \n if dist > 0.05 and y0 < fit[0]*x0+fit[1] and x0 > xset[0] and y0 > condensed[index,1]:\n cluster.binaries.append(star)\n star.binary = 1\n else:\n star.binary = 0\n \n \n \n \n #Fit weight parameters\n N = len(condensed)\n beta = -2\n \n index = index - 7\n \n for i,point in enumerate(condensed):\n #point[2] = 5/(1+np.abs(index-i))\n if weighting.lower() == 'pos':\n point[2] = np.exp(beta*((i-index)/N)**2)\n \n \n # if cluster.type == \"globular\":\n # condensed = np.vstack((condensed,condensed_giant))\n \n condensed = condensed[::-1]\n \n\n \n cl = []\n coords = []\n for point in condensed:\n cl.append(condensedPoint(point[0],point[1],point[2]))\n coords.append((point[0],point[1]))\n \n np.savetxt(f\"{cluster.dataPath}condensed.csv\",coords,delimiter=',')\n \n if cluster.reddening == 0:\n cluster.condensed0 = cl\n cluster.condensed = cl\n \n\n# def checkLoaded(cList):\n \n# needsLoading = []\n# loaded = []\n \n# for cl in cList:\n# if not cl in clusters:\n# needsLoading.append(cl)\n# else:\n# loaded.append(cl)\n \n# return loaded,needsLoading()\n \n\n\ndef toDict():\n #Imports\n global clusterList\n global clusters\n global isoList\n global isochrones\n global resultList\n global results\n global clIn\n global isoIn\n global resultsIn\n \n if clIn:\n clName = []\n \n for cluster in clusterList:\n clName.append(cluster.name)\n clusters = dict(zip(clName,clusterList))\n \n if isoIn:\n \n isoName = []\n \n for iso in isoList:\n 
isoName.append(iso.name)\n isochrones = dict(zip(isoName,isoList))\n \n if resultsIn:\n resName=[]\n \n for res in resultList:\n resName.append(res.name)\n results = dict(zip(resName,resultList))\n\n\ndef plot(cList=['all'],modes=['pos','pm','cmd','quiver','iso'],closePlots=False):\n #Imports\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n import numpy as np\n import os\n global clusterList\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}/png\"):\n os.mkdir(f\"{cluster.imgPath}/png\")\n \n #Position plots\n if 'pos' in modes:\n \n unfra=[star.ra for star in cluster.unfilteredWide]\n unfdec=[star.dec for star in cluster.unfilteredWide]\n ra=[star.ra for star in cluster.filtered]\n dec=[star.dec for star in cluster.filtered]\n \n unfnormra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide]\n normra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered]\n \n #Unfiltered position plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unfra[:],unfdec[:],s=0.5,c='dimgray')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered.png\",dpi=500)\n \n #Filtered position plot\n plt.figure(f\"{cluster.name}_ra_dec_filtered\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Filtered\")\n plt.scatter(ra[:],dec[:],s=0.5,c='midnightblue')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered.png\",dpi=500)\n \n #Position overlay\n plt.figure(f\"{cluster.name}_ra_dec_overlay\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unfra[:],unfdec[:],s=0.5,c='lightgray')\n plt.scatter(ra[:],dec[:],s=1,c='midnightblue')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay.png\",dpi=500)\n \n \n #Normalized\n #NormRA = RA*cos(DEC)\n \n #Unfiltered normalized position plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered Normalized\")\n plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='dimgray')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_normalized.png\",dpi=500)\n \n #Filtered normalized position plot\n plt.figure(f\"{cluster.name}_ra_dec_filtered_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Filtered Normalized\")\n plt.scatter(normra[:],dec[:],s=0.5,c='midnightblue')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_filtered_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered_normalized.png\",dpi=500)\n \n #Position overlay normalized\n plt.figure(f\"{cluster.name}_ra_dec_overlay_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Overlay Normalized\")\n plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='lightgray')\n 
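            # Multiplying RA by cos(DEC) puts both axes on the same angular scale: near the
            # celestial pole a degree of RA spans a much smaller great-circle distance than a
            # degree of DEC, so un-normalized positions look stretched (e.g. NGC188).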
plt.scatter(normra[:],dec[:],s=1,c='midnightblue')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_overlay_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay_normalized.png\",dpi=500)\n \n #Proper motion plots\n if 'pm' in modes:\n \n unfpmra=[star.pmra for star in cluster.unfilteredWide]\n unfpmdec=[star.pmdec for star in cluster.unfilteredWide]\n pmra=[star.pmra for star in cluster.filtered]\n pmdec=[star.pmdec for star in cluster.filtered]\n \n unfpara=[star.par for star in cluster.unfilteredWide]\n para=[star.par for star in cluster.filtered]\n \n x0 = cluster.pmra_min\n x1 = cluster.pmra_max\n y0 = cluster.pmdec_min\n y1 = cluster.pmdec_max\n width = x1-x0\n scale = 5\n subscale = 2\n xmin = x0-scale*width\n xmax = x1+scale*width\n ymin = y0-scale*width\n ymax = y1+scale*width\n sxmin = x0-subscale*width\n sxmax = x1+subscale*width\n symin = y0-subscale*width\n symax = y1+subscale*width\n \n \n #Unfiltered proper motion plot\n plt.figure(f\"{cluster.name}_pm_unfiltered\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_unfiltered.png\",dpi=500)\n plt.xlim([sxmin,sxmax])\n plt.ylim([symin,symax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_unfiltered_closeup.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_unfiltered_closeup.png\",dpi=500)\n \n #Filtered proper motion plot\n plt.figure(f\"{cluster.name}_pm_filtered\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Filtered\")\n plt.scatter(pmra[:],pmdec[:],s=0.5,c='midnightblue')\n # plt.xlim([xmin,xmax])\n # plt.ylim([ymin,ymax])\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_filtered.png\",dpi=500)\n \n #Proper motion overlay\n plt.figure(f\"{cluster.name}_pm_overlay\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='lightgray')\n plt.scatter(pmra[:],pmdec[:],s=1,c='midnightblue')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_overlay.png\",dpi=500)\n plt.xlim([sxmin,sxmax])\n plt.ylim([symin,symax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_overlay_closeup.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_overlay_closeup.png\",dpi=500)\n \n #Unfiltered PM/Parallax\n plt.figure(f\"{cluster.name}_pm_over_parallax_unfiltered\")\n plt.xlabel('PMRA / Parallax')\n plt.ylabel('PMDEC / Parallax')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter([a/b for a,b in zip(unfpmra,unfpara)],[a/b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_over_parallax_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_over_parallax_unfiltered.png\",dpi=500)\n \n #Unfiltered PM*Parallax\n 
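            # For reference, PM/parallax (previous figure) scales with tangential velocity,
            # v_t [km/s] ~ 4.74 * mu [mas/yr] / parallax [mas], whereas the PM*parallax product
            # below up-weights nearby, high-proper-motion field stars.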
plt.figure(f\"{cluster.name}_pm_times_parallax_unfiltered\")\n plt.xlabel('PMRA * Parallax')\n plt.ylabel('PMDEC * Parallax')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter([a*b for a,b in zip(unfpmra,unfpara)],[a*b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_times_parallax_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_times_parallax_unfiltered.png\",dpi=500)\n \n \n #CMD plots\n if 'cmd' in modes:\n \n unfgmag=[star.g_mag for star in cluster.unfilteredWide]\n unf_b_r=[star.b_r for star in cluster.unfilteredWide]\n gmag=[star.g_mag for star in cluster.filtered]\n b_r=[star.b_r for star in cluster.filtered]\n \n bright_b_r = [x.b_r for x in cluster.filteredBright]\n bright_gmag = [x.g_mag for x in cluster.filteredBright]\n par_b_r = [x.b_r for x in cluster.distFiltered]\n par_gmag = [x.g_mag for x in cluster.distFiltered]\n \n #Reddening Correction\n plt.figure(f\"{cluster.name}_reddening_CMD\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('G Mag')\n plt.title(f\"{cluster.name} Reddening = {cluster.reddening:.2f}\")\n plt.scatter(b_r[:],gmag[:],s=0.5,c='dimgray',label='Observed')\n plt.arrow(b_r[int(len(b_r)/2)]-cluster.reddening,gmag[int(len(gmag)/2)]-2.1*cluster.reddening,cluster.reddening,2.1*cluster.reddening,color='red')\n plt.scatter([s-cluster.reddening for s in b_r[:]],[s-2.1*cluster.reddening for s in gmag[:]],s=1,c='midnightblue',label='Corrected')\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_reddening_CMD.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_reddening_CMD.png\",dpi=500)\n \n #Unfiltered CMD plot\n plt.figure(f\"{cluster.name}_CMD_unfiltered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_unfiltered.png\",dpi=500)\n \n #Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Parallax & Proper Motion Filtered\")\n plt.scatter(b_r[:],gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_filtered.png\",dpi=500)\n \n #CMD overlay\n plt.figure(f\"{cluster.name}_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')\n plt.scatter(b_r[:],gmag[:],s=1,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_overlay.png\",dpi=500)\n \n #Condensed CMD overlay\n plt.figure(f\"{cluster.name}_condensed_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Condensed Overlay\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c='red',label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - 
cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_condensed_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_condensed_CMD_overlay.png\",dpi=500)\n \n #Weighted CMD overlay\n plt.figure(f\"{cluster.name}_weighted_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Weighted Overlay\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(\"Weight\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_weighted_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_weighted_CMD_overlay.png\",dpi=500)\n \n \n #Initial Condensed CMD overlay\n plt.figure(f\"{cluster.name}_initial_condensed_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Initial Condensed Overlay\")\n plt.scatter(b_r,gmag,s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r for s in cluster.condensedInit],[s.g_mag for s in cluster.condensedInit],s=5,c='red',label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_initial_condensed_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_initial_condensed_CMD_overlay.png\",dpi=500)\n \n #Brightness-PM Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_bright_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Bright-Only Proper Motion Filtered\")\n plt.scatter(bright_b_r[:],bright_gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_bright_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_bright_filtered.png\",dpi=500)\n \n #Parallax Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_parallax_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Parallax Filtered\")\n plt.scatter(par_b_r[:],par_gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_parallax_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_parallax_filtered.png\",dpi=500)\n \n \n if 'quiver' in modes:\n \n unfra=[star.ra for star in cluster.unfilteredWide]\n unfdec=[star.dec for star in cluster.unfilteredWide]\n unfpmra=[star.pmra for star in cluster.unfilteredWide]\n unfpmdec=[star.pmdec for star in cluster.unfilteredWide]\n \n x0 = min([s.ra for s in cluster.filtered])\n x1 = max([s.ra for s in cluster.filtered])\n y0 = min([s.dec for s in cluster.filtered])\n y1 = max([s.dec for s in cluster.filtered])\n width = 
x1-x0\n scale = 0.25\n xmin = x0+scale*width\n xmax = x1-scale*width\n ymin = y0+scale*width\n ymax = y1-scale*width\n \n #Unfiltered position quiver plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered_quiver\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered\")\n ax = plt.gca()\n ax.quiver(unfra[:],unfdec[:],unfpmra[:],unfpmdec[:],color='midnightblue',width=0.003,scale=400,scale_units='width')\n plt.axis(\"square\")\n plt.gcf().set_size_inches(10,10)\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver.png\",dpi=500)\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.png\",dpi=500)\n \n \n #Isochrone plots\n if 'iso' in modes:\n \n gmag=[star.g_mag for star in cluster.filtered]\n b_r=[star.b_r for star in cluster.filtered]\n isochrone = isochrones[cluster.iso[0][0]]\n \n #Isochrone best fit\n plt.figure(f\"{cluster.name}_Iso_best\")\n plt.gca().invert_yaxis()\n plt.xlabel('Dereddened BP-RP')\n plt.ylabel('Corrected Absolute G Mag')\n plt.title(f\"{cluster.name} Isochrone Best Fit\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening-cluster.dist_mod for s in gmag],s=0.5,c='dimgray',label='Cluster')\n \n isoLabels = isochrone.name.split('_')\n isoLabel = r\"$[\\frac{Fe}{H}]$\" + \"=\" + isoLabels[1] + \"\\n\" \\\n + r\"$[\\frac{\\alpha}{Fe}]$\" + \"=\" + isoLabels[3] + \"\\n\" \\\n + r\"$[Y]$\" + \"=\" + isoLabels[7] + \"\\n\" \\\n + \"Age\" + \"=\" + isoLabels[5] + \" Gyr\"\n \n plt.plot(isochrone.br,isochrone.g,c='midnightblue',label=isoLabel)\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening-cluster.dist_mod for s in cluster.condensed],s=5,c='red',label='Cluster Proxy')\n extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n h,l = plt.gca().get_legend_handles_labels()\n h.insert(0,extra)\n l.insert(0,f\"Reddening: {cluster.reddening}\")\n plt.legend(h,l)\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_Iso_BestFit.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_Iso_BestFit.png\",dpi=500)\n \n #Membership plots\n if 'membership' in modes:\n proxyMatch([cl])\n boundedStats([cl],saveCl=False,unloadCl=False)\n membership(cl,mode='filtered')\n membership(cl,mode='bounded',N=50)\n \n #3D Position plots\n if '3D' in modes:\n \n A = [a.ra * np.pi/180 for a in cluster.filtered]\n B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n C = [1/(1000*c.par) for c in cluster.filtered]\n \n x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n z = [c*np.sin(b) for b,c in zip(B,C)]\n \n r = [np.sqrt(a**2+b**2) for a,b in zip(x,y)]\n theta = [np.arctan(b/a) for a,b in zip(x,y)]\n \n plt.figure(f\"{cluster.name}_3D_Position\")\n ax = plt.axes(projection='3d')\n ax.scatter3D(x,y,z)\n ax.scatter(0,0,0,color='red')\n scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)\n \n if closePlots:\n plt.close('all')\n\n\n\n# def Plot3D(cList):\n# #Imports\n# import matplotlib.pyplot as plt\n# import numpy as np\n# global clusterList\n \n# needsLoading=[]\n \n# plt.figure(f\"3D_Position_Ensemble\")\n# ax = 
plt.axes(projection='3d')\n \n \n# for cl in cList:\n# if not cl in clusters:\n# needsLoading.append(cl)\n \n# if not len(needsLoading) == 0:\n# loadClusters(needsLoading)\n \n# for cl in cList:\n# cluster = clusters[cl]\n \n# A = [a.ra * np.pi/180 for a in cluster.filtered]\n# B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n# C = [1/(0.001*c.par) for c in cluster.filtered]\n \n# #Flatten radially\n# C = [np.mean(C)]*len(C)\n \n# x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n# y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n# z = [c*np.sin(b) for b,c in zip(B,C)]\n \n# #Force Cluster to origin\n# # x = [a-np.mean(x) for a in x]\n# # y = [a-np.mean(y) for a in y]\n# # z = [a-np.mean(z) for a in z]\n \n# ax.scatter3D(x,y,z,label=cluster.name)\n \n# scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n# ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)\n# #ax.scatter(0,0,0,color='black')\n# plt.legend()\n\n\ndef yso_lookup():\n #Imports\n from astroquery.simbad import Simbad\n import numpy as np\n import os\n import re\n \n global names\n global sect\n global results\n global ra\n global dec\n \n main = open(\"Excess Examples/YSO_object_list.dat\").read()\n main = main.split(\"\\n\")[:-1]\n \n #Get the names of all of the objects identified\n names = []\n ra = []\n dec = []\n validNames = []\n for row in main:\n sect = re.split('\\s+',row)\n if sect[0] == '':\n sect = sect[1:]\n if sect[2] == 'none':\n continue\n \n name = sect[2]\n \n blacklist = ['A','Ab','AB','ABC','B','AaB']\n for entry in sect[3:]:\n if '.' in entry or entry in blacklist:\n break\n name = name + \" \" + entry\n \n names.append(name)\n \n #Perform a SIMBAD query for the identified objects\n results = []\n for name in names:\n result = Simbad.query_object(name)\n if not type(result) == type(None):\n results.append(result)\n validNames.append(name.replace(' ',''))\n \n ra1 = str(result.columns['RA']).split('\\n')[-1]\n ra1 = re.split('\\s+',ra1)\n \n if '' in ra1:\n ra.append('---')\n else:\n ra.append(str(round(float(ra1[0])*15+float(ra1[1])/4+float(ra1[2])/240,5)))\n \n dec1 = str(result.columns['DEC']).split('\\n')[-1]\n dec1 = re.split('\\s+',dec1)\n if '' in dec1:\n dec.append('---')\n else:\n dec.append(str(round(float(dec1[0])+float(dec1[1])/60+float(dec1[2])/3600,5)))\n \n #Create a text file in the VOSA readable format\n VOSAdata = []\n gaiadata = []\n for i in range(len(validNames)):\n line1 = f\"{validNames[i]} {ra[i]} {dec[i]} --- --- --- --- --- --- ---\"\n line2 = f\"{ra[i]} {dec[i]}\"\n VOSAdata.append(line1)\n if '-' in line2:\n continue\n gaiadata.append(line2)\n np.savetxt(\"Excess Examples/yso_vosa_output.txt\",VOSAdata,fmt=\"%s\")\n np.savetxt(\"Excess Examples/yso_gaia_output.txt\",gaiadata,fmt=\"%s\")\n \n\n\ndef exportVOSA(cl):\n #Imports\n import numpy as np\n \n if not cl in clusters:\n loadClusters([cl])\n \n cluster = clusters[cl]\n \n #objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts\n data = []\n for star in cluster.filtered:\n name = star.name.replace(\" \",\"\")\n line = f\"{name} {star.ra} {star.dec} {1000/star.par} --- --- --- --- --- ---\"\n data.append(line)\n np.savetxt(f\"{cluster.dataPath}{cluster.name}_VOSA.txt\",data,fmt=\"%s\")\n\n\ndef readSED(cList=['all'],printMissing=False):\n #imports\n import numpy as np\n import re\n import os\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n\n cluster = clusters[cl]\n \n objPath = cluster.dataPath + \"vosa_results/objects/\"\n \n names = []\n for star in cluster.filtered:\n 
flat = star.name.replace(\" \",\"\").replace(\"DR2\",\"\").replace(\"EDR3\",\"\").replace(\"DR3\",\"\")\n names.append(flat)\n star.flatName = flat\n cluster.stars = dict(zip(names,cluster.filtered))\n \n idx = 0\n newStars = dict()\n \n #Each star in a cluster has its own folder, and each folder contains several data sets\n for folder in os.listdir(objPath):\n \n fileName = folder.replace(\"DR2\",\"\").replace(\"EDR3\",\"\").replace(\"DR3\",\"\")\n #Weed out VOSA stars not in current filtered members list\n if not fileName in cluster.stars:\n if printMissing:\n print(f\"{fileName} is missing from filtered list, skipping it...\")\n continue\n \n main = open(objPath+folder+\"/sed/\"+folder+\".sed.dat\").read()\n main = main.split(\"\\n\")\n data = main[10:-1]\n \n #Create a list of measurement object pointers to attach to the stars later\n measurements = []\n \n #Convert every line of the data set into a vosaPoint object\n for row in data:\n sect = re.split('\\s+',row)[1:-1]\n measurements.append(vosaPoint(str(sect[0]),float(sect[1]),float(sect[2]),float(sect[3]),float(sect[4]),float(sect[5]),float(sect[6])))\n \n cluster.stars[fileName].vosaPoints = measurements\n #Weed out cluster.stars members who do not have a vosa table\n newStars[fileName] = cluster.stars[fileName]\n \n idx += 1\n \n cluster.stars = newStars\n \n \n \ndef checkBinary(cl):\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n cluster = clusters[cl]\n \n global lman\n \n \n data = [Datum(star.b_r,star.g_mag) for star in cluster.filtered]\n \n # ax = plt.axes(xlim=(cluster.min_b_r-0.25,cluster.max_b_r+0.25), ylim=(cluster.min_g_mag-1,cluster.max_g_mag+1),autoscale_on=False)\n ax = plt.axes(xlim=(0, 2.5), ylim=(8, 20), autoscale_on=False)\n \n ax.invert_yaxis()\n ax.set_title('Lasso points using left mouse button')\n\n lman = LassoManager(ax, data,cluster)\n\n plt.show()\n \n \n\ndef vosaBinaries(cl):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}vosaBinaries/\"):\n os.mkdir(f\"{cluster.imgPath}vosaBinaries/\")\n \n \n for star in cluster.stars.values():\n if not star.binary == 1:\n return\n \n\n\ndef excessIR(cl,plot=True):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}excessIR/\"):\n os.mkdir(f\"{cluster.imgPath}excessIR/\")\n \n \n for star in cluster.stars.values():\n \n excess = False\n \n for vp in star.vosaPoints:\n \n if vp.excess > 0:\n excess = True\n \n if excess:\n \n #print(f\"{star.name} has {len(star.vosaPoints)} VOSA points\")\n \n star.hasExcess = 1\n \n if plot:\n plt.figure(f'{cluster.name} - {star.name}')\n plt.title(f'{cluster.name} : {star.name}')\n \n ax = plt.gca()\n ax.set_yscale('log')\n ax.set_xscale('log')\n plt.ylabel(r'Flux ($ergs^{-1}cm^{-2}\\AA^{-1}$)')\n plt.xlabel(r'Wavelength ($\\AA$)')\n \n plt.scatter([a.wavelength for a in star.vosaPoints],[a.flux for a in star.vosaPoints])\n \n plt.savefig(f\"{cluster.imgPath}excessIR/{star.name}.pdf\")\n plt.savefig(f\"{cluster.imgPath}excessIR/{star.name}.png\",dpi=500)\n\n\n\n\ndef proxyMatch(cList,plot=False):\n #Imports\n import matplotlib.pyplot as plt\n import numpy as np\n \n checkLoaded(cList) \n \n for cl in cList:\n cluster = clusters[cl]\n \n iso = isochrones[cluster.iso[0][0]]\n isoPoints = []\n for pt in iso.starList:\n isoPoints.append(pt)\n # if 
pt.Gaia_G_EDR3+cluster.dist_mod > cluster.turnPoint[1]:\n # isoPoints.append(pt)\n \n for star in cluster.filtered:\n minDist = 0.2\n smallestDist = 10\n vertCutoff = 1\n minPoint = None\n for point in isoPoints:\n dist = abs(point.Gaia_BP_EDR3-point.Gaia_RP_EDR3-star.b_r+cluster.reddening)\n if dist < minDist:\n if abs(point.Gaia_G_EDR3+cluster.dist_mod - star.g_mag + 2.1*cluster.reddening) < vertCutoff:\n minDist = dist\n minPoint = point\n elif dist < smallestDist:\n smallestDist = dist\n try:\n assert minDist < 0.2\n except:\n print(f\"[{cluster.name}] Star too distant from isochrone to make a good proxy: BP-RP: {star.b_r} | G: {star.g_mag} | Dist: {smallestDist}\")\n star.proxyMass = 0\n star.proxyLogTemp = 0\n star.proxyFeH = 0\n star.proxyLogAge = 0\n star.proxy = None\n continue\n \n #print(minDist)\n star.proxyMass = minPoint.star_mass\n star.proxyLogTemp = minPoint.log_Teff\n star.proxyFeH = minPoint.feh\n star.proxyLogAge = minPoint.log10_isochrone_age_yr\n star.proxy = minPoint\n \n cluster.massLoaded = True\n cluster.meanProxyMass = np.mean([a.proxyMass for a in cluster.filtered])\n cluster.totalProxyMass = np.sum([a.proxyMass for a in cluster.filtered])\n \n cluster.min_g_mag = min([a.g_mag for a in cluster.filtered])\n cluster.max_g_mag = max([a.g_mag for a in cluster.filtered])\n cluster.min_b_r = min([a.b_r for a in cluster.filtered])\n cluster.max_b_r = max([a.b_r for a in cluster.filtered])\n # if plot:\n # plt.figure(f\"{cluster.name}_proxy_fit\")\n \n \n\n\n\ndef variableHistogram(cl,var):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n plt.figure()\n plt.title(f\"{cluster.name} Histogram of {var}\")\n plt.xlabel(f\"{var}\")\n plt.ylabel(\"Count\")\n plt.hist([eval(f\"a.{var}\") for a in cluster.filtered],bins='auto')\n\n\ndef varHist2D(cl,var1,var2,color='default',listType='filtered'):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n \n \n #Check allowed entries\n allowedTypes = ['filtered','unfilteredWide','unfilteredBright,filteredBright,binaries']\n if not listType in allowedTypes:\n print(f\"{listType} is not a valid list type, defaulting to filtered\")\n listType = \"filtered\"\n \n \n cluster = clusters[cl]\n \n plt.figure(figsize=(8,8))\n \n #Axis size and spacing\n left, width = 0.1, 0.65\n bottom, height = 0.1, 0.65\n spacing = 0.005\n rect_scatter = [left, bottom, width, height]\n rect_histx = [left, bottom + height + spacing, width, 0.2]\n rect_histy = [left + width + spacing, bottom, 0.2, height]\n \n ax_scatter = plt.axes(rect_scatter)\n ax_scatter.tick_params(direction='in', top=True, right=True)\n ax_histx = plt.axes(rect_histx)\n ax_histx.tick_params(direction='in', labelbottom=False)\n ax_histy = plt.axes(rect_histy)\n ax_histy.tick_params(direction='in', labelleft=False)\n \n x = [eval(f\"a.{var1}\") for a in eval(f\"cluster.{listType}\")]\n y = [eval(f\"a.{var2}\") for a in eval(f\"cluster.{listType}\")]\n \n if color == 'default':\n ax_scatter.scatter(x, y, s=5)\n else:\n colorMap = plt.get_cmap('coolwarm')#.reversed()\n ax_scatter.scatter(x, y, s=5, c=[eval(f\"a.{color}\") for a in eval(f\"cluster.{listType}\")], cmap = colorMap)\n # clb = plt.colorbar(ax_scatter)\n # clb.ax.set_title(f\"{color}\")\n \n ax_histx.hist(x,bins='auto')\n ax_histy.hist(y,bins='auto',orientation='horizontal')\n \n ax_histx.set_title(f\"Histogram of {listType} {cluster.name} in {var1} and {var2}\")\n ax_scatter.set_xlabel(f\"{var1}\")\n 
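    # These helpers build their lists with eval(f"a.{var1}") etc.; getattr(a, var1) would be the
    # safer equivalent for plain attribute names if this is ever exposed to untrusted input.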
ax_scatter.set_ylabel(f\"{var2}\")\n \n\n\n\n\n\ndef Plot3D(cList=['all'],showEarth=True,flatten=True):\n #Imports\n import plotly.express as px\n import plotly.io as pio\n import numpy as np\n global clusterList\n \n pio.renderers.default='browser'\n \n fig = px.scatter_3d()\n \n if showEarth:\n fig.add_scatter3d(x=[0],y=[0],z=[0],marker=dict(color='lightblue'),name=\"Earth\")\n \n cList = checkLoaded(cList)\n \n big = []\n \n for cl in cList:\n cluster = clusters[cl]\n \n A = [a.ra * np.pi/180 for a in cluster.filtered]\n B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n C = [1/(0.001*c.par) for c in cluster.filtered]\n \n #Flatten radially\n if flatten:\n C = [np.mean(C)]*len(C)\n \n x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n z = [c*np.sin(b) for b,c in zip(B,C)]\n \n #Force Cluster to origin\n # x = [a-np.mean(x) for a in x]\n # y = [a-np.mean(y) for a in y]\n # z = [a-np.mean(z) for a in z]\n \n fig.add_scatter3d(x=x,y=y,z=z,name=cl,mode=\"markers\",marker=dict(size=2))\n \n big.append(np.amax(x))\n big.append(np.amax(y))\n big.append(np.amax(z))\n \n\n #fig.layout.scene = dict(aspectmode=\"manual\",aspectratio=dict(x=1,y=1,z=1))\n #fig.update_layout(scene=dict(aspectmode=\"cube\",xaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),yaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),zaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)])))\n fig.update_layout(scene=dict(aspectmode=\"cube\",xaxis=dict(showbackground=False),yaxis=dict(showbackground=False),zaxis=dict(showbackground=False,visible=False)))\n \n fig.show()\n\n\ndef specificPlot(cl,iso,reddening,score):\n #Imports\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[f\"{cl}\"]\n isochrone = isochrones[f\"{iso}\"]\n \n #These are displayed on the plot\n # score = 0\n reddening = float(reddening)\n \n #Directory for saving plot outputs\n if not os.path.isdir(\"SpecificPlots/pdf/\"):\n os.makedirs(\"SpecificPlots/pdf/\")\n if not os.path.isdir(\"SpecificPlots/png/\"):\n os.makedirs(\"SpecificPlots/png/\")\n \n # #Find the score of the associated isochrone\n # for chrone in cluster.iso:\n # if chrone[0] == iso and chrone[2] == reddening:\n # score = chrone[1]\n # break\n \n #Plots the CMD and the isochrone, with all of the points adjusted to reddening, extinction, and distance modulus\n plt.figure()\n plt.gca().invert_yaxis()\n plt.xlabel('B-R')\n plt.ylabel('G Mag')\n plt.title(f\"{cl} {iso}\")\n plt.scatter([s.b_r for s in cluster.filtered],[s.g_mag for s in cluster.filtered],s=0.05,c='dimgray',label='Cluster')\n plt.plot([x + reddening for x in isochrone.br],[x+cluster.dist_mod+2.1*reddening for x in isochrone.g],c='midnightblue',label=f\"Score: {float(score):.7f}\")\n plt.scatter([s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Cluster Proxy')\n \n #Colors the points by their fitting weight\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(\"Weight\")\n \n #Label for the reddening\n extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n h,l = plt.gca().get_legend_handles_labels()\n h.insert(0,extra)\n l.insert(0,f\"Reddening: {reddening}\")\n plt.legend(h,l)\n \n #Save figure output to disk\n plt.savefig(f\"SpecificPlots/pdf/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.pdf\")\n 
plt.savefig(f\"SpecificPlots/png/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.png\",dpi=500)\n\n\ndef plotRange(cl,a,b):\n global clusters\n \n checkLoaded([cl])\n \n #Plots the top fitting isochrones over the range a to b for a given cluster\n #Does this by calling the specificPlot() method for each isochrone over the range\n for isochrone in clusters[f\"{cl}\"].iso[a:b]:\n specificPlot(cl,isochrones[isochrone[0]].name,isochrone[2],isochrone[1])\n\ndef getIsoScore(cl,iso,red,output=True):\n #Return the score for a given cluster's isochrone fit\n for i in cl.iso:\n if i[0] == iso.name and float(i[2]) == red:\n return i[1]\n if output:\n print(f\"No score found for {cl.name} | {iso.name} | {red}\")\n return 0\n\n\ndef onkey(x,y,cx,cy,fig,ax,cluster,iso,reddening):\n global curIso\n global curReddening\n curIso = iso\n curReddening = reddening\n \n def func(event):\n import matplotlib.patches as patches\n global curIso\n global curReddening\n global isochrones\n \n key = str(event.key)\n #print(key)\n \n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]\n \n age_index = ageSorted.index(curIso)\n feh_index = fehSorted.index(curIso)\n \n #Move up or down in the desired variable space, with wrap-around at the ends of the lists\n if key == \"w\":\n #Increase metallicity\n try:\n curIso = fehSorted[feh_index+1]\n feh_index = feh_index+1\n except:\n curIso = fehSorted[0]\n feh_index = 0\n if key == \"s\":\n #Decrease metallicity\n curIso = fehSorted[feh_index-1]\n feh_index = feh_index-1\n if feh_index < 0:\n feh_index = len(fehSorted)+feh_index\n if key == \"a\":\n #Increase age\n curIso = ageSorted[age_index-1]\n age_index = age_index-1\n if age_index < 0:\n age_index = len(ageSorted)+age_index\n if key == \"d\":\n #Decrease age\n try:\n curIso = ageSorted[age_index+1]\n age_index = age_index+1\n except:\n curIso = ageSorted[0]\n age_index = 0\n if key == \"q\":\n #Decrease metallicity\n curReddening = round(curReddening-0.01,2)\n if key == \"e\":\n #Increase metalicity\n curReddening = round(curReddening+0.01,2)\n if key == \"r\":\n #Reset to originally requested isochrone\n curIso = iso\n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]\n age_index = ageSorted.index(curIso)\n feh_index = fehSorted.index(curIso)\n if key == \" \":\n #Print currently highlighted isochrone to console\n score = getIsoScore(cluster,curIso,curReddening)\n fig.savefig(f\"Jamboree Images/frames/{curIso.name}.png\",dpi=500)\n print(f\"{curIso.name} | {curReddening} | {score}\")\n \n score = getIsoScore(cluster,curIso,curReddening,output=False)\n \n #Replots everything with the updated isochrone\n ax.clear()\n ax.scatter(x,y,s=0.25,color='dimgray')\n ax.scatter(cx,cy,s=4,color='red')\n ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+curReddening for a in curIso.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*curReddening for a in curIso.starList],color='darkblue')\n ax.set_title(f\"{curIso.name}\\n {curReddening}\\n {score}\")\n ax.set_xlabel(\"Apparent BP-RP\")\n ax.set_ylabel(\"Apparent G Mag\")\n ax.invert_yaxis()\n \n \n #Progress bar indicators for the interactive plot\n \n #Sets the dimensons of the boxes\n x0,x1 = ax.get_xlim()\n y0,y1 = ax.get_ylim()\n margin = 0.01\n width = 0.05 * (x1-x0)\n height = 0.6 * (y1-y0)\n xmargin = margin * (x1-x0)\n ymargin = 
margin * (y1-y0)\n \n \n #The two main progress bars\n rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n \n ax.add_patch(rect1)\n ax.add_patch(rect2)\n #ax.add_patch(rect3)\n \n #The segments filling up the progress bars\n n = len(ageSorted)\n #Adds cells bottom to top\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == age_index:\n color = 'red'\n else:\n color = 'black'\n #Age progress bar\n ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n n = len(fehSorted)\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == feh_index:\n color = 'red'\n else:\n color = 'black'\n #Metallicity progress bar\n ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n \n fig.canvas.draw_idle()\n \n \n return func\n\ndef interactivePlot(cl,iso=0,reddening=\"auto\"):\n #Imports\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n global clusters\n global isochrones\n global kid\n \n checkLoaded([cl])\n \n cluster = clusters[f\"{cl}\"]\n \n #Select the starting isochrone based on user input\n if type(iso) == str:\n isochrone = isochrones[f\"{iso}\"]\n elif type(iso) == int:\n assert iso >= 0\n isochrone = isochrones[cluster.iso[iso][0]]\n else:\n print(\"Invalid declaration of 'iso'\")\n return\n name = isochrone.name\n \n #Get the reddening if not manually defined\n if reddening == \"auto\":\n reddening = cluster.reddening\n assert type(reddening) == float or type(reddening) == int\n \n score = getIsoScore(cluster,isochrone,reddening)\n \n # #Sorted and secondary-sorted isochrone lists\n # ageSorted = sorted(isoList,key=lambda x: (x.age,x.feh))\n # fehSorted = sorted(isoList,key=lambda x: (x.feh,x.age))\n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == isochrone.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == isochrone.age]\n age_index = ageSorted.index(isochrone)\n feh_index = fehSorted.index(isochrone)\n \n \n #Coordinate lists to plot in addition to the isochrones\n x,y = cluster.mag[:,0],cluster.mag[:,1]\n cx,cy = [s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed]\n \n \n #Systematically remove some of the conflicting default keymaps in Pyplot\n letters = ['w','s','a','d','q','e','r']\n for letter in letters:\n #Finds all keymap references in the rcParams\n for param in [key for key in plt.rcParams if key.startswith(\"keymap\") ]:\n try:\n plt.rcParams[param].remove(letter)\n except:\n continue\n \n \n #Initialize the plot that will be updated every time\n fig = plt.figure(f\"Interactive plot of {cl}\")\n ax = fig.add_subplot(111)\n ax.scatter(x,y,s=0.25,color='dimgray')\n ax.scatter(cx,cy,s=4,color='red')\n ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+reddening for a in isochrone.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*reddening for a in isochrone.starList],color='darkblue')\n ax.set_title(f\"{name}\\n {reddening}\\n {score}\")\n ax.set_xlabel(\"Apparent BP-RP\")\n ax.set_ylabel(\"Apparent G Mag\")\n ax.invert_yaxis()\n \n x0,x1 = 
ax.get_xlim()\n y0,y1 = ax.get_ylim()\n margin = 0.01\n width = 0.05 * (x1-x0)\n height = 0.6 * (y1-y0)\n xmargin = margin * (x1-x0)\n ymargin = margin * (y1-y0)\n \n \n rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n \n ax.add_patch(rect1)\n ax.add_patch(rect2)\n #ax.add_patch(rect3)\n \n n = len(ageSorted)\n #Adds cells bottom to top\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == age_index:\n color = 'red'\n else:\n color = 'black'\n ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n n = len(fehSorted)\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == feh_index:\n color = 'red'\n else:\n color = 'black'\n ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n \n #Launch the key_press listener\n hook = onkey(x,y,cx,cy,fig,ax,cluster,isochrone,reddening)\n kid = fig.canvas.mpl_connect('key_press_event',hook)\n\n\ndef printList(cList,varList):\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n cluster = clusters[cl]\n for a in varList:\n clStr = f\"[{cl}] {a} =\"\n exec(f\"print(clStr,cluster.{a})\")\n\ndef statRange(cl,a,b):\n import numpy as np\n global clusters\n \n checkLoaded([cl])\n if not isoIn:\n loadIsochrones()\n \n ages = []\n fehs = []\n ys = []\n reds = []\n \n #Computes the mean age, metallicity, and reddening for the top fitting isochrones over the range a to b for a given cluster\n #For example, a=0, b=10 will average the top 10 isochrone fits\n for isochrone in clusters[cl].iso[a:b]:\n iso = isochrones[isochrone[0]]\n print(f\"{iso.name} Reddening:{isochrone[2]}\")\n ages.append(float(iso.age))\n fehs.append(float(iso.feh))\n ys.append(float(iso.y))\n reds.append(float(isochrone[2]))\n \n \n print(f\"[{cl}] Mean age= {np.mean(ages)} Mean feh= {np.mean(fehs)} Mean y= {np.mean(ys)} Mean Reddening= {np.mean(reds)}\")\n \n\n \ndef setFlag():\n #Imports\n global clusterlist\n \n #Goes back and sets membership flags for all of the clusters loaded in memory to ensure that this tag can be used later\n #This takes place automatically after running turboFilter()\n #Example use case for this variable is in the customPlot() method\n for cluster in clusterList:\n for star in cluster.filtered:\n for unfStar in cluster.unfilteredWide:\n if star == unfStar:\n unfStar.member = 1\n \ndef customPlot(var1,var2,clname,mode='filtered',iso=False,square=True,color='default',title='default',close=False,save=True):\n #Imports\n import matplotlib.pyplot as plt\n global closePlots\n \n #Load the cluster if it isn't yet\n checkLoaded([clname])\n cluster = clusters[f\"{clname}\"]\n \n \n #Set the list of stars to be used for the given cluster\n #Using a mode not specified will return a referenced before assignment error\n if mode == 'filtered':\n starlist = cluster.filtered\n elif mode == 'unfiltered':\n starlist = cluster.unfilteredWide\n elif mode == 'bright_filtered':\n starlist = cluster.filteredBright\n elif mode == 'dist_filtered':\n starlist = cluster.distFiltered\n elif mode == 'bright_unfiltered':\n starlist = 
cluster.unfilteredBright\n elif mode == 'duo':\n starlist = cluster.unfilteredWide \n starlistF = cluster.filtered\n elif mode == 'binary':\n starlist = cluster.binaries\n elif mode == 'duoBinary':\n starlist = cluster.filtered\n starlistF = cluster.binaries\n elif mode == 'duoBright':\n starlist = cluster.unfilteredBright\n starlistF = cluster.filteredBright\n elif mode == 'duoDist':\n starlist = cluster.distFiltered\n starlistF = cluster.filtered\n elif mode == 'condensed':\n starlist = cluster.condensed\n elif mode == 'duoCondensed':\n starlist = cluster.filtered\n starlistF = cluster.condensed\n elif mode == 'bounded':\n starlist = cluster.bounded\n elif mode == 'duoBounded':\n starlist = cluster.filtered\n starlistF = cluster.bounded\n else:\n print(\"No preset star list configuration found with that alias\")\n return\n \n #Basic plot features with axis labels and a title\n plt.figure()\n if title == 'default':\n plt.title(f\"{clname} {mode} | {var1} vs {var2} | {color} color\")\n else:\n plt.title(f\"{title}\")\n plt.xlabel(f\"{var1}\".upper())\n plt.ylabel(f\"{var2}\".upper())\n \n #Plots differently depending on the mode\n #The color tag can be used to add distinction of a third variable while limited to two axes\n #If unspecified, filtered starlist with midnight blue coloring will be the result\n if iso:\n plt.gca().invert_yaxis()\n if 'duo' in mode:\n #plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=[0.1+a.member*1.4 for a in starlist],c=[list(('lightgray',eval('z.par')))[z.member] for z in starlist])\n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=2,c='gray')\n if color == 'default': \n plt.scatter([eval(f\"x.{var1}\") for x in starlistF],[eval(f\"y.{var2}\") for y in starlistF],s=2.5,c='red')\n else:\n plt.scatter([eval(f\"x.{var1}\") for x in starlistF],[eval(f\"y.{var2}\") for y in starlistF],s=2.5,c=[eval(f\"z.{color}\") for z in starlistF])\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(f\"{color}\")\n else:\n if color == 'default': \n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=1,c='midnightblue')\n else:\n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=2,c=[eval(f\"z.{color}\") for z in starlist])\n plt.set_cmap('cool')\n clb = plt.colorbar()\n clb.ax.set_title(f\"{color}\")\n \n #By default, squares the axes to avoid misinformation from stretched axes\n #Turn this off and iso to true for a color magnitude diagram\n if square:\n plt.axis(\"square\")\n \n if save:\n plt.savefig(f\"SpecificPlots/pdf/{clname}_{mode}_{var1}_{var2}.pdf\")\n plt.savefig(f\"SpecificPlots/png/{clname}_{mode}_{var1}_{var2}.png\",dpi=500)\n \n if close or closePlots:\n plt.close()\n if save:\n print(f\"Custom Plot {clname}_{mode}_{var1}_{var2} saved and closed\")\n else:\n print(f\"Custom Plot {clname}_{mode}_{var1}_{var2} closed\")\n\ndef splitMS(clname='M67',slope=3,offset=12.2):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([clname])\n cluster = clusters[clname]\n \n xlist = [s.b_r for s in cluster.filtered]\n ylist = [s.g_mag for s in cluster.filtered]\n \n x = np.linspace(1,2,100)\n \n #Create a diagram showing the lower edge and upper edge of the main sequence, which in theory are separated by 0.75mag\n plt.figure()\n plt.title('Main and Binary Sequences')\n plt.xlabel('B-R')\n plt.ylabel('Apparent G Mag')\n 
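    # An unresolved equal-mass binary is twice as luminous as a single star of the same colour,
    # so it sits 2.5*log10(2) ~ 0.75 mag above the single-star main sequence (i.e. at smaller G);
    # hence the dashed line below is the main-sequence fit shifted by 0.75 mag.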
plt.scatter(xlist,ylist,s=0.5,label='Filtered Star Data')\n plt.plot(x,[slope*a + offset for a in x],color='r',label='Main Sequence')\n plt.plot(x,[slope*a + offset - 0.75 for a in x],'--',color='r',label='MS shifted 0.75 mag')\n plt.xlim(0.6,2.2)\n plt.ylim(13,19)\n plt.legend()\n plt.gca().invert_yaxis()\n plt.savefig(f\"SpecificPlots/png/{clname}_MS_Spread.png\",dpi=500)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_MS_Spread.pdf\")\n\n\ndef kingProfile(r,K,R):\n \n return K*(1+r**2/R**2)**(-1)\n\ndef kingError(r,K,R,dK,dR):\n import numpy as np\n \n dfdK = (1+r**2/R**2)**(-1)\n dfdR = 2*K*r**2*R*(r**2+R**2)**(-2)\n return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)\n\ndef densityProfile(r,K,R):\n import numpy as np\n \n #The exponential that is fit for the membership profile\n #R is a characteristic radius, typically negative but the absolute value is used for comparison\n #K is a scalar constant\n return K*np.exp(-1*r/R)\n\ndef densityError(r,K,R,dK,dR):\n import numpy as np\n \n dfdK = abs(np.exp(-1*r/R))\n dfdR = abs(K*r/(R**2)*np.exp(-1*r/R))\n return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)\n \n\ndef toIntensity(mag):\n msun = -26.74 #apparent magnitude\n Isun = 1360 #w/m^)\n \n return Isun*10**( 0.4*(msun-mag) )\n\n\ndef membership(clname='M67',N=100,mode='filtered',numPercentileBins=5,percentile=0.2,delta=5,normalize=True):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.patches import Circle\n import scipy.optimize as so\n import scipy.stats as st\n import math\n \n global volume\n \n checkLoaded([clname])\n cluster = clusters[clname]\n \n mode = mode.lower()\n \n #Default mode is filtered, but unfiltered data can be processed\n if \"filtered\" in mode:\n starList = cluster.filtered\n elif \"bounded\" in mode:\n starList = cluster.bounded\n else:\n starList = cluster.unfilteredWide\n \n #Load mass estimates from isochrone fitting\n if not cluster.massLoaded:\n proxyMatch([cluster.name])\n assert cluster.massLoaded\n assert len(starList) > 0\n \n #Assign x and y lists based on normalization or not\n if normalize:\n starX = [a.ra*np.cos(a.dec*np.pi/180) for a in starList]\n starY = [a.dec for a in starList]\n mode = mode + \"_normalized\"\n else:\n starX = [a.ra for a in starList]\n starY = [a.dec for a in starList]\n \n #Determine bounds of the field of view (post-filtering)\n xmax = max(starX)\n ymax = max(starY)\n x0 = np.mean(starX)\n y0 = np.mean(starY)\n newN = N\n \n #Determine radius of the field of view\n rx = xmax-x0\n ry = ymax-y0\n #r = np.mean([rx,ry])\n radiusFOV = ry\n #Using the mean ra and dec radius caused problems with clusters\n #like NGC188, which are close to the celestial pole and have\n #a very stretched mapping to the RA DEC space\n \n ringBins = list(np.linspace(0,radiusFOV,N))\n \n #The bins are divided up such that 50% of the bins are located in the inner 25% of the cluster radius\n #The remaining 50% of the bins are divided from 25% to 100% of the radius\n rings = list(np.linspace(0,radiusFOV/4,math.ceil(N/2)))\n ring2 = list(np.linspace(radiusFOV/4,radiusFOV,math.floor(N/2)+1))\n ring2 = ring2[1:-1]\n rings.extend(ring2)\n \n x=rings[:-1]\n # for i in range(0,len(rings[:-1])):\n # x.append((rings[i+1]+rings[i])/2)\n counts = list(np.zeros(N-1,dtype=int))\n masses = list(np.zeros(N-1,dtype=int))\n \n rads=[]\n for star in starList:\n #Radial distance from the mean RA and Dec of the cluster\n if normalize:\n rads.append(np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2))\n else:\n 
rads.append(np.sqrt((star.ra-x0)**2+(star.dec-y0)**2))\n #Find the nearest ring to the star\n r = find_nearest(rings, rads[-1])\n i = rings.index(r)\n #Check bounds\n if i < len(counts):\n #If outside last ring, add to that count\n if r > rads[-1]:\n counts[i-1] += 1\n masses [i-1] += star.proxyMass\n else:\n counts[i] += 1\n masses [i] += star.proxyMass\n #Worth noting here that the way that this is set up, the rings don't actually mark the bounds of the bins but rather the midpoints.\n #There is no check to see if you are exterior or interior to the nearest ring, but rather what ring you are nearest to,\n #so the rings mark the midpoints of their bins not the boundaries\n \n \n #Histogram of the counts in each radial bin\n plt.figure(f\"{clname}_membership_{mode}\")\n plt.hist(rads,bins=ringBins)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} Membership\")\n plt.savefig(f\"{cluster.imgPath}{clname}_membership_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_membership_{mode}.png\",dpi=500)\n\n #Calculates the volume of each region bounded by two concentric rings and the number density of the stars counted in those regions\n volume = []\n for i in range(0,len(rings[:-1])):\n volume.append(np.pi*(rings[i+1]**2-rings[i]**2))\n numDensity = [a/b for a,b in zip(counts,volume)]\n massDensity = [a/b for a,b in zip(masses,volume)]\n error_num = [np.sqrt(a)/b for a,b in zip(counts,volume)]\n error_mass = [np.sqrt(a)/b for a,b in zip(masses,volume)]\n \n for i in range(0,len(error_num)):\n if error_num[i] < 0.1:\n error_num[i] = 0.1\n\n #Cut out the inner 5% because overbinning in the center of a circle doesn't help\n x = x[math.ceil(N/20):-1]\n counts = counts[math.ceil(N/20):-1]\n numDensity = numDensity[math.ceil(N/20):-1]\n massDensity = massDensity[math.ceil(N/20):-1]\n error_num = error_num[math.ceil(N/20):-1]\n error_mass = error_mass[math.ceil(N/20):-1]\n\n #Further filter the data based on outliers, either extremely low density or extremely big jumps in density from bin to bin\n i = 0\n numSmall = 0\n numGrad = 0\n while i < len(x)-1:\n if numDensity[i] < 0.5 or numDensity[i] < numDensity[i+1]/delta or massDensity[i] < 0.1:\n x.pop(i)\n counts.pop(i)\n numDensity.pop(i)\n massDensity.pop(i)\n error_num.pop(i)\n error_mass.pop(i)\n numSmall += 1\n newN -= 1\n elif abs(numDensity[i]) > abs(numDensity[i+1])*delta:# or abs(numDensity[i]) < abs(numDensity[i-1])/3:\n x.pop(i)\n counts.pop(i)\n numDensity.pop(i)\n massDensity.pop(i)\n error_num.pop(i)\n error_mass.pop(i)\n numGrad += 1\n newN -= 1\n else:\n i += 1\n if numDensity[-1] < 0.01 or massDensity[-1] < 0.01:\n x.pop(-1)\n counts.pop(-1)\n numDensity.pop(-1)\n massDensity.pop(-1)\n error_num.pop(-1)\n error_mass.pop(-1)\n numSmall += 1\n newN -= 1\n \n \n print(f\"[{cluster.name}] Removed {numSmall} points with too small of a density and {numGrad} points with too extreme of a delta\")\n\n\n\n #========= Number Density =========\n \n #Number density vs radial bin plot\n plt.figure(f\"{clname}_density_{mode}\")\n plt.errorbar(x,numDensity,yerr=error_num,ls='None')\n plt.scatter(x,numDensity)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(r\"Surface Number Density ($deg^{-2}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Number Density\".replace(\"_normalized\",' Normalized'))\n \n #Fit an exponential curve to the density plot based on the densityProfile function defined above\n \n if \"NGC2355\" in cluster.name:\n p0=[5000,0.1]\n else:\n p0=[5000,0.1]\n \n #print([b/a for a,b in 
zip(numDensity,error_num)])\n \n fit,var = so.curve_fit(kingProfile,x,numDensity,p0,maxfev=1000)\n \n #Std. Dev. from variance\n err = np.sqrt(var[1][1])\n err_coeff = np.sqrt(var[0][0])\n \n scale = np.abs(fit[1]*3600/206265)/(cluster.mean_par/1000)\n #scaleVar = (3600/206265)*(err/(cluster.mean_par/1000) ) + 2*fit[1]/(cluster.mean_par_err/1000)\n scaleVar = np.abs(scale*np.sqrt((var[1][1]/fit[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))\n \n #Scale radius from count in parsecs\n setattr(cluster,f\"scaleRad_{mode}\",scale)\n setattr(cluster,f\"scaleRad_err_{mode}\",scaleVar)\n #Scale radius from count in degrees\n setattr(cluster,f\"scaleAngle_{mode}\",abs(fit[1]))\n setattr(cluster,f\"scaleAngle_err_{mode}\",err)\n setattr(cluster,f\"numDensity_coeff_{mode}\",fit[0])\n setattr(cluster,f\"numDensity_coeff_err_{mode}\",err_coeff)\n\n \n #Plot the curve fit \n numLabel = ( f\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\" \n + fr\"K={fit[0]:.3f} $\\pm$ {err_coeff:.3f}\" + \"\\n\" \n + fr\"$\\rho$={np.abs(fit[1]):.3f}$\\degree$ $\\pm$ {err:.3f}$\\degree$\"+ \"\\n\" \n + fr\"R={scale:.3f}pc $\\pm$ {scaleVar:.3f}pc\" )\n \n plt.plot(x,[kingProfile(a,*fit) for a in x],color='red',label=numLabel)\n plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],label=r'$1\\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_{mode}.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_log_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_log_{mode}.png\",dpi=500)\n \n \n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_density_filtered\")\n \n plt.title(f\"{clname} Overlaid Number Density\")\n plt.errorbar(x,numDensity,yerr=error_num,ls='None',color='midnightblue')\n plt.scatter(x,numDensity,color='midnightblue')\n plt.plot(x,[kingProfile(a,*fit) for a in x],color='darkred',label=numLabel)\n plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.yscale('linear')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_overlay.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_log_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_log_overlay.png\",dpi=500)\n \n #========= Mass Density =========\n \n #Mass density vs radial bin plot\n plt.figure(f\"{clname}_mass_density_{mode}\")\n plt.errorbar(x,massDensity,yerr=error_mass,ls='None')\n plt.scatter(x,massDensity)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(r\"Surface Mass Density ($M_{\\odot}*deg^{-2}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Mass Density\".replace(\"_normalized\",' Normalized'))\n \n #Fit an exponential curve to the density plot based on the densityProfile function defined above\n fit_mass,var_mass = so.curve_fit(kingProfile,x,massDensity,p0,maxfev=1000)\n \n #Std. Dev. 
from variance\n err_mass = np.sqrt(var[1][1])\n err_mass_coeff = np.sqrt(var[0][0])\n \n scale_mass = np.abs(fit_mass[1]*3600/206265)/(cluster.mean_par/1000)\n #scaleVar_mass = (3600/206265)*(err_mass/(cluster.mean_par/1000) ) + 2*fit_mass[1]/(cluster.mean_par_err/1000)\n scaleVar_mass = np.abs(scale_mass*np.sqrt((var_mass[1][1]/fit_mass[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))\n \n #Scale radius from mass in parsecs\n setattr(cluster,f\"scaleRad_mass_{mode}\",scale_mass)\n setattr(cluster,f\"scaleRad_mass_err_{mode}\",scaleVar_mass)\n #Scale radius from mass in degrees\n setattr(cluster,f\"scaleAngle_mass_{mode}\",abs(fit_mass[1]))\n setattr(cluster,f\"scaleAngle_mass_err_{mode}\",err_mass)\n setattr(cluster,f\"massDensity_coeff_{mode}\",fit_mass[0])\n setattr(cluster,f\"massDensity_coeff_err_{mode}\",err_mass_coeff)\n \n #Plot the curve fit\n massLabel = ( f\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\" \n + fr\"K={fit_mass[0]:.3f} $\\pm$ {err_mass_coeff:.3f}\" + \"\\n\" \n + fr\"$\\rho$={np.abs(fit_mass[1]):.3f}$\\degree$ $\\pm$ {err_mass:.3f}$\\degree$\"+ \"\\n\" \n + fr\"R={scale_mass:.3f}pc $\\pm$ {scaleVar_mass:.3f}pc\" )\n \n plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='red',label=massLabel)\n plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],label=r'$1\\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_{mode}.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_log_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_log_{mode}.png\",dpi=500)\n \n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mass_density_filtered\")\n \n plt.title(f\"{clname} Overlaid Mass Density\")\n plt.errorbar(x,massDensity,yerr=error_mass,ls='None',color='midnightblue')\n plt.scatter(x,massDensity,color='midnightblue')\n plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='darkred',label=massLabel)\n plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.yscale('linear')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_overlay.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_log_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_log_overlay.png\",dpi=500)\n \n \n #========= Average Mass =========\n \n averageMass = [a/b for a,b in zip(massDensity,numDensity)]\n \n xDist = [np.abs(a*3600/206265)/(cluster.mean_par/1000) for a in x]\n \n #Average Mass plot\n plt.figure(f\"{clname}_average_mass_{mode}\")\n plt.scatter(xDist,averageMass,label=fr\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\"+f\"{numPercentileBins} Percentile Bins\")\n plt.xlabel(\"Distance from Center (pc)\")\n plt.ylabel(r\"Average Stellar Mass ($M_{\\odot}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Average 
Mass\".replace(\"_normalized\",' Normalized'))\n \n \n #Split average mass data into numPercentileBins number of bins\n if \"filtered\" in mode:\n cluster.pMin = xDist[0]\n cluster.pMax = xDist[-1]\n \n pBins = np.linspace(cluster.pMin,cluster.pMax,numPercentileBins+1)\n xBins = []\n for i in range(len(pBins)-1):\n xBins.append((pBins[i]+pBins[i+1])/2)\n pBins = np.delete(pBins,0)\n pBins = np.delete(pBins,-1)\n for b in pBins:\n plt.axvline(x=b,color='black',linestyle='--')\n \n binned = []\n for n in range(numPercentileBins):\n binned.append([])\n \n #Assign the average mass data points to the bins\n for i in range(len(xDist)):\n #Finds the nearest xBin to each x value and sorts the corresponding averageMass into that bin\n val = find_nearest(xBins,xDist[i])\n idx = xBins.index(val)\n binned[idx].append(averageMass[i])\n \n #Creates arrays that are numPercentileBins long that store the standard and quantile means of the points in those bins\n quantileMean = []\n binMean = []\n meanBins = []\n for b in binned:\n if len(b) == 0:\n continue\n binSorted = sorted(b)\n #Finds the index of the lower percentile marker (ex. 20%)\n lower = binSorted.index(find_nearest(binSorted, np.quantile(b,percentile)))\n #Finds the index of the upper percentile marker (ex. 80%)\n upper = binSorted.index(find_nearest(binSorted, np.quantile(b,1-percentile)))\n #Means between lower and upper percentile markers\n quantileMean.append(np.mean(binSorted[lower:upper+1]))\n #Standard Mean\n binMean.append(np.mean(b))\n #Bins\n meanBins.append(xBins[binned.index(b)])\n \n try:\n fit, var = so.curve_fit(kingProfile,xDist,[kingProfile(a,*fit_mass)/kingProfile(a,*fit) for a in x])\n residual_coeff, residual_scaleAngle = fit[0],fit[1]\n except:\n print(f\"Unable to fit the residuals for {cluster.name}\")\n residual_coeff, residual_scaleAngle = -99, -99\n \n massFit = st.linregress(meanBins,quantileMean)\n fitslope, intercept, rval, pval, fitslope_err, intercept_err = massFit.slope, massFit.intercept, massFit.rvalue, massFit.pvalue, massFit.stderr, massFit.intercept_stderr\n residual_scaleRad = np.abs(residual_scaleAngle*3600/206265)/(cluster.mean_par/1000)\n \n setattr(cluster,f\"residual_coeff_{mode}\",residual_coeff)\n setattr(cluster,f\"residual_scaleAngle_{mode}\",residual_scaleAngle)\n setattr(cluster,f\"residual_scaleRad_{mode}\",residual_scaleRad)\n \n setattr(cluster,f\"mass_slope_{mode}\",fitslope)\n setattr(cluster,f\"mass_slope_err_{mode}\",fitslope_err)\n setattr(cluster,f\"mass_intercept_{mode}\",intercept)\n setattr(cluster,f\"mass_intercept_err_{mode}\",intercept_err)\n setattr(cluster,f\"mass_fit_r2_{mode}\",rval**2)\n setattr(cluster,f\"mass_fit_p_{mode}\",pval)\n \n fitLabel = ( fr\"Slope = {fitslope:.3f} $\\pm$ {fitslope_err:.3f}\" + \"\\n\" \n + fr\"Intercept = {intercept:.3f} $\\pm$ {intercept_err:.3f}\" + \"\\n\" \n + fr\"$r^2$ = {rval**2:.3f} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized'))\n \n #Plot the quantile and standard means on the existing average mass plot\n plt.scatter(meanBins,quantileMean,color='red',label=f'Interquartile Mean ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='red',label=fitLabel)\n #plt.scatter(meanBins,binMean,color='dimgray',label=f'{mode.capitalize()} Standard Mean')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_averageMass_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_averageMass_{mode}.png\",dpi=500)\n \n \n #Double plot for 
bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_average_mass_filtered\")\n \n plt.title(f\"{clname} Overlaid Average Mass\")\n plt.scatter(xDist,averageMass,color='midnightblue',label=fr\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\"+f\"{numPercentileBins} Percentile Bins\")\n plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='darkred',label=fitLabel)\n plt.scatter(meanBins,quantileMean,color='darkred',label=f'Interquartile Mean ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n #plt.scatter(meanBins,binMean,color='black',label=f'{mode.capitalize()} Standard Mean')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_averageMass_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_averageMass_overlay.png\",dpi=500)\n \n #========= Radius Plot =========\n plt.figure(f\"{clname}_characteristic_radius_{mode}\")\n if normalize:\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (Deg)\")\n else:\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (Deg)\")\n pltRad = abs(getattr(cluster,f\"scaleAngle_{mode}\"))\n outline1 = Circle([x0,y0],1*pltRad,color='red',fill=False,ls='--',label=fr\"$\\rho$={1*pltRad:0.3f}$\\degree$\",alpha=0.7)\n outline2 = Circle([x0,y0],5*pltRad,color='red',fill=False,ls='--',label=fr\"5$\\rho$={5*pltRad:0.3f}$\\degree$\",alpha=0.7)\n #outline3 = Circle([x0,y0],10*abs(getattr(cluster,f\"scaleAngle_{mode}\")),color='red',fill=False,ls='--',label=fr\"10$\\rho$={3*abs(fit[1]):0.3f}$\\degree$\",alpha=0.7)\n plt.gca().add_patch(outline1)\n plt.gca().add_patch(outline2)\n #plt.gca().add_patch(outline3)\n plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n \n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} {mode.capitalize()} Characteristic Radius\".replace(\"_normalized\",' Normalized'))\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"{cluster.imgPath}{clname}_radialMembership_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_radialMembership_{mode}.png\",dpi=500)\n \n if \"M67\" in clname and \"filtered\" in mode:\n plt.figure(f\"{clname}_rings_{mode}\")\n if normalize:\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (Deg)\")\n \n else:\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (Deg)\")\n \n \n for i in range(0,len(rings)):\n outline = Circle([x0,y0],rings[i],color='red',fill=False)\n plt.gca().add_patch(outline)\n \n 
plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n \n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} Radial Bins\")\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_radialBins_{mode}.pdf\".replace(\"_filtered\",''))\n plt.savefig(f\"SpecificPlots/png/{clname}_radialBins_{mode}.png\".replace(\"_filtered\",''),dpi=500)\n plt.xlim(x0-0.15,x0+0.15)\n plt.ylim(y0-0.15,y0+0.15)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_radialBins_center_{mode}.pdf\".replace(\"_filtered\",''))\n plt.savefig(f\"SpecificPlots/png/{clname}_radialBins_center_{mode}.png\".replace(\"_filtered\",''),dpi=500)\n \n \n #========= Stars by Mass =========\n massList = []\n innerMassList = []\n for star in starList:\n massList.append(star.proxyMass)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMassList.append(star.proxyMass)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMassList.append(star.proxyMass)\n \n mBins = np.arange(min(massList),max(massList)+0.1,0.1)\n inBins = np.arange(min(innerMassList),max(innerMassList)+0.1,0.1)\n plt.figure(f\"{clname}_mass_frequency_{mode}\")\n plt.xlabel(r\"Stellar Mass ($M_{\\odot}$)\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Mass Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(massList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n plt.hist(innerMassList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mass_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Mass Frequency\")\n plt.hist(massList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerMassList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massFrequency_overlay.png\",dpi=500)\n \n \n #========= Stars by Magnitude =========\n magList = []\n innerMagList = []\n for star in starList:\n magList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n \n mBins = np.arange(min(magList),max(magList)+0.1,0.1)\n inBins = np.arange(min(innerMagList),max(innerMagList)+0.1,0.1)\n plt.figure(f\"{clname}_mag_frequency_{mode}\")\n plt.xlabel(r\"Absolute G Mag\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Absolute Magnitude Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(magList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n 
plt.hist(innerMagList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_magFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_magFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mag_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Absolute Magnitude Frequency\")\n plt.hist(magList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerMagList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_magFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_magFrequency_overlay.png\",dpi=500)\n \n #========= Stars by Color =========\n colorList = []\n innerColorList = []\n for star in starList:\n colorList.append(star.b_r-cluster.reddening)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerColorList.append(star.b_r-cluster.reddening)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerColorList.append(star.b_r-cluster.reddening)\n \n mBins = np.arange(min(colorList),max(colorList)+0.1,0.1)\n inBins = np.arange(min(innerColorList),max(innerColorList)+0.1,0.1)\n plt.figure(f\"{clname}_color_frequency_{mode}\")\n plt.xlabel(r\"Dereddened BP-RP\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Dereddened Color Index Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(colorList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n plt.hist(innerColorList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_colorFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_colorFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_color_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Dereddened Color Index Frequency\")\n plt.hist(colorList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerColorList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_colorFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_colorFrequency_overlay.png\",dpi=500)\n \n \n \n #========= Other Radii ========= \n massSum = np.sum([star.proxyMass for star in starList])\n intensitySum = np.sum([toIntensity(star.g_mag) for star in starList])\n \n curMassSum = 0\n curIntSum = 0\n massFound = False\n intFound = False\n \n if normalize:\n setattr(cluster,f\"medianRad_{mode}\",np.median([np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))\n setattr(cluster,f\"medianAngle_{mode}\",np.median([star.normRadDist for star in starList]))\n radialStarList = sorted(starList,key=lambda x: x.normRadDist)\n \n for star in radialStarList:\n 
curMassSum += star.proxyMass\n curIntSum += toIntensity(star.g_mag)\n \n if curMassSum > massSum/2 and not massFound:\n setattr(cluster,f\"halfMassRad_{mode}\",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfMassAngle_{mode}\",star.normRadDist)\n massFound = True\n if curIntSum > intensitySum/2 and not intFound:\n setattr(cluster,f\"halfLightRad_{mode}\",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfLightAngle_{mode}\",star.normRadDist)\n intFound = True\n if massFound and intFound:\n break\n \n plt.figure(f\"{clname}_other_radii_{mode}\")\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (deg)\")\n else:\n setattr(cluster,f\"medianRad_{mode}\",np.median([np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))\n setattr(cluster,f\"medianAngle_{mode}\",np.median([star.radDist for star in starList]))\n radialStarList = sorted(starList,key=lambda x: x.radDist)\n \n for star in radialStarList:\n curMassSum += star.proxyMass\n curIntSum += toIntensity(star.g_mag)\n \n if curMassSum > massSum/2 and not massFound:\n setattr(cluster,f\"halfMassRad_{mode}\",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfMassAngle_{mode}\",star.radDist)\n massFound = True\n if curIntSum > intensitySum/2 and not intFound:\n setattr(cluster,f\"halfLightRad_{mode}\",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfLightAngle_{mode}\",star.radDist)\n intFound = True\n if massFound and intFound:\n break\n \n plt.figure(f\"{clname}_other_radii_{mode}\")\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (deg)\")\n \n medRad = getattr(cluster,f\"medianRad_{mode}\")\n medAngle = getattr(cluster,f\"medianAngle_{mode}\")\n mRad = getattr(cluster,f\"halfMassRad_{mode}\")\n mAngle = getattr(cluster,f\"halfMassAngle_{mode}\")\n lRad = getattr(cluster,f\"halfLightRad_{mode}\")\n lAngle = getattr(cluster,f\"halfLightAngle_{mode}\")\n print(medAngle)\n outline1 = Circle([x0,y0],medAngle,color='red',fill=False,ls='--',label=fr\"Median Star Distance = {medAngle:.3f}$\\degree$, {medRad:.3f}pc\",alpha=1)\n outline2 = Circle([x0,y0],mAngle,color='darkgreen',fill=False,ls='--',label=fr\"Half Mass Radius = {mAngle:.3f}$\\degree$, {mRad:.3f}pc\",alpha=1)\n outline3 = Circle([x0,y0],lAngle,color='purple',fill=False,ls='--',label=fr\"Half Light Radius = {lAngle:.3f}$\\degree$, {lRad:.3f}pc\",alpha=1)\n plt.gca().add_patch(outline1)\n plt.gca().add_patch(outline2)\n plt.gca().add_patch(outline3)\n plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} {mode.capitalize()} Various Radii\".replace(\"_normalized\",' Normalized'))\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"{cluster.imgPath}{clname}_otherRadii_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_otherRadii_{mode}.png\",dpi=500)\n \n \n\ndef checkLoaded(cList):\n if 'all' in cList:\n cList = [c.name 
for c in clusterList]\n else:\n for cl in cList:\n if not cl in clusters:\n loadClusters([cl])\n \n return cList\n\ndef saveResults(cList,outdir=\"results\"):\n #Imports\n import numpy as np\n import dill\n import os\n global clusters\n global clusterList\n \n checkLoaded(cList)\n \n #Check and create the relevant directory paths to save/load the results\n if not os.path.isdir(f\"{outdir}/\"):\n os.mkdir(f\"{outdir}/\")\n if not os.path.isdir(f\"{outdir}/pickled/\"):\n os.mkdir(f\"{outdir}/pickled/\")\n \n else:\n for cl in cList:\n cluster = clusters[cl]\n #Creates a \"result cluster\" object from the cluster, effectively just stripping away lists\n rCl = resultClusterObj(cluster)\n #Pickle the result cluster object\n with open(f\"{outdir}/pickled/{cluster.name}.pk1\", 'wb') as output:\n dill.dump(rCl, output)\n \n #Store variables into an array to be printed as csv\n properties = [a for a in dir(rCl) if not a.startswith('_')]\n res = [getattr(rCl,p) for p in properties]\n #Stack into an array of 2 rows with variable names and values\n fin = np.vstack((properties,res))\n np.savetxt(f\"{outdir}/{cluster.name}.csv\",fin,delimiter=',',fmt='%s')\n\ndef loadResults(filter=\"None\",indir=\"results\"):\n #Imports\n import numpy as np\n import dill\n import os\n global resultList\n global resultsIn\n \n assert os.path.isdir(\"results/\")\n resultList = []\n for fn in os.listdir(indir+\"/pickled/\"):\n #Reads in instances from the saved pickle file\n with open(f\"{indir}/pickled/{fn}\",'rb') as input:\n res = dill.load(input)\n resultList.append(res)\n resultsIn = True\n toDict()\n\ndef refreshProperties(cList=['all']):\n import numpy as np\n global catalogue\n global clusterList\n global clusters\n \n clusterCatalogue()\n checkLoaded(cList)\n \n #Loop through clusters\n for cluster in cList:\n \n reference = None\n \n for cl in catalogue:\n if str(cl.name) == str(cluster.name):\n reference = cl\n print(f\"Catalogue match for {cluster.name} found\")\n break\n if reference == None:\n print(f\"Catalogue match for {cluster.name} was not found, please create one\")\n continue\n\n #Filter all of the methods out of the properties list\n properties = [a for a in dir(reference) if not a.startswith('_')]\n #print(properties)\n #exec(f\"print(reference.{properties[1]})\")\n #print(properties)\n \n #Now we have a list of all the attributes assigned to the catalogue (the self.variables)\n for p in properties:\n prop = getattr(reference,p)\n #print(prop)\n exec(f\"cluster.{p} = prop\")\n try:\n if prop <= -98:\n print(f\"{cluster.name} does not have a specified catalogue value for {p}\")\n except:\n continue\n \n #Additional properties that may be useful\n for star in cluster.filtered:\n star.normRA = star.pmra*np.cos(star.dec*np.pi/180)\n \n print(f\"{cluster.name} properties refreshed from catalogue\")\n\n\n \n\ndef statPlot(statX,statY,population=\"open\",color=\"default\",square=True,invertY=False,logX=False,logY=False,pointLabels=True,linFit=False,directory='default'):\n #Create plots of stat X vs stat Y across a population of clusters, similar to customPlot()\n #Can be set to use a custom list of clusters, or all clusters of a given type\n #\n import matplotlib\n import matplotlib.pyplot as plt\n import numpy as np\n from scipy.stats import linregress\n global clusters\n global clusterList\n global catalogue\n global resultsIn\n global resultList\n \n \n if not resultsIn:\n loadResults()\n \n #Filter out incorrect inputs\n if type(population) == str:\n population = population.lower()\n try:\n assert 
population == \"open\" or population == \"globular\"\n except:\n print(\"Specified population type not recognized\")\n else:\n try:\n assert type(population) == list\n assert type(population[0]) == str\n except:\n print(\"Population type given is not valid, must be either a list of cluster name strings or a single string \\'open\\' or \\'closed\\'\")\n return\n try:\n assert len(population) > 1\n except:\n print(\"Population statistic plots cannot be made with fewer than 2 clusters given\")\n return\n \n \n #Load cluster information from cList\n #This is going to involve using the resultCluster object to read data from each cluster folder in the cList\n cList = []\n banList = ['NGC2204']\n if type(population) == str:\n for res in resultList:\n if res.clType.lower() == population and not res.name in banList:\n cList.append(res)\n else:\n for res in resultList:\n if res.name in population:\n cList.append(res)\n \n if statX.lower() == \"b_r\" and statY.lower() == \"g_mag\":\n #Corrected CMD overlay\n \n NUM_COLORS = len(cList)\n cm = plt.get_cmap('nipy_spectral')\n \n \n plt.figure(\"uncorrected\")\n plt.title(\"Cluster Overlay\")\n plt.xlabel(\"Observed B-R\")\n plt.ylabel(\"Apparent G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n plt.figure(\"unshifted\")\n plt.title(\"Corrected Cluster Overlay\")\n plt.xlabel(\"Dereddened B-R\")\n plt.ylabel(\"Absolute G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n plt.figure(\"shifted\")\n plt.title(\"Corrected Cluster Overlay - Offset\")\n plt.xlabel(\"Dereddened B-R\")\n plt.ylabel(\"Absolute G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n index = 0\n offset = 2.5\n for cluster in cList:\n try:\n path = cluster.dataPath\n except:\n path = f\"clusters/{cluster.name}/data/\"\n \n condensed = np.genfromtxt(f\"{path}condensed.csv\",delimiter=\",\")\n cluster.condensed = condensed\n \n #Adjust by cluster.reddening and cluster.dist_mod\n x1 = [a[0] for a in condensed]\n y1 = [a[1] for a in condensed]\n x2 = [a[0]-cluster.reddening for a in condensed]\n y2 = [a[1]-2.1*cluster.reddening-cluster.dist_mod for a in condensed]\n x3 = [a[0]-cluster.reddening for a in condensed]\n y3 = [a[1]-2.1*cluster.reddening-cluster.dist_mod+index*offset for a in condensed]\n \n index += 1\n \n plt.figure(\"uncorrected\")\n plt.scatter(x1,y1,label=f\"{cluster.name}\")\n \n plt.figure(\"unshifted\")\n plt.axvline(x=1.6,ymax=0.5,color='black',linestyle='--')\n plt.axhline(y=4,xmin=0.59,color='black',linestyle='--')\n plt.scatter(x2,y2,label=f\"{cluster.name}\")\n \n plt.figure(\"shifted\")\n plt.scatter(x3,y3,label=f\"{cluster.name}\")\n plt.axvline(x=1.6,color='black',linestyle='--')\n \n # if 'NGC2301' in cluster.name:\n # for a,b in zip(x2,y2):\n # print(f\"{a},{b}\")\n \n \n plt.figure(\"uncorrected\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_apparent.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_apparent.png\",dpi=500)\n \n plt.figure(\"unshifted\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_absolute.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_absolute.png\",dpi=500)\n 
\n plt.figure(\"shifted\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_shifted.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_shifted.png\",dpi=500)\n \n \n \n else:\n x = [getattr(a, statX) for a in cList]\n y = [getattr(a, statY) for a in cList]\n \n plt.figure()\n plt.xlabel(f\"{statX}\")\n plt.ylabel(f\"{statY}\")\n if pointLabels:\n for cluster in cList:\n plt.scatter(getattr(cluster, statX),getattr(cluster, statY),label=cluster.name)\n plt.legend(fontsize=\"small\")\n else:\n plt.scatter(x,y)\n \n if linFit:\n reg = linregress(x,y)\n plt.plot(x,[reg[0]*a+reg[1] for a in x])\n \n plt.savefig(f\"SpecificPlots/pdf/{population}_{statX}_{statY}.pdf\")\n plt.savefig(f\"SpecificPlots/png/{population}_{statX}_{statY}.png\",dpi=500)\n \n return\n\ndef ageMassFit(t,m0,k):\n import numpy as np\n \n return 1 + m0*np.exp(-1*k*t)\n\ndef extinctionLaw(d,M0):\n import numpy as np\n \n return M0 -2.5*np.log10(1/(4*np.pi*d**2))\n\ndef resultPlots():\n #Imports\n import matplotlib.pyplot as plt\n import numpy as np\n from scipy.stats import linregress\n from scipy.optimize import curve_fit\n global clusters\n global clusterList\n global catalogue\n global resultsIn\n global resultList\n \n \n if not resultsIn:\n loadResults()\n \n #Select open clusters from resultList\n banList = ['NGC2204']\n cList = []\n for res in resultList:\n if res.clType.lower() == \"open\" and not res.name in banList:\n cList.append(res)\n \n \n #Filtered mass versus age\n fname = \"mass_vs_age_filtered\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Cluster Member Mass ($M_{\\odot}$)\")\n plt.scatter([c.fit_age for c in cList],[c.meanProxyMass for c in cList])\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Bounded mass versus age\n fname = \"mass_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Cluster Member Mass ($M_{\\odot}$)\")\n \n x,y = [c.fit_age for c in cList],[c.meanBoundedProxyMass for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n \n fitLabel = fr\"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$\" + \"\\n\" + fr\"Uncertainties = $\\pm{var[0][0]:.3f}, \\pm{var[1][1]:.3f}$\"\n \n plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Mass intercept versus age\n fname = \"mass_intercept_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Stellar Mass in Core ($M_{\\odot}$)\")\n \n x,y = [c.fit_age for c in cList],[c.mass_intercept_bounded for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n \n fitLabel = fr\"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$\" + \"\\n\" + fr\"Uncertainties = $\\pm{var[0][0]:.3f}, \\pm{var[1][1]:.3f}$\"\n \n plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n 
#Mass slope versus age\n fname = \"mass_slop_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"IQM Stellar Mass Dropoff ($\\frac{M_{\\odot}}{pc}$)\")\n \n x,y = [c.fit_age for c in cList],[c.mass_slope_bounded for c in cList]\n plt.scatter(x,y)\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Magnitude versus distance (Extinction law)\n fname = \"mag_vs_dist_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Cluster Distance from Earth (pc)\")\n plt.ylabel(r\"Mean Apparent G Magnitude\")\n \n x,y = [c.meanDist for c in cList],[c.mean_bounded_g_mag for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(extinctionLaw,x,y,maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n plt.plot(xr,[extinctionLaw(a,fit[0]) for a in xr],label=\"Inverse Square Law \\n\" + fr\" $M_0 = {fit[0]:.3f} \\pm {var[0][0]:.3f}$\")\n plt.gca().invert_yaxis()\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n #Bounded fraction versus distance\n fname = \"bounded_fraction_vs_dist\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Cluster Distance from Earth (pc)\")\n plt.ylabel(\"Fraction Unaffected by BP-RP Limit\")\n \n x,y = [c.meanDist for c in cList],[c.fractionBounded for c in cList]\n plt.scatter(x,y)\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Radii\n plt.figure()\n plt.scatter([c.meanGalacticDist for c in cList],[c.halfLightRad_bounded/c.medianRad_bounded for c in cList])\n\n\n \ndef boundedStats(cList,xmax=1.6,saveCl=True,unloadCl=True):\n import numpy as np\n global clusters\n global subList\n for cl in cList:\n checkLoaded([cl])\n cluster = clusters[cl]\n \n subList = [star for star in cluster.filtered if not (star.b_r-cluster.reddening > xmax and star.g_mag > cluster.cltpy)]\n \n cluster.bounded = subList\n \n #Windowed properties (over the xmin to xmax range)\n cluster.meanBoundedProxyMass = np.mean([a.proxyMass for a in subList])\n cluster.totalBoundedProxyMass = np.sum([a.proxyMass for a in subList])\n cluster.numBounded = len(subList)\n cluster.fractionBounded = len(subList)/len(cluster.filtered)\n cluster.mean_bounded_b_r = np.mean([a.b_r for a in subList])\n cluster.mean_bounded_g_mag = np.mean([a.g_mag for a in subList])\n \n if saveCl:\n saveClusters([cl])\n saveResults([cl])\n if unloadCl:\n unloadClusters([cl])\n \n \n \n\n\ndef tryFits(fitVar='fit_age'):\n from scipy.stats import linregress\n \n global resultsIn\n global resultList\n global props\n global r2\n \n if not resultsIn:\n loadResults()\n \n cList = []\n for res in resultList:\n if res.clType.lower() == \"open\":\n cList.append(res)\n \n if 'all' in fitVar:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n propList = [a for a in props if type(getattr(cList[0],a)) == float]\n propList.remove('turnPoint')\n \n \n r2 = []\n \n for pr in propList:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n props = [a for a in props if type(getattr(cList[0],a)) == float]\n props.remove('turnPoint')\n props.remove(pr)\n \n for prop in props:\n \n x = [getattr(a, pr) for a in cList]\n y = [getattr(a, prop) for a 
in cList]\n \n reg = linregress(x,y)\n r2.append((pr,prop,reg[2]**2))\n \n r2 = sorted(r2,key = lambda x: x[2],reverse=True)\n \n print(\"Top 100 r^2 values:\")\n for r in r2[:200]:\n print(f\"{r[0]} | {r[1]} | {r[2]}\")\n \n \n else:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n props = [a for a in props if type(getattr(cList[0],a)) == float]\n props.remove('turnPoint')\n props.remove(fitVar)\n \n r2 = []\n for prop in props:\n \n x = [getattr(a, fitVar) for a in cList]\n y = [getattr(a, prop) for a in cList]\n \n reg = linregress(x,y)\n r2.append((prop,reg[2]**2))\n \n r2 = sorted(r2,key = lambda x: x[1],reverse=True)\n \n print(\"Top 20 r^2 values:\")\n for r in r2[:20]:\n print(f\"{r[0]} | {r[1]}\")\n \n \n\ndef prelimPlot(cl):\n import matplotlib.pyplot as plt\n \n cluster = clusters[cl]\n plt.scatter([a.ra for a in cluster.unfilteredWide],[a.dec for a in cluster.unfilteredWide],s=0.1)\n plt.figure()\n plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1)\n # plt.figure()\n # plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1,c=[a.par for a in cluster.unfilteredWide])\n # plt.set_cmap('cool')\n # clb = plt.colorbar()\n plt.figure()\n plt.scatter([a.b_r for a in cluster.unfilteredWide],[a.g_mag for a in cluster.unfilteredWide],s=0.1)\n plt.gca().invert_yaxis()\n # plt.figure()\n # plt.scatter([a.par for a in cluster.unfilteredWide],[a.par for a in cluster.unfilteredWide],s=0.1,c=[(a.pmra**2 + a.pmdec**2)**0.5 for a in cluster.unfilteredWide])\n # plt.set_cmap('cool')\n \n \n"
] | [
[
"numpy.sum",
"scipy.optimize.curve_fit",
"numpy.quantile",
"numpy.savetxt",
"matplotlib.pyplot.yscale",
"numpy.asarray",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ylabel",
"matplotlib.path.Path",
"numpy.amax",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"numpy.vstack",
"matplotlib.colors.to_rgba",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"numpy.abs",
"matplotlib.pyplot.set_cmap",
"numpy.cos",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.gcf",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.get_cmap",
"numpy.delete",
"numpy.log10",
"matplotlib.pyplot.hist",
"numpy.linspace",
"matplotlib.collections.RegularPolyCollection",
"matplotlib.pyplot.scatter",
"numpy.mean",
"numpy.sqrt",
"matplotlib.pyplot.axvline",
"numpy.zeros",
"pandas.read_csv",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axis",
"numpy.median",
"numpy.arange",
"scipy.stats.linregress",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.std",
"matplotlib.pyplot.legend",
"numpy.empty",
"numpy.arctan",
"numpy.exp",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"matplotlib.patches.Circle",
"matplotlib.widgets.Lasso",
"numpy.less_equal",
"numpy.array",
"numpy.sin",
"numpy.genfromtxt",
"matplotlib.pyplot.xlabel"
]
] |
aripekka/tbcalc | [
"a0337db245f5391bfa9a42123994832c299b1fbe"
] | [
"tests/test_tensor_transform.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nTests for the tensor transform functions. Run with pytest.\n\nCreated on Sat May 9 00:09:00 2020\n\n@author: aripekka\n\"\"\"\n\nimport sys\nimport os.path\nimport numpy as np\n\nsys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))\n\nfrom tbcalc.transverse_deformation import * \nfrom tbcalc import cartesian_tensors_to_cylindrical\n\nfrom pyTTE import TTcrystal, Quantity\n\ndef test_isotropic_circular():\n\n #Calculate the reference stresses and strains as implemented in the \n #deprecated sbcalc package\n\n E = 165\n nu = 0.22\n\n thickness = 0.1\n\n Rx = 1000.0\n Ry = 500.0\n\n R = np.sqrt(Rx*Ry)\n \n L = 100.0 \n \n x=np.linspace(-L/2,L/2,150)\n X,Y=np.meshgrid(x,x)\n\n RR = np.sqrt(X**2 + Y**2)\n PHI = np.arctan2(Y,X)\n\n stress, strain, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)\n\n stress_cyl = cartesian_tensors_to_cylindrical(stress)\n strain_cyl = cartesian_tensors_to_cylindrical(strain)\n\n\n stress_cyl_ref = {}\n stress_cyl_ref['rr'] = E/(16*R**2)*(L**2/4-RR**2)+stress['xx'](X,Y)*0\n stress_cyl_ref['phiphi'] = E/(16*R**2)*(L**2/4-3*RR**2)+stress['xx'](X,Y)*0\n stress_cyl_ref['rphi'] = stress['xx'](X,Y)*0\n stress_cyl_ref['phir'] = stress['xx'](X,Y)*0\n\n strain_cyl_ref = {}\n strain_cyl_ref['rr'] = 1/(16*R**2)*((1-nu)*L**2/4-(1-3*nu)*RR**2)+stress['xx'](X,Y)*0\n strain_cyl_ref['phiphi'] = 1/(16*R**2)*((1-nu)*L**2/4-(3-nu)*RR**2)+stress['xx'](X,Y)*0\n strain_cyl_ref['rphi'] = stress['xx'](X,Y)*0\n strain_cyl_ref['phir'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zphi'] = stress['xx'](X,Y)*0\n strain_cyl_ref['phiz'] = stress['xx'](X,Y)*0\n strain_cyl_ref['rz'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zr'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zz'] = nu/(4*R**2)*(RR**2-L**2/8)+stress['xx'](X,Y)*0\n\n meps = np.finfo(np.float).eps #m\n \n for i in ['r','phi']:\n for j in ['r','phi']:\n assert np.all(np.logical_or(np.abs(stress_cyl_ref[i+j] - stress_cyl[i+j](RR,PHI)) < meps,\n np.logical_and(np.isnan(stress_cyl_ref[i+j]), np.isnan(stress_cyl[i+j](RR,PHI)))))\n\n for i in ['r','phi','z']:\n for j in ['r','phi','z']:\n assert np.all(np.logical_or(np.abs(strain_cyl_ref[i+j] - strain_cyl[i+j](RR,PHI)) < meps,\n np.logical_and(np.isnan(strain_cyl_ref[i+j]), np.isnan(strain_cyl[i+j](RR,PHI)))))"
] | [
[
"numpy.arctan2",
"numpy.isnan",
"numpy.sqrt",
"numpy.finfo",
"numpy.meshgrid",
"numpy.linspace"
]
] |
liyunze-coding/Trigger-Me-Elmo-2 | [
"6950ffa4bfd264e213626f1ab3cff249fbab36da"
] | [
"app.py"
] | [
"from flask import Flask, render_template, request, jsonify\nimport base64\nimport logging\nimport numpy as np\nfrom deepface import DeepFace\nfrom PIL import Image\nfrom io import BytesIO\nimport subprocess\nimport os\nimport cv2\nimport random\nimport webbrowser\n\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nerror_path = {'race': {'asian': 0, 'indian': 0, 'black': 0, 'white': 0,\n 'middle eastern': 0, 'latino hispanic': 0}, 'dominant_race': '?'}\ndirectory = 'static/img'\n\nif 'img' not in os.listdir('static/'):\n os.mkdir(directory)\n\nfor f in os.listdir(directory):\n os.remove(os.path.join(directory, f))\n\n\ndef generate_random_string():\n numbers = '1234567890'\n res = ''.join(random.choice(numbers) for _ in range(10))\n return f'{directory}/{res}.png'\n\n\[email protected]('/')\ndef main():\n return render_template('index.html')\n\n\[email protected]('/photocap')\ndef photo_cap():\n photo_base64 = request.args.get('photo')\n\n _, encoded = photo_base64.split(\",\", 1)\n binary_data = base64.b64decode(encoded)\n\n f = BytesIO()\n f.write(binary_data)\n f.seek(0)\n image = Image.open(f)\n image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n fn = generate_random_string()\n\n cv2.imwrite(fn, image)\n try:\n obj = DeepFace.analyze(image, actions=['race'])\n obj['filename'] = fn\n return jsonify(obj)\n\n except ValueError:\n other_json = error_path\n other_json['filename'] = fn\n\n return jsonify(other_json)\n\n except Exception as e:\n print(e)\n other_json = error_path\n other_json['filename'] = fn\n\n return jsonify(other_json)\n\n\nif __name__ == \"__main__\":\n # p = subprocess.Popen(['python -m SimpleHTTPServer'], shell=True) #Only for macOS\n webbrowser.open_new('http://127.0.0.1:8000/')\n app.run(host='localhost', port=8000, debug=True)\n"
] | [
[
"numpy.array"
]
] |
pyrito/SpeechSplit | [
"ee70ee77e54d5b7cd1b39e7bef1cb96ae78f8beb"
] | [
"solver.py"
] | [
"from torch.utils.tensorboard.summary import hparams\nfrom model import Generator_3 as Generator\nfrom model import InterpLnr\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport pickle\n\nfrom utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy\n\n# use demo data for simplicity\n# make your own validation set as needed\nvalidation_pt = pickle.load(open('assets/demo.pkl', \"rb\"))\n\nclass Solver(object):\n \"\"\"Solver for training\"\"\"\n\n def __init__(self, vcc_loader, config, hparams):\n \"\"\"Initialize configurations.\"\"\"\n\n # Data loader.\n self.vcc_loader = vcc_loader\n self.hparams = hparams\n\n # Training configurations.\n self.num_iters = config.num_iters\n self.g_lr = config.g_lr\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.resume_iters = config.resume_iters\n \n # Miscellaneous.\n self.use_tensorboard = config.use_tensorboard\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu')\n\n # Directories.\n self.log_dir = config.log_dir\n self.sample_dir = config.sample_dir\n self.model_save_dir = config.model_save_dir\n\n # Step size.\n self.log_step = config.log_step\n self.sample_step = config.sample_step\n self.model_save_step = config.model_save_step\n \n\n # Build the model and tensorboard.\n self.build_model()\n if self.use_tensorboard:\n self.build_tensorboard()\n\n \n def build_model(self): \n self.G = Generator(self.hparams)\n \n self.Interp = InterpLnr(self.hparams)\n \n self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n self.print_network(self.G, 'G')\n \n self.G.to(self.device)\n self.Interp.to(self.device)\n\n \n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n \n \n def print_optimizer(self, opt, name):\n print(opt)\n print(name)\n \n \n def restore_model(self, resume_iters):\n print('Loading the trained models from step {}...'.format(resume_iters))\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))\n g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage)\n self.G.load_state_dict(g_checkpoint['model'])\n self.g_optimizer.load_state_dict(g_checkpoint['optimizer'])\n self.g_lr = self.g_optimizer.param_groups[0]['lr']\n \n \n def build_tensorboard(self):\n \"\"\"Build a tensorboard logger.\"\"\"\n from torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter(self.log_dir)\n \n\n def reset_grad(self):\n \"\"\"Reset the gradient buffers.\"\"\"\n self.g_optimizer.zero_grad()\n \n def encode_context(self):\n # Set data loader.\n data_loader = self.vcc_loader\n \n # Fetch fixed inputs for debugging.\n data_iter = iter(data_loader)\n \n # Start encoding from scratch or resume from checkpoint.\n start_iters = 0\n if self.resume_iters:\n print('Resuming ...')\n start_iters = self.resume_iters\n self.num_iters += self.resume_iters\n self.restore_model(self.resume_iters)\n # self.print_optimizer(self.g_optimizer, 'G_optimizer')\n \n \n # Print logs in specified order\n keys = ['G/loss_id']\n \n # Start encoding.\n print('Start encoding...')\n start_time = time.time()\n\n encoded_audio = {}\n # May need this if looping doesn't work: \n # for i in 
max(range(start_iters, self.num_iters), len(self.vcc_loader)):\n print(len(self.vcc_loader))\n count = 0\n for i, (x_real_org, emb_org, f0_org, len_org, id_org) in enumerate(self.vcc_loader):\n\n # =================================================================================== #\n # 1. Send input data to device #\n # =================================================================================== #\n \n # x_real_org = x_real_org.to(self.device)\n # emb_org = emb_org.to(self.device)\n # len_org = len_org.to(self.device)\n # f0_org = f0_org.to(self.device)\n \n \n # =================================================================================== #\n # 2. Encode using the generator #\n # =================================================================================== #\n \n self.G = self.G.eval()\n\n pad = 8 - ((len_org[0] + 1) % 8)\n encode_length = len_org[0] + 1 + pad\n print(id_org)\n x_real_pad, _ = pad_seq_to_2(x_real_org, encode_length)\n # len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org_pad, _ = pad_seq_to_2(f0_org, encode_length) # np.pad(f0_org, (0, 512-len_org[0]), 'constant', constant_values=(0, 0))\n assert x_real_pad.shape[1] == f0_org_pad.shape[1]\n f0_quantized = quantize_f0_numpy(np.squeeze(f0_org_pad))[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0, x_real_pad, emb_org)\n\n # code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0_intrp_org, x_real_org, emb_org)\n # print(f'content: {code_content}')\n\n encoded_audio[id_org[0]] = code_content\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log = \"Elapsed [{}], Audio file[{}/{}]\".format(et, i+1, len(self.vcc_loader))\n print(log)\n count += 1\n if count % 100 == 0:\n with open(f'assets/encoded-{self.hparams.encode_mode}-{count}.pkl', 'wb') as f:\n pickle.dump(encoded_audio, f)\n del encoded_audio\n encoded_audio = {}\n\n\n\n#=====================================================================================================================\n \n \n \n def train(self):\n # Set data loader.\n data_loader = self.vcc_loader\n \n # Fetch fixed inputs for debugging.\n data_iter = iter(data_loader)\n \n # Start training from scratch or resume training.\n start_iters = 0\n if self.resume_iters:\n print('Resuming ...')\n start_iters = self.resume_iters\n self.num_iters += self.resume_iters\n self.restore_model(self.resume_iters)\n self.print_optimizer(self.g_optimizer, 'G_optimizer')\n \n # Learning rate cache for decaying.\n g_lr = self.g_lr\n print ('Current learning rates, g_lr: {}.'.format(g_lr))\n \n # Print logs in specified order\n keys = ['G/loss_id']\n \n # Start training.\n print('Start training...')\n start_time = time.time()\n \n for i in range(start_iters, self.num_iters):\n\n # =================================================================================== #\n # 1. 
Preprocess input data #\n # =================================================================================== #\n\n # Fetch real images and labels.\n try:\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n except:\n data_iter = iter(data_loader)\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n \n x_real_org = x_real_org.to(self.device)\n emb_org = emb_org.to(self.device)\n len_org = len_org.to(self.device)\n f0_org = f0_org.to(self.device)\n \n \n # =================================================================================== #\n # 2. Train the generator #\n # =================================================================================== #\n \n self.G = self.G.train()\n \n # Identity mapping loss\n x_f0 = torch.cat((x_real_org, f0_org), dim=-1)\n x_f0_intrp = self.Interp(x_f0, len_org) \n f0_org_intrp = quantize_f0_torch(x_f0_intrp[:,:,-1])[0]\n x_f0_intrp_org = torch.cat((x_f0_intrp[:,:,:-1], f0_org_intrp), dim=-1)\n \n x_identic = self.G(x_f0_intrp_org, x_real_org, emb_org)\n g_loss_id = F.mse_loss(x_real_org, x_identic, reduction='mean') \n \n # Backward and optimize.\n g_loss = g_loss_id\n self.reset_grad()\n g_loss.backward()\n self.g_optimizer.step()\n\n # Logging.\n loss = {}\n loss['G/loss_id'] = g_loss_id.item()\n \n\n # =================================================================================== #\n # 4. Miscellaneous #\n # =================================================================================== #\n\n # Print out training information.\n if (i+1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log = \"Elapsed [{}], Iteration [{}/{}]\".format(et, i+1, self.num_iters)\n for tag in keys:\n log += \", {}: {:.8f}\".format(tag, loss[tag])\n print(log)\n\n if self.use_tensorboard:\n for tag, value in loss.items():\n self.writer.add_scalar(tag, value, i+1)\n \n \n # Save model checkpoints.\n if (i+1) % self.model_save_step == 0:\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))\n torch.save({'model': self.G.state_dict(),\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\n print('Saved model checkpoints into {}...'.format(self.model_save_dir)) \n \n\n # Validation.\n if (i+1) % self.sample_step == 0:\n self.G = self.G.eval()\n with torch.no_grad():\n loss_val = []\n for val_sub in validation_pt:\n emb_org_val = torch.from_numpy(val_sub[1]).to(self.device) \n for k in range(2, 3):\n x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)\n len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n g_loss_val = F.mse_loss(x_real_pad, x_identic_val, reduction='sum')\n loss_val.append(g_loss_val.item())\n val_loss = np.mean(loss_val) \n print('Validation loss: {}'.format(val_loss))\n if self.use_tensorboard:\n self.writer.add_scalar('Validation_loss', val_loss, i+1)\n \n\n # plot test samples\n if (i+1) % self.sample_step == 0:\n self.G = self.G.eval()\n with torch.no_grad():\n for val_sub in validation_pt:\n emb_org_val = torch.from_numpy(val_sub[1]).to(self.device) \n for k in range(2, 3):\n x_real_pad, _ = 
pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)\n len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_val)), dim=-1)\n x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_val), dim=-1)\n \n x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n x_identic_woF = self.G(x_f0_F, x_real_pad, emb_org_val)\n x_identic_woR = self.G(x_f0, torch.zeros_like(x_real_pad), emb_org_val)\n x_identic_woC = self.G(x_f0_C, x_real_pad, emb_org_val)\n \n melsp_gd_pad = x_real_pad[0].cpu().numpy().T\n melsp_out = x_identic_val[0].cpu().numpy().T\n melsp_woF = x_identic_woF[0].cpu().numpy().T\n melsp_woR = x_identic_woR[0].cpu().numpy().T\n melsp_woC = x_identic_woC[0].cpu().numpy().T\n \n min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n \n # fig, (ax1,ax2,ax3,ax4,ax5) = plt.subplots(5, 1, sharex=True)\n # im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value)\n # im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value)\n # im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value)\n # im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value)\n # im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value)\n # plt.savefig(f'{self.sample_dir}/{i+1}_{val_sub[0]}_{k}.png', dpi=150)\n # plt.close(fig) "
] | [
[
"torch.nn.functional.mse_loss",
"torch.load",
"numpy.squeeze",
"torch.zeros_like",
"torch.no_grad",
"torch.tensor",
"numpy.hstack",
"torch.cuda.is_available",
"torch.from_numpy",
"torch.utils.tensorboard.SummaryWriter",
"numpy.pad",
"torch.cat",
"numpy.mean"
]
] |
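A minimal, self-contained sketch of the checkpoint pattern used in the solver.py record above (restore_model() and the save step inside train()): the model and optimizer state dicts are bundled into a single '{step}-G.ckpt' file and reloaded with map_location so GPU-trained weights can be restored on a CPU-only machine. The tiny Linear model, the run/models directory and the step number are made up for illustration; only the save/load structure mirrors the solver.

import os
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))

save_dir = "run/models"          # hypothetical model_save_dir
os.makedirs(save_dir, exist_ok=True)
step = 100
ckpt_path = os.path.join(save_dir, "{}-G.ckpt".format(step))

# Save both model and optimizer state, as in train().
torch.save({"model": model.state_dict(),
            "optimizer": optimizer.state_dict()}, ckpt_path)

# Restore as in restore_model(): map_location keeps tensors on CPU storage.
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
current_lr = optimizer.param_groups[0]["lr"]
print("restored, lr =", current_lr)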
snsnlou/mars | [
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302"
] | [
"mars/dataframe/datastore/tests/test_datastore_execute.py"
] | [
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nimport mars.dataframe as md\nfrom mars.config import option_context\nfrom mars.dataframe import DataFrame\nfrom mars.deploy.local.core import new_cluster\nfrom mars.session import new_session\nfrom mars.tests.core import TestBase, flaky\n\ntry:\n import vineyard\nexcept ImportError:\n vineyard = None\ntry:\n import sqlalchemy\nexcept ImportError:\n sqlalchemy = None\ntry:\n import pyarrow as pa\nexcept ImportError:\n pa = None\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = None\n\n_exec_timeout = 120 if 'CI' in os.environ else -1\n\n\nclass Test(TestBase):\n def setUp(self):\n super().setUp()\n self.ctx, self.executor = self._create_test_context()\n\n def testToCSVExecution(self):\n index = pd.RangeIndex(100, 0, -1, name='index')\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # DATAFRAME TESTS\n # test one file with dataframe\n path = os.path.join(base_path, 'out.csv')\n\n r = df.to_csv(path)\n self.executor.execute_dataframe(r)\n\n result = pd.read_csv(path, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n\n # test multi files with dataframe\n path = os.path.join(base_path, 'out-*.csv')\n r = df.to_csv(path)\n self.executor.execute_dataframe(r)\n\n dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),\n dtype=raw.dtypes.to_dict())\n for i in range(4)]\n result = pd.concat(dfs, axis=0)\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.iloc[33: 66])\n\n with self.ctx:\n # test df with unknown shape\n df2 = DataFrame(raw, chunk_size=(50, 2))\n df2 = df2[df2['col1'] < 1]\n path2 = os.path.join(base_path, 'out2.csv')\n r = df2.to_csv(path2)\n self.executor.execute_dataframes([r])\n\n result = pd.read_csv(path2, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n\n # SERIES TESTS\n series = md.Series(raw.col1, chunk_size=33)\n\n # test one file with series\n path = os.path.join(base_path, 'out.csv')\n r = series.to_csv(path)\n self.executor.execute_dataframe(r)\n\n result = pd.read_csv(path, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw.col1.to_frame())\n\n # test multi files with series\n path = os.path.join(base_path, 'out-*.csv')\n r = series.to_csv(path)\n self.executor.execute_dataframe(r)\n\n dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),\n dtype=raw.dtypes.to_dict())\n for i in range(4)]\n result = pd.concat(dfs, axis=0)\n result.set_index('index', inplace=True)\n 
pd.testing.assert_frame_equal(result, raw.col1.to_frame())\n pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.col1.to_frame().iloc[33: 66])\n\n @unittest.skipIf(sqlalchemy is None, 'sqlalchemy not installed')\n def testToSQL(self):\n index = pd.RangeIndex(100, 0, -1, name='index')\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100).astype('int64'),\n }, index=index)\n\n with tempfile.TemporaryDirectory() as d:\n table_name1 = 'test_table'\n table_name2 = 'test_table2'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n engine = sqlalchemy.create_engine(uri)\n\n # test write dataframe\n df = DataFrame(raw, chunk_size=33)\n r = df.to_sql(table_name1, con=engine)\n self.executor.execute_dataframe(r)\n\n written = pd.read_sql(table_name1, con=engine, index_col='index') \\\n .sort_index(ascending=False)\n pd.testing.assert_frame_equal(raw, written)\n\n # test write with existing table\n with self.assertRaises(ValueError):\n df.to_sql(table_name1, con=uri).execute()\n\n # test write series\n series = md.Series(raw.col1, chunk_size=33)\n with engine.connect() as conn:\n r = series.to_sql(table_name2, con=conn)\n self.executor.execute_dataframe(r)\n\n written = pd.read_sql(table_name2, con=engine, index_col='index') \\\n .sort_index(ascending=False)\n pd.testing.assert_frame_equal(raw.col1.to_frame(), written)\n\n @unittest.skipIf(vineyard is None, 'vineyard not installed')\n @flaky(max_runs=3)\n def testToVineyard(self):\n def run_with_given_session(session, **kw):\n ipc_socket = os.environ.get('VINEYARD_IPC_SOCKET', '/tmp/vineyard/vineyard.sock')\n with option_context({'vineyard.socket': ipc_socket}):\n df1 = DataFrame(pd.DataFrame(np.arange(12).reshape(3, 4), columns=['a', 'b', 'c', 'd']),\n chunk_size=2)\n object_id = df1.to_vineyard().execute(session=session, **kw).fetch(session=session)\n df2 = md.from_vineyard(object_id)\n\n df1_value = df1.execute(session=session, **kw).fetch(session=session)\n df2_value = df2.execute(session=session, **kw).fetch(session=session)\n pd.testing.assert_frame_equal(\n df1_value.reset_index(drop=True), df2_value.reset_index(drop=True))\n\n with new_session().as_default() as session:\n run_with_given_session(session)\n\n with new_cluster(scheduler_n_process=2, worker_n_process=2,\n shared_memory='20M', web=False) as cluster:\n with new_session(cluster.endpoint).as_default() as session:\n run_with_given_session(session, timeout=_exec_timeout)\n\n @unittest.skipIf(pa is None, 'pyarrow not installed')\n def testToParquetArrowExecution(self):\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.arange(100),\n 'col3': np.random.choice(['a', 'b', 'c'], (100,)),\n })\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # DATAFRAME TESTS\n path = os.path.join(base_path, 'out-*.parquet')\n r = df.to_parquet(path)\n self.executor.execute_dataframe(r)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result = result.sort_index()\n pd.testing.assert_frame_equal(result, raw)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result = result.sort_index()\n pd.testing.assert_frame_equal(result, raw)\n\n # test read_parquet then to_parquet\n read_df = md.read_parquet(path)\n r = read_df.to_parquet(path)\n self.executor.execute_dataframes([r])\n\n # test partition_cols\n path = os.path.join(base_path, 'out-partitioned')\n r = 
df.to_parquet(path, partition_cols=['col3'])\n self.executor.execute_dataframe(r)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result['col3'] = result['col3'].astype('object')\n pd.testing.assert_frame_equal(result.sort_values('col1').reset_index(drop=True),\n raw.sort_values('col1').reset_index(drop=True))\n\n @unittest.skipIf(fastparquet is None, 'fastparquet not installed')\n def testToParquetFastParquetExecution(self):\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.arange(100),\n 'col3': np.random.choice(['a', 'b', 'c'], (100,)),\n })\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # test fastparquet\n path = os.path.join(base_path, 'out-fastparquet-*.parquet')\n r = df.to_parquet(path, engine='fastparquet', compression='gzip')\n self.executor.execute_dataframe(r)\n"
] | [
[
"pandas.read_sql",
"numpy.random.choice",
"numpy.arange",
"pandas.RangeIndex",
"numpy.random.rand",
"pandas.concat",
"pandas.testing.assert_frame_equal"
]
] |
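A pandas-only sketch of the round trip checked by testToCSVExecution() in the record above, with the Mars executor left out: the frame is written by hand in 33-row pieces to out-{i}.csv files (standing in for what Mars does with chunk_size=33 and the 'out-*.csv' path), the parts are read back with the original dtypes, and the concatenation is compared against the source frame. The temporary directory and the manual iloc chunking are assumptions of this sketch.

import os
import tempfile

import numpy as np
import pandas as pd

index = pd.RangeIndex(100, 0, -1, name="index")
raw = pd.DataFrame({
    "col1": np.random.rand(100),
    "col2": np.random.choice(["a", "b", "c"], (100,)),
    "col3": np.arange(100),
}, index=index)

chunk_size = 33
with tempfile.TemporaryDirectory() as base_path:
    # Write ceil(100 / 33) = 4 part files, mimicking the 'out-*.csv' pattern.
    n_chunks = int(np.ceil(len(raw) / chunk_size))
    for i in range(n_chunks):
        part = raw.iloc[i * chunk_size:(i + 1) * chunk_size]
        part.to_csv(os.path.join(base_path, "out-{}.csv".format(i)))

    # Read the parts back with the original dtypes and re-assemble.
    dfs = [pd.read_csv(os.path.join(base_path, "out-{}.csv".format(i)),
                       dtype=raw.dtypes.to_dict())
           for i in range(n_chunks)]
    result = pd.concat(dfs, axis=0)
    result.set_index("index", inplace=True)
    pd.testing.assert_frame_equal(result, raw)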
candleinwindsteve/Stratipy | [
"ea505df1e4830141c590922d654edfbde498b924"
] | [
"stratipy/filtering_diffusion.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg import norm\nfrom scipy.io import loadmat, savemat\nfrom nbs_class import Ppi, Patient\nfrom subprocess import call\n# import h5py\nimport os\nimport glob\nimport time\nimport datetime\n\n# NOTE mutationProfileDiffusion -> propagation\n# mutationProfile -> M, PPIAdjacencyMatrix -> adj, dataFolder -> result_folder\n# PPI_influence_min -> ppi_influence_min, PPI_influence_max-> ppi_influence_max\n# PPI_influence()-> calcul_ppi_influence(), PPI_influence -> ppi_influence\n# influenceDistance->influence_distance\n# influenceMat -> ppi_influence, PPIneighboorsMax -> ngh_max,\n# bestInfluencers -> best_influencers\n# filteredGenes -> deg0, keepSingletons -> keep_singletons\n# mutationsMin -> min_mutation, mutationsMax -> mutationsMax\n# newnet -> ppi_ngh, netFinal -> ppi_final, mutFinal -> mut_final\n# filteredPatients -> filtered_patients\n\n\n# @profile\ndef propagation(M, adj, alpha=0.7, tol=10e-6): # TODO equation, M, alpha\n \"\"\"Network propagation iterative process\n\n Iterative algorithm for apply propagation using random walk on a network:\n Initialize::\n X1 = M\n\n Repeat::\n X2 = alpha * X1.A + (1-alpha) * M\n X1 = X2\n\n Until::\n norm(X2-X1) < tol\n\n Where::\n A : degree-normalized adjacency matrix\n\n Parameters\n ----------\n M : sparse matrix\n Data matrix to be diffused.\n\n adj : sparse matrix\n Adjacency matrice.\n\n alpha : float, default: 0.7\n Diffusion/propagation factor with 0 <= alpha <= 1.\n For alpha = 0 : no diffusion.\n For alpha = 1 :\n\n tol : float, default: 10e-6\n Convergence threshold.\n\n Returns\n -------\n X2 : sparse matrix\n Smoothed matrix.\n \"\"\"\n print(' ==== propagation ==== ')\n\n n = adj.shape[0]\n # diagonal = 1 -> degree\n # TODO to set diagonal = 0 before applying eye\n adj = adj+sp.eye(n, dtype=np.float32)\n\n d = sp.dia_matrix((np.array(adj.sum(axis=0))**-1, [0]),\n shape=(n, n),\n dtype=np.float32)\n A = adj.dot(d)\n\n X1 = M.astype(np.float32)\n X2 = alpha * X1.dot(A) + (1-alpha) * M\n i = 0\n while norm(X2-X1) > tol:\n X1 = X2\n X2 = alpha * X1.dot(A) + (1-alpha) * M\n i += 1\n print('Propagation iteration = {} ----- {}'.format(\n i, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n return X2\n\n\n# @profile\ndef compare_ij_ji(ppi, out_min=True, out_max=True):\n \"\"\"Helper function for calcul_ppi_influence\n\n In most cases the influence (propagation) is not symmetric. We have to\n compare weight (a_ij) and (a_ji) for all pairs in order to obtain symmetric\n matrix/matrices. 
2 choices available: minimum or maximum weight.\n a = min [(a_ij),(a_ji)]\n a = max [(a_ij),(a_ji)]\n Minimum weight is chosen to avoid Hubs phenomenon.\n\n Parameters\n ----------\n ppi : sparse matrix\n Matrice to apply comparison.\n\n out_min, out_max : boolean, default: True\n Minimum and/or maximum weight is chosen.\n\n Returns\n -------\n ppi_min, ppi_max : sparse matrix\n Symmertric matrix with minimum and/or maximum weight.\n \"\"\"\n # TODO matrice type of ppi\n n = ppi.shape[0]\n ppi = ppi.tolil() # need \"lil_matrix\" for reshape\n # transpose to compare ppi(ij) and ppi(ji)\n ppi_transp = sp.lil_matrix.transpose(ppi)\n # reshape to 1D matrix\n ppi_1d = ppi.reshape((1, n**2))\n ppi_1d_transp = ppi_transp.reshape((1, n**2))\n\n # reshapeto original size matrix after comparison (min/max)\n if out_min and out_max:\n ppi_min = (sp.coo_matrix.tolil(\n sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))\n ).reshape((n, n)).astype(np.float32)\n ppi_max = (sp.coo_matrix.tolil(\n sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))\n ).reshape((n, n)).astype(np.float32)\n\n print('ppi_min', type(ppi_min), ppi_min.dtype, ppi_min.shape)\n print('ppi_max', type(ppi_max), ppi_max.dtype, ppi_max.shape)\n return ppi_min, ppi_max\n\n elif out_min:\n ppi_min = (sp.coo_matrix.tolil(\n sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,\n dtype=np.float32))).reshape((n, n))\n return ppi_min\n\n elif out_max:\n ppi_max = (sp.coo_matrix.tolil(\n sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,\n dtype=np.float32))).reshape((n, n))\n return ppi_max\n else:\n print('You have to choice Min or Max') # TODO change error message\n\n\n# @profile\ndef calcul_final_influence(M, adj, result_folder, influence_weight='min',\n simplification=True, compute=False, overwrite=False,\n alpha=0.7, tol=10e-6):\n \"\"\"Compute network influence score\n\n Network propagation iterative process is applied on PPI. (1) The network\n influence distance matrix and (2) influence matrices based on minimum /\n maximum weight are saved as MATLAB-style files (.mat).\n - (1) : 'influence_distance_alpha={}_tol={}.mat'\n in 'influence_distance' directory\n - (2) : 'ppi_influence_alpha={}_tol={}.mat'\n in 'ppi_influence' directory\n Where {} are parameter values. The directories will be automatically\n created if not exist.\n\n If compute=False, the latest data of directory will be taken into\n account:\n - latest data with same parameters (alpha and tol)\n - if not exist, latest data of directory but with differents parameters\n\n Parameters\n ----------\n M : sparse matrix\n Data matrix to be diffused.\n\n adj : sparse matrix\n Adjacency matrice.\n\n result_folder : str\n Path to create a new directory for save new files. If you want to creat\n in current directory, enter '/directory_name'. 
Absolute path is also\n supported.\n\n influence_weight :\n\n simplification : boolean, default: True\n\n compute : boolean, default: False\n If True, new network influence score will be computed.\n If False, the latest network influence score will be taken into\n account.\n\n overwrite : boolean, default: False\n If True, new network influence score will be computed even if the file\n which same parameters already exists in the directory.\n\n alpha : float, default: 0.7\n Diffusion (propagation) factor with 0 <= alpha <= 1.\n For alpha = 0 : no diffusion.\n For alpha = 1 :\n\n tol : float, default: 10e-6\n Convergence threshold.\n\n Returns\n -------\n final_influence : sparse matrix\n Smoothed PPI influence matrices based on minimum / maximum weight.\n \"\"\"\n influence_distance_directory = result_folder + 'influence_distance/'\n influence_distance_file = (\n influence_distance_directory +\n 'influence_distance_alpha={}_tol={}.mat'.format(alpha, tol))\n #######\n final_influence_directory = result_folder + 'final_influence/'\n final_influence_file = (\n final_influence_directory +\n 'final_influence_simp={}_alpha={}_tol={}.mat'.format(\n simplification, alpha, tol))\n #######\n\n existance_same_param = os.path.exists(final_influence_file)\n # TODO overwrite condition\n\n # check if same parameters file exists in directory\n if existance_same_param:\n final_influence_data = loadmat(final_influence_file)\n if influence_weight == 'min':\n final_influence = final_influence_data['final_influence_min']\n else:\n final_influence = final_influence_data['final_influence_max']\n print('final influence matrix', type(final_influence), final_influence.shape)\n print('***** Same parameters file of FINAL INFLUENCE already exists ***** {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n else:\n if compute:\n start = time.time()\n\n # check if influence distance file exists\n existance_same_influence = os.path.exists(influence_distance_file)\n if existance_same_influence:\n influence_data = loadmat(influence_distance_file)\n influence = influence_data['influence_distance']\n print('***** Same parameters file of INFLUENCE DISTANCE already exists ***** {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n else:\n influence = propagation(M, adj, alpha, tol)\n print('influence', type(influence), influence.dtype)\n\n # save influence distance before simplification with parameters' values in filename\n os.makedirs(influence_distance_directory, exist_ok=True) # NOTE For Python ≥ 3.2\n print(' ==== Start to save INFLUENCE DISTANCE ==== {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n start_save = time.time()\n savemat(influence_distance_file,\n {'influence_distance': influence,\n 'alpha': alpha},\n do_compression=True)\n end_save = time.time()\n print(\"---------- save time = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end_save - start_save),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # simplification: multiply by PPI adjacency matrix\n if simplification:\n influence = influence.multiply(sp.lil_matrix(adj))\n # -> influence as csr_matrix\n else:\n print(\"---------- No simplification ----------\")\n pass\n\n # compare influence[i,j] and influence[j,i] => min/max => final influence\n start_ij = time.time()\n final_influence_min, final_influence_max = compare_ij_ji(\n influence, out_min=True, out_max=True)\n end_ij = time.time()\n print(\"---------- compare ij/ji = {} ---------- {}\"\n 
.format(datetime.timedelta(seconds=end_ij - start_ij),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # save final influence with parameters' values in filename\n os.makedirs(final_influence_directory, exist_ok=True)\n\n print(' ==== Start to save FINAL INFLUENCE ==== {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n start_save = time.time()\n savemat(final_influence_file,\n {'final_influence_min': final_influence_min,\n 'final_influence_max': final_influence_max,\n 'alpha': alpha}, do_compression=True)\n end_save = time.time()\n print(\"---------- save time = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end_save - start_save),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n if influence_weight == 'min':\n final_influence = final_influence_min\n else:\n final_influence = final_influence_max\n\n end = time.time()\n print(\"---------- Influence = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # take most recent file\n else:\n for x in final_influence_file, influence_distance_directory:\n print(x)\n newest_file = max(glob.iglob(x + '*.mat'),\n key=os.path.getctime)\n final_influence_data = loadmat(newest_file)\n if x == final_influence_directory:\n if influence_weight == 'min':\n final_influence = final_influence_data['final_influence_min']\n else:\n final_influence = final_influence_data['final_influence_max']\n return final_influence\n\n\n# @profile\ndef best_neighboors(ppi_filt, final_influence, ngh_max):\n \"\"\"Helper function for filter_ppi_patients\n\n Keeps only the connections with the best influencers.\n\n Parameters\n ----------\n ppi_filt : sparse matrix\n Filtration from ppi_total : only genes in PPI are considered.\n\n final_influence :\n Smoothed PPI influence matrices based on minimum or maximum weight.\n\n ngh_max : int\n Number of best influencers in PPI.\n\n Returns\n -------\n ppi_ngh : sparse matrix\n PPI with only best influencers.\n \"\"\"\n ngh_max = ngh_max + 1 # central protein included\n final_influence = final_influence.todense()\n print(type(final_influence))\n ppi_filt = ppi_filt.todense()\n ppi_ngh = np.zeros(ppi_filt.shape, dtype=np.float32)\n print('ppi_ngh', ppi_ngh.shape)\n for i in range(ppi_filt.shape[0]):\n best_influencers = np.argpartition(-final_influence[i, :], ngh_max)[:ngh_max]\n #NOTE different result if same value exists several times\n # best_influencers2 = np.argpartition(final_influence[i, :], -ngh_max)[-ngh_max:]\n ppi_ngh[i, best_influencers] = ppi_filt[i, best_influencers]\n ppi_ngh = np.max(np.dstack((ppi_ngh, ppi_ngh.T)), axis=2)\n print('ppi_ngh ', ppi_ngh.dtype)\n # too stringent if np.min\n return sp.csc_matrix(ppi_ngh)\n\n\n# @profile\ndef filter_ppi_patients(ppi_total, mut_total, ppi_filt, final_influence, ngh_max,\n keep_singletons=False,\n min_mutation=10, max_mutation=2000):\n \"\"\"Keeping only the connections with the best influencers and Filtering some\n patients based on mutation number\n\n 'the 11 most influential neighbors of each gene in the network as\n determined by network influence distance were used'\n 'Only mutation data generated using the Illumina GAIIx platform were\n retained for subsequent analy- sis, and patients with fewer than 10\n mutations were discarded.'\n\n Parameters\n ----------\n ppi_total : sparse matrix\n Built from all sparse sub-matrices (AA, ... 
, CC).\n\n mut_total : sparse matrix\n Patients' mutation profiles of all genes (rows: patients,\n columns: genes of AA, BB and CC).\n\n ppi_filt : sparse matrix\n Filtration from ppi_total : only genes in PPI are considered.\n\n final_influence :\n Smoothed PPI influence matrices based on minimum or maximum weight.\n\n ngh_max : int\n Number of best influencers in PPI.\n\n keep_singletons : boolean, default: False\n If True, proteins not annotated in PPI (genes founded only in patients'\n mutation profiles) will be also considered.\n If False, only annotated proteins in PPI will be considered.\n\n min_mutation, max_mutation : int\n Numbers of lowest mutations and highest mutations per patient.\n\n Returns\n -------\n ppi_final, mut_final : sparse matrix\n PPI and mutation profiles after filtering.\n \"\"\"\n # n = final_influence.shape[0]\n # final_influence = index_to_sym_matrix(n, final_influence)\n\n ppi_ngh = best_neighboors(ppi_filt, final_influence, ngh_max)\n print('ppi_ngh ', ppi_ngh.dtype)\n deg0 = Ppi(ppi_total).deg == 0 # True if protein degree = 0\n\n if keep_singletons:\n ppi_final = sp.bmat([\n [ppi_ngh, sp.csc_matrix((ppi_ngh.shape[0], sum(deg0)))],\n [sp.csc_matrix((sum(deg0), ppi_ngh.shape[0])),\n sp.csc_matrix((sum(deg0), sum(deg0)))]\n ]) # -> COO matrix\n # mut_final=sp.bmat([[mut_total[:,deg0==False],mut_total[:,deg0==True]]])\n mut_final = mut_total\n else:\n ppi_final = ppi_ngh\n mut_final = mut_total[:, Ppi(ppi_total).deg > 0]\n\n # filtered_patients = np.array([k < min_mutation or k > max_mutation for k in Patient(mut_final).mut_per_patient])\n # mut_final = mut_final[filtered_patients == False, :]\n\n # to avoid worse comparison '== False'\n mut_final = mut_final[np.array([min_mutation < k < max_mutation for k in\n Patient(mut_final).mut_per_patient])]\n\n print(\"Removing %i patients with less than %i or more than %i mutations\" %\n (mut_total.shape[0]-mut_final.shape[0], min_mutation, max_mutation))\n print(\"New adjacency matrix:\", ppi_final.shape)\n print(\"New mutation profile matrix:\", mut_final.shape)\n\n return ppi_final, mut_final\n\n\n# @profile\ndef quantile_norm_mean(anarray):\n \"\"\"Helper function for propagation_profile\n\n Forces the observations/variables to have identical intensity distribution.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n A = np.squeeze(np.asarray(anarray.T))\n AA = np.zeros_like(A)\n I = np.argsort(A, axis=0)\n AA[I, np.arange(A.shape[1])] = np.mean(A[I, np.arange(A.shape[1])],\n axis=1)[:, np.newaxis]\n return AA.T\n\n\n# @profile\ndef quantile_norm_median(anarray):\n A = np.squeeze(np.asarray(anarray.T))\n AA = np.zeros_like(A)\n I = np.argsort(A, axis=0)\n AA[I, np.arange(A.shape[1])] = np.median(A[I, np.arange(A.shape[1])],\n axis=1)[:, np.newaxis]\n return AA.T\n\n\n# @profile\ndef propagation_profile(mut_raw, adj, alpha, tol, qn):\n # TODO error messages\n start = time.time()\n if alpha > 0:\n # TODO verification of same parameter file\n mut_propag = propagation(mut_raw, adj, alpha, tol).todense()\n mut_propag[np.isnan(mut_propag)] = 0\n if qn == 'mean':\n mut_type = 'mean_qn'\n mut_propag = quantile_norm_mean(mut_propag)\n elif qn == 'median':\n mut_type = 'median_qn'\n mut_propag = quantile_norm_median(mut_propag)\n else:\n mut_type = 'diff'\n\n end = time.time()\n print(\"---------- Propagation on {} mutation profile = {} ---------- {}\"\n .format(mut_type,\n datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return mut_type, mut_propag\n\n 
else:\n mut_type = 'raw'\n mut_raw = mut_raw.todense()\n\n end = time.time()\n print(\"---------- Propagation on {} mutation profile = {} ---------- {}\"\n .format(mut_type,\n datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return mut_type, mut_raw\n"
] | [
[
"scipy.io.loadmat",
"numpy.zeros_like",
"scipy.sparse.linalg.norm",
"numpy.zeros",
"scipy.sparse.lil_matrix.transpose",
"scipy.sparse.csc_matrix",
"numpy.argpartition",
"numpy.argsort",
"numpy.asarray",
"scipy.sparse.eye",
"numpy.arange",
"numpy.dstack",
"scipy.io.savemat",
"scipy.sparse.lil_matrix",
"numpy.isnan",
"scipy.sparse.vstack"
]
] |
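A small runnable sketch of the iterative network-propagation update documented in propagation() in the filtering_diffusion.py record above: self-loops are added, the adjacency matrix is degree-normalized, and X2 = alpha * X1 @ A + (1 - alpha) * M is repeated until norm(X2 - X1) < tol. The 3-gene toy graph and the single-patient mutation profile are invented for illustration.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import norm

def propagate(M, adj, alpha=0.7, tol=10e-6):
    n = adj.shape[0]
    adj = adj + sp.eye(n, dtype=np.float32)                 # add self-loops
    d = sp.dia_matrix((np.array(adj.sum(axis=0)) ** -1, [0]),
                      shape=(n, n), dtype=np.float32)       # inverse degrees
    A = adj.dot(d)                                          # degree-normalized adjacency
    X1 = M.astype(np.float32)
    X2 = alpha * X1.dot(A) + (1 - alpha) * M
    while norm(X2 - X1) > tol:                              # iterate to convergence
        X1 = X2
        X2 = alpha * X1.dot(A) + (1 - alpha) * M
    return X2

# Toy 3-gene network: genes 0 and 1 connected, gene 2 isolated.
adj = sp.csr_matrix(np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 0]], dtype=np.float32))
# One "patient" mutated only in gene 0.
M = sp.csr_matrix(np.array([[1.0, 0.0, 0.0]], dtype=np.float32))
print(propagate(M, adj).toarray())   # signal diffuses from gene 0 to gene 1, gene 2 stays 0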
MohammadWasil/Self-Driving-Car | [
"9ef5b77e1268623c11e4c39d5c8e1e990caee273"
] | [
"Self Driving Car/Python with Tensorflow/driveSDC.py"
] | [
"import socket\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\n\r\nfrom PIL import ImageGrab\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\n#Load the model.\r\nmodel = load_model(r\"D:\\Unity Game\\Self Driving Car\\SDCProgram\\Best Models\\data-003.h5\") \t# Directory to load the model\r\n\r\n\r\n# Socket Tcp Connection.\r\nhost = \"127.0.0.1\"\r\nport = 25001 # Port number\r\n#data = \"1,1,11\" # Data to be send\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP connection\r\nprint(\"starting connection\")\r\ntry:\r\n sock.connect((host, port)) #To connect ot the given port.\r\n print(\"Connected\")\r\n \r\nexcept:\r\n print(\"Might happen socket is closed!\")\r\n#######\r\n\r\ndef send_data(steering_angle, throttle):\r\n data_01 = str(steering_angle)\r\n data_02 = str(throttle)\r\n data = data_01 + ',' + data_02\r\n sock.sendall(data.encode(\"utf-8\")) # To send the data\r\n\r\nsteeringAngleList = []\r\nvelocityList = []\r\nthrottleList = []\r\n\r\nsteeringAngle = 0\r\nvelocity = 0\r\nthrottle = 0\r\n\r\narr1=[]\r\narr2=[]\r\narr3=[]\r\nsplitted_data = []\r\nreply=[]\r\ndef socketConnection():\r\n global globalsteeringAngle\r\n global velocity\r\n global throttle\r\n try:\r\n #data = \"1,0\"\r\n \r\n reply = sock.recv(2048).decode(\"utf-8\") # To receive the data\r\n #######send_data(reply)\r\n #print(\"Actual data received is: \", reply)\r\n \r\n splitted_data = reply.split(',')\r\n #print(\"after splitting the data: \", splitted_data)\r\n arr1.append(splitted_data[0])\r\n arr2.append(splitted_data[1])\r\n arr3.append(splitted_data[2])\r\n \r\n steeringAngle = float(splitted_data[0])\r\n velocity = float(splitted_data[1])\r\n throttle = float(splitted_data[2])\r\n \r\n except:\r\n print(\"Exception\")\r\n \r\n steeringAngleList = np.array(arr1) \r\n velocityList = np.array(arr2)\r\n throttleList = np.array(arr3)\r\n\r\n return steeringAngleList, velocityList, throttleList, steeringAngle, velocity, throttle\r\n\r\n\r\nfilename = r\"D:\\ML\\Unity-ML\\Drive SDC.csv\" \t#Directory to save your current Data in a csv file.\r\n\r\ndef csv_file(steer_Angle, velocity, throttle):\r\n \r\n #print(\"Writing to csv file!\")\r\n f = open(filename, \"w\")\r\n f.write(\"{},{},{}\\n\".format(\"Steerring Angle\", \"Current Velocity\", \"Throttle\"))\r\n \r\n for x in zip( steer_Angle, velocity, throttle):\r\n f.write(\"{},{},{}\\n\".format(x[0], x[1], x[2]))\r\n \r\n f.close()\r\n\r\n############################# \r\nMAX_SPEED = 25\r\nMIN_SPEED = 10\r\nspeed_limit = MAX_SPEED\r\n\r\ndef preprocess(image):\r\n return cv2.resize(image, (200, 66), cv2.INTER_AREA)\r\n\r\n\r\ndef drive(image, steering_angle, velocity, throttle):\r\n\r\n try:\r\n image = np.asarray(image) # from PIL image to numpy array\r\n image = preprocess(image) # apply the preprocessing\r\n image = np.array([image]) # the model expects 4D array\r\n \r\n steering_angle = float(model.predict(image, batch_size=1))\r\n steering_angle = (steering_angle/10)\r\n global speed_limit\r\n if velocity > speed_limit:\r\n speed_limit = MIN_SPEED # slow down\r\n else:\r\n speed_limit = MAX_SPEED\r\n throttle = 1.0 - steering_angle**2 - (velocity/speed_limit)**2\r\n\r\n print('{} {} {}'.format(steering_angle, throttle, velocity))\r\n steering_angle = (steering_angle*10)\r\n send_data(steering_angle, throttle)\r\n \r\n except Exception as e:\r\n print(\"Exception Occured\", e)\r\n \r\nnum = 0 \r\npath = r\"D:\\ML\\Unity-ML\\Drive SDC\" # Destination/path to which all the current images will be saved \r\nwhile 
(True):\r\n num = num + 1\r\n imageName = 'Wasil'+ str(num) + '.png' # Name of the images.\r\n #collecting current data\r\n strAngl, vlcty, thrttl, steeringAngle, velocity, throttle = socketConnection()\r\n image = np.array(ImageGrab.grab(bbox=(0, 120, 750, 540))) # Taking the screebshot and adding in the array\r\n \r\n csv_file(strAngl, vlcty, thrttl)\r\n cv2.imwrite(os.path.join(path, imageName), image) # Trying to save the image in the exact same directory.\r\n \r\n\r\n drive(image, steeringAngle, velocity, throttle)\r\n\r\n\"\"\"\r\n### NOTE: divide steering angle by 10.\r\n\"\"\""
] | [
[
"numpy.array",
"numpy.asarray",
"tensorflow.keras.models.load_model"
]
] |
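A dependency-free sketch of the throttle rule inside drive() in the driveSDC.py record above, isolated from the socket and Keras parts: when the car exceeds the current speed limit, the limit drops to MIN_SPEED so the squared velocity term pushes the throttle down; otherwise the limit returns to MAX_SPEED. The sample (steering_angle, velocity) pairs are made up for illustration.

MAX_SPEED = 25
MIN_SPEED = 10
speed_limit = MAX_SPEED

def compute_throttle(steering_angle, velocity):
    global speed_limit
    if velocity > speed_limit:
        speed_limit = MIN_SPEED      # above the limit: force the car to slow down
    else:
        speed_limit = MAX_SPEED
    return 1.0 - steering_angle ** 2 - (velocity / speed_limit) ** 2

for steering_angle, velocity in [(0.0, 5.0), (0.1, 20.0), (0.2, 26.0)]:
    throttle = compute_throttle(steering_angle, velocity)
    print("{} {:.3f} {}".format(steering_angle, throttle, velocity))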
fluxtransport/fiasco | [
"9d70d8bdb03197be1ddfd433e1392e214a1468e8"
] | [
"fiasco/element.py"
] | [
"\"\"\"\nClasses and functions for element-level operations\n\"\"\"\nimport numpy as np\nimport astropy.units as u\nimport plasmapy\n\nimport fiasco\n\n__all__ = ['Element']\n\n\nclass Element(fiasco.IonCollection):\n \"\"\"\n Collection of all ions for a particular element.\n\n The `Element` object provides a way to logically group together ions of the same\n element. This provides an easy way to compute element-level derived quantities such\n as the ionization fraction as a function of temperature.\n\n Parameters\n ----------\n element_name : `str`, `int`\n Symbol, atomic number, or full name of the element\n temperature : `~astropy.units.Quantity`\n\n See Also\n --------\n fiasco.Ion : All the same keyword arguments can also be passed here.\n \"\"\"\n\n @u.quantity_input\n def __init__(self, element_name, temperature: u.K, **kwargs):\n if type(element_name) is str:\n element_name = element_name.capitalize()\n Z = plasmapy.atomic.atomic_number(element_name)\n ion_list = []\n for i in range(Z + 1):\n ion = fiasco.Ion(f'{Z} {i+1}', temperature, **kwargs)\n ion_list.append(ion)\n\n super().__init__(*ion_list)\n\n @property\n def atomic_symbol(self):\n return self[0].atomic_symbol\n\n @property\n def atomic_number(self):\n return self[0].atomic_number\n\n @property\n def element_name(self):\n return self[0].element_name\n\n @property\n def abundance(self):\n return self[0].abundance\n\n def _rate_matrix(self):\n rate_matrix = np.zeros(self.temperature.shape+(self.atomic_number+1, self.atomic_number+1))\n rate_unit = self[0].ionization_rate().unit\n rate_matrix = rate_matrix * rate_unit\n for i in range(1, self.atomic_number):\n rate_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate())\n rate_matrix[:, i, i-1] = self[i-1].ionization_rate()\n rate_matrix[:, i, i+1] = self[i+1].recombination_rate()\n rate_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate())\n rate_matrix[:, 0, 1] = self[1].recombination_rate()\n rate_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate())\n rate_matrix[:, -1, -2] = self[-2].ionization_rate()\n\n return rate_matrix\n\n def equilibrium_ionization(self, **kwargs):\n \"\"\"\n Calculate the ionization fraction, in equilibrium, for all ions of the element.\n\n Calculate the population fractions for every ion of this element as a function of\n temperature, assuming ionization equilibrium.\n\n Parameters\n ----------\n rate_matrix : `~astropy.units.Quantity`, optional\n :math:`Z+1` by :math:`Z+1` matrix of ionization and recombination rates. If not\n given, this will be computed automatically.\n\n See Also\n --------\n fiasco.Ion.ionization_rate\n fiasco.Ion.recombination_rate\n \"\"\"\n rate_matrix = kwargs.get('rate_matrix', None)\n if rate_matrix is None:\n rate_matrix = self._rate_matrix()\n # Solve system of equations using singular value decomposition\n _, _, V = np.linalg.svd(rate_matrix.value)\n # Select columns of V with smallest eigenvalues (returned in descending order)\n # NOTE: must take the absolute value as the SVD solution is only accurate up\n # to the sign. 
We require that the solutions must be positive.\n ioneq = np.fabs(V[:, -1, :])\n ioneq /= ioneq.sum(axis=1)[:, np.newaxis]\n\n return u.Quantity(ioneq)\n\n def __getitem__(self, value):\n if type(value) is str:\n el, ion = value.split()\n if '+' in ion:\n value = int(ion.strip('+'))\n else:\n value = int(ion) - 1\n return super().__getitem__(value)\n\n def __repr__(self):\n ion_list = '\\n'.join([i.ion_name for i in self._ion_list])\n return f\"\"\"Element\n-------\n{self.atomic_symbol} ({self.atomic_number}) -- {self.element_name}\n\nAvailable Ions\n--------------\n{ion_list}\"\"\"\n"
] | [
[
"numpy.linalg.svd",
"numpy.fabs",
"numpy.zeros"
]
] |
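A NumPy-only sketch of the SVD step in Element.equilibrium_ionization() in the element.py record above: the equilibrium fractions are the null-space vector of the rate matrix, taken in absolute value (the SVD fixes the solution only up to sign) and normalized to sum to one. The 2x2 rate matrix for a hypothetical two-state ion, with made-up ionization and recombination rates, stands in for the real (Z+1) x (Z+1), temperature-batched matrix, so the batched indexing V[:, -1, :] of the original reduces to V[-1, :] here.

import numpy as np

ion_rate, rec_rate = 2.0, 0.5        # hypothetical ionization / recombination rates
rate_matrix = np.array([[-ion_rate,  rec_rate],
                        [ ion_rate, -rec_rate]])

# The right-singular vector with the smallest singular value spans the null space.
_, _, V = np.linalg.svd(rate_matrix)
ioneq = np.fabs(V[-1, :])
ioneq /= ioneq.sum()

print(ioneq)                 # -> [0.2, 0.8]
print(rate_matrix @ ioneq)   # ~ [0, 0]: the fractions are a steady state of the rate matrix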
yigitozgumus/Polimi_Thesis | [
"711c1edcf1fdb92fc6c15bf5ab1be141c13995c3"
] | [
"models/new/sencebgan.py"
] | [
"import tensorflow as tf\n\nfrom base.base_model import BaseModel\nfrom utils.alad_utils import get_getter\nimport utils.alad_utils as sn\n\n\nclass SENCEBGAN(BaseModel):\n def __init__(self, config):\n super(SENCEBGAN, self).__init__(config)\n self.build_model()\n self.init_saver()\n\n def build_model(self):\n ############################################################################################\n # INIT\n ############################################################################################\n # Kernel initialization for the convolutions\n if self.config.trainer.init_type == \"normal\":\n self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)\n elif self.config.trainer.init_type == \"xavier\":\n self.init_kernel = tf.contrib.layers.xavier_initializer(\n uniform=False, seed=None, dtype=tf.float32\n )\n # Placeholders\n self.is_training_gen = tf.placeholder(tf.bool)\n self.is_training_dis = tf.placeholder(tf.bool)\n self.is_training_enc_g = tf.placeholder(tf.bool)\n self.is_training_enc_r = tf.placeholder(tf.bool)\n self.feature_match1 = tf.placeholder(tf.float32)\n self.feature_match2 = tf.placeholder(tf.float32)\n self.image_input = tf.placeholder(\n tf.float32, shape=[None] + self.config.trainer.image_dims, name=\"x\"\n )\n self.noise_tensor = tf.placeholder(\n tf.float32, shape=[None, self.config.trainer.noise_dim], name=\"noise\"\n )\n ############################################################################################\n # MODEL\n ############################################################################################\n self.logger.info(\"Building training graph...\")\n with tf.variable_scope(\"SENCEBGAN\"):\n # First training part\n # G(z) ==> x'\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen = self.generator(self.noise_tensor)\n # Discriminator outputs\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_real, self.decoded_real = self.discriminator(\n self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n self.embedding_fake, self.decoded_fake = self.discriminator(\n self.image_gen, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n # Second training part\n # E(x) ==> z'\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded = self.encoder_g(self.image_input)\n # G(z') ==> G(E(x)) ==> x''\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc = self.generator(self.image_encoded)\n # Discriminator outputs\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_enc_fake, self.decoded_enc_fake = self.discriminator(\n self.image_gen_enc, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n self.embedding_enc_real, self.decoded_enc_real = self.discriminator(\n self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n with tf.variable_scope(\"Discriminator_Model_XX\"):\n self.im_logit_real, self.im_f_real = self.discriminator_xx(\n self.image_input,\n self.image_input,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.im_logit_fake, self.im_f_fake = self.discriminator_xx(\n self.image_input,\n self.image_gen_enc,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n # Third training part\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded_r = self.encoder_g(self.image_input)\n\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc_r = self.generator(self.image_encoded_r)\n\n with tf.variable_scope(\"Encoder_R_Model\"):\n self.image_ege = 
self.encoder_r(self.image_gen_enc_r)\n\n with tf.variable_scope(\"Discriminator_Model_ZZ\"):\n self.z_logit_real, self.z_f_real = self.discriminator_zz(\n self.image_encoded_r,\n self.image_encoded_r,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.z_logit_fake, self.z_f_fake = self.discriminator_zz(\n self.image_encoded_r,\n self.image_ege,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n\n ############################################################################################\n # LOSS FUNCTIONS\n ############################################################################################\n with tf.name_scope(\"Loss_Functions\"):\n with tf.name_scope(\"Generator_Discriminator\"):\n # Discriminator Loss\n if self.config.trainer.mse_mode == \"norm\":\n self.disc_loss_real = tf.reduce_mean(\n self.mse_loss(\n self.decoded_real,\n self.image_input,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n self.disc_loss_fake = tf.reduce_mean(\n self.mse_loss(\n self.decoded_fake,\n self.image_gen,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n elif self.config.trainer.mse_mode == \"mse\":\n self.disc_loss_real = self.mse_loss(\n self.decoded_real,\n self.image_input,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n self.disc_loss_fake = self.mse_loss(\n self.decoded_fake,\n self.image_gen,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n self.loss_discriminator = (\n tf.math.maximum(self.config.trainer.disc_margin - self.disc_loss_fake, 0)\n + self.disc_loss_real\n )\n # Generator Loss\n pt_loss = 0\n if self.config.trainer.pullaway:\n pt_loss = self.pullaway_loss(self.embedding_fake)\n self.loss_generator = self.disc_loss_fake + self.config.trainer.pt_weight * pt_loss\n # New addition to enforce visual similarity\n delta_noise = self.embedding_real - self.embedding_fake\n delta_flat = tf.layers.Flatten()(delta_noise)\n loss_noise_gen = tf.reduce_mean(tf.norm(delta_flat, ord=2, axis=1, keepdims=False))\n self.loss_generator += 0.1 * loss_noise_gen\n\n with tf.name_scope(\"Encoder_G\"):\n if self.config.trainer.mse_mode == \"norm\":\n self.loss_enc_rec = tf.reduce_mean(\n self.mse_loss(\n self.image_gen_enc,\n self.image_input,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n self.loss_enc_f = tf.reduce_mean(\n self.mse_loss(\n self.decoded_enc_real,\n self.decoded_enc_fake,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n elif self.config.trainer.mse_mode == \"mse\":\n self.loss_enc_rec = tf.reduce_mean(\n self.mse_loss(\n self.image_gen_enc,\n self.image_input,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n self.loss_enc_f = tf.reduce_mean(\n self.mse_loss(\n self.embedding_enc_real,\n self.embedding_enc_fake,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n self.loss_encoder_g = (\n self.loss_enc_rec + self.config.trainer.encoder_f_factor * self.loss_enc_f\n )\n if self.config.trainer.enable_disc_xx:\n self.enc_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_real, labels=tf.zeros_like(self.im_logit_real)\n )\n self.enc_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_fake, labels=tf.ones_like(self.im_logit_fake)\n )\n self.enc_loss_xx = tf.reduce_mean(self.enc_xx_real + self.enc_xx_fake)\n self.loss_encoder_g += self.enc_loss_xx\n\n with tf.name_scope(\"Encoder_R\"):\n if self.config.trainer.mse_mode == \"norm\":\n self.loss_encoder_r = tf.reduce_mean(\n self.mse_loss(\n self.image_ege,\n self.image_encoded_r,\n 
mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n\n elif self.config.trainer.mse_mode == \"mse\":\n self.loss_encoder_r = tf.reduce_mean(\n self.mse_loss(\n self.image_ege,\n self.image_encoded_r,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n\n if self.config.trainer.enable_disc_zz:\n self.enc_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_real, labels=tf.zeros_like(self.z_logit_real)\n )\n self.enc_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_fake, labels=tf.ones_like(self.z_logit_fake)\n )\n self.enc_loss_zz = tf.reduce_mean(self.enc_zz_real + self.enc_zz_fake)\n self.loss_encoder_r += self.enc_loss_zz\n\n if self.config.trainer.enable_disc_xx:\n with tf.name_scope(\"Discriminator_XX\"):\n self.loss_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_real, labels=tf.ones_like(self.im_logit_real)\n )\n self.loss_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_fake, labels=tf.zeros_like(self.im_logit_fake)\n )\n self.dis_loss_xx = tf.reduce_mean(self.loss_xx_real + self.loss_xx_fake)\n if self.config.trainer.enable_disc_zz:\n with tf.name_scope(\"Discriminator_ZZ\"):\n self.loss_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_real, labels=tf.ones_like(self.z_logit_real)\n )\n self.loss_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_fake, labels=tf.zeros_like(self.z_logit_fake)\n )\n self.dis_loss_zz = tf.reduce_mean(self.loss_zz_real + self.loss_zz_fake)\n\n ############################################################################################\n # OPTIMIZERS\n ############################################################################################\n with tf.name_scope(\"Optimizers\"):\n self.generator_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_gen,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.encoder_g_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_enc,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.encoder_r_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_enc,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.discriminator_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_dis,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n # Collect all the variables\n all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # Generator Network Variables\n self.generator_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Generator_Model\")\n ]\n # Discriminator Network Variables\n self.discriminator_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model\")\n ]\n # Discriminator Network Variables\n self.encoder_g_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Encoder_G_Model\")\n ]\n self.encoder_r_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Encoder_R_Model\")\n ]\n self.dxxvars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model_XX\")\n ]\n self.dzzvars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model_ZZ\")\n ]\n # Generator Network Operations\n self.gen_update_ops = tf.get_collection(\n 
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Generator_Model"
        )
        # Discriminator Network Operations
        self.disc_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model"
        )
        self.encg_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_G_Model"
        )

        self.encr_update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_R_Model"
        )
        self.update_ops_dis_xx = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_XX"
        )
        self.update_ops_dis_zz = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_ZZ"
        )
        with tf.control_dependencies(self.gen_update_ops):
            self.gen_op = self.generator_optimizer.minimize(
                self.loss_generator,
                var_list=self.generator_vars,
                global_step=self.global_step_tensor,
            )
        with tf.control_dependencies(self.disc_update_ops):
            self.disc_op = self.discriminator_optimizer.minimize(
                self.loss_discriminator, var_list=self.discriminator_vars
            )
        with tf.control_dependencies(self.encg_update_ops):
            self.encg_op = self.encoder_g_optimizer.minimize(
                self.loss_encoder_g,
                var_list=self.encoder_g_vars,
                global_step=self.global_step_tensor,
            )
        with tf.control_dependencies(self.encr_update_ops):
            self.encr_op = self.encoder_r_optimizer.minimize(
                self.loss_encoder_r,
                var_list=self.encoder_r_vars,
                global_step=self.global_step_tensor,
            )
        if self.config.trainer.enable_disc_xx:
            with tf.control_dependencies(self.update_ops_dis_xx):
                self.disc_op_xx = self.discriminator_optimizer.minimize(
                    self.dis_loss_xx, var_list=self.dxxvars
                )
        if self.config.trainer.enable_disc_zz:
            with tf.control_dependencies(self.update_ops_dis_zz):
                self.disc_op_zz = self.discriminator_optimizer.minimize(
                    self.dis_loss_zz, var_list=self.dzzvars
                )
        # Exponential Moving Average for Estimation
        self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)

        self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)

        self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)

        self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
        maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)

        if self.config.trainer.enable_disc_xx:
            self.dis_xx_ema = tf.train.ExponentialMovingAverage(
                decay=self.config.trainer.ema_decay
            )
            maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)

        if self.config.trainer.enable_disc_zz:
            self.dis_zz_ema = tf.train.ExponentialMovingAverage(
                decay=self.config.trainer.ema_decay
            )
            maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)

        with tf.control_dependencies([self.disc_op]):
            self.train_dis_op = tf.group(maintain_averages_op_dis)

        with tf.control_dependencies([self.gen_op]):
            self.train_gen_op = tf.group(maintain_averages_op_gen)

        with tf.control_dependencies([self.encg_op]):
            self.train_enc_g_op = tf.group(maintain_averages_op_encg)

        with tf.control_dependencies([self.encr_op]):
            self.train_enc_r_op = tf.group(maintain_averages_op_encr)

        if self.config.trainer.enable_disc_xx:
            with tf.control_dependencies([self.disc_op_xx]):
                self.train_dis_op_xx = tf.group(maintain_averages_op_dis_xx)

        if self.config.trainer.enable_disc_zz:
            with tf.control_dependencies([self.disc_op_zz]):
                self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)

        ############################################################################################
        # TESTING
        ############################################################################################
        self.logger.info("Building Testing Graph...")
        with tf.variable_scope("SENCEBGAN"):
            with tf.variable_scope("Discriminator_Model"):
                self.embedding_q_ema, self.decoded_q_ema = self.discriminator(
                    self.image_input,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )
            with tf.variable_scope("Generator_Model"):
                self.image_gen_ema = self.generator(
                    self.embedding_q_ema, getter=get_getter(self.gen_ema)
                )
            with tf.variable_scope("Discriminator_Model"):
                self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(
                    self.image_gen_ema,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )
            # Second Training Part
            with tf.variable_scope("Encoder_G_Model"):
                self.image_encoded_ema = self.encoder_g(
                    self.image_input, getter=get_getter(self.encg_ema)
                )

            with tf.variable_scope("Generator_Model"):
                self.image_gen_enc_ema = self.generator(
                    self.image_encoded_ema, getter=get_getter(self.gen_ema)
                )
            with tf.variable_scope("Discriminator_Model"):
                self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(
                    self.image_gen_enc_ema,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )
                self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(
                    self.image_input,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )
            if self.config.trainer.enable_disc_xx:
                with tf.variable_scope("Discriminator_Model_XX"):
                    self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(
                        self.image_input,
                        self.image_input,
                        getter=get_getter(self.dis_xx_ema),
                        do_spectral_norm=self.config.trainer.do_spectral_norm,
                    )
                    self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(
                        self.image_input,
                        self.image_gen_enc_ema,
                        getter=get_getter(self.dis_xx_ema),
                        do_spectral_norm=self.config.trainer.do_spectral_norm,
                    )
            # Third training part
            with tf.variable_scope("Encoder_G_Model"):
                self.image_encoded_r_ema = self.encoder_g(self.image_input)

            with tf.variable_scope("Generator_Model"):
                self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)

            with tf.variable_scope("Encoder_R_Model"):
                self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)

            with tf.variable_scope("Discriminator_Model"):
                self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(
                    self.image_gen_enc_r_ema,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )
                self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(
                    self.image_input,
                    getter=get_getter(self.dis_ema),
                    do_spectral_norm=self.config.trainer.do_spectral_norm,
                )

            if self.config.trainer.enable_disc_zz:
                with tf.variable_scope("Discriminator_Model_ZZ"):
                    self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(
                        self.image_encoded_r_ema,
                        self.image_encoded_r_ema,
                        getter=get_getter(self.dis_zz_ema),
                        do_spectral_norm=self.config.trainer.do_spectral_norm,
                    )
                    self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(
                        self.image_encoded_r_ema,
                        self.image_ege_ema,
                        getter=get_getter(self.dis_zz_ema),
                        do_spectral_norm=self.config.trainer.do_spectral_norm,
                    )

        with tf.name_scope("Testing"):
            with tf.name_scope("Image_Based"):
                delta = self.image_input - self.image_gen_enc_ema
                self.rec_residual = -delta
                delta_flat = tf.layers.Flatten()(delta)
                img_score_l1 = tf.norm(
                    delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__1"
                )
                self.img_score_l1 = tf.squeeze(img_score_l1)

                delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema
                delta_flat = tf.layers.Flatten()(delta)
                img_score_l2 = tf.norm(
                    delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__2"
                )
                self.img_score_l2 = tf.squeeze(img_score_l2)

            with tf.name_scope("Noise_Based"):
                delta = self.image_encoded_r_ema - self.image_ege_ema
                delta_flat = tf.layers.Flatten()(delta)
                final_score_1 = tf.norm(
                    delta_flat, ord=2, axis=1, keepdims=False, name="final_score_1"
                )
                self.final_score_1 = tf.squeeze(final_score_1)
                self.score_comb_im = (
                    1 * self.img_score_l1
                    + self.feature_match1 * self.final_score_1
                )
                delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema
                delta_flat = tf.layers.Flatten()(delta)
                final_score_2 = tf.norm(
                    delta_flat, ord=2, axis=1, keepdims=False, name="final_score_2"
                )
                self.final_score_2 = tf.squeeze(final_score_2)

                delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema
                delta_flat = tf.layers.Flatten()(delta)
                final_score_3 = tf.norm(
                    delta_flat, ord=2, axis=1, keepdims=False, name="final_score_3"
                )
                self.final_score_3 = tf.squeeze(final_score_3)

                # Combo 1
                self.score_comb_z = (
                    (1 - self.feature_match2) * self.final_score_2
                    + self.feature_match2 * self.final_score_3
                )

                # Combo 2

                if self.config.trainer.enable_disc_xx:
                    delta = self.im_f_real_ema - self.im_f_fake_ema
                    delta_flat = tf.layers.Flatten()(delta)
                    final_score_4 = tf.norm(
                        delta_flat, ord=1, axis=1, keepdims=False, name="final_score_4"
                    )
                    self.final_score_4 = tf.squeeze(final_score_4)

                    delta = self.z_f_real_ema - self.z_f_fake_ema
                    delta_flat = tf.layers.Flatten()(delta)
                    final_score_6 = tf.norm(
                        delta_flat, ord=1, axis=1, keepdims=False, name="final_score_6"
                    )
                    self.final_score_6 = tf.squeeze(final_score_6)

        ############################################################################################
        # TENSORBOARD
        ############################################################################################
        if self.config.log.enable_summary:
            with tf.name_scope("train_summary"):
                with tf.name_scope("dis_summary"):
                    tf.summary.scalar("loss_disc", self.loss_discriminator, ["dis"])
                    tf.summary.scalar("loss_disc_real", self.disc_loss_real, ["dis"])
                    tf.summary.scalar("loss_disc_fake", self.disc_loss_fake, ["dis"])
                    if self.config.trainer.enable_disc_xx:
                        tf.summary.scalar("loss_dis_xx", self.dis_loss_xx, ["enc_g"])
                    if self.config.trainer.enable_disc_zz:
                        tf.summary.scalar("loss_dis_zz", self.dis_loss_zz, ["enc_r"])
                with tf.name_scope("gen_summary"):
                    tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
                with tf.name_scope("enc_summary"):
                    tf.summary.scalar("loss_encoder_g", self.loss_encoder_g, ["enc_g"])
                    tf.summary.scalar("loss_encoder_r", self.loss_encoder_r, ["enc_r"])
                with tf.name_scope("img_summary"):
                    tf.summary.image("input_image", self.image_input, 1, ["img_1"])
                    tf.summary.image("reconstructed", self.image_gen, 1, ["img_1"])
                    # From discriminator in part 1
                    tf.summary.image("decoded_real", self.decoded_real, 1, ["img_1"])
                    tf.summary.image("decoded_fake", self.decoded_fake, 1, ["img_1"])
                    # Second Stage of Training
                    tf.summary.image("input_enc", self.image_input, 1, ["img_2"])
                    tf.summary.image("reconstructed", self.image_gen_enc, 1, ["img_2"])
                    # From discriminator in part 2
                    tf.summary.image("decoded_enc_real", self.decoded_enc_real, 1, ["img_2"])
                    tf.summary.image("decoded_enc_fake", self.decoded_enc_fake, 1, ["img_2"])
                    # Testing
                    tf.summary.image("input_image", self.image_input, 1, ["test"])
                    tf.summary.image("reconstructed", self.image_gen_enc_r_ema, 1, ["test"])
                    tf.summary.image("residual", self.rec_residual, 1, ["test"])

        self.sum_op_dis = tf.summary.merge_all("dis")
        self.sum_op_gen = tf.summary.merge_all("gen")
        self.sum_op_enc_g = tf.summary.merge_all("enc_g")
        self.sum_op_enc_r = tf.summary.merge_all("enc_r")
        self.sum_op_im_1 = tf.summary.merge_all("img_1")
        self.sum_op_im_2 = tf.summary.merge_all("img_2")
        self.sum_op_im_test = tf.summary.merge_all("test")
        self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])

    ###############################################################################################
    # MODULES
    ###############################################################################################
    def generator(self, noise_input, getter=None):
        with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
            net_name = "Layer_1"
            with tf.variable_scope(net_name):
                x_g = tf.layers.Dense(
                    units=2 * 2 * 256, kernel_initializer=self.init_kernel, name="fc"
                )(noise_input)
                x_g = tf.layers.batch_normalization(
                    x_g,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_gen,
                    name="batch_normalization",
                )
                x_g = tf.nn.leaky_relu(
                    features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
                )
            x_g = tf.reshape(x_g, [-1, 2, 2, 256])
            net_name = "Layer_2"
            with tf.variable_scope(net_name):
                x_g = tf.layers.Conv2DTranspose(
                    filters=128,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2t",
                )(x_g)
                x_g = tf.layers.batch_normalization(
                    x_g,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_gen,
                    name="batch_normalization",
                )
                x_g = tf.nn.leaky_relu(
                    features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
                )
            net_name = "Layer_3"
            with tf.variable_scope(net_name):
                x_g = tf.layers.Conv2DTranspose(
                    filters=64,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2t",
                )(x_g)
                x_g = tf.layers.batch_normalization(
                    x_g,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_gen,
                    name="batch_normalization",
                )
                x_g = tf.nn.leaky_relu(
                    features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
                )
            net_name = "Layer_4"
            with tf.variable_scope(net_name):
                x_g = tf.layers.Conv2DTranspose(
                    filters=32,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2t",
                )(x_g)
                x_g = tf.layers.batch_normalization(
                    x_g,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_gen,
                    name="batch_normalization",
                )
                x_g = tf.nn.leaky_relu(
                    features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
                )
            net_name = "Layer_5"
            with tf.variable_scope(net_name):
                x_g = tf.layers.Conv2DTranspose(
                    filters=1,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2t",
                )(x_g)
                x_g = tf.tanh(x_g, name="tanh")
        return x_g

    def discriminator(self, image_input, getter=None, do_spectral_norm=False):
        layers = sn if do_spectral_norm else tf.layers
        with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
            with tf.variable_scope("Encoder"):
                x_e = tf.reshape(
                    image_input,
                    [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
                )
                net_name = "Layer_1"
                with tf.variable_scope(net_name):
                    x_e = layers.conv2d(
                        x_e,
                        filters=32,
                        kernel_size=5,
                        strides=2,
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="conv",
                    )
                    x_e = tf.nn.leaky_relu(
                        features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                    )
                    # 14 x 14 x 64
                net_name = "Layer_2"
                with tf.variable_scope(net_name):
                    x_e = layers.conv2d(
                        x_e,
                        filters=64,
                        kernel_size=5,
                        padding="same",
                        strides=2,
                        kernel_initializer=self.init_kernel,
                        name="conv",
                    )
                    x_e = tf.layers.batch_normalization(
                        x_e,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                    )
                    x_e = tf.nn.leaky_relu(
                        features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                    )
                    # 7 x 7 x 128
                net_name = "Layer_3"
                with tf.variable_scope(net_name):
                    x_e = layers.conv2d(
                        x_e,
                        filters=128,
                        kernel_size=5,
                        padding="same",
                        strides=2,
                        kernel_initializer=self.init_kernel,
                        name="conv",
                    )
                    x_e = tf.layers.batch_normalization(
                        x_e,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                    )
                    x_e = tf.nn.leaky_relu(
                        features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                    )
                    # 4 x 4 x 256
                x_e = tf.layers.Flatten()(x_e)
                net_name = "Layer_4"
                with tf.variable_scope(net_name):
                    x_e = layers.dense(
                        x_e,
                        units=self.config.trainer.noise_dim,
                        kernel_initializer=self.init_kernel,
                        name="fc",
                    )

                embedding = x_e
            with tf.variable_scope("Decoder"):
                net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])
                net_name = "layer_1"
                with tf.variable_scope(net_name):
                    net = tf.layers.Conv2DTranspose(
                        filters=256,
                        kernel_size=5,
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="tconv1",
                    )(net)
                    net = tf.layers.batch_normalization(
                        inputs=net,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                        name="tconv1/bn",
                    )
                    net = tf.nn.relu(features=net, name="tconv1/relu")

                net_name = "layer_2"
                with tf.variable_scope(net_name):
                    net = tf.layers.Conv2DTranspose(
                        filters=128,
                        kernel_size=5,
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="tconv2",
                    )(net)
                    net = tf.layers.batch_normalization(
                        inputs=net,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                        name="tconv2/bn",
                    )
                    net = tf.nn.relu(features=net, name="tconv2/relu")

                net_name = "layer_3"
                with tf.variable_scope(net_name):
                    net = tf.layers.Conv2DTranspose(
                        filters=64,
                        kernel_size=5,
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="tconv3",
                    )(net)
                    net = tf.layers.batch_normalization(
                        inputs=net,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                        name="tconv3/bn",
                    )
                    net = tf.nn.relu(features=net, name="tconv3/relu")
                net_name = "layer_4"
                with tf.variable_scope(net_name):
                    net = tf.layers.Conv2DTranspose(
                        filters=32,
                        kernel_size=5,
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="tconv4",
                    )(net)
                    net = tf.layers.batch_normalization(
                        inputs=net,
                        momentum=self.config.trainer.batch_momentum,
                        training=self.is_training_dis,
                        name="tconv4/bn",
                    )
                    net = tf.nn.relu(features=net, name="tconv4/relu")
                net_name = "layer_5"
                with tf.variable_scope(net_name):
                    net = tf.layers.Conv2DTranspose(
                        filters=1,
                        kernel_size=5,
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=self.init_kernel,
                        name="tconv5",
                    )(net)
                    decoded = tf.nn.tanh(net, name="tconv5/tanh")
        return embedding, decoded

    def encoder_g(self, image_input, getter=None):
        with tf.variable_scope("Encoder_G", custom_getter=getter, reuse=tf.AUTO_REUSE):
            x_e = tf.reshape(
                image_input,
                [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
            )
            net_name = "Layer_1"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=64,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_g,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_2"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=128,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_g,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_3"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=256,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_g,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            x_e = tf.layers.Flatten()(x_e)
            net_name = "Layer_4"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Dense(
                    units=self.config.trainer.noise_dim,
                    kernel_initializer=self.init_kernel,
                    name="fc",
                )(x_e)
        return x_e

    def encoder_r(self, image_input, getter=None):
        with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
            x_e = tf.reshape(
                image_input,
                [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
            )
            net_name = "Layer_1"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=64,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_2"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=128,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_3"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=256,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            x_e = tf.layers.Flatten()(x_e)
            net_name = "Layer_4"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Dense(
                    units=self.config.trainer.noise_dim,
                    kernel_initializer=self.init_kernel,
                    name="fc",
                )(x_e)
        return x_e

    # Regularizer discriminator for the Generator Encoder
    def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):
        """ Discriminator architecture in tensorflow

        Discriminates between (x, x) and (x, rec_x)
        Args:
            img_tensor:
            recreated_img:
            getter: for exponential moving average during inference
            reuse: sharing variables or not
            do_spectral_norm:
        """
        layers = sn if do_spectral_norm else tf.layers
        with tf.variable_scope("Discriminator_xx", reuse=tf.AUTO_REUSE, custom_getter=getter):
            net = tf.concat([img_tensor, recreated_img], axis=1)
            net_name = "layer_1"
            with tf.variable_scope(net_name):
                net = layers.conv2d(
                    net,
                    filters=64,
                    kernel_size=4,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv1",
                )
                net = tf.nn.leaky_relu(
                    features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
                )
                net = tf.layers.dropout(
                    net,
                    rate=self.config.trainer.dropout_rate,
                    training=self.is_training_enc_g,
                    name="dropout",
                )
            with tf.variable_scope(net_name, reuse=True):
                weights = tf.get_variable("conv1/kernel")

            net_name = "layer_2"
            with tf.variable_scope(net_name):
                net = layers.conv2d(
                    net,
                    filters=128,
                    kernel_size=4,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2",
                )
                net = tf.nn.leaky_relu(
                    features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
                )
                net = tf.layers.dropout(
                    net,
                    rate=self.config.trainer.dropout_rate,
                    training=self.is_training_enc_g,
                    name="dropout",
                )
            net = tf.layers.Flatten()(net)

            intermediate_layer = net

            net_name = "layer_3"
            with tf.variable_scope(net_name):
                net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
                logits = tf.squeeze(net)

        return logits, intermediate_layer

    # Regularizer discriminator for the Reconstruction Encoder
    def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):
        """ Discriminator architecture in tensorflow

        Discriminates between (z, z) and (z, rec_z)
        Args:
            noise_tensor:
            recreated_noise:
            getter: for exponential moving average during inference
            reuse: sharing variables or not
            do_spectral_norm:
        """
        layers = sn if do_spectral_norm else tf.layers

        with tf.variable_scope("Discriminator_zz", reuse=tf.AUTO_REUSE, custom_getter=getter):
            y = tf.concat([noise_tensor, recreated_noise], axis=-1)

            net_name = "y_layer_1"
            with tf.variable_scope(net_name):
                y = layers.dense(y, units=64, kernel_initializer=self.init_kernel, name="fc")
                y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
                y = tf.layers.dropout(
                    y,
                    rate=self.config.trainer.dropout_rate,
                    training=self.is_training_enc_r,
                    name="dropout",
                )

            net_name = "y_layer_2"
            with tf.variable_scope(net_name):
                y = layers.dense(y, units=32, kernel_initializer=self.init_kernel, name="fc")
                y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
                y = tf.layers.dropout(
                    y,
                    rate=self.config.trainer.dropout_rate,
                    training=self.is_training_enc_r,
                    name="dropout",
                )

            intermediate_layer = y

            net_name = "y_layer_3"
            with tf.variable_scope(net_name):
                y = layers.dense(y, units=1, kernel_initializer=self.init_kernel, name="fc")
                logits = tf.squeeze(y)

        return logits, intermediate_layer

    ###############################################################################################
    # CUSTOM LOSSES
    ###############################################################################################
    def mse_loss(self, pred, data, mode="norm", order=2):
        if mode == "norm":
            delta = pred - data
            delta = tf.layers.Flatten()(delta)
            loss_val = tf.norm(delta, ord=order, axis=1, keepdims=False)
        elif mode == "mse":
            loss_val = tf.reduce_mean(tf.squared_difference(pred, data))
        return loss_val

    def pullaway_loss(self, embeddings):
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
        normalized_embeddings = embeddings / norm
        similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
        batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
        pt_loss = (tf.reduce_sum(similarity) - batch_size) / (batch_size * (batch_size - 1))
        return pt_loss

    def init_saver(self):
        self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)
"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.nn.tanh",
"tensorflow.reshape",
"tensorflow.summary.image",
"tensorflow.variable_scope",
"tensorflow.layers.Conv2D",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.concat",
"tensorflow.summary.merge",
"tensorflow.math.maximum",
"tensorflow.reduce_sum",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.random_normal_initializer",
"tensorflow.layers.Dense",
"tensorflow.norm",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.get_collection",
"tensorflow.tanh",
"tensorflow.layers.batch_normalization",
"tensorflow.zeros_like",
"tensorflow.nn.leaky_relu",
"tensorflow.train.Saver",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.group",
"tensorflow.layers.dense",
"tensorflow.layers.dropout",
"tensorflow.control_dependencies",
"tensorflow.placeholder",
"tensorflow.layers.Conv2DTranspose",
"tensorflow.summary.merge_all",
"tensorflow.squared_difference",
"tensorflow.train.AdamOptimizer",
"tensorflow.layers.Flatten",
"tensorflow.reduce_mean",
"tensorflow.square",
"tensorflow.nn.relu",
"tensorflow.get_variable"
]
] |