repo_name (string, 6-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---
AlexChrisF/udacity | [
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953",
"b7f85a74058fc63ccb7601c418450ab934ef5953"
] | [
"tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py",
"tensorflow/python/platform/flags.py",
"tensorflow/python/kernel_tests/softplus_op_test.py",
"tensorflow/contrib/keras/python/keras/applications/vgg19.py",
"tensorflow/compiler/tests/xla_test.py",
"tensorflow/python/kernel_tests/variable_ops_test.py",
"tensorflow/contrib/image/python/ops/single_image_random_dot_stereograms.py",
"tensorflow/contrib/distributions/python/kernel_tests/special_math_test.py",
"tensorflow/python/client/session.py",
"tensorflow/python/ops/data_flow_grad.py",
"tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py",
"tensorflow/python/training/adagrad_test.py",
"tensorflow/contrib/learn/python/learn/dataframe/transforms/csv_parser.py",
"tensorflow/python/kernel_tests/betainc_op_test.py",
"tensorflow/tools/docs/build_docs_test.py",
"tensorflow/contrib/session_bundle/example/export_half_plus_two.py",
"tensorflow/tensorboard/backend/event_processing/event_multiplexer.py",
"tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py",
"tensorflow/tensorboard/backend/http_util.py",
"tensorflow/examples/learn/text_classification_character_rnn.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.contrib.layers.sparse_feature_cross.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.layers.python.ops import sparse_feature_cross_op\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import test\n\n\nclass SparseCrossOpTest(test.TestCase):\n\n def test_simple(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']]),\n self._sparse_tensor([['batch1-FC2-F1'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']])\n ])\n expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_dense(self):\n \"\"\"Tests only dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']],\n dtypes.string),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'\n ], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_mixed_string_sparse(self):\n \"\"\"Tests mixed type.\"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([[11], [333, 55555]]),\n self._sparse_tensor([['batch1-FC2-F1'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']])\n ])\n expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [\n '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',\n '55555_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_mixed_string_dense(self):\n \"\"\"Tests mixed dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n 
['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor([[\n '11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',\n '333_X_batch1-FC2-F2'\n ], [\n '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',\n '999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_sparse_cross_dense(self):\n \"\"\"Tests sparse and dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']]),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor(\n [['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_sparse_input(self):\n \"\"\"Tests mixed type sparse and dense inputs.\"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([[11], [333, 5555]]),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor(\n [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [\n '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',\n '5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_permutation_3x3x3(self):\n \"\"\"Tests 3x3x3 permutation.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor(\n [['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),\n self._sparse_tensor(\n [['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'\n ]])\n with self.test_session() as 
sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_permutation_3x1x2(self):\n \"\"\"Tests 3x1x2 permutation.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_large_batch(self):\n \"\"\"Tests with large batch size to force multithreding.\n \"\"\"\n batch_size = 5000\n col1 = []\n col2 = []\n col3 = []\n for b in range(batch_size):\n col1.append(\n ['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])\n col2.append(['batch%d-FC2-F1' % b])\n col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])\n\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(col1), self._sparse_tensor(col2),\n self._sparse_tensor(col3)\n ])\n\n col_out = []\n for b in range(batch_size):\n col_out.append([\n 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),\n 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),\n 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)\n ])\n\n expected_out = self._sparse_tensor(col_out)\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_one_column_empty(self):\n \"\"\"Tests when one column is empty.\n\n The crossed tensor should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),\n self._sparse_tensor([], 1),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ])\n with self.test_session() as sess:\n self._assert_sparse_tensor_empty(sess.run(op))\n\n def test_some_columns_empty(self):\n \"\"\"Tests when more than one columns are empty.\n\n Cross for the corresponding batch should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),\n self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'\n ]], 2)\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_all_columns_empty(self):\n \"\"\"Tests when all columns are empty.\n\n The crossed tensor should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([]), self._sparse_tensor([]),\n self._sparse_tensor([])\n ])\n with self.test_session() as sess:\n self._assert_sparse_tensor_empty(sess.run(op))\n\n def test_hashed_output_zero_bucket(self):\n 
\"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[3735511728867393167]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_zero_bucket_v2(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[1971693436396284976]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n # TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.\n def test_hashed_output(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n num_buckets=100)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[74]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_v2(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n num_buckets=100,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[83]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_v1_has_collision(self):\n \"\"\"Tests the old version of the fingerprint concatenation has collisions.\n \"\"\"\n # The last 10 bits of 359 and 1024+359 are identical.\n # As a result, all the crosses collide.\n t1 = constant_op.constant([[359], [359 + 1024]])\n t2 = constant_op.constant([list(range(10)), list(range(10))])\n cross = sparse_feature_cross_op.sparse_feature_cross(\n [t2, t1], hashed_output=True, num_buckets=1024)\n cross_dense = sparse_ops.sparse_tensor_to_dense(cross)\n with session.Session():\n values = cross_dense.eval()\n self.assertTrue(numpy.equal(values[0], values[1]).all())\n\n def test_hashed_output_v2_has_no_collision(self):\n \"\"\"Tests the new version of the fingerprint concatenation has no collisions.\n \"\"\"\n # Although the last 10 bits of 359 and 1024+359 are identical.\n # As a result, all the crosses shouldn't collide.\n t1 = constant_op.constant([[359], [359 + 1024]])\n t2 = constant_op.constant([list(range(10)), list(range(10))])\n cross = sparse_feature_cross_op.sparse_feature_cross(\n [t2, t1],\n hashed_output=True,\n num_buckets=1024,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n cross_dense = sparse_ops.sparse_tensor_to_dense(cross)\n with session.Session():\n values = 
cross_dense.eval()\n self.assertTrue(numpy.not_equal(values[0], values[1]).all())\n\n def test_hashed_3x1x2(self):\n \"\"\"Tests 3x1x2 permutation with hashed output.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ],\n hashed_output=True,\n num_buckets=1000)\n with self.test_session() as sess:\n out = sess.run(op)\n self.assertEqual(6, len(out.values))\n self.assertAllEqual([[0, i] for i in range(6)], out.indices)\n self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))\n all_values_are_different = len(out.values) == len(set(out.values))\n self.assertTrue(all_values_are_different)\n\n def _assert_sparse_tensor_empty(self, sp):\n self.assertEquals(0, sp.indices.size)\n self.assertEquals(0, sp.values.size)\n # TODO(zakaria): check if we can ignore the first dim of the shape.\n self.assertEquals(0, sp.dense_shape[1])\n\n def _assert_sparse_tensor_equals(self, sp1, sp2):\n self.assertAllEqual(sp1.indices.eval(), sp2.indices)\n self.assertAllEqual(sp1.values.eval(), sp2.values)\n self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)\n\n def _sparse_tensor(self, data, batch_size=-1):\n \"\"\"Generates a SparseTensor.\n\n Args:\n data: Should be a list of list of strings or int64. Each item of the outer\n list represents a batch. Each item of the batch is a feature of a\n specific feature column.\n batch_size: optional batch size, especially for cases when data has no\n entry for some batches.\n\n Returns:\n A SparseTensor.\n \"\"\"\n indices = []\n values = []\n max_col_count = 0\n for batch, batch_ix in zip(data, range(len(data))):\n for column, column_ix in zip(batch, range(len(batch))):\n indices.append([batch_ix, column_ix])\n values.append(column)\n max_col_count = max(max_col_count, column_ix + 1)\n shape = [batch_size if batch_size != -1 else len(data), max_col_count]\n value_type = (dtypes.string if not values or isinstance(values[0], str) else\n dtypes.int64)\n return sparse_tensor.SparseTensor(\n constant_op.constant(indices, dtypes.int64, [len(indices), 2]),\n constant_op.constant(values, value_type, [len(indices)]),\n constant_op.constant(shape, dtypes.int64))\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Implementation of the flags interface.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse as _argparse\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_global_parser = _argparse.ArgumentParser()\n\n\n# pylint: disable=invalid-name\n\n\nclass _FlagValues(object):\n \"\"\"Global container and accessor for flags and their values.\"\"\"\n\n def __init__(self):\n self.__dict__['__flags'] = {}\n self.__dict__['__parsed'] = False\n\n def _parse_flags(self, args=None):\n result, unparsed = _global_parser.parse_known_args(args=args)\n for flag_name, val in vars(result).items():\n self.__dict__['__flags'][flag_name] = val\n self.__dict__['__parsed'] = True\n return unparsed\n\n def __getattr__(self, name):\n \"\"\"Retrieves the 'value' attribute of the flag --name.\"\"\"\n if not self.__dict__['__parsed']:\n self._parse_flags()\n if name not in self.__dict__['__flags']:\n raise AttributeError(name)\n return self.__dict__['__flags'][name]\n\n def __setattr__(self, name, value):\n \"\"\"Sets the 'value' attribute of the flag --name.\"\"\"\n if not self.__dict__['__parsed']:\n self._parse_flags()\n self.__dict__['__flags'][name] = value\n\n\ndef _define_helper(flag_name, default_value, docstring, flagtype):\n \"\"\"Registers 'flag_name' with 'default_value' and 'docstring'.\"\"\"\n _global_parser.add_argument('--' + flag_name,\n default=default_value,\n help=docstring,\n type=flagtype)\n\n\n# Provides the global object that can be used to access flags.\nFLAGS = _FlagValues()\n\n\ndef DEFINE_string(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'string'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a string.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, str)\n\n\ndef DEFINE_integer(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'int'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as an int.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, int)\n\n\ndef DEFINE_boolean(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'boolean'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a boolean.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n # Register a custom function for 'bool' so --flag=True works.\n def str2bool(v):\n return v.lower() in ('true', 't', '1')\n _global_parser.add_argument('--' + flag_name,\n nargs='?',\n const=True,\n help=docstring,\n 
default=default_value,\n type=str2bool)\n\n # Add negated version, stay consistent with argparse with regard to\n # dashes in flag names.\n _global_parser.add_argument('--no' + flag_name,\n action='store_false',\n dest=flag_name.replace('-', '_'))\n\n\n# The internal google library defines the following alias, so we match\n# the API for consistency.\nDEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name\n\n\ndef DEFINE_float(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'float'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a float.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, float)\n\n_allowed_symbols = [\n # We rely on gflags documentation.\n 'DEFINE_bool',\n 'DEFINE_boolean',\n 'DEFINE_float',\n 'DEFINE_integer',\n 'DEFINE_string',\n 'FLAGS',\n]\nremove_undocumented(__name__, _allowed_symbols)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Softplus and SoftplusGrad.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\nclass SoftplusTest(test.TestCase):\n\n def _npSoftplus(self, np_features):\n np_features = np.asarray(np_features)\n zero = np.asarray(0).astype(np_features.dtype)\n return np.logaddexp(zero, np_features)\n\n def _testSoftplus(self, np_features, use_gpu=False):\n np_softplus = self._npSoftplus(np_features)\n with self.test_session(use_gpu=use_gpu):\n softplus = nn_ops.softplus(np_features)\n tf_softplus = softplus.eval()\n self.assertAllCloseAccordingToType(np_softplus, tf_softplus)\n self.assertTrue(np.all(tf_softplus > 0))\n self.assertShapeEqual(np_softplus, softplus)\n\n def testNumbers(self):\n for t in [np.float16, np.float32, np.float64]:\n self._testSoftplus(\n np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),\n use_gpu=False)\n self._testSoftplus(\n np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),\n use_gpu=True)\n log_eps = np.log(np.finfo(t).eps)\n one = t(1)\n ten = t(10)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=False)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten - log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=True)\n\n def testGradient(self):\n with self.test_session():\n x = constant_op.constant(\n [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],\n shape=[2, 5],\n name=\"x\")\n y = nn_ops.softplus(x, name=\"softplus\")\n x_init = np.asarray(\n [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],\n dtype=np.float32,\n order=\"F\")\n err = gradient_checker.compute_gradient_error(\n x, [2, 5], y, [2, 5], x_init_value=x_init)\n print(\"softplus (float) gradient err = \", err)\n self.assertLess(err, 1e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n\"\"\"VGG19 model for Keras.\n\n# Reference\n\n- [Very Deep Convolutional Networks for Large-Scale Image\nRecognition](https://arxiv.org/abs/1409.1556)\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\n\nfrom tensorflow.contrib.keras.python.keras import backend as K\nfrom tensorflow.contrib.keras.python.keras.applications.imagenet_utils import _obtain_input_shape\nfrom tensorflow.contrib.keras.python.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import\nfrom tensorflow.contrib.keras.python.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import\nfrom tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs\nfrom tensorflow.contrib.keras.python.keras.layers import Conv2D\nfrom tensorflow.contrib.keras.python.keras.layers import Dense\nfrom tensorflow.contrib.keras.python.keras.layers import Flatten\nfrom tensorflow.contrib.keras.python.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.contrib.keras.python.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.contrib.keras.python.keras.layers import Input\nfrom tensorflow.contrib.keras.python.keras.layers import MaxPooling2D\nfrom tensorflow.contrib.keras.python.keras.models import Model\nfrom tensorflow.contrib.keras.python.keras.utils import layer_utils\nfrom tensorflow.contrib.keras.python.keras.utils.data_utils import get_file\n\n\nWEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n\ndef VGG19(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates the VGG19 architecture.\n\n Optionally loads weights pre-trained\n on ImageNet. Note that when using TensorFlow,\n for best performance you should set\n `image_data_format=\"channels_last\"` in your Keras config\n at ~/.keras/keras.json.\n\n The model and the weights are compatible with both\n TensorFlow and Theano. The data format\n convention used by the model is the one\n specified in your Keras config file.\n\n Arguments:\n include_top: whether to include the 3 fully-connected\n layers at the top of the network.\n weights: one of `None` (random initialization)\n or \"imagenet\" (pre-training on ImageNet).\n input_tensor: optional Keras tensor (i.e. 
output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or `(3, 224, 244)` (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 48.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n \"\"\"\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n # Determine proper input shape\n input_shape = _obtain_input_shape(\n input_shape,\n default_size=224,\n min_size=48,\n data_format=K.image_data_format(),\n include_top=include_top)\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n img_input = Input(tensor=input_tensor, shape=input_shape)\n\n # Block 1\n x = Conv2D(\n 64, (3, 3), activation='relu', padding='same',\n name='block1_conv1')(img_input)\n x = Conv2D(\n 64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(\n 128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = Conv2D(\n 128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(\n 256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = Conv2D(\n 256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n x = Conv2D(\n 256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n x = Conv2D(\n 256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n x = Conv2D(\n 512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n x = Conv2D(\n 512, (3, 3), 
activation='relu', padding='same', name='block5_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(4096, activation='relu', name='fc1')(x)\n x = Dense(4096, activation='relu', name='fc2')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='vgg19')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file(\n 'vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models')\n else:\n weights_path = get_file(\n 'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='block5_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1')\n layer_utils.convert_dense_weights_data_format(dense, shape,\n 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Definition of XLA test case.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport random\nimport re\n\nimport numpy as np\n\nfrom tensorflow.contrib.compiler import jit\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging as logging\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('test_device', None,\n 'Tensorflow device on which to place operators under test')\nflags.DEFINE_string('types', None, 'Types to test. Comma-separated list.')\nflags.DEFINE_string('disabled_manifest', None,\n 'Path to a file with a list of tests that should not run.')\n\n\nclass XLATestCase(test.TestCase):\n \"\"\"XLA test cases are parameterized test cases.\"\"\"\n\n def __init__(self, method_name='runTest'):\n super(XLATestCase, self).__init__(method_name)\n self.device = FLAGS.test_device\n self.has_custom_call = (self.device == 'XLA_CPU')\n self.all_tf_types = [\n dtypes.DType(types_pb2.DataType.Value(name))\n for name in FLAGS.types.split(',')\n ]\n self.all_types = [dtype.as_numpy_dtype for dtype in self.all_tf_types]\n self.int_types = [\n dtype.as_numpy_dtype for dtype in self.all_tf_types if dtype.is_integer\n ]\n self.float_types = [\n dtype.as_numpy_dtype for dtype in self.all_tf_types if dtype.is_floating\n ]\n self.numeric_types = self.int_types + self.float_types\n\n # Parse the manifest file, if any, into a regex identifying tests to\n # disable\n self.disabled_regex = None\n if FLAGS.disabled_manifest is not None:\n comments_re = re.compile('#.*$')\n manifest_file = open(FLAGS.disabled_manifest, 'r')\n lines = manifest_file.read().splitlines()\n lines = [comments_re.sub('', l).strip() for l in lines]\n self.disabled_regex = re.compile('|'.join(lines))\n manifest_file.close()\n\n def setUp(self):\n name = '{}.{}'.format(type(self).__name__, self._testMethodName)\n if self.disabled_regex is not None and self.disabled_regex.match(name):\n logging.info('Disabled test case: %s', name)\n self.skipTest('{} is disabled by manifest.'.format(name))\n return\n logging.info('Start test case: %s', name)\n\n random.seed(random_seed.DEFAULT_GRAPH_SEED)\n np.random.seed(random_seed.DEFAULT_GRAPH_SEED)\n\n def tearDown(self):\n logging.info('End test case: %s', self._testMethodName)\n\n @contextlib.contextmanager\n def test_session(self):\n \"\"\"Custom 
implementation of test_session() for XLA tests.\n\n We override the standard Tensorflow test_session() since it is too\n specific to CPU and GPU tests. In particular, we want to disable soft\n placement and explicitly assign ops to devices under test.\n\n Yields:\n A session to use when running a test case.\n \"\"\"\n graph = ops.Graph()\n with session.Session(graph=graph) as sess, graph.as_default():\n yield sess\n\n @contextlib.contextmanager\n def test_scope(self):\n \"\"\"Test scope that runs tests on a Tensorflow/XLA device.\n\n Uses a compilation_scope() to mark operators to compile.\n\n Yields:\n A scope to apply to the operators under test.\n \"\"\"\n with ops.device('device:{}:0'.format(self.device)):\n yield\n\n\ndef Benchmark(tf_bench,\n builder_fn,\n use_xla_jit,\n device,\n separate_compiled_gradients=False):\n \"\"\"Build a graph and run benchmarks against it, with or without XLA.\n\n Args:\n tf_bench: An instance of tf.test.Benchmark, used to run the benchmark.\n builder_fn: A function that builds a graph when invoked, and returns\n (name, fetches), where name is the name of the test, and fetches\n is a list of tensors to fetch as output.\n use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF.\n device: The tensorflow device to run on, e.g. \"cpu\", \"gpu\".\n separate_compiled_gradients: If true put each gradient subgraph into a\n separate compilation scope. This gives fine-grained control over which\n portions of the graph will be compiled as a single unit. Compiling\n gradients separately may yield better performance for some graphs.\n The scope is named based on the scope of the forward computation as well\n as the name of the gradients. As a result, the gradients will be compiled\n in a scope that is separate from both the forward computation, and from\n other gradients.\n \"\"\"\n\n with ops.Graph().as_default():\n name = None\n targets = []\n with ops.device(device):\n fetches = []\n jit_scope = jit.experimental_jit_scope\n with jit_scope(\n compile_ops=use_xla_jit,\n separate_compiled_gradients=separate_compiled_gradients):\n name, fetches = builder_fn()\n\n # We only want to benchmark the operations themselves, and not the data\n # transfer of the result(s). Non-compiled identity ops ensure XLA\n # doesn't know we're dropping the results, otherwise it might compile\n # away the entire computation.\n for fetch in fetches:\n targets.append(array_ops.identity(fetch).op)\n\n config = config_pb2.ConfigProto(allow_soft_placement=True)\n with session.Session(config=config) as sess:\n sess.run(variables.global_variables_initializer())\n xla = 'xla_' if use_xla_jit else ''\n tf_bench.run_op_benchmark(\n sess, targets, name='%s_%s%s' % (name, xla, device))\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tf.variable_op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n_NP_TO_TF = {\n np.float32: dtypes.float32,\n np.float64: dtypes.float64,\n np.int32: dtypes.int32,\n np.int64: dtypes.int64,\n}\n\n\nclass VariableOpTest(test.TestCase):\n\n def _initFetch(self, x, tftype, use_gpu=None):\n with self.test_session(use_gpu=use_gpu):\n p = state_ops.variable_op(x.shape, tftype)\n op = state_ops.assign(p, x)\n op.op.run()\n return p.eval()\n\n def _testTypes(self, vals):\n for dtype in [np.float32, np.float64, np.int32, np.int64]:\n self.setUp()\n x = vals.astype(dtype)\n tftype = _NP_TO_TF[dtype]\n self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))\n # NOTE(touts): the GPU test should pass for all types, whether the\n # Variable op has an implementation for that type on GPU as we expect\n # that Variable and Assign have GPU implementations for matching tf.\n self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))\n\n def testBasic(self):\n self._testTypes(np.arange(0, 20).reshape([4, 5]))\n\n def testset_shape(self):\n p = state_ops.variable_op([1, 2], dtypes.float32)\n self.assertEqual([1, 2], p.get_shape())\n p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)\n self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())\n\n def testAssign(self):\n value = np.array([[42.0, 43.0]])\n var = state_ops.variable_op(value.shape, dtypes.float32)\n self.assertShapeEqual(value, var)\n assigned = state_ops.assign(var, value)\n self.assertShapeEqual(value, assigned)\n\n def testAssignNoValidateShape(self):\n value = np.array([[42.0, 43.0]])\n var = state_ops.variable_op(value.shape, dtypes.float32)\n self.assertShapeEqual(value, var)\n assigned = state_ops.assign(var, value, validate_shape=False)\n self.assertShapeEqual(value, assigned)\n\n def testAssignNoVarShape(self):\n value = np.array([[42.0, 43.0]])\n var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)\n self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())\n assigned = state_ops.assign(var, value)\n self.assertShapeEqual(value, assigned)\n\n def testAssignNoVarShapeNoValidateShape(self):\n value = np.array([[42.0, 43.0]])\n var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)\n 
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())\n assigned = state_ops.assign(var, value, validate_shape=False)\n self.assertShapeEqual(value, assigned)\n\n def _NewShapelessTensor(self):\n tensor = array_ops.placeholder(dtypes.float32)\n self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())\n return tensor\n\n def testAssignNoValueShape(self):\n value = self._NewShapelessTensor()\n shape = [1, 2]\n var = state_ops.variable_op(shape, dtypes.float32)\n assigned = state_ops.assign(var, value)\n self.assertEqual(shape, var.get_shape())\n self.assertEqual(shape, assigned.get_shape())\n\n def testAssignNoValueShapeNoValidateShape(self):\n value = self._NewShapelessTensor()\n shape = [1, 2]\n var = state_ops.variable_op(shape, dtypes.float32)\n self.assertEqual(shape, var.get_shape())\n assigned = state_ops.assign(var, value, validate_shape=False)\n self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())\n\n def testAssignNoShape(self):\n with self.test_session():\n value = self._NewShapelessTensor()\n var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)\n self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())\n self.assertEqual(tensor_shape.unknown_shape(),\n state_ops.assign(var, value).get_shape())\n\n def testAssignNoShapeNoValidateShape(self):\n with self.test_session():\n value = self._NewShapelessTensor()\n var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)\n self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())\n self.assertEqual(\n tensor_shape.unknown_shape(),\n state_ops.assign(\n var, value, validate_shape=False).get_shape())\n\n def testAssignUpdate(self):\n var = state_ops.variable_op([1, 2], dtypes.float32)\n added = state_ops.assign_add(var, [[2.0, 3.0]])\n self.assertEqual([1, 2], added.get_shape())\n subbed = state_ops.assign_sub(var, [[12.0, 13.0]])\n self.assertEqual([1, 2], subbed.get_shape())\n\n def testAssignUpdateNoVarShape(self):\n var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)\n added = state_ops.assign_add(var, [[2.0, 3.0]])\n self.assertEqual([1, 2], added.get_shape())\n subbed = state_ops.assign_sub(var, [[12.0, 13.0]])\n self.assertEqual([1, 2], subbed.get_shape())\n\n def testAssignUpdateNoValueShape(self):\n var = state_ops.variable_op([1, 2], dtypes.float32)\n added = state_ops.assign_add(var, self._NewShapelessTensor())\n self.assertEqual([1, 2], added.get_shape())\n subbed = state_ops.assign_sub(var, self._NewShapelessTensor())\n self.assertEqual([1, 2], subbed.get_shape())\n\n def testAssignUpdateNoShape(self):\n var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)\n added = state_ops.assign_add(var, self._NewShapelessTensor())\n self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())\n subbed = state_ops.assign_sub(var, self._NewShapelessTensor())\n self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())\n\n def testTemporaryVariable(self):\n with self.test_session(use_gpu=True):\n var = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"foo\")\n var = state_ops.assign(var, [[4.0, 5.0]])\n var = state_ops.assign_add(var, [[6.0, 7.0]])\n final = gen_state_ops._destroy_temporary_variable(var, var_name=\"foo\")\n self.assertAllClose([[10.0, 12.0]], final.eval())\n\n def testDestroyNonexistentTemporaryVariable(self):\n with self.test_session(use_gpu=True):\n var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)\n final = gen_state_ops._destroy_temporary_variable(var, 
var_name=\"bad\")\n with self.assertRaises(errors.NotFoundError):\n final.eval()\n\n def testDuplicateTemporaryVariable(self):\n with self.test_session(use_gpu=True):\n var1 = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"dup\")\n var1 = state_ops.assign(var1, [[1.0, 2.0]])\n var2 = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"dup\")\n var2 = state_ops.assign(var2, [[3.0, 4.0]])\n final = var1 + var2\n with self.assertRaises(errors.AlreadyExistsError):\n final.eval()\n\n def testDestroyTemporaryVariableTwice(self):\n with self.test_session(use_gpu=True):\n var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)\n val1 = gen_state_ops._destroy_temporary_variable(var, var_name=\"dup\")\n val2 = gen_state_ops._destroy_temporary_variable(var, var_name=\"dup\")\n final = val1 + val2\n with self.assertRaises(errors.NotFoundError):\n final.eval()\n\n def testTemporaryVariableNoLeak(self):\n with self.test_session(use_gpu=True):\n var = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"bar\")\n final = array_ops.identity(var)\n final.eval()\n\n def testTwoTemporaryVariablesNoLeaks(self):\n with self.test_session(use_gpu=True):\n var1 = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"var1\")\n var2 = gen_state_ops._temporary_variable(\n [1, 2], dtypes.float32, var_name=\"var2\")\n final = var1 + var2\n final.eval()\n\n def testAssignDependencyAcrossDevices(self):\n with self.test_session(use_gpu=True):\n # The variable and an op to increment it are on the GPU.\n var = state_ops.variable_op([1], dtypes.float32)\n state_ops.assign(var, [1.0]).eval()\n increment = state_ops.assign_add(var, [1.0])\n with ops.control_dependencies([increment]):\n with ops.device(\"/cpu:0\"):\n # This mul op is pinned to the CPU, but reads the variable from the\n # GPU. The test ensures that the dependency on 'increment' is still\n # honored, i.e., the Send and Recv from GPU to CPU should take place\n # only after the increment.\n result = math_ops.multiply(var, var)\n self.assertAllClose([4.0], result.eval())\n\n def testIsVariableInitialized(self):\n for use_gpu in [True, False]:\n with self.test_session(use_gpu=use_gpu):\n v0 = state_ops.variable_op([1, 2], dtypes.float32)\n self.assertEqual(False, variables.is_variable_initialized(v0).eval())\n state_ops.assign(v0, [[2.0, 3.0]]).eval()\n self.assertEqual(True, variables.is_variable_initialized(v0).eval())\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python layer for image_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import resource_loader\n\n_sirds_ops = loader.load_op_library(\n resource_loader.get_path_to_datafile(\n \"_single_image_random_dot_stereograms.so\"))\n\ndef single_image_random_dot_stereograms(\n depth_values,\n hidden_surface_removal=None,\n convergence_dots_size=None,\n dots_per_inch=None,\n eye_separation=None, mu=None,\n normalize=None, normalize_max=None,\n normalize_min=None,\n border_level=None,\n number_colors=None,\n output_image_shape=None,\n output_data_window=None):\n \"\"\"Output a RandomDotStereogram Tensor for export via encode_PNG/JPG OP.\n\n Given the 2-D tensor 'depth_values' with encoded Z values, this operation\n will encode 3-D data into a 2-D image. The output of this Op is suitable\n for the encode_PNG/JPG ops. Be careful with image compression as this may\n corrupt the encode 3-D data witin the image.\n\n Based upon [this paper](http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper).\n\n This outputs a SIRDS image as picture_out.png:\n\n ```python\n img=[[1,2,3,3,2,1],\n [1,2,3,4,5,2],\n [1,2,3,4,5,3],\n [1,2,3,4,5,4],\n [6,5,4,4,5,5]]\n session = tf.InteractiveSession()\n sirds = single_image_random_dot_stereograms(\n img,\n convergence_dots_size=8,\n number_colors=256,normalize=True)\n\n out = sirds.eval()\n png = tf.image.encode_png(out).eval()\n with open('picture_out.png', 'wb') as f:\n f.write(png)\n ```\n\n Args:\n depth_values: A `Tensor`. Must be one of the following types: \n `float64`, `float32`, `int64`, `int32`. Z values of data to encode\n into 'output_data_window' window, lower further away {0.0 floor(far),\n 1.0 ceiling(near) after norm}, must be 2-D tensor\n hidden_surface_removal: An optional `bool`. Defaults to `True`.\n Activate hidden surface removal\n convergence_dots_size: An optional `int`. Defaults to `8`.\n Black dot size in pixels to help view converge image, drawn on bottom\n of the image\n dots_per_inch: An optional `int`. Defaults to `72`.\n Output device in dots/inch\n eye_separation: An optional `float`. Defaults to `2.5`.\n Separation between eyes in inches\n mu: An optional `float`. Defaults to `0.3333`.\n Depth of field, Fraction of viewing distance (eg. 1/3 = 0.3333)\n normalize: An optional `bool`. Defaults to `True`.\n Normalize input data to [0.0, 1.0] \n normalize_max: An optional `float`. Defaults to `-100`.\n Fix MAX value for Normalization (0.0) - if < MIN, autoscale\n normalize_min: An optional `float`. Defaults to `100`.\n Fix MIN value for Normalization (0.0) - if > MAX, autoscale\n border_level: An optional `float`. 
Defaults to `0`.\n Value of bord in depth 0.0 {far} to 1.0 {near} \n number_colors: An optional `int`. Defaults to `256`. 2 (Black &\n White), 256 (grayscale), and Numbers > 256 (Full Color) are\n supported\n output_image_shape: An optional `tf.TensorShape` or list of `ints`. \n Defaults to shape `[1024, 768, 1]`. Defines output shape of returned\n image in '[X,Y, Channels]' 1-grayscale, 3 color; channels will be\n updated to 3 if number_colors > 256\n output_data_window: An optional `tf.TensorShape` or list of `ints`.\n Defaults to `[1022, 757]`. Size of \"DATA\" window, must be equal to or\n smaller than `output_image_shape`, will be centered and use\n `convergence_dots_size` for best fit to avoid overlap if possible\n\n Returns:\n A `Tensor` of type `uint8` of shape 'output_image_shape' with encoded\n 'depth_values'\n \"\"\"\n\n result = _sirds_ops.single_image_random_dot_stereograms(\n depth_values=depth_values,\n hidden_surface_removal=hidden_surface_removal,\n convergence_dots_size=convergence_dots_size,\n dots_per_inch=dots_per_inch,\n eye_separation=eye_separation, mu=mu,\n normalize=normalize,\n normalize_max=normalize_max,\n normalize_min=normalize_min,\n border_level=border_level,\n number_colors=number_colors,\n output_image_shape=output_image_shape,\n output_data_window=output_data_window)\n return result\n\nops.NotDifferentiable(\"SingleImageRandomDotStereograms\")\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Special Math Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nfrom scipy import special\nfrom scipy import stats\n\nfrom tensorflow.contrib.distributions.python.ops import special_math\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\nsm = special_math\n\n\ndef _check_strictly_increasing(array_1d):\n diff = np.diff(array_1d)\n np.testing.assert_array_less(0, diff)\n\n\ndef _make_grid(dtype, grid_spec):\n \"\"\"Returns a uniform grid + noise, reshaped to shape argument.\"\"\"\n rng = np.random.RandomState(0)\n num_points = np.prod(grid_spec.shape)\n grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)\n grid_spacing = (grid_spec.max - grid_spec.min) / num_points\n grid += 0.1 * grid_spacing * rng.randn(*grid.shape)\n # More useful if it's sorted (e.g. for testing monotonicity, or debugging).\n grid = np.sort(grid)\n return np.reshape(grid, grid_spec.shape)\n\n\nGridSpec = collections.namedtuple(\"GridSpec\", [\"min\", \"max\", \"shape\"])\n\nErrorSpec = collections.namedtuple(\"ErrorSpec\", [\"rtol\", \"atol\"])\n\n\nclass NdtrTest(test.TestCase):\n _use_log = False\n # Grid min/max chosen to ensure 0 < cdf(x) < 1.\n _grid32 = GridSpec(min=-12.9, max=5., shape=[100])\n _grid64 = GridSpec(min=-37.5, max=8., shape=[100])\n _error32 = ErrorSpec(rtol=1e-4, atol=0.)\n _error64 = ErrorSpec(rtol=1e-6, atol=0.)\n\n def _test_grid(self, dtype, grid_spec, error_spec):\n if self._use_log:\n self._test_grid_log(dtype, grid_spec, error_spec)\n else:\n self._test_grid_no_log(dtype, grid_spec, error_spec)\n\n def _test_grid_log(self, dtype, grid_spec, error_spec):\n with self.test_session():\n grid = _make_grid(dtype, grid_spec)\n actual = sm.log_ndtr(grid).eval()\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertTrue(np.isfinite(actual).all())\n # On the grid, -inf < log_cdf(x) < 0. In this case, we should be able\n # to use a huge grid because we have used tricks to escape numerical\n # difficulties.\n self.assertTrue((actual < 0).all())\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n expected = special.log_ndtr(grid)\n # Scipy prematurely goes to zero at some places that we don't. 
So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n actual.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n def _test_grid_no_log(self, dtype, grid_spec, error_spec):\n with self.test_session():\n grid = _make_grid(dtype, grid_spec)\n actual = sm.ndtr(grid).eval()\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertTrue(np.isfinite(actual).all())\n # On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due\n # to numerical limitations of cdf.\n self.assertTrue((actual > 0).all())\n self.assertTrue((actual < 1).all())\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n expected = special.ndtr(grid)\n # Scipy prematurely goes to zero at some places that we don't. So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n actual.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n def test_float32(self):\n self._test_grid(np.float32, self._grid32, self._error32)\n\n def test_float64(self):\n self._test_grid(np.float64, self._grid64, self._error64)\n\n\nclass LogNdtrTestLower(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])\n _grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])\n _error32 = ErrorSpec(rtol=1e-4, atol=0.)\n _error64 = ErrorSpec(rtol=1e-4, atol=0.)\n\n\n# The errors are quite large when the input is > 6 or so. Also,\n# scipy.special.log_ndtr becomes zero very early, before 10,\n# (due to ndtr becoming 1). We approximate Log[1 + epsilon] as epsilon, and\n# avoid this issue.\nclass LogNdtrTestMid(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(\n min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])\n _grid64 = GridSpec(\n min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])\n # Differences show up as soon as we're in the tail, so add some atol.\n _error32 = ErrorSpec(rtol=0.1, atol=1e-7)\n _error64 = ErrorSpec(rtol=0.1, atol=1e-7)\n\n\nclass LogNdtrTestUpper(NdtrTest):\n _use_log = True\n _grid32 = GridSpec(\n min=sm.LOGNDTR_FLOAT32_UPPER,\n max=12., # Beyond this, log_cdf(x) may be zero.\n shape=[100])\n _grid64 = GridSpec(\n min=sm.LOGNDTR_FLOAT64_UPPER,\n max=35., # Beyond this, log_cdf(x) may be zero.\n shape=[100])\n _error32 = ErrorSpec(rtol=1e-6, atol=1e-14)\n _error64 = ErrorSpec(rtol=1e-6, atol=1e-14)\n\n\nclass NdtrGradientTest(test.TestCase):\n _use_log = False\n _grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])\n _error32 = ErrorSpec(rtol=1e-4, atol=0)\n _error64 = ErrorSpec(rtol=1e-7, atol=0)\n\n def assert_all_true(self, v):\n self.assertAllEqual(np.ones_like(v, dtype=np.bool), v)\n\n def assert_all_false(self, v):\n self.assertAllEqual(np.zeros_like(v, dtype=np.bool), v)\n\n def _test_grad_finite(self, dtype):\n with self.test_session():\n x = variables.Variable([-100., 0., 100.], dtype=dtype)\n output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))\n grad_output = gradients_impl.gradients(output, x)\n variables.global_variables_initializer().run()\n # isfinite checks for NaN and Inf.\n self.assert_all_true(np.isfinite(output.eval()))\n self.assert_all_true(np.isfinite(grad_output[0].eval()))\n\n def _test_grad_accuracy(self, dtype, grid_spec, error_spec):\n raw_grid = _make_grid(dtype, grid_spec)\n grid = ops.convert_to_tensor(raw_grid)\n with self.test_session():\n fn = sm.log_ndtr if self._use_log else sm.ndtr\n\n # 
If there are N points in the grid,\n # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of\n # the ith output point w.r.t. the jth grid point. We only expect the\n # diagonal to be nonzero.\n # TODO(b/31131137): Replace tf.test.compute_gradient with our own custom\n # gradient evaluation to ensure we correctly handle small function delta.\n grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,\n fn(grid),\n grid_spec.shape)\n grad_eval = np.diag(grad_eval)\n\n # Check for NaN separately in order to get informative failures.\n self.assert_all_false(np.isnan(grad_eval))\n self.assert_all_true(grad_eval > 0.)\n # isfinite checks for NaN and Inf.\n self.assert_all_true(np.isfinite(grad_eval))\n\n # Do the same checks but explicitly compute the gradient.\n # (We did this because we're not sure if we trust\n # tf.test.compute_gradient.)\n grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()\n self.assert_all_false(np.isnan(grad_eval))\n if self._use_log:\n g = np.reshape(grad_eval, [-1])\n half = np.ceil(len(g) / 2)\n self.assert_all_true(g[:int(half)] > 0.)\n self.assert_all_true(g[int(half):] >= 0.)\n else:\n # The ndtr gradient will only be non-zero in the range [-14, 14] for\n # float32 and [-38, 38] for float64.\n self.assert_all_true(grad_eval >= 0.)\n # isfinite checks for NaN and Inf.\n self.assert_all_true(np.isfinite(grad_eval))\n\n # Versus scipy.\n expected = stats.norm.pdf(raw_grid)\n if self._use_log:\n expected /= special.ndtr(raw_grid)\n expected[np.isnan(expected)] = 0.\n # Scipy prematurely goes to zero at some places that we don't. So don't\n # include these in the comparison.\n self.assertAllClose(\n expected.astype(np.float64)[expected < 0],\n grad_eval.astype(np.float64)[expected < 0],\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n def test_float32(self):\n self._test_grad_accuracy(np.float32, self._grid, self._error32)\n self._test_grad_finite(np.float32)\n\n def test_float64(self):\n self._test_grad_accuracy(np.float64, self._grid, self._error64)\n self._test_grad_finite(np.float64)\n\n\nclass LogNdtrGradientTest(NdtrGradientTest):\n _use_log = True\n\n\nclass LogCDFLaplaceTest(test.TestCase):\n # Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot\n # rely on scipy to cross check the extreme values.\n\n # Test will be done differently over different ranges. These are the values\n # such that when exceeded by x, produce output that causes the naive (scipy)\n # implementation to have numerical issues.\n #\n # If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.\n # With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is\n # the value of x such that any larger value will result in\n # 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a\n # log # of zero. We therefore choose these as our cutoffs for testing.\n CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.\n CUTOFF_FLOAT32_UPPER = np.log(1. / (2. 
* np.finfo(np.float32).eps)) - 1.\n\n def assertAllTrue(self, x):\n self.assertAllEqual(np.ones_like(x, dtype=np.bool), x)\n\n def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):\n with self.test_session():\n grid = _make_grid(dtype, grid_spec)\n actual = sm.log_cdf_laplace(grid).eval()\n\n # Basic tests.\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual))\n self.assertAllTrue((actual < 0))\n _check_strictly_increasing(actual)\n\n # Versus scipy.\n scipy_dist = stats.laplace(loc=0., scale=1.)\n expected = scipy_dist.logcdf(grid.astype(scipy_dtype))\n self.assertAllClose(\n expected.astype(np.float64),\n actual.astype(np.float64),\n rtol=error_spec.rtol,\n atol=error_spec.atol)\n\n def test_float32_lower_and_mid_segment_scipy_float32_ok(self):\n # Choose values mild enough that we can use scipy in float32, which will\n # allow for a high accuracy match to scipy (since we both use float32).\n self._test_grid_log(\n np.float32, # dtype\n np.float32, # scipy_dtype\n GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),\n ErrorSpec(rtol=5e-4, atol=0))\n\n def test_float32_all_segments_with_scipy_float64_ok(self):\n # Choose values outside the range where scipy float32 works.\n # Let scipy use float64. This means we\n # won't be exactly the same since we are in float32.\n self._test_grid_log(\n np.float32, # dtype\n np.float64, # scipy_dtype\n GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),\n ErrorSpec(rtol=0.05, atol=0))\n\n def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):\n with self.test_session() as sess:\n # On the lower branch, log_cdf_laplace(x) = x, so we know this will be\n # fine, but test to -200 anyways.\n grid = _make_grid(\n np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))\n grid = ops.convert_to_tensor(grid)\n\n actual = sm.log_cdf_laplace(grid)\n grad = gradients_impl.gradients(actual, grid)[0]\n\n actual_, grad_ = sess.run([actual, grad])\n\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual_))\n self.assertAllTrue(np.isfinite(grad_))\n self.assertFalse(np.any(actual_ == 0))\n self.assertFalse(np.any(grad_ == 0))\n\n def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):\n with self.test_session() as sess:\n # On the lower branch, log_cdf_laplace(x) = x, so we know this will be\n # fine, but test to -200 anyways.\n grid = _make_grid(\n np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))\n grid = ops.convert_to_tensor(grid)\n\n actual = sm.log_cdf_laplace(grid)\n grad = gradients_impl.gradients(actual, grid)[0]\n\n actual_, grad_ = sess.run([actual, grad])\n\n # isfinite checks for NaN and Inf.\n self.assertAllTrue(np.isfinite(actual_))\n self.assertAllTrue(np.isfinite(grad_))\n self.assertFalse(np.any(actual_ == 0))\n self.assertFalse(np.any(grad_ == 0))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
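Several of the grids and cutoffs in the tests above exist only to dodge floating-point underflow, so a standalone numerical illustration of that effect may be useful. This sketch uses numpy and scipy only and is not part of the test file.

```python
# Why the tests avoid naive formulas: log(ndtr(x)) underflows to -inf once
# ndtr(x) rounds to 0 in float64, while a dedicated log-CDF stays finite.
import numpy as np
from scipy import special

x = np.array([-5., -10., -20., -40.])
with np.errstate(divide='ignore'):
    naive = np.log(special.ndtr(x))   # -inf at x = -40 (ndtr underflows to 0)
stable = special.log_ndtr(x)          # finite far into the lower tail
print(naive)
print(stable)

# The float64 cutoff used by LogCDFLaplaceTest: beyond this x,
# 0.5 * exp(-x) falls to the order of machine epsilon, so a naive log CDF
# for the Laplace upper tail loses essentially all precision.
eps = np.finfo(np.float64).eps
print(np.log(1. / (2. * eps)) - 1.)   # equals CUTOFF_FLOAT64_UPPER
```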
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A client interface for TensorFlow.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import pywrap_tensorflow as tf_session\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import session_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\n\n\nclass SessionInterface(object):\n \"\"\"Base class for implementations of TensorFlow client sessions.\"\"\"\n\n @property\n def graph(self):\n \"\"\"The underlying TensorFlow graph, to be used in building Operations.\"\"\"\n raise NotImplementedError('graph')\n\n @property\n def sess_str(self):\n \"\"\"The TensorFlow process to which this session will connect.\"\"\"\n raise NotImplementedError('sess_str')\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"Runs operations in the session. 
See `BaseSession.run()` for details.\"\"\"\n raise NotImplementedError('run')\n\n def partial_run_setup(self, fetches, feeds=None):\n \"\"\"Sets up the feeds and fetches for partial runs in the session.\"\"\"\n raise NotImplementedError('partial_run_setup')\n\n def partial_run(self, handle, fetches, feed_dict=None):\n \"\"\"Continues the execution with additional feeds and fetches.\"\"\"\n raise NotImplementedError('partial_run')\n\ndef _get_indexed_slices_value_from_fetches(fetched_vals):\n return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],\n fetched_vals[2]\n if len(fetched_vals) == 3 else None)\n\n\ndef _get_feeds_for_indexed_slices(feed, feed_val):\n return list(zip([feed.values, feed.indices] if feed.dense_shape is None else\n [feed.values, feed.indices, feed.dense_shape], feed_val))\n\n\n# List of extensions supported to convert run arguments into actual fetches and\n# feeds.\n#\n# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),\n# where the function signatures are:\n# fetch_fn : Type -> (list of Tensors,\n# lambda: list of fetched np.ndarray -> TypeVal)\n# feed_fn1 : Type, TypeVal -> list of (Tensor, value)\n# feed_fn2 : Type -> list of Tensors\n#\n# `fetch_fn` describes how to expand fetch into its\n# component Tensors and how to contract the fetched results back into\n# a single return value.\n#\n# Each feed function describes how to unpack a single fed value and map it to\n# feeds of one or more tensors and their corresponding values: `feed_fn1` is\n# used to feed a run, `feed_fn2` to set up a partial run.\n#\n# TODO(touts): We could reimplement these as specialized _FeedMapper\n# implementations after we refactor the feed handling code to use them.\n#\n# Eventually, this registration could be opened up to support custom Tensor\n# expansions.\n# pylint: disable=g-long-lambda\n_REGISTERED_EXPANSIONS = [\n # SparseTensors are fetched as SparseTensorValues. They can be fed\n # SparseTensorValues or normal tuples.\n (sparse_tensor.SparseTensor,\n lambda fetch: (\n [fetch.indices, fetch.values, fetch.dense_shape],\n lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),\n lambda feed, feed_val: list(zip(\n [feed.indices, feed.values, feed.dense_shape], feed_val)),\n lambda feed: [feed.indices, feed.values, feed.dense_shape]),\n # IndexedSlices are fetched as IndexedSlicesValues. 
They can be fed\n # IndexedSlicesValues or normal tuples.\n (ops.IndexedSlices,\n lambda fetch: (\n [fetch.values, fetch.indices] if fetch.dense_shape is None\n else [fetch.values, fetch.indices, fetch.dense_shape],\n _get_indexed_slices_value_from_fetches),\n _get_feeds_for_indexed_slices,\n lambda feed: [feed.values, feed.indices] if feed.dense_shape is None\n else [feed.values, feed.indices, feed.dense_shape]),\n # The default catches all other types and performs no expansions.\n (object,\n lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),\n lambda feed, feed_val: [(feed, feed_val)],\n lambda feed: [feed])]\n# pylint: enable=g-long-lambda\n\ndef register_session_run_conversion_functions(tensor_type, fetch_function,\n feed_function=None, feed_function_for_partial_run=None):\n \"\"\"Register fetch and feed conversion functions for `tf.Session.run()`.\n\n This function registers a triple of conversion functions for fetching and/or\n feeding values of user-defined types in a call to tf.Session.run().\n\n An example\n\n ```python\n class SquaredTensor(object):\n def __init__(self, tensor):\n self.sq = tf.square(tensor)\n #you can define conversion functions as follows:\n fetch_function = lambda squared_tensor:([squared_tensor.sq],\n lambda val: val[0])\n feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]\n feed_function_for_partial_run = lambda feed: [feed.sq]\n #then after invoking this register function, you can use as follows:\n session.run(squared_tensor1,\n feed_dict = {squared_tensor2 : some_numpy_array})\n ```\n\n Args:\n tensor_type: The type for which you want to register a conversion function.\n fetch_function: A callable that takes an object of type `tensor_type` and\n returns a tuple, where the first element is a list of `tf.Tensor` objects,\n and the second element is a callable that takes a list of ndarrays and\n returns an object of some value type that corresponds to `tensor_type`.\n fetch_function describes how to expand fetch into its component Tensors\n and how to contract the fetched results back into a single return value.\n feed_function: A callable that takes feed_key and feed_value as input, and\n returns a list of tuples (feed_tensor, feed_val), feed_key must have type\n `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed\n function describes how to unpack a single fed value and map it to feeds\n of one or more tensors and their corresponding values.\n feed_function_for_partial_run: A callable for specifying tensor values to\n feed when setting up a partial run, which takes a `tensor_type` type\n object as input, and returns a list of Tensors.\n \"\"\"\n for conversion_function in _REGISTERED_EXPANSIONS:\n if issubclass(conversion_function[0], tensor_type):\n raise ValueError(\n '%s has already been registered so ignore it.', tensor_type)\n return\n _REGISTERED_EXPANSIONS.insert(0,\n (tensor_type, fetch_function, feed_function, feed_function_for_partial_run))\n\n\nclass _FetchMapper(object):\n \"\"\"Definition of the interface provided by fetch mappers.\n\n Fetch mappers are utility classes used by the _FetchHandler to handle\n arbitrary structures for the `fetch` argument to `Session.run()`.\n\n The `fetch` argument can be of various shapes: single tensor or op, list of\n fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The\n structures can be arbitrarily nested.\n\n The low level run() API only wants a list of tensor or op names. 
The various\n `_FetchMapper` subclasses below take care of handling the different shapes:\n uniquifying the fetches, and constructing results with the original shape.\n \"\"\"\n\n def unique_fetches(self):\n \"\"\"Return the list of unique tensors or ops needed by this fetch mapper.\n\n Returns:\n A list of tensors or ops.\n \"\"\"\n raise NotImplementedError('Must be implemented by subclasses')\n\n def build_results(self, values):\n \"\"\"Build results that match the original shape of the fetch.\n\n Args:\n values: List of values returned by run(). The values correspond\n exactly to the list tensors or ops returned by unique_fetches().\n\n Returns:\n A struct of the same shape as the original fetch object handled by\n this fetch mapper. In the returned struct, the original fetches are\n replaced by their fetched values.\n \"\"\"\n raise NotImplementedError('Must be implemented by subclasses')\n\n @staticmethod\n def for_fetch(fetch):\n \"\"\"Creates fetch mapper that handles the structure of `fetch`.\n\n The default graph must be the one from which we want to fetch values when\n this function is called.\n\n Args:\n fetch: An arbitrary fetch structure: singleton, list, tuple,\n namedtuple, or dict.\n\n Returns:\n An instance of a subclass of `_FetchMapper` that handles the shape.\n \"\"\"\n if fetch is None:\n raise TypeError('Fetch argument %r has invalid type %r' %\n (fetch, type(fetch)))\n elif isinstance(fetch, (list, tuple)):\n # NOTE(touts): This is also the code path for namedtuples.\n return _ListFetchMapper(fetch)\n elif isinstance(fetch, dict):\n return _DictFetchMapper(fetch)\n else:\n # Look for a handler in the registered expansions.\n for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:\n if isinstance(fetch, tensor_type):\n fetches, contraction_fn = fetch_fn(fetch)\n return _ElementFetchMapper(fetches, contraction_fn)\n # Did not find anything.\n raise TypeError('Fetch argument %r has invalid type %r' %\n (fetch, type(fetch)))\n\n\nclass _ElementFetchMapper(_FetchMapper):\n \"\"\"Fetch mapper for singleton tensors and ops.\"\"\"\n\n def __init__(self, fetches, contraction_fn):\n \"\"\"Creates an _ElementFetchMapper.\n\n This is the fetch mapper used for leaves in the fetch struct. Because of\n the expansions mechanism, a leaf can actually fetch more than one tensor.\n\n Also note that the fetches here can be just strings (tensor or op names) or\n any other object that the graph knows how to convert to a tensor, such as a\n Variable. So we have to run each fetch through `as_graph_element()` to get\n the corresponding tensor or op.\n\n Args:\n fetches: List of objects, as returned by a fetch_fn defined\n in _REGISTERED_EXPANSIONS.\n contraction_fn: Callable as returned by a fetch_fn.\n \"\"\"\n self._unique_fetches = []\n for fetch in fetches:\n try:\n self._unique_fetches.append(ops.get_default_graph().as_graph_element(\n fetch, allow_tensor=True, allow_operation=True))\n except TypeError as e:\n raise TypeError('Fetch argument %r has invalid type %r, '\n 'must be a string or Tensor. (%s)'\n % (fetch, type(fetch), str(e)))\n except ValueError as e:\n raise ValueError('Fetch argument %r cannot be interpreted as a '\n 'Tensor. (%s)' % (fetch, str(e)))\n except KeyError as e:\n raise ValueError('Fetch argument %r cannot be interpreted as a '\n 'Tensor. 
(%s)' % (fetch, str(e)))\n self._contraction_fn = contraction_fn\n\n def unique_fetches(self):\n return self._unique_fetches\n\n def build_results(self, values):\n if not values:\n # 'Operation' case\n return None\n else:\n return self._contraction_fn(values)\n\n\ndef _uniquify_fetches(fetch_mappers):\n \"\"\"Uniquifies fetches from a list of fetch_mappers.\n\n This is a utility function used by _ListFetchMapper and _DictFetchMapper. It\n gathers all the unique fetches from a list of mappers and builds a list\n containing all of them but without duplicates (unique_fetches).\n\n It also returns a 2-D list of integers (values_indices) indicating at which\n index in unique_fetches the fetches of the mappers are located.\n\n This list is as follows:\n values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index\n\n Args:\n fetch_mappers: list of fetch mappers.\n\n Returns:\n A list of fetches.\n A 2-D list of integers.\n \"\"\"\n unique_fetches = []\n value_indices = []\n seen_fetches = {}\n for m in fetch_mappers:\n m_value_indices = []\n for f in m.unique_fetches():\n j = seen_fetches.get(f)\n if j is None:\n j = len(seen_fetches)\n seen_fetches[f] = j\n unique_fetches.append(f)\n m_value_indices.append(j)\n value_indices.append(m_value_indices)\n return unique_fetches, value_indices\n\n\nclass _ListFetchMapper(_FetchMapper):\n \"\"\"Fetch mapper for lists, tuples, and namedtuples.\"\"\"\n\n def __init__(self, fetches):\n \"\"\"Creates a _ListFetchMapper.\n\n Args:\n fetches: List, tuple, or namedtuple of fetches.\n \"\"\"\n self._fetch_type = type(fetches)\n self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]\n self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n def unique_fetches(self):\n return self._unique_fetches\n\n def build_results(self, values):\n # Create the list of results for each mapper.\n results = []\n for m, vi in zip(self._mappers, self._value_indices):\n results.append(m.build_results([values[j] for j in vi]))\n # Return a value of the original type of the fetches.\n if self._fetch_type == list:\n return results\n elif self._fetch_type == tuple:\n return tuple(results)\n else:\n # This is the code path for namedtuple.\n return self._fetch_type(*results)\n\n\nclass _DictFetchMapper(_FetchMapper):\n \"\"\"Fetch mapper for dicts.\"\"\"\n\n def __init__(self, fetches):\n \"\"\"Creates a _DictFetchMapper.\n\n Args:\n fetches: Dict of fetches.\n \"\"\"\n self._fetch_type = type(fetches)\n self._keys = fetches.keys()\n self._mappers = [_FetchMapper.for_fetch(fetch)\n for fetch in fetches.values()]\n self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n def unique_fetches(self):\n return self._unique_fetches\n\n def build_results(self, values):\n results = self._fetch_type()\n for k, m, vi in zip(self._keys, self._mappers, self._value_indices):\n results[k] = m.build_results([values[j] for j in vi])\n return results\n\n\nclass _FetchHandler(object):\n \"\"\"Handler for structured fetches.\n\n Given a graph, a user-provided structure for fetches, and a feed dict, this\n class takes care of generating a list of tensor names to fetch and op names\n to run for a low level `run()` call.\n\n Given the results of the low level run call, this class can also rebuild a\n result structure matching the user-provided structure for fetches, but\n containing the corresponding results.\n \"\"\"\n # TODO(touts): Make this class also take care of destructuring the feed\n # dict instead of doing it in the 
callers.\n\n def __init__(self, graph, fetches, feeds, feed_handles=None):\n \"\"\"Creates a fetch handler.\n\n Args:\n graph: Graph of the fetches. Used to check for fetchability\n and to convert all fetches to tensors or ops as needed.\n fetches: An arbitrary fetch structure: singleton, list, tuple,\n namedtuple, or dict.\n feeds: A feed dict where keys are fully resolved tensor names.\n feed_handles: A dict from feed names to TensorHandle objects used as\n direct feeds.\n \"\"\"\n with graph.as_default():\n self._fetch_mapper = _FetchMapper.for_fetch(fetches)\n self._fetches = []\n self._targets = []\n self._feeds = feeds\n self._feed_handles = feed_handles or {}\n self._ops = []\n self._fetch_handles = {}\n for fetch in self._fetch_mapper.unique_fetches():\n fetch_name = compat.as_bytes(fetch.name)\n if isinstance(fetch, ops.Operation):\n self._assert_fetchable(graph, fetch)\n self._targets.append(fetch_name)\n self._ops.append(True)\n else:\n self._assert_fetchable(graph, fetch.op)\n self._fetches.append(fetch_name)\n self._ops.append(False)\n # Remember the fetch if it is for a tensor handle.\n if (isinstance(fetch, ops.Tensor) and\n (fetch.op.type == 'GetSessionHandle' or\n fetch.op.type == 'GetSessionHandleV2')):\n self._fetch_handles[fetch_name] = fetch.op.inputs[0].dtype\n self._final_fetches = [x for x in self._fetches if x not in feeds]\n\n def _assert_fetchable(self, graph, op):\n if not graph.is_fetchable(op):\n raise ValueError(\n 'Operation %r has been marked as not fetchable.' % op.name)\n\n def fetches(self):\n \"\"\"Return the unique names of tensors to fetch.\n\n Returns:\n A list of strings.\n \"\"\"\n return self._final_fetches\n\n def targets(self):\n \"\"\"Return the unique names of ops to run.\n\n Returns:\n A list of strings.\n \"\"\"\n return self._targets\n\n def build_results(self, session, tensor_values):\n \"\"\"Build results matching the original fetch shape.\n\n `tensor_values` must be a list of the same length as\n the one returned by `fetches()`, and holding the requested\n fetch values.\n\n This method builds a struct with the same shape as the original `fetches`\n passed to the constructor, in which the fetches are replaced by their\n fetched value.\n\n Args:\n session: The enclosing session. Used for tensor handles.\n tensor_values: List of values matching the list returned\n by fetches().\n\n Returns:\n A structure of the same shape as the original `fetches` argument but\n containing tensors or None (for fetched ops).\n \"\"\"\n full_values = []\n assert len(self._final_fetches) == len(tensor_values)\n i = 0\n j = 0\n for is_op in self._ops:\n if is_op:\n full_values.append(None)\n else:\n # If the fetch was in the feeds, use the fed value, otherwise\n # use the returned value.\n if self._fetches[i] in self._feed_handles:\n # A fetch had a corresponding direct TensorHandle feed. 
Call eval()\n # to obtain the Tensor value from the TensorHandle.\n value = self._feed_handles[self._fetches[i]].eval()\n else:\n value = self._feeds.get(self._fetches[i])\n if value is None:\n value = tensor_values[j]\n j += 1\n dtype = self._fetch_handles.get(self._fetches[i])\n if dtype:\n full_values.append(session_ops.TensorHandle(value, dtype, session))\n else:\n full_values.append(value)\n i += 1\n assert j == len(tensor_values)\n return self._fetch_mapper.build_results(full_values)\n\n\nclass BaseSession(SessionInterface):\n \"\"\"A class for interacting with a TensorFlow computation.\n\n The BaseSession enables incremental graph building with inline\n execution of Operations and evaluation of Tensors.\n \"\"\"\n\n def __init__(self, target='', graph=None, config=None):\n \"\"\"Constructs a new TensorFlow session.\n\n Args:\n target: (Optional) The TensorFlow execution engine to connect to.\n graph: (Optional) The graph to be used. If this argument is None,\n the default graph will be used.\n config: (Optional) ConfigProto proto used to configure the session.\n\n Raises:\n tf.errors.OpError: Or one of its subclasses if an error occurs while\n creating the TensorFlow session.\n TypeError: If one of the arguments has the wrong type.\n \"\"\"\n if graph is None:\n self._graph = ops.get_default_graph()\n else:\n if not isinstance(graph, ops.Graph):\n raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))\n self._graph = graph\n\n self._opened = False\n self._closed = False\n\n self._current_version = 0\n self._extend_lock = threading.Lock()\n if target is not None:\n try:\n self._target = compat.as_bytes(target)\n except TypeError:\n raise TypeError('target must be a string, but got %s' % type(target))\n else:\n self._target = None\n\n self._delete_lock = threading.Lock()\n self._dead_handles = []\n\n if config is not None:\n if not isinstance(config, config_pb2.ConfigProto):\n raise TypeError('config must be a tf.ConfigProto, but got %s'\n % type(config))\n self._config = config\n self._add_shapes = config.graph_options.infer_shapes\n else:\n self._config = None\n self._add_shapes = False\n\n self._session = None\n opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)\n try:\n with errors.raise_exception_on_not_ok_status() as status:\n self._session = tf_session.TF_NewDeprecatedSession(opts, status)\n finally:\n tf_session.TF_DeleteSessionOptions(opts)\n\n def close(self):\n \"\"\"Closes this session.\n\n Calling this method frees all resources associated with the session.\n\n Raises:\n tf.errors.OpError: Or one of its subclasses if an error occurs while\n closing the TensorFlow session.\n \"\"\"\n with self._extend_lock:\n if self._opened and not self._closed:\n self._closed = True\n with errors.raise_exception_on_not_ok_status() as status:\n tf_session.TF_CloseDeprecatedSession(self._session, status)\n\n def __del__(self):\n # cleanly ignore all exceptions\n try:\n self.close()\n except Exception: # pylint: disable=broad-except\n pass\n if self._session is not None:\n # We create `status` outside the `try` block because at shutdown\n # `tf_session` may have been garbage collected, and the creation\n # of a status object may fail. 
In that case, we prefer to ignore\n # the failure and silently leak the session object, since the\n # program is about to terminate.\n status = None\n try:\n status = tf_session.TF_NewStatus()\n tf_session.TF_DeleteDeprecatedSession(self._session, status)\n finally:\n if status is not None:\n tf_session.TF_DeleteStatus(status)\n self._session = None\n\n @property\n def graph(self):\n \"\"\"The graph that was launched in this session.\"\"\"\n return self._graph\n\n @property\n def graph_def(self):\n \"\"\"A serializable version of the underlying TensorFlow graph.\n\n Returns:\n A graph_pb2.GraphDef proto containing nodes for all of the Operations in\n the underlying TensorFlow graph.\n \"\"\"\n return self._graph.as_graph_def(add_shapes=self._add_shapes)\n\n @property\n def sess_str(self):\n return self._target\n\n def as_default(self):\n \"\"\"Returns a context manager that makes this object the default session.\n\n Use with the `with` keyword to specify that calls to\n @{tf.Operation.run} or @{tf.Tensor.eval} should be executed in\n this session.\n\n ```python\n c = tf.constant(..)\n sess = tf.Session()\n\n with sess.as_default():\n assert tf.get_default_session() is sess\n print(c.eval())\n ```\n\n To get the current default session, use @{tf.get_default_session}.\n\n *N.B.* The `as_default` context manager *does not* close the\n session when you exit the context, and you must close the session\n explicitly.\n\n ```python\n c = tf.constant(...)\n sess = tf.Session()\n with sess.as_default():\n print(c.eval())\n # ...\n with sess.as_default():\n print(c.eval())\n\n sess.close()\n ```\n\n Alternatively, you can use `with tf.Session():` to create a\n session that is automatically closed on exiting the context,\n including when an uncaught exception is raised.\n\n *N.B.* The default session is a property of the current thread. If you\n create a new thread, and wish to use the default session in that\n thread, you must explicitly add a `with sess.as_default():` in that\n thread's function.\n\n *N.B.* Entering a `with sess.as_default():` block does not affect\n the current default graph. If you are using multiple graphs, and\n `sess.graph` is different from the value of @{tf.get_default_graph},\n you must explicitly enter a `with sess.graph.as_default():` block\n to make `sess.graph` the default graph.\n\n Returns:\n A context manager using this session as the default session.\n \"\"\"\n return ops.default_session(self)\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"Runs operations and evaluates tensors in `fetches`.\n\n This method runs one \"step\" of TensorFlow computation, by\n running the necessary graph fragment to execute every `Operation`\n and evaluate every `Tensor` in `fetches`, substituting the values in\n `feed_dict` for the corresponding input values.\n\n The `fetches` argument may be a single graph element, or an arbitrarily\n nested list, tuple, namedtuple, dict, or OrderedDict containing graph\n elements at its leaves. A graph element can be one of the following types:\n\n * An @{tf.Operation}.\n The corresponding fetched value will be `None`.\n * A @{tf.Tensor}.\n The corresponding fetched value will be a numpy ndarray containing the\n value of that tensor.\n * A @{tf.SparseTensor}.\n The corresponding fetched value will be a\n @{tf.SparseTensorValue}\n containing the value of that sparse tensor.\n * A `get_tensor_handle` op. 
The corresponding fetched value will be a\n numpy ndarray containing the handle of that tensor.\n * A `string` which is the name of a tensor or operation in the graph.\n\n The value returned by `run()` has the same shape as the `fetches` argument,\n where the leaves are replaced by the corresponding values returned by\n TensorFlow.\n\n Example:\n\n ```python\n a = tf.constant([10, 20])\n b = tf.constant([1.0, 2.0])\n # 'fetches' can be a singleton\n v = session.run(a)\n # v is the numpy array [10, 20]\n # 'fetches' can be a list.\n v = session.run([a, b])\n # v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the\n # 1-D array [1.0, 2.0]\n # 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:\n MyData = collections.namedtuple('MyData', ['a', 'b'])\n v = session.run({'k1': MyData(a, b), 'k2': [b, a]})\n # v is a dict with\n # v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and\n # 'b' (the numpy array [1.0, 2.0])\n # v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array\n # [10, 20].\n ```\n\n The optional `feed_dict` argument allows the caller to override\n the value of tensors in the graph. Each key in `feed_dict` can be\n one of the following types:\n\n * If the key is a @{tf.Tensor}, the\n value may be a Python scalar, string, list, or numpy ndarray\n that can be converted to the same `dtype` as that\n tensor. Additionally, if the key is a\n @{tf.placeholder}, the shape of\n the value will be checked for compatibility with the placeholder.\n * If the key is a\n @{tf.SparseTensor},\n the value should be a\n @{tf.SparseTensorValue}.\n * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value\n should be a nested tuple with the same structure that maps to their\n corresponding values as above.\n\n Each value in `feed_dict` must be convertible to a numpy array of the dtype\n of the corresponding key.\n\n The optional `options` argument expects a [`RunOptions`] proto. The options\n allow controlling the behavior of this particular step (e.g. turning tracing\n on).\n\n The optional `run_metadata` argument expects a [`RunMetadata`] proto. When\n appropriate, the non-Tensor output of this step will be collected there. For\n example, when users turn on tracing in `options`, the profiled info will be\n collected into this argument and passed back.\n\n Args:\n fetches: A single graph element, a list of graph elements,\n or a dictionary whose values are graph elements or lists of graph\n elements (described above).\n feed_dict: A dictionary that maps graph elements to values\n (described above).\n options: A [`RunOptions`] protocol buffer\n run_metadata: A [`RunMetadata`] protocol buffer\n\n Returns:\n Either a single value if `fetches` is a single graph element, or\n a list of values if `fetches` is a list, or a dictionary with the\n same keys as `fetches` if that is a dictionary (described above).\n\n Raises:\n RuntimeError: If this `Session` is in an invalid state (e.g. 
has been\n closed).\n TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.\n ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a\n `Tensor` that doesn't exist.\n \"\"\"\n run_metadata_ptr = tf_session.TF_NewBuffer()\n if options:\n options_ptr = tf_session.TF_NewBufferFromString(\n compat.as_bytes(options.SerializeToString()))\n else:\n options_ptr = None\n\n try:\n result = self._run(None, fetches, feed_dict, options_ptr,\n run_metadata_ptr)\n if run_metadata:\n proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)\n run_metadata.ParseFromString(compat.as_bytes(proto_data))\n finally:\n tf_session.TF_DeleteBuffer(run_metadata_ptr)\n if options:\n tf_session.TF_DeleteBuffer(options_ptr)\n return result\n\n def partial_run(self, handle, fetches, feed_dict=None):\n \"\"\"Continues the execution with more feeds and fetches.\n\n This is EXPERIMENTAL and subject to change.\n\n To use partial execution, a user first calls `partial_run_setup()` and\n then a sequence of `partial_run()`. `partial_run_setup` specifies the\n list of feeds and fetches that will be used in the subsequent\n `partial_run` calls.\n\n The optional `feed_dict` argument allows the caller to override\n the value of tensors in the graph. See run() for more information.\n\n Below is a simple example:\n\n ```python\n a = array_ops.placeholder(dtypes.float32, shape=[])\n b = array_ops.placeholder(dtypes.float32, shape=[])\n c = array_ops.placeholder(dtypes.float32, shape=[])\n r1 = math_ops.add(a, b)\n r2 = math_ops.multiply(r1, c)\n\n h = sess.partial_run_setup([r1, r2], [a, b, c])\n res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})\n res = sess.partial_run(h, r2, feed_dict={c: res})\n ```\n\n Args:\n handle: A handle for a sequence of partial runs.\n fetches: A single graph element, a list of graph elements,\n or a dictionary whose values are graph elements or lists of graph\n elements (see documentation for `run`).\n feed_dict: A dictionary that maps graph elements to values\n (described above).\n\n Returns:\n Either a single value if `fetches` is a single graph element, or\n a list of values if `fetches` is a list, or a dictionary with the\n same keys as `fetches` if that is a dictionary\n (see documentation for `run`).\n\n Raises:\n tf.errors.OpError: Or one of its subclasses on error.\n \"\"\"\n # TODO(touts): Support feeding and fetching the same tensor.\n return self._run(handle, fetches, feed_dict, None, None)\n\n def partial_run_setup(self, fetches, feeds=None):\n \"\"\"Sets up a graph with feeds and fetches for partial run.\n\n This is EXPERIMENTAL and subject to change.\n\n Note that contrary to `run`, `feeds` only specifies the graph elements.\n The tensors will be supplied by the subsequent `partial_run` calls.\n\n Args:\n fetches: A single graph element, or a list of graph elements.\n feeds: A single graph element, or a list of graph elements.\n\n Returns:\n A handle for partial run.\n\n Raises:\n RuntimeError: If this `Session` is in an invalid state (e.g. 
has been\n closed).\n TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.\n tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.\n \"\"\"\n def _feed_fn(feed):\n for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:\n if isinstance(feed, tensor_type):\n return feed_fn(feed)\n raise TypeError('Feed argument %r has invalid type %r'\n % (feed, type(feed)))\n\n # Check session.\n if self._closed:\n raise RuntimeError('Attempted to use a closed Session.')\n if self.graph.version == 0:\n raise RuntimeError('The Session graph is empty. Add operations to the '\n 'graph before calling run().')\n\n # Create request.\n feed_list = []\n\n # Validate and process feed_list.\n is_list_feed = isinstance(feeds, (list, tuple))\n if not is_list_feed:\n feeds = [feeds]\n for feed in feeds:\n for subfeed in _feed_fn(feed):\n try:\n subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,\n allow_operation=False)\n feed_list.append(compat.as_bytes(subfeed_t.name))\n except Exception as e:\n e.message = ('Cannot interpret feed_list key as Tensor: '\n + e.message)\n e.args = (e.message,)\n raise e\n\n # Validate and process fetches.\n # TODO(touts): Support feeding and fetching the same tensor.\n fetch_handler = _FetchHandler(self._graph, fetches, {})\n\n # Set up a graph with feeds and fetches for partial run.\n def _setup_fn(session, feed_list, fetch_list, target_list):\n self._extend_graph()\n with errors.raise_exception_on_not_ok_status() as status:\n return tf_session.TF_PRunSetup(session, feed_list, fetch_list,\n target_list, status)\n\n return self._do_call(_setup_fn, self._session, feed_list,\n fetch_handler.fetches(), fetch_handler.targets())\n\n def _run(self, handle, fetches, feed_dict, options, run_metadata):\n \"\"\"Perform either run or partial_run, depending the presence of `handle`.\"\"\"\n def _feed_fn(feed, feed_val):\n for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:\n if isinstance(feed, tensor_type):\n return feed_fn(feed, feed_val)\n raise TypeError('Feed argument %r has invalid type %r'\n % (feed, type(feed)))\n\n # Check session.\n if self._closed:\n raise RuntimeError('Attempted to use a closed Session.')\n if self.graph.version == 0:\n raise RuntimeError('The Session graph is empty. Add operations to the '\n 'graph before calling run().')\n\n # Create request.\n feed_dict_string = {}\n feed_map = {}\n\n # Validate and process feed_dict.\n feed_handles = {}\n if feed_dict:\n feed_dict = nest.flatten_dict_items(feed_dict)\n for feed, feed_val in feed_dict.items():\n for subfeed, subfeed_val in _feed_fn(feed, feed_val):\n try:\n subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,\n allow_operation=False)\n except Exception as e:\n raise TypeError('Cannot interpret feed_dict key as Tensor: '\n + e.args[0])\n\n if isinstance(subfeed_val, ops.Tensor):\n raise TypeError('The value of a feed cannot be a tf.Tensor object. '\n 'Acceptable feed values include Python scalars, '\n 'strings, lists, numpy ndarrays, or TensorHandles.')\n\n subfeed_dtype = subfeed_t.dtype.as_numpy_dtype\n if isinstance(subfeed_val,\n int) and subfeed_dtype(subfeed_val) != subfeed_val:\n raise TypeError(\n 'Type of feed value ' + str(subfeed_val) + ' is not'\n ' compatible with Tensor type ' + str(subfeed_dtype) + '.'\n ' Try explicitly setting the type of the feed tensor'\n ' to a larger type (e.g. 
int64).')\n\n is_tensor_handle_feed = isinstance(subfeed_val,\n session_ops.TensorHandle)\n subfeed_name = compat.as_bytes(subfeed_t.name)\n if is_tensor_handle_feed:\n np_val = subfeed_val.to_numpy_array()\n feed_handles[subfeed_name] = subfeed_val\n else:\n np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)\n\n if (not is_tensor_handle_feed and\n not subfeed_t.get_shape().is_compatible_with(np_val.shape)):\n raise ValueError(\n 'Cannot feed value of shape %r for Tensor %r, '\n 'which has shape %r'\n % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))\n if not self.graph.is_feedable(subfeed_t):\n raise ValueError('Tensor %s may not be fed.' % subfeed_t)\n\n feed_dict_string[subfeed_name] = np_val\n feed_map[subfeed_name] = (subfeed_t, subfeed_val)\n\n # Create a fetch handler to take care of the structure of fetches.\n fetch_handler = _FetchHandler(\n self._graph, fetches, feed_dict_string, feed_handles=feed_handles)\n\n # Run request and get response.\n # We need to keep the movers alive for the following _do_run().\n # These movers are no longer needed when _do_run() completes, and\n # are deleted when `movers` goes out of scope when this _run() ends.\n # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding\n # of a handle from a different device as an error.\n movers = self._update_with_movers(feed_dict_string, feed_map)\n final_fetches = fetch_handler.fetches()\n final_targets = fetch_handler.targets()\n if final_fetches or final_targets:\n results = self._do_run(handle, final_targets, final_fetches,\n feed_dict_string, options, run_metadata)\n else:\n results = []\n return fetch_handler.build_results(self, results)\n\n def make_callable(self, fetches, feed_list=None):\n \"\"\"Returns a Python callable that runs a particular step.\n\n The returned callable will take `len(feed_list)` arguments whose types\n must be compatible feed values for the respective elements of `feed_list`.\n For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th\n argument to the returned callable must be a numpy ndarray (or something\n convertible to an ndarray) with matching element type and shape. See\n @{tf.Session.run} for details of the allowable feed key and value types.\n\n The returned callable will have the same return type as\n `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,\n the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,\n it will return `None`.\n\n Args:\n fetches: A value or list of values to fetch. See @{tf.Session.run}\n for details of the allowable fetch types.\n feed_list: (Optional.) A list of `feed_dict` keys. 
See\n @{tf.Session.run} for details of the allowable feed key types.\n\n Returns:\n A function that when called will execute the step defined by\n `feed_list` and `fetches` in this session.\n\n Raises:\n TypeError: If `fetches` or `feed_list` cannot be interpreted\n as arguments to @{tf.Session.run}.\n \"\"\"\n if feed_list is not None:\n if not isinstance(feed_list, (list, tuple)):\n raise TypeError('`feed_list` must be a list or tuple.')\n # Delegate any non-empty feed lists to the existing `run()` logic.\n # TODO(mrry): Refactor the feed handling logic from\n # `Session._run()` so that we can convert the feeds to a list of\n # strings here.\n def _generic_run(*feed_args):\n feed_dict = {feed: feed_val\n for feed, feed_val in zip(feed_list, feed_args)}\n return self.run(fetches, feed_dict=feed_dict)\n return _generic_run\n\n # Ensure any changes to the graph are reflected in the runtime.\n # Note that we don't need to do this on subsequent calls to the\n # returned object, because the arguments to `fetches` must already be\n # in the graph.\n self._extend_graph()\n\n # Create a fetch handler to take care of the structure of fetches.\n fetch_handler = _FetchHandler(self._graph, fetches, {})\n fetch_list_as_strings = fetch_handler.fetches()\n target_list_as_strings = fetch_handler.targets()\n\n if isinstance(fetches, ops.Operation):\n # Special case for fetching a single operation, because the\n # function will have no return value.\n assert not fetch_list_as_strings\n assert len(target_list_as_strings) == 1\n def _single_operation_run():\n with errors.raise_exception_on_not_ok_status() as status:\n tf_session.TF_Run(self._session, None, {}, [],\n target_list_as_strings, status, None)\n return _single_operation_run\n elif isinstance(fetches, ops.Tensor):\n # Special case for fetching a single tensor, because the\n # function can return the result of `TF_Run()` directly.\n assert len(fetch_list_as_strings) == 1\n assert not target_list_as_strings\n def _single_tensor_run():\n with errors.raise_exception_on_not_ok_status() as status:\n results = tf_session.TF_Run(self._session, None, {},\n fetch_list_as_strings, [], status, None)\n return results[0]\n return _single_tensor_run\n else:\n # In all other cases, we must use `fetch_handler` to build the\n # results for us.\n def _fetch_handler_run():\n with errors.raise_exception_on_not_ok_status() as status:\n results = tf_session.TF_Run(self._session, None, {},\n fetch_list_as_strings,\n target_list_as_strings, status, None)\n return fetch_handler.build_results(self, results)\n return _fetch_handler_run\n\n # Captures the name of a node in an error status.\n _NODEDEF_NAME_RE = re.compile(r'\\[\\[Node: ([^ ]*?) =')\n\n def _do_run(self, handle, target_list, fetch_list, feed_dict,\n options, run_metadata):\n \"\"\"Runs a step based on the given fetches and feeds.\n\n Args:\n handle: a handle for partial_run. None if this is just a call to run().\n target_list: A list of byte arrays corresponding to names of tensors\n or operations to be run to, but not fetched.\n fetch_list: A list of byte arrays corresponding to names of tensors to\n be fetched and operations to be run.\n feed_dict: A dictionary that maps tensor names (as byte arrays) to\n numpy ndarrays.\n options: A (pointer to a) [`RunOptions`] protocol buffer, or None\n run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None\n\n Returns:\n A list of numpy ndarrays, corresponding to the elements of\n `fetch_list`. 
If the ith element of `fetch_list` contains the\n name of an operation, the first Tensor output of that operation\n will be returned for that element.\n\n Raises:\n tf.errors.OpError: Or one of its subclasses on error.\n \"\"\"\n def _run_fn(session, feed_dict, fetch_list, target_list, options,\n run_metadata):\n # Ensure any changes to the graph are reflected in the runtime.\n self._extend_graph()\n with errors.raise_exception_on_not_ok_status() as status:\n return tf_session.TF_Run(session, options,\n feed_dict, fetch_list, target_list,\n status, run_metadata)\n\n def _prun_fn(session, handle, feed_dict, fetch_list):\n if target_list:\n raise RuntimeError('partial_run() requires empty target_list.')\n with errors.raise_exception_on_not_ok_status() as status:\n return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,\n status)\n\n if handle is None:\n return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n target_list, options, run_metadata)\n else:\n return self._do_call(_prun_fn, self._session, handle, feed_dict,\n fetch_list)\n\n def _do_call(self, fn, *args):\n try:\n return fn(*args)\n except errors.OpError as e:\n message = compat.as_text(e.message)\n m = BaseSession._NODEDEF_NAME_RE.search(message)\n node_def = None\n op = None\n if m is not None:\n node_name = m.group(1)\n try:\n op = self._graph.get_operation_by_name(node_name)\n node_def = op.node_def\n except KeyError:\n pass\n raise type(e)(node_def, op, message)\n\n def _extend_graph(self):\n # Ensure any changes to the graph are reflected in the runtime.\n with self._extend_lock:\n if self._graph.version > self._current_version:\n # pylint: disable=protected-access\n graph_def, self._current_version = self._graph._as_graph_def(\n from_version=self._current_version,\n add_shapes=self._add_shapes)\n # pylint: enable=protected-access\n\n with errors.raise_exception_on_not_ok_status() as status:\n tf_session.TF_ExtendGraph(\n self._session, graph_def.SerializeToString(), status)\n self._opened = True\n\n # The threshold to run garbage collection to delete dead tensors.\n _DEAD_HANDLES_THRESHOLD = 10\n\n def _register_dead_handle(self, handle):\n # Register a dead handle in the session. 
Delete the dead tensors when\n # the number of dead tensors exceeds certain threshold.\n tensors_to_delete = None\n with self._delete_lock:\n self._dead_handles.append(handle)\n if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:\n tensors_to_delete = self._dead_handles\n self._dead_handles = []\n # Delete the dead tensors.\n if tensors_to_delete:\n feeds = {}\n fetches = []\n for deleter_key, tensor_handle in enumerate(tensors_to_delete):\n holder, deleter = session_ops._get_handle_deleter(self.graph,\n deleter_key,\n tensor_handle)\n feeds[holder] = tensor_handle\n fetches.append(deleter)\n self.run(fetches, feed_dict=feeds)\n\n def _update_with_movers(self, feed_dict, feed_map):\n # If a tensor handle that is fed to a device incompatible placeholder,\n # we move the tensor to the right device, generate a new tensor handle,\n # and update `feed_dict` to use the new handle.\n handle_movers = []\n for feed_name, val in feed_map.items():\n mover = session_ops._get_handle_mover(self.graph, *val)\n if mover:\n handle_movers.append((feed_name, val[1], mover))\n # Transfer a tensor to the right device if needed.\n if not handle_movers:\n return []\n else:\n feeds = {}\n fetches = []\n for _, handle, mover in handle_movers:\n feeds[mover[0]] = handle\n fetches.append(mover[1])\n handles = self.run(fetches, feed_dict=feeds)\n for handle_mover, handle in zip(handle_movers, handles):\n np_val = np.array(handle.handle, dtype=np.object)\n feed_dict[handle_mover[0]] = np_val\n return handles\n\n\nclass Session(BaseSession):\n \"\"\"A class for running TensorFlow operations.\n\n A `Session` object encapsulates the environment in which `Operation`\n objects are executed, and `Tensor` objects are evaluated. For\n example:\n\n ```python\n # Build a graph.\n a = tf.constant(5.0)\n b = tf.constant(6.0)\n c = a * b\n\n # Launch the graph in a session.\n sess = tf.Session()\n\n # Evaluate the tensor `c`.\n print(sess.run(c))\n ```\n\n A session may own resources, such as\n @{tf.Variable}, @{tf.QueueBase},\n and @{tf.ReaderBase}. It is important to release\n these resources when they are no longer required. To do this, either\n invoke the @{tf.Session.close} method on the session, or use\n the session as a context manager. The following two examples are\n equivalent:\n\n ```python\n # Using the `close()` method.\n sess = tf.Session()\n sess.run(...)\n sess.close()\n\n # Using the context manager.\n with tf.Session() as sess:\n sess.run(...)\n ```\n\n The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)\n protocol buffer exposes various configuration options for a\n session. For example, to create a session that uses soft constraints\n for device placement, and log the resulting placement decisions,\n create a session as follows:\n\n ```python\n # Launch the graph in a session that allows soft device placement and\n # logs the placement decisions.\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True))\n ```\n \"\"\"\n\n def __init__(self, target='', graph=None, config=None):\n \"\"\"Creates a new TensorFlow session.\n\n If no `graph` argument is specified when constructing the session,\n the default graph will be launched in the session. If you are\n using more than one graph (created with `tf.Graph()` in the same\n process, you will have to use different sessions for each graph,\n but each graph can be used in multiple sessions. 
In this case, it\n is often clearer to pass the graph to be launched explicitly to\n the session constructor.\n\n Args:\n target: (Optional.) The execution engine to connect to.\n Defaults to using an in-process engine. See\n @{$distributed$Distributed TensorFlow}\n for more examples.\n graph: (Optional.) The `Graph` to be launched (described above).\n config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)\n protocol buffer with configuration options for the session.\n\n \"\"\"\n super(Session, self).__init__(target, graph, config=config)\n # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.\n self._default_graph_context_manager = None\n self._default_session_context_manager = None\n\n def __enter__(self):\n if self._default_graph_context_manager is None:\n self._default_graph_context_manager = self.graph.as_default()\n else:\n raise RuntimeError('Session context managers are not re-entrant. '\n 'Use `Session.as_default()` if you want to enter '\n 'a session multiple times.')\n if self._default_session_context_manager is None:\n self._default_session_context_manager = self.as_default()\n self._default_graph_context_manager.__enter__()\n return self._default_session_context_manager.__enter__()\n\n def __exit__(self, exec_type, exec_value, exec_tb):\n if exec_type is errors.OpError:\n logging.error('Session closing due to OpError: %s', (exec_value,))\n self._default_session_context_manager.__exit__(\n exec_type, exec_value, exec_tb)\n self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)\n\n self._default_session_context_manager = None\n self._default_graph_context_manager = None\n\n self.close()\n\n @staticmethod\n def reset(target, containers=None, config=None):\n \"\"\"Resets resource containers on `target`, and close all connected sessions.\n\n A resource container is distributed across all workers in the\n same cluster as `target`. When a resource container on `target`\n is reset, resources associated with that container will be cleared.\n In particular, all Variables in the container will become undefined:\n they lose their values and shapes.\n\n NOTE:\n (i) reset() is currently only implemented for distributed sessions.\n (ii) Any sessions on the master named by `target` will be closed.\n\n If no resource containers are provided, all containers are reset.\n\n Args:\n target: The execution engine to connect to.\n containers: A list of resource container name strings, or `None` if all of\n all the containers are to be reset.\n config: (Optional.) 
Protocol buffer with configuration options.\n\n Raises:\n tf.errors.OpError: Or one of its subclasses if an error occurs while\n resetting containers.\n \"\"\"\n if target is not None:\n target = compat.as_bytes(target)\n if containers is not None:\n containers = [compat.as_bytes(c) for c in containers]\n else:\n containers = []\n tf_session.TF_Reset(target, containers, config)\n\n\nclass InteractiveSession(BaseSession):\n \"\"\"A TensorFlow `Session` for use in interactive contexts, such as a shell.\n\n The only difference with a regular `Session` is that an `InteractiveSession`\n installs itself as the default session on construction.\n The methods @{tf.Tensor.eval}\n and @{tf.Operation.run}\n will use that session to run ops.\n\n This is convenient in interactive shells and [IPython\n notebooks](http://ipython.org), as it avoids having to pass an explicit\n `Session` object to run ops.\n\n For example:\n\n ```python\n sess = tf.InteractiveSession()\n a = tf.constant(5.0)\n b = tf.constant(6.0)\n c = a * b\n # We can just use 'c.eval()' without passing 'sess'\n print(c.eval())\n sess.close()\n ```\n\n Note that a regular session installs itself as the default session when it\n is created in a `with` statement. The common usage in non-interactive\n programs is to follow that pattern:\n\n ```python\n a = tf.constant(5.0)\n b = tf.constant(6.0)\n c = a * b\n with tf.Session():\n # We can also use 'c.eval()' here.\n print(c.eval())\n ```\n \"\"\"\n\n def __init__(self, target='', graph=None, config=None):\n \"\"\"Creates a new interactive TensorFlow session.\n\n If no `graph` argument is specified when constructing the session,\n the default graph will be launched in the session. If you are\n using more than one graph (created with `tf.Graph()` in the same\n process, you will have to use different sessions for each graph,\n but each graph can be used in multiple sessions. In this case, it\n is often clearer to pass the graph to be launched explicitly to\n the session constructor.\n\n Args:\n target: (Optional.) The execution engine to connect to.\n Defaults to using an in-process engine.\n graph: (Optional.) The `Graph` to be launched (described above).\n config: (Optional) `ConfigProto` proto used to configure the session.\n \"\"\"\n if not config:\n # If config is not provided, choose some reasonable defaults for\n # interactive use:\n #\n # - Grow GPU memory as needed at the cost of fragmentation.\n gpu_options = config_pb2.GPUOptions(allow_growth=True)\n config = config_pb2.ConfigProto(gpu_options=gpu_options)\n # Interactive sessions always place pruned graphs.\n config.graph_options.place_pruned_graph = True\n\n super(InteractiveSession, self).__init__(target, graph, config)\n self._default_session = self.as_default()\n self._default_session.enforce_nesting = False\n self._default_session.__enter__()\n self._explicit_graph = graph\n if self._explicit_graph is not None:\n self._default_graph = graph.as_default()\n self._default_graph.enforce_nesting = False\n self._default_graph.__enter__()\n\n def close(self):\n \"\"\"Closes an `InteractiveSession`.\"\"\"\n super(InteractiveSession, self).close()\n if self._explicit_graph is not None:\n self._default_graph.__exit__(None, None, None)\n self._default_session.__exit__(None, None, None)\n",
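# Hedged illustration (not part of the file above): the tensor-handle plumbing that
# BaseSession's handle mover/deleter helpers support can be exercised through the
# public TF 1.x API tf.get_session_handle / tf.get_session_tensor. A minimal sketch,
# assuming an in-process session:
import tensorflow as tf

with tf.Session() as sess:
  c = tf.constant(42.0)
  # Running the handle op stores the tensor inside the session and returns a
  # TensorHandle whose string `.handle` can be fed back into later run() calls.
  h = sess.run(tf.get_session_handle(c))
  holder, tensor = tf.get_session_tensor(h.handle, tf.float32)
  print(sess.run(tensor * 2.0, feed_dict={holder: h.handle}))  # prints 84.0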
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Gradients for operators defined in data_flow_ops.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\n\n\[email protected](\"DynamicPartition\")\ndef _DynamicPartitionGrads(op, *grads):\n \"\"\"Gradients for DynamicPartition.\"\"\"\n data = op.inputs[0]\n indices = op.inputs[1]\n num_partitions = op.get_attr(\"num_partitions\")\n\n prefix_shape = array_ops.shape(indices)\n original_indices = array_ops.reshape(\n math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)\n partitioned_indices = data_flow_ops.dynamic_partition(\n original_indices, indices, num_partitions)\n reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads)\n reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))\n return [reconstructed, None]\n\n\[email protected](\"DynamicStitch\")\ndef _DynamicStitchGrads(op, grad):\n \"\"\"Gradients for DynamicStitch.\"\"\"\n\n num_values = len(op.inputs) // 2\n indices_grad = [None] * num_values\n\n def AsInt32(x):\n return (x if op.inputs[0].dtype == dtypes.int32 else\n math_ops.cast(x, dtypes.int32))\n inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]\n if isinstance(grad, ops.IndexedSlices):\n output_shape = array_ops.shape(op.outputs[0])\n output_rows = output_shape[0]\n grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)\n values_grad = [array_ops.gather(grad, inp) for inp in inputs]\n return indices_grad + values_grad\n\n\nops.NotDifferentiable(\"Queue\")\nops.NotDifferentiable(\"QueueEnqueue\")\nops.NotDifferentiable(\"QueueEnqueueMany\")\nops.NotDifferentiable(\"QueueDequeue\")\nops.NotDifferentiable(\"QueueDequeueMany\")\nops.NotDifferentiable(\"QueueDequeueUpTo\")\nops.NotDifferentiable(\"QueueClose\")\nops.NotDifferentiable(\"QueueSize\")\n\nops.NotDifferentiable(\"Stack\")\nops.NotDifferentiable(\"StackPush\")\nops.NotDifferentiable(\"StackPop\")\nops.NotDifferentiable(\"StackClose\")\n\nops.NotDifferentiable(\"GetSessionHandle\")\nops.NotDifferentiable(\"GetSessionHandleV2\")\nops.NotDifferentiable(\"GetSessionTensor\")\nops.NotDifferentiable(\"DeleteSessionTensor\")\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Neural network components for hybrid models.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer\n\nfrom tensorflow.python.framework import ops\n\nfrom tensorflow.python.ops import array_ops\n\n\nclass FullyConnectedLayer(hybrid_layer.HybridLayer):\n \"\"\"A stacked, fully-connected feed-forward neural network layer.\"\"\"\n\n def _define_vars(self, params):\n pass\n\n def inference_graph(self, data):\n with ops.device(self.device_assigner):\n # Compute activations for the neural network.\n nn_activations = layers.fully_connected(data, self.params.layer_size)\n\n for _ in range(1, self.params.num_layers):\n # pylint: disable=W0106\n nn_activations = layers.fully_connected(nn_activations,\n self.params.layer_size)\n return nn_activations\n\n\nclass ManyToOneLayer(hybrid_layer.HybridLayer):\n\n def _define_vars(self, params):\n pass\n\n def inference_graph(self, data):\n with ops.device(self.device_assigner):\n # Compute activations for the neural network.\n nn_activations = layers.fully_connected(data, 1)\n\n # There is always one activation per instance by definition, so squeeze\n # away the extra dimension.\n return array_ops.squeeze(nn_activations, squeeze_dims=[1])\n\n\nclass FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):\n \"\"\"A stacked, fully-connected flattened feed-forward neural network layer.\"\"\"\n\n def _define_vars(self, params):\n pass\n\n def inference_graph(self, data):\n with ops.device(self.device_assigner):\n # Compute activations for the neural network.\n nn_activations = [layers.fully_connected(data, self.params.layer_size)]\n\n for _ in range(1, self.params.num_layers):\n # pylint: disable=W0106\n nn_activations.append(\n layers.fully_connected(\n nn_activations[-1],\n self.params.layer_size))\n\n nn_activations_tensor = array_ops.concat(\n nn_activations, 1, name=\"flattened_nn_activations\")\n\n return nn_activations_tensor\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for aggregate operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import adagrad\n\n\nclass AdagradOptimizerTest(test.TestCase):\n\n def doTestBasic(self, use_locking=False, use_resource=False):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)\n else:\n var0 = variables.Variable([1.0, 2.0], dtype=dtype)\n var1 = variables.Variable([3.0, 4.0], dtype=dtype)\n grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n ada_opt = adagrad.AdagradOptimizer(\n 3.0, initial_accumulator_value=0.1, use_locking=use_locking)\n ada_update = ada_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Run 3 steps of adagrad\n for _ in range(3):\n ada_update.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([2.715679168701172, 3.715679168701172]), var1.eval())\n\n def testBasic(self):\n self.doTestBasic(use_locking=False)\n\n def testBasicResource(self):\n self.doTestBasic(use_locking=False, use_resource=True)\n\n def testBasicLocked(self):\n self.doTestBasic(use_locking=True)\n\n def testMinimizeSparseResourceVariable(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n var0 = resource_variable_ops.ResourceVariable(\n [[1.0, 2.0], [3.0, 4.0]], dtype=dtype)\n x = constant_op.constant([[4.0], [5.0]], dtype=dtype)\n pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)\n loss = pred * pred\n sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType(\n [[1.0, 2.0], [3.0, 4.0]], var0.eval())\n # Run 1 step of sgd\n sgd_op.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n [[0, 1], [3, 4]], 
var0.eval(), atol=0.01)\n\n def testTensorLearningRate(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n var0 = variables.Variable([1.0, 2.0], dtype=dtype)\n var1 = variables.Variable([3.0, 4.0], dtype=dtype)\n grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n ada_opt = adagrad.AdagradOptimizer(\n constant_op.constant(3.0), initial_accumulator_value=0.1)\n ada_update = ada_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Run 3 steps of adagrad\n for _ in range(3):\n ada_update.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([2.715679168701172, 3.715679168701172]), var1.eval())\n\n def testSparseBasic(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)\n var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)\n grads0 = ops.IndexedSlices(\n constant_op.constant(\n [0.1], shape=[1, 1], dtype=dtype),\n constant_op.constant([0]),\n constant_op.constant([2, 1]))\n grads1 = ops.IndexedSlices(\n constant_op.constant(\n [0.01], shape=[1, 1], dtype=dtype),\n constant_op.constant([1]),\n constant_op.constant([2, 1]))\n ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)\n ada_update = ada_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllClose([[1.0], [2.0]], var0.eval())\n self.assertAllClose([[3.0], [4.0]], var1.eval())\n # Run 3 step of sgd\n for _ in range(3):\n ada_update.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n np.array([[-1.6026098728179932], [2.0]]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([[3.0], [3.715679168701172]]), var1.eval())\n\n def testSparseRepeatedIndices(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n repeated_index_update_var = variables.Variable(\n [[1.0], [2.0]], dtype=dtype)\n aggregated_update_var = variables.Variable(\n [[1.0], [2.0]], dtype=dtype)\n grad_repeated_index = ops.IndexedSlices(\n constant_op.constant(\n [0.1, 0.1], shape=[2, 1], dtype=dtype),\n constant_op.constant([1, 1]),\n constant_op.constant([2, 1]))\n grad_aggregated = ops.IndexedSlices(\n constant_op.constant(\n [0.2], shape=[1, 1], dtype=dtype),\n constant_op.constant([1]),\n constant_op.constant([2, 1]))\n repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(\n [(grad_repeated_index, repeated_index_update_var)])\n aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(\n [(grad_aggregated, aggregated_update_var)])\n variables.global_variables_initializer().run()\n self.assertAllClose(aggregated_update_var.eval(),\n repeated_index_update_var.eval())\n for _ in range(3):\n repeated_update.run()\n aggregated_update.run()\n self.assertAllClose(aggregated_update_var.eval(),\n repeated_index_update_var.eval())\n\n def testSparseRepeatedIndicesResourceVariable(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n var_repeated = 
resource_variable_ops.ResourceVariable(\n [1.0, 2.0], dtype=dtype)\n loss_repeated = math_ops.reduce_sum(\n embedding_ops.embedding_lookup(var_repeated, [0, 0]))\n var_aggregated = resource_variable_ops.ResourceVariable(\n [1.0, 2.0], dtype=dtype)\n loss_aggregated = 2 * math_ops.reduce_sum(\n embedding_ops.embedding_lookup(var_aggregated, [0]))\n update_op_repeated = adagrad.AdagradOptimizer(\n 2.0).minimize(loss_repeated)\n update_op_aggregated = adagrad.AdagradOptimizer(\n 2.0).minimize(loss_aggregated)\n variables.global_variables_initializer().run()\n self.assertAllCloseAccordingToType(\n var_repeated.eval(), var_aggregated.eval())\n for _ in range(3):\n update_op_repeated.run()\n update_op_aggregated.run()\n self.assertAllCloseAccordingToType(\n var_repeated.eval(), var_aggregated.eval())\n\n def testSparseStability(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n shape = [1, 6]\n var0 = variables.Variable(\n [[\n 0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,\n -0.0105945\n ]],\n dtype=dtype)\n grads0 = ops.IndexedSlices(\n constant_op.constant(\n [[\n -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,\n -8.4877e-05, -9.48906e-05\n ]],\n shape=shape,\n dtype=dtype),\n constant_op.constant([0]),\n constant_op.constant(shape))\n ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)\n ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))\n self.assertEqual([\"accumulator\"], ada_opt.get_slot_names())\n slot0 = ada_opt.get_slot(var0, \"accumulator\")\n init = variables.global_variables_initializer()\n for _ in range(100):\n init.run()\n ada_update.run()\n self.assertAllCloseAccordingToType(\n np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())\n self.assertAllCloseAccordingToType(\n np.array([[\n 0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,\n -0.01029443\n ]]), var0.eval())\n\n def testSharing(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session():\n var0 = variables.Variable([1.0, 2.0], dtype=dtype)\n var1 = variables.Variable([3.0, 4.0], dtype=dtype)\n grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)\n grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)\n ada_opt = adagrad.AdagradOptimizer(3.0)\n # Apply the optimizer twice. Both applications will use\n # the same accums.\n ada_update1 = ada_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n ada_update2 = ada_opt.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n self.assertEqual([\"accumulator\"], ada_opt.get_slot_names())\n slot0 = ada_opt.get_slot(var0, \"accumulator\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n slot1 = ada_opt.get_slot(var1, \"accumulator\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n variables.global_variables_initializer().run()\n\n # Fetch params to validate initial values.\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Mix the first and the second adagrad for 3 steps.\n ada_update1.run()\n ada_update2.run()\n ada_update1.run()\n # Validate updated params (the same as with only 1 Adagrad).\n self.assertAllCloseAccordingToType(\n np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())\n self.assertAllCloseAccordingToType(\n np.array([2.715679168701172, 3.715679168701172]), var1.eval())\n\n\nif __name__ == \"__main__\":\n test.main()\n",
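# A plain-NumPy sketch (assuming the textbook Adagrad update
# accum += g**2; var -= lr * g / sqrt(accum)) that reproduces the constants
# asserted above for var0 in doTestBasic after three steps:
import numpy as np

lr = 3.0
accum = np.array([0.1, 0.1])   # initial_accumulator_value
var = np.array([1.0, 2.0])     # initial var0
grad = np.array([0.1, 0.1])    # grads0
for _ in range(3):
  accum += grad * grad
  var -= lr * grad / np.sqrt(accum)
print(var)  # approx [-1.60261, -0.60261]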
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A Transform that parses lines from a CSV file.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.learn.python.learn.dataframe import transform\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import parsing_ops\n\n\nclass CSVParser(transform.TensorFlowTransform):\n \"\"\"A Transform that parses lines from a CSV file.\"\"\"\n\n def __init__(self, column_names, default_values):\n \"\"\"Initialize `CSVParser`.\n\n Args:\n column_names: a list of strings containing the names of columns to be\n output by the parser.\n default_values: a list containing each column.\n \"\"\"\n super(CSVParser, self).__init__()\n self._column_names = tuple(column_names)\n self._default_values = default_values\n\n @property\n def name(self):\n return \"CSVParser\"\n\n @property\n def input_valency(self):\n return 1\n\n @property\n def _output_names(self):\n return self.column_names\n\n @transform.parameter\n def column_names(self):\n return self._column_names\n\n @transform.parameter\n def default_values(self):\n return self._default_values\n\n def _apply_transform(self, input_tensors, **kwargs):\n default_consts = [constant_op.constant(d, shape=[1])\n for d in self._default_values]\n parsed_values = parsing_ops.decode_csv(input_tensors[0],\n record_defaults=default_consts)\n # pylint: disable=not-callable\n return self.return_type(*parsed_values)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for 3d convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\nclass BetaincTest(test.TestCase):\n\n def _testBetaInc(self, a_s, b_s, x_s, dtype):\n try:\n from scipy import special # pylint: disable=g-import-not-at-top\n np_dt = dtype.as_numpy_dtype\n\n # Test random values\n a_s = a_s.astype(np_dt) # in (0, infty)\n b_s = b_s.astype(np_dt) # in (0, infty)\n x_s = x_s.astype(np_dt) # in (0, 1)\n tf_a_s = constant_op.constant(a_s, dtype=dtype)\n tf_b_s = constant_op.constant(b_s, dtype=dtype)\n tf_x_s = constant_op.constant(x_s, dtype=dtype)\n tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)\n with self.test_session():\n tf_out = tf_out_t.eval()\n scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)\n\n # the scipy version of betainc uses a double-only implementation.\n # TODO(ebrevdo): identify reasons for (sometime) precision loss\n # with doubles\n tol = 1e-4 if dtype == dtypes.float32 else 5e-5\n self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=0)\n\n # Test out-of-range values (most should return nan output)\n combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))\n a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)\n with self.test_session():\n tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()\n scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)\n self.assertAllCloseAccordingToType(scipy_comb, tf_comb)\n\n # Test broadcasting between scalars and other shapes\n with self.test_session():\n self.assertAllCloseAccordingToType(\n special.betainc(0.1, b_s, x_s).astype(np_dt),\n math_ops.betainc(0.1, b_s, x_s).eval(),\n rtol=tol,\n atol=0)\n self.assertAllCloseAccordingToType(\n special.betainc(a_s, 0.1, x_s).astype(np_dt),\n math_ops.betainc(a_s, 0.1, x_s).eval(),\n rtol=tol,\n atol=0)\n self.assertAllCloseAccordingToType(\n special.betainc(a_s, b_s, 0.1).astype(np_dt),\n math_ops.betainc(a_s, b_s, 0.1).eval(),\n rtol=tol,\n atol=0)\n self.assertAllCloseAccordingToType(\n special.betainc(0.1, b_s, 0.1).astype(np_dt),\n math_ops.betainc(0.1, b_s, 0.1).eval(),\n rtol=tol,\n atol=0)\n self.assertAllCloseAccordingToType(\n special.betainc(0.1, 0.1, 0.1).astype(np_dt),\n math_ops.betainc(0.1, 0.1, 0.1).eval(),\n rtol=tol,\n atol=0)\n\n with 
self.assertRaisesRegexp(ValueError, \"must be equal\"):\n math_ops.betainc(0.5, [0.5], [[0.5]])\n\n with self.test_session():\n with self.assertRaisesOpError(\"Shapes of .* are inconsistent\"):\n a_p = array_ops.placeholder(dtype)\n b_p = array_ops.placeholder(dtype)\n x_p = array_ops.placeholder(dtype)\n math_ops.betainc(a_p, b_p, x_p).eval(\n feed_dict={a_p: 0.5,\n b_p: [0.5],\n x_p: [[0.5]]})\n\n except ImportError as e:\n tf_logging.warn(\"Cannot test special functions: %s\" % str(e))\n\n def testBetaIncFloat(self):\n a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)\n b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)\n x_s = np.random.rand(10, 10) # in (0, 1)\n self._testBetaInc(a_s, b_s, x_s, dtypes.float32)\n\n def testBetaIncDouble(self):\n a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)\n b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)\n x_s = np.random.rand(10, 10) # in (0, 1)\n self._testBetaInc(a_s, b_s, x_s, dtypes.float64)\n\n def testBetaIncDoubleVeryLargeValues(self):\n a_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)\n b_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)\n x_s = np.random.rand(10, 10) # in (0, 1)\n self._testBetaInc(a_s, b_s, x_s, dtypes.float64)\n\n def testBetaIncDoubleVerySmallValues(self):\n a_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)\n b_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)\n x_s = np.random.rand(10, 10) # in (0, 1)\n self._testBetaInc(a_s, b_s, x_s, dtypes.float64)\n\n def testBetaIncFloatVerySmallValues(self):\n a_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)\n b_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)\n x_s = np.random.rand(10, 10) # in (0, 1)\n self._testBetaInc(a_s, b_s, x_s, dtypes.float32)\n\n def testBetaIncFpropAndBpropAreNeverNAN(self):\n with self.test_session() as sess:\n space = np.logspace(-8, 5).tolist()\n space_x = np.linspace(1e-16, 1 - 1e-16).tolist()\n ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))\n # Test grads are never nan\n ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)\n gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)\n gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)\n tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)\n tf_gout, grads_x = sess.run(\n [tf_gout_t,\n gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])\n\n # Equivalent to `assertAllFalse` (if it existed).\n self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),\n np.isnan(tf_gout))\n self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),\n np.isnan(grads_x))\n\n def testBetaIncGrads(self):\n err_tolerance = 1e-3\n with self.test_session():\n # Test gradient\n ga_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)\n gb_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)\n gx_s = np.random.rand(2, 2) # in (0, 1)\n tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)\n tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)\n tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)\n tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)\n err = gradient_checker.compute_gradient_error(\n [tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)\n print(\"betainc gradient err = %g \" % err)\n self.assertLess(err, err_tolerance)\n\n # Test broadcast gradient\n gx_s = np.random.rand() # in (0, 1)\n tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)\n tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)\n err = 
gradient_checker.compute_gradient_error(\n [tf_gx_s], [()], tf_gout_t, ga_s.shape)\n print(\"betainc gradient err = %g \" % err)\n self.assertLess(err, err_tolerance)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
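# Sketch of the identity the comparisons above rely on: both tf.betainc and
# scipy.special.betainc compute the *regularized* incomplete beta function
# I_x(a, b), i.e. the partial integral divided by the complete beta function.
import numpy as np
from scipy import special, integrate

a, b, x = 2.0, 3.0, 0.4
partial, _ = integrate.quad(lambda t: t**(a - 1.0) * (1.0 - t)**(b - 1.0), 0.0, x)
print(partial / special.beta(a, b))   # ~0.5248
print(special.betainc(a, b, x))       # ~0.5248, matches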
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Run the python doc generator and fail if there are any broken links.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.python import debug as tf_debug\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.tools.docs import generate_lib\n\n\nclass Flags(object):\n resource_root = resource_loader.get_root_dir_with_all_resources()\n src_dir = os.path.join(resource_root, 'third_party/tensorflow/docs_src')\n base_dir = os.path.join(resource_root, 'third_party/tensorflow/')\n output_dir = googletest.GetTempDir()\n\n\nclass BuildDocsTest(googletest.TestCase):\n\n def testBuildDocs(self):\n doc_generator = generate_lib.DocGenerator()\n\n doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])\n doc_generator.load_contrib()\n\n status = doc_generator.build(Flags())\n\n if status:\n self.fail('Found %s Errors!' % status)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Exports a toy linear regression inference graph.\n\nExports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter\nformat.\n\nThis graph calculates,\n y = a*x + b\nwhere a and b are variables with a=0.5 and b=2.\n\nOutput from this program is typically used to exercise Session\nloading and execution code.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.session_bundle import exporter\n\nFLAGS = None\n\n\ndef Export(export_dir, use_checkpoint_v2):\n with tf.Session() as sess:\n # Make model parameters a&b variables instead of constants to\n # exercise the variable reloading mechanisms.\n a = tf.Variable(0.5, name=\"a\")\n b = tf.Variable(2.0, name=\"b\")\n\n # Create a placeholder for serialized tensorflow.Example messages to be fed.\n serialized_tf_example = tf.placeholder(tf.string, name=\"tf_example\")\n\n # Parse the tensorflow.Example looking for a feature named \"x\" with a single\n # floating point value.\n feature_configs = {\"x\": tf.FixedLenFeature([1], dtype=tf.float32),}\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n # Use tf.identity() to assign name\n x = tf.identity(tf_example[\"x\"], name=\"x\")\n\n # Calculate, y = a*x + b\n y = tf.add(tf.multiply(a, x), b, name=\"y\")\n\n # Setup a standard Saver for our variables.\n save = tf.train.Saver(\n {\n \"a\": a,\n \"b\": b\n },\n sharded=True,\n write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else\n tf.train.SaverDef.V1)\n\n # asset_path contains the base directory of assets used in training (e.g.\n # vocabulary files).\n original_asset_path = tf.constant(\"/tmp/original/export/assets\")\n # Ops reading asset files should reference the asset_path tensor\n # which stores the original asset path at training time and the\n # overridden assets directory at restore time.\n asset_path = tf.Variable(original_asset_path,\n name=\"asset_path\",\n trainable=False,\n collections=[])\n assign_asset_path = asset_path.assign(original_asset_path)\n\n # Use a fixed global step number.\n global_step_tensor = tf.Variable(123, name=\"global_step\")\n\n # Create a RegressionSignature for our input and output.\n regression_signature = exporter.regression_signature(\n input_tensor=serialized_tf_example,\n # Use tf.identity here because we export two signatures here.\n # Otherwise only graph for one of the signatures will be loaded\n # (whichever is created first) during serving.\n output_tensor=tf.identity(y))\n named_graph_signature = {\n \"inputs\": exporter.generic_signature({\"x\": x}),\n \"outputs\": exporter.generic_signature({\"y\": y})\n }\n\n # Create two filename assets and corresponding tensors.\n # TODO(b/26254158) Consider adding validation of 
file existence as well as\n    # hashes (e.g. sha1) for consistency.\n    original_filename1 = tf.constant(\"hello1.txt\")\n    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)\n    filename1 = tf.Variable(original_filename1,\n                            name=\"filename1\",\n                            trainable=False,\n                            collections=[])\n    assign_filename1 = filename1.assign(original_filename1)\n    original_filename2 = tf.constant(\"hello2.txt\")\n    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)\n    filename2 = tf.Variable(original_filename2,\n                            name=\"filename2\",\n                            trainable=False,\n                            collections=[])\n    assign_filename2 = filename2.assign(original_filename2)\n\n    # Init op contains a group of all variables that we assign.\n    init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)\n\n    # CopyAssets is used as a callback during export to copy files to the\n    # given export directory.\n    def CopyAssets(filepaths, export_path):\n      print(\"copying asset files to: %s\" % export_path)\n      for filepath in filepaths:\n        print(\"copying asset file: %s\" % filepath)\n\n    # Run an export.\n    tf.global_variables_initializer().run()\n    export = exporter.Exporter(save)\n    export.init(\n        sess.graph.as_graph_def(),\n        init_op=init_op,\n        default_graph_signature=regression_signature,\n        named_graph_signatures=named_graph_signature,\n        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),\n        assets_callback=CopyAssets)\n    export.export(export_dir, global_step_tensor, sess)\n\n\ndef main(_):\n  Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)\n\n\nif __name__ == \"__main__\":\n  parser = argparse.ArgumentParser()\n  parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n  parser.add_argument(\n      \"--export_dir\",\n      type=str,\n      default=\"/tmp/half_plus_two\",\n      help=\"Directory where to export inference model.\"\n  )\n  parser.add_argument(\n      \"--use_checkpoint_v2\",\n      type=\"bool\",\n      nargs=\"?\",\n      const=True,\n      default=False,\n      help=\"If true, write v2 checkpoint files.\"\n  )\n  FLAGS, unparsed = parser.parse_known_args()\n  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides an interface for working with multiple event files.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport threading\n\nimport six\n\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.tensorboard.backend.event_processing import directory_watcher\nfrom tensorflow.tensorboard.backend.event_processing import event_accumulator\nfrom tensorflow.tensorboard.backend.event_processing import io_wrapper\n\n\nclass EventMultiplexer(object):\n \"\"\"An `EventMultiplexer` manages access to multiple `EventAccumulator`s.\n\n Each `EventAccumulator` is associated with a `run`, which is a self-contained\n TensorFlow execution. The `EventMultiplexer` provides methods for extracting\n information about events from multiple `run`s.\n\n Example usage for loading specific runs from files:\n\n ```python\n x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})\n x.Reload()\n ```\n\n Example usage for loading a directory where each subdirectory is a run\n\n ```python\n (eg:) /parent/directory/path/\n /parent/directory/path/run1/\n /parent/directory/path/run1/events.out.tfevents.1001\n /parent/directory/path/run1/events.out.tfevents.1002\n\n /parent/directory/path/run2/\n /parent/directory/path/run2/events.out.tfevents.9232\n\n /parent/directory/path/run3/\n /parent/directory/path/run3/events.out.tfevents.9232\n x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')\n (which is equivalent to:)\n x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}\n ```\n\n If you would like to watch `/parent/directory/path`, wait for it to be created\n (if necessary) and then periodically pick up new runs, use\n `AutoloadingMultiplexer`\n @@Tensors\n \"\"\"\n\n def __init__(self,\n run_path_map=None,\n size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,\n purge_orphaned_data=True):\n \"\"\"Constructor for the `EventMultiplexer`.\n\n Args:\n run_path_map: Dict `{run: path}` which specifies the\n name of a run, and the path to find the associated events. If it is\n None, then the EventMultiplexer initializes without any runs.\n size_guidance: A dictionary mapping from `tagType` to the number of items\n to store for each tag of that type. 
See\n `event_accumulator.EventAccumulator` for details.\n purge_orphaned_data: Whether to discard any events that were \"orphaned\" by\n a TensorFlow restart.\n \"\"\"\n logging.info('Event Multiplexer initializing.')\n self._accumulators_mutex = threading.Lock()\n self._accumulators = {}\n self._paths = {}\n self._reload_called = False\n self._size_guidance = size_guidance\n self.purge_orphaned_data = purge_orphaned_data\n if run_path_map is not None:\n logging.info('Event Multplexer doing initialization load for %s',\n run_path_map)\n for (run, path) in six.iteritems(run_path_map):\n self.AddRun(path, run)\n logging.info('Event Multiplexer done initializing')\n\n def AddRun(self, path, name=None):\n \"\"\"Add a run to the multiplexer.\n\n If the name is not specified, it is the same as the path.\n\n If a run by that name exists, and we are already watching the right path,\n do nothing. If we are watching a different path, replace the event\n accumulator.\n\n If `Reload` has been called, it will `Reload` the newly created\n accumulators.\n\n Args:\n path: Path to the event files (or event directory) for given run.\n name: Name of the run to add. If not provided, is set to path.\n\n Returns:\n The `EventMultiplexer`.\n \"\"\"\n if name is None or name is '':\n name = path\n accumulator = None\n with self._accumulators_mutex:\n if name not in self._accumulators or self._paths[name] != path:\n if name in self._paths and self._paths[name] != path:\n # TODO(danmane) - Make it impossible to overwrite an old path with\n # a new path (just give the new path a distinct name)\n logging.warning('Conflict for name %s: old path %s, new path %s',\n name, self._paths[name], path)\n logging.info('Constructing EventAccumulator for %s', path)\n accumulator = event_accumulator.EventAccumulator(\n path,\n size_guidance=self._size_guidance,\n purge_orphaned_data=self.purge_orphaned_data)\n self._accumulators[name] = accumulator\n self._paths[name] = path\n if accumulator:\n if self._reload_called:\n accumulator.Reload()\n return self\n\n def AddRunsFromDirectory(self, path, name=None):\n \"\"\"Load runs from a directory; recursively walks subdirectories.\n\n If path doesn't exist, no-op. This ensures that it is safe to call\n `AddRunsFromDirectory` multiple times, even before the directory is made.\n\n If path is a directory, load event files in the directory (if any exist) and\n recursively call AddRunsFromDirectory on any subdirectories. This mean you\n can call AddRunsFromDirectory at the root of a tree of event logs and\n TensorBoard will load them all.\n\n If the `EventMultiplexer` is already loaded this will cause\n the newly created accumulators to `Reload()`.\n Args:\n path: A string path to a directory to load runs from.\n name: Optionally, what name to apply to the runs. If name is provided\n and the directory contains run subdirectories, the name of each subrun\n is the concatenation of the parent name and the subdirectory name. 
If\n name is provided and the directory contains event files, then a run\n is added called \"name\" and with the events from the path.\n\n Raises:\n ValueError: If the path exists and isn't a directory.\n\n Returns:\n The `EventMultiplexer`.\n \"\"\"\n logging.info('Starting AddRunsFromDirectory: %s', path)\n for subdir in GetLogdirSubdirectories(path):\n logging.info('Adding events from directory %s', subdir)\n rpath = os.path.relpath(subdir, path)\n subname = os.path.join(name, rpath) if name else rpath\n self.AddRun(subdir, name=subname)\n logging.info('Done with AddRunsFromDirectory: %s', path)\n return self\n\n def Reload(self):\n \"\"\"Call `Reload` on every `EventAccumulator`.\"\"\"\n logging.info('Beginning EventMultiplexer.Reload()')\n self._reload_called = True\n # Build a list so we're safe even if the list of accumulators is modified\n # even while we're reloading.\n with self._accumulators_mutex:\n items = list(self._accumulators.items())\n\n names_to_delete = set()\n for name, accumulator in items:\n try:\n accumulator.Reload()\n except (OSError, IOError) as e:\n logging.error(\"Unable to reload accumulator '%s': %s\", name, e)\n except directory_watcher.DirectoryDeletedError:\n names_to_delete.add(name)\n\n with self._accumulators_mutex:\n for name in names_to_delete:\n logging.warning(\"Deleting accumulator '%s'\", name)\n del self._accumulators[name]\n logging.info('Finished with EventMultiplexer.Reload()')\n return self\n\n def PluginAssets(self, plugin_name):\n \"\"\"Get index of runs and assets for a given plugin.\n\n Args:\n plugin_name: Name of the plugin we are checking for.\n\n Returns:\n A dictionary that maps from run_name to a list of plugin\n assets for that run.\n \"\"\"\n with self._accumulators_mutex:\n # To avoid nested locks, we construct a copy of the run-accumulator map\n items = list(six.iteritems(self._accumulators))\n\n return {run: accum.PluginAssets(plugin_name) for run, accum in items}\n\n def RetrievePluginAsset(self, run, plugin_name, asset_name):\n \"\"\"Return the contents for a specific plugin asset from a run.\n\n Args:\n run: The string name of the run.\n plugin_name: The string name of a plugin.\n asset_name: The string name of an asset.\n\n Returns:\n The string contents of the plugin asset.\n\n Raises:\n KeyError: If the asset is not available.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.RetrievePluginAsset(plugin_name, asset_name)\n\n def FirstEventTimestamp(self, run):\n \"\"\"Return the timestamp of the first event of the given run.\n\n This may perform I/O if no events have been loaded yet for the run.\n\n Args:\n run: A string name of the run for which the timestamp is retrieved.\n\n Returns:\n The wall_time of the first event of the run, which will typically be\n seconds since the epoch.\n\n Raises:\n KeyError: If the run is not found.\n ValueError: If the run has no events loaded and there are no events on\n disk to load.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.FirstEventTimestamp()\n\n def Scalars(self, run, tag):\n \"\"\"Retrieve the scalar events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.ScalarEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Scalars(tag)\n\n def 
HealthPills(self, run, node_name):\n \"\"\"Retrieve the health pill events associated with a run and node name.\n\n Args:\n run: A string name of the run for which health pills are retrieved.\n node_name: A string name of the node for which health pills are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the node name is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.HealthPillEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.HealthPills(node_name)\n\n def GetOpsWithHealthPills(self, run):\n \"\"\"Determines which ops have at least 1 health pill event for a given run.\n\n Args:\n run: The name of the run.\n\n Raises:\n KeyError: If the run is not found, or the node name is not available for\n the given run.\n\n Returns:\n The list of names of ops with health pill events.\n \"\"\"\n return self._GetAccumulator(run).GetOpsWithHealthPills()\n\n def Graph(self, run):\n \"\"\"Retrieve the graph associated with the provided run.\n\n Args:\n run: A string name of a run to load the graph for.\n\n Raises:\n KeyError: If the run is not found.\n ValueError: If the run does not have an associated graph.\n\n Returns:\n The `GraphDef` protobuf data structure.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Graph()\n\n def MetaGraph(self, run):\n \"\"\"Retrieve the metagraph associated with the provided run.\n\n Args:\n run: A string name of a run to load the graph for.\n\n Raises:\n KeyError: If the run is not found.\n ValueError: If the run does not have an associated graph.\n\n Returns:\n The `MetaGraphDef` protobuf data structure.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.MetaGraph()\n\n def RunMetadata(self, run, tag):\n \"\"\"Get the session.run() metadata associated with a TensorFlow run and tag.\n\n Args:\n run: A string name of a TensorFlow run.\n tag: A string name of the tag associated with a particular session.run().\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for the\n given run.\n\n Returns:\n The metadata in the form of `RunMetadata` protobuf data structure.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.RunMetadata(tag)\n\n def Histograms(self, run, tag):\n \"\"\"Retrieve the histogram events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.HistogramEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Histograms(tag)\n\n def CompressedHistograms(self, run, tag):\n \"\"\"Retrieve the compressed histogram events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.CompressedHistogramEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.CompressedHistograms(tag)\n\n def Images(self, run, tag):\n \"\"\"Retrieve the image events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not 
found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.ImageEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Images(tag)\n\n def Audio(self, run, tag):\n \"\"\"Retrieve the audio events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.AudioEvents`.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Audio(tag)\n\n def Tensors(self, run, tag):\n \"\"\"Retrieve the tensor events associated with a run and tag.\n\n Args:\n run: A string name of the run for which values are retrieved.\n tag: A string name of the tag for which values are retrieved.\n\n Raises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\n Returns:\n An array of `event_accumulator.TensorEvent`s.\n \"\"\"\n accumulator = self._GetAccumulator(run)\n return accumulator.Tensors(tag)\n\n def Runs(self):\n \"\"\"Return all the run names in the `EventMultiplexer`.\n\n Returns:\n ```\n {runName: { images: [tag1, tag2, tag3],\n scalarValues: [tagA, tagB, tagC],\n histograms: [tagX, tagY, tagZ],\n compressedHistograms: [tagX, tagY, tagZ],\n graph: true, meta_graph: true}}\n ```\n \"\"\"\n with self._accumulators_mutex:\n # To avoid nested locks, we construct a copy of the run-accumulator map\n items = list(six.iteritems(self._accumulators))\n return {run_name: accumulator.Tags() for run_name, accumulator in items}\n\n def RunPaths(self):\n \"\"\"Returns a dict mapping run names to event file paths.\"\"\"\n return self._paths\n\n def _GetAccumulator(self, run):\n with self._accumulators_mutex:\n return self._accumulators[run]\n\n\ndef GetLogdirSubdirectories(path):\n \"\"\"Returns subdirectories with event files on path.\"\"\"\n if gfile.Exists(path) and not gfile.IsDirectory(path):\n raise ValueError('GetLogdirSubdirectories: path exists and is not a '\n 'directory, %s' % path)\n\n # ListRecursively just yields nothing if the path doesn't exist.\n return (\n subdir\n for (subdir, files) in io_wrapper.ListRecursively(path)\n if list(filter(event_accumulator.IsTensorFlowEventsFile, files))\n )\n",
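# Hedged usage sketch of the EventMultiplexer defined above; the log directory
# path is a made-up example. AddRunsFromDirectory walks subdirectories for event
# files, Reload() loads them, and Runs() reports what was found per run.
from tensorflow.tensorboard.backend.event_processing import event_multiplexer

multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory('/tmp/example_logdir')  # hypothetical path
multiplexer.Reload()
for run_name, tags in multiplexer.Runs().items():
  print(run_name, sorted(tags))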
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.nets.resnet_v2.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_utils\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_v2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef create_test_input(batch_size, height, width, channels):\n \"\"\"Create test input tensor.\n\n Args:\n batch_size: The number of images per batch or `None` if unknown.\n height: The height of each image or `None` if unknown.\n width: The width of each image or `None` if unknown.\n channels: The number of channels per image or `None` if unknown.\n\n Returns:\n Either a placeholder `Tensor` of dimension\n [batch_size, height, width, channels] if any of the inputs are `None` or a\n constant `Tensor` with the mesh grid values along the spatial dimensions.\n \"\"\"\n if None in [batch_size, height, width, channels]:\n return array_ops.placeholder(dtypes.float32,\n (batch_size, height, width, channels))\n else:\n return math_ops.to_float(\n np.tile(\n np.reshape(\n np.reshape(np.arange(height), [height, 1]) + np.reshape(\n np.arange(width), [1, width]), [1, height, width, 1]),\n [batch_size, 1, 1, channels]))\n\n\nclass ResnetUtilsTest(test.TestCase):\n\n def testSubsampleThreeByThree(self):\n x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])\n x = resnet_utils.subsample(x, 2)\n expected = array_ops.reshape(\n constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])\n with self.test_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testSubsampleFourByFour(self):\n x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])\n x = resnet_utils.subsample(x, 2)\n expected = array_ops.reshape(\n constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])\n with self.test_session():\n self.assertAllClose(x.eval(), expected.eval())\n\n def testConv2DSameEven(self):\n n, n2 = 4, 2\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = array_ops.reshape(w, [3, 3, 1, 1])\n\n variable_scope.get_variable('Conv/weights', initializer=w)\n variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))\n 
variable_scope.get_variable_scope().reuse_variables()\n\n y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],\n [43, 66, 84, 46], [26, 37, 46, 22]])\n y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = math_ops.to_float([[14, 43], [43, 84]])\n y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = math_ops.to_float([[48, 37], [37, 22]])\n y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])\n\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def testConv2DSameOdd(self):\n n, n2 = 5, 3\n\n # Input image.\n x = create_test_input(1, n, n, 1)\n\n # Convolution kernel.\n w = create_test_input(1, 3, 3, 1)\n w = array_ops.reshape(w, [3, 3, 1, 1])\n\n variable_scope.get_variable('Conv/weights', initializer=w)\n variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))\n variable_scope.get_variable_scope().reuse_variables()\n\n y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')\n y1_expected = math_ops.to_float([[14, 28, 43, 58, 34],\n [28, 48, 66, 84, 46],\n [43, 66, 84, 102, 55],\n [58, 84, 102, 120, 64],\n [34, 46, 55, 64, 30]])\n y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])\n\n y2 = resnet_utils.subsample(y1, 2)\n y2_expected = math_ops.to_float([[14, 43, 34],\n [43, 84, 55],\n [34, 55, 30]])\n y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])\n\n y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')\n y3_expected = y2_expected\n\n y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')\n y4_expected = y2_expected\n\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(y1.eval(), y1_expected.eval())\n self.assertAllClose(y2.eval(), y2_expected.eval())\n self.assertAllClose(y3.eval(), y3_expected.eval())\n self.assertAllClose(y4.eval(), y4_expected.eval())\n\n def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):\n \"\"\"A plain ResNet without extra layers before or after the ResNet blocks.\"\"\"\n with variable_scope.variable_scope(scope, values=[inputs]):\n with arg_scope([layers.conv2d], outputs_collections='end_points'):\n net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)\n end_points = utils.convert_collection_to_dict('end_points')\n return net, end_points\n\n def testEndPointsV2(self):\n \"\"\"Test the end points of a tiny v2 bottleneck network.\"\"\"\n blocks = [\n resnet_v2.resnet_v2_block(\n 'block1', base_depth=1, num_units=2, stride=2),\n resnet_v2.resnet_v2_block(\n 'block2', base_depth=2, num_units=2, stride=1),\n ]\n inputs = create_test_input(2, 32, 16, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')\n expected = [\n 'tiny/block1/unit_1/bottleneck_v2/shortcut',\n 'tiny/block1/unit_1/bottleneck_v2/conv1',\n 'tiny/block1/unit_1/bottleneck_v2/conv2',\n 'tiny/block1/unit_1/bottleneck_v2/conv3',\n 'tiny/block1/unit_2/bottleneck_v2/conv1',\n 'tiny/block1/unit_2/bottleneck_v2/conv2',\n 
'tiny/block1/unit_2/bottleneck_v2/conv3',\n 'tiny/block2/unit_1/bottleneck_v2/shortcut',\n 'tiny/block2/unit_1/bottleneck_v2/conv1',\n 'tiny/block2/unit_1/bottleneck_v2/conv2',\n 'tiny/block2/unit_1/bottleneck_v2/conv3',\n 'tiny/block2/unit_2/bottleneck_v2/conv1',\n 'tiny/block2/unit_2/bottleneck_v2/conv2',\n 'tiny/block2/unit_2/bottleneck_v2/conv3'\n ]\n self.assertItemsEqual(expected, end_points)\n\n def _stack_blocks_nondense(self, net, blocks):\n \"\"\"A simplified ResNet Block stacker without output stride control.\"\"\"\n for block in blocks:\n with variable_scope.variable_scope(block.scope, 'block', [net]):\n for i, unit in enumerate(block.args):\n with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):\n net = block.unit_fn(net, rate=1, **unit)\n return net\n\n def testAtrousValuesBottleneck(self):\n \"\"\"Verify the values of dense feature extraction by atrous convolution.\n\n Make sure that dense feature extraction by stack_blocks_dense() followed by\n subsampling gives identical results to feature extraction at the nominal\n network output stride using the simple self._stack_blocks_nondense() above.\n \"\"\"\n block = resnet_v2.resnet_v2_block\n blocks = [\n block('block1', base_depth=1, num_units=2, stride=2),\n block('block2', base_depth=2, num_units=2, stride=2),\n block('block3', base_depth=4, num_units=2, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n nominal_stride = 8\n\n # Test both odd and even input dimensions.\n height = 30\n width = 31\n with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):\n for output_stride in [1, 2, 4, 8, None]:\n with ops.Graph().as_default():\n with self.test_session() as sess:\n random_seed.set_random_seed(0)\n inputs = create_test_input(1, height, width, 3)\n # Dense feature extraction followed by subsampling.\n output = resnet_utils.stack_blocks_dense(inputs, blocks,\n output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n variable_scope.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n expected = self._stack_blocks_nondense(inputs, blocks)\n sess.run(variables.global_variables_initializer())\n output, expected = sess.run([output, expected])\n self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)\n\n\nclass ResnetCompleteNetworkTest(test.TestCase):\n \"\"\"Tests with complete small ResNet v2 networks.\"\"\"\n\n def _resnet_small(self,\n inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n reuse=None,\n scope='resnet_v2_small'):\n \"\"\"A shallow and thin ResNet v2 for faster tests.\"\"\"\n block = resnet_v2.resnet_v2_block\n blocks = [\n block('block1', base_depth=1, num_units=3, stride=2),\n block('block2', base_depth=2, num_units=3, stride=2),\n block('block3', base_depth=4, num_units=3, stride=2),\n block('block4', base_depth=8, num_units=2, stride=1),\n ]\n return resnet_v2.resnet_v2(inputs, blocks, num_classes, global_pool,\n output_stride, include_root_block, reuse, scope)\n\n def testClassificationEndPoints(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n logits, end_points = self._resnet_small(\n inputs, num_classes, global_pool, scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n 
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])\n self.assertTrue('predictions' in end_points)\n self.assertListEqual(end_points['predictions'].get_shape().as_list(),\n [2, 1, 1, num_classes])\n\n def testClassificationShapes(self):\n global_pool = True\n num_classes = 10\n inputs = create_test_input(2, 224, 224, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs, num_classes, global_pool, scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 28, 28, 4],\n 'resnet/block2': [2, 14, 14, 8],\n 'resnet/block3': [2, 7, 7, 16],\n 'resnet/block4': [2, 7, 7, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 321, 321, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs, num_classes, global_pool, scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 21, 21, 8],\n 'resnet/block3': [2, 11, 11, 16],\n 'resnet/block4': [2, 11, 11, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testRootlessFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n inputs = create_test_input(2, 128, 128, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs,\n num_classes,\n global_pool,\n include_root_block=False,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 64, 64, 4],\n 'resnet/block2': [2, 32, 32, 8],\n 'resnet/block3': [2, 16, 16, 16],\n 'resnet/block4': [2, 16, 16, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalEndpointShapes(self):\n global_pool = False\n num_classes = 10\n output_stride = 8\n inputs = create_test_input(2, 321, 321, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n _, end_points = self._resnet_small(\n inputs,\n num_classes,\n global_pool,\n output_stride=output_stride,\n scope='resnet')\n endpoint_to_shape = {\n 'resnet/block1': [2, 41, 41, 4],\n 'resnet/block2': [2, 41, 41, 8],\n 'resnet/block3': [2, 41, 41, 16],\n 'resnet/block4': [2, 41, 41, 32]\n }\n for endpoint in endpoint_to_shape:\n shape = endpoint_to_shape[endpoint]\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n def testAtrousFullyConvolutionalValues(self):\n \"\"\"Verify dense feature extraction with atrous convolution.\"\"\"\n nominal_stride = 32\n for output_stride in [4, 8, 16, 32, None]:\n with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):\n with ops.Graph().as_default():\n with self.test_session() as sess:\n random_seed.set_random_seed(0)\n inputs = create_test_input(2, 81, 81, 3)\n # Dense feature extraction followed by subsampling.\n output, _ = self._resnet_small(\n inputs, None, global_pool=False, output_stride=output_stride)\n if output_stride is None:\n factor = 1\n else:\n factor = nominal_stride // output_stride\n output = resnet_utils.subsample(output, factor)\n # Make the two networks use the same weights.\n variable_scope.get_variable_scope().reuse_variables()\n # Feature extraction at the nominal network rate.\n 
expected, _ = self._resnet_small(inputs, None, global_pool=False)\n sess.run(variables.global_variables_initializer())\n self.assertAllClose(\n output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)\n\n def testUnknownBatchSize(self):\n batch = 2\n height, width = 65, 65\n global_pool = True\n num_classes = 10\n inputs = create_test_input(None, height, width, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n logits, _ = self._resnet_small(\n inputs, num_classes, global_pool, scope='resnet')\n self.assertTrue(logits.op.name.startswith('resnet/logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, 1, 1, num_classes])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 1, 1, num_classes))\n\n def testFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n inputs = create_test_input(batch, None, None, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(inputs, None, global_pool)\n self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 3, 3, 32))\n\n def testAtrousFullyConvolutionalUnknownHeightWidth(self):\n batch = 2\n height, width = 65, 65\n global_pool = False\n output_stride = 8\n inputs = create_test_input(batch, None, None, 3)\n with arg_scope(resnet_utils.resnet_arg_scope()):\n output, _ = self._resnet_small(\n inputs, None, global_pool, output_stride=output_stride)\n self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])\n images = create_test_input(batch, height, width, 3)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch, 9, 9, 32))\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorBoard HTTP utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gzip\nimport json\nimport re\nimport time\nimport wsgiref.handlers\n\nimport six\n\nfrom werkzeug import wrappers\n\nfrom tensorflow.python.util import compat\nfrom tensorflow.tensorboard.backend import json_util\n\n\n_EXTRACT_MIMETYPE_PATTERN = re.compile(r'^[^;\\s]*')\n_EXTRACT_CHARSET_PATTERN = re.compile(r'charset=([-_0-9A-Za-z]+)')\n\n# Allows *, gzip or x-gzip, but forbid gzip;q=0\n# https://tools.ietf.org/html/rfc7231#section-5.3.4\n_ALLOWS_GZIP_PATTERN = re.compile(\n r'(?:^|,|\\s)(?:(?:x-)?gzip|\\*)(?!;q=0)(?:\\s|,|$)')\n\n_TEXTUAL_MIMETYPES = set([\n 'application/javascript',\n 'application/json',\n 'application/json+protobuf',\n 'image/svg+xml',\n 'text/css',\n 'text/csv',\n 'text/html',\n 'text/plain',\n 'text/tab-separated-values',\n 'text/x-protobuf',\n])\n\n_JSON_MIMETYPES = set([\n 'application/json',\n 'application/json+protobuf',\n])\n\n\ndef Respond(request,\n content,\n content_type,\n code=200,\n expires=0,\n content_encoding=None,\n encoding='utf-8'):\n \"\"\"Construct a werkzeug Response.\n\n Responses are transmitted to the browser with compression if: a) the browser\n supports it; b) it's sane to compress the content_type in question; and c)\n the content isn't already compressed, as indicated by the content_encoding\n parameter.\n\n Browser and proxy caching is completely disabled by default. If the expires\n parameter is greater than zero then the response will be able to be cached by\n the browser for that many seconds; however, proxies are still forbidden from\n caching so that developers can bypass the cache with Ctrl+Shift+R.\n\n For textual content that isn't JSON, the encoding parameter is used as the\n transmission charset which is automatically appended to the Content-Type\n header. That is unless of course the content_type parameter contains a\n charset parameter. If the two disagree, the characters in content will be\n transcoded to the latter.\n\n If content_type declares a JSON media type, then content MAY be a dict, list,\n tuple, or set, in which case this function has an implicit composition with\n json_util.Cleanse and json.dumps. The encoding parameter is used to decode\n byte strings within the JSON object; therefore transmitting binary data\n within JSON is not permitted. JSON is transmitted as ASCII unless the\n content_type parameter explicitly defines a charset parameter, in which case\n the serialized JSON bytes will use that instead of escape sequences.\n\n Args:\n request: A werkzeug Request object. 
Used mostly to check the\n Accept-Encoding header.\n content: Payload data as byte string, unicode string, or maybe JSON.\n content_type: Media type and optionally an output charset.\n code: Numeric HTTP status code to use.\n expires: Second duration for browser caching.\n content_encoding: Encoding if content is already encoded, e.g. 'gzip'.\n encoding: Input charset if content parameter has byte strings.\n\n Returns:\n A werkzeug Response object (a WSGI application).\n \"\"\"\n\n mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(0)\n charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)\n charset = charset_match.group(1) if charset_match else encoding\n textual = charset_match or mimetype in _TEXTUAL_MIMETYPES\n if mimetype in _JSON_MIMETYPES and (isinstance(content, dict) or\n isinstance(content, list) or\n isinstance(content, set) or\n isinstance(content, tuple)):\n content = json.dumps(json_util.Cleanse(content, encoding),\n ensure_ascii=not charset_match)\n if charset != encoding:\n content = compat.as_text(content, encoding)\n content = compat.as_bytes(content, charset)\n if textual and not charset_match and mimetype not in _JSON_MIMETYPES:\n content_type += '; charset=' + charset\n if (not content_encoding and textual and\n _ALLOWS_GZIP_PATTERN.search(request.headers.get('Accept-Encoding', ''))):\n out = six.BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3)\n f.write(content)\n f.close()\n content = out.getvalue()\n content_encoding = 'gzip'\n if request.method == 'HEAD':\n content = ''\n headers = []\n\n headers.append(('Content-Length', str(len(content))))\n if content_encoding:\n headers.append(('Content-Encoding', content_encoding))\n if expires > 0:\n e = wsgiref.handlers.format_date_time(time.time() + float(expires))\n headers.append(('Expires', e))\n headers.append(('Cache-Control', 'private, max-age=%d' % expires))\n else:\n headers.append(('Expires', '0'))\n headers.append(('Cache-Control', 'no-cache, must-revalidate'))\n\n return wrappers.Response(\n response=content, status=code, headers=headers, content_type=content_type)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.\n\nThis model is similar to one described in this paper:\n \"Character-level Convolutional Networks for Text Classification\"\n http://arxiv.org/abs/1509.01626\n\nand is somewhat alternative to the Lua code from here:\n https://github.com/zhangxiangxiao/Crepe\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport pandas\nfrom sklearn import metrics\nimport tensorflow as tf\n\nlearn = tf.contrib.learn\n\nFLAGS = None\n\nMAX_DOCUMENT_LENGTH = 100\nHIDDEN_SIZE = 20\n\n\ndef char_rnn_model(features, target):\n \"\"\"Character level recurrent neural network model to predict classes.\"\"\"\n target = tf.one_hot(target, 15, 1, 0)\n byte_list = tf.one_hot(features, 256, 1, 0)\n byte_list = tf.unstack(byte_list, axis=1)\n\n cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)\n _, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)\n\n logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss,\n tf.contrib.framework.get_global_step(),\n optimizer='Adam',\n learning_rate=0.01)\n\n return ({\n 'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)\n }, loss, train_op)\n\n\ndef main(unused_argv):\n # Prepare training and testing data\n dbpedia = learn.datasets.load_dataset(\n 'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)\n x_train = pandas.DataFrame(dbpedia.train.data)[1]\n y_train = pandas.Series(dbpedia.train.target)\n x_test = pandas.DataFrame(dbpedia.test.data)[1]\n y_test = pandas.Series(dbpedia.test.target)\n\n # Process vocabulary\n char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)\n x_train = np.array(list(char_processor.fit_transform(x_train)))\n x_test = np.array(list(char_processor.transform(x_test)))\n\n # Build model\n classifier = learn.Estimator(model_fn=char_rnn_model)\n\n # Train and predict\n classifier.fit(x_train, y_train, steps=100)\n y_predicted = [\n p['class'] for p in classifier.predict(\n x_test, as_iterable=True)\n ]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--test_with_fake_data',\n default=False,\n help='Test the example code with fake data.',\n action='store_true')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.platform.test.main",
"tensorflow.python.client.session.Session",
"numpy.equal",
"numpy.not_equal",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.all_util.remove_undocumented"
],
[
"tensorflow.python.ops.nn_ops.softplus",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.asarray",
"numpy.finfo",
"numpy.all",
"tensorflow.python.platform.test.main",
"numpy.array",
"numpy.logaddexp",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.contrib.keras.python.keras.engine.topology.get_source_inputs",
"tensorflow.contrib.keras.python.keras.utils.layer_utils.convert_dense_weights_data_format",
"tensorflow.contrib.keras.python.keras.models.Model",
"tensorflow.contrib.keras.python.keras.layers.Flatten",
"tensorflow.contrib.keras.python.keras.backend.backend",
"tensorflow.contrib.keras.python.keras.layers.Dense",
"tensorflow.contrib.keras.python.keras.layers.Conv2D",
"tensorflow.contrib.keras.python.keras.backend.image_data_format",
"tensorflow.contrib.keras.python.keras.utils.data_utils.get_file",
"tensorflow.contrib.keras.python.keras.layers.MaxPooling2D",
"tensorflow.contrib.keras.python.keras.utils.layer_utils.convert_all_kernels_in_model",
"tensorflow.contrib.keras.python.keras.layers.Input",
"tensorflow.contrib.keras.python.keras.layers.GlobalAveragePooling2D",
"tensorflow.contrib.keras.python.keras.layers.GlobalMaxPooling2D"
],
[
"numpy.random.seed",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.core.framework.types_pb2.DataType.Value",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.ops.device",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.core.protobuf.config_pb2.ConfigProto"
],
[
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_state_ops._temporary_variable",
"tensorflow.python.ops.gen_state_ops._destroy_temporary_variable",
"tensorflow.python.ops.variables.is_variable_initialized",
"tensorflow.python.ops.state_ops.assign_add",
"numpy.arange",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.framework.ops.control_dependencies",
"numpy.array",
"tensorflow.python.ops.state_ops.variable_op"
],
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.python.framework.ops.NotDifferentiable"
],
[
"numpy.diag",
"scipy.stats.laplace",
"numpy.linspace",
"tensorflow.python.ops.variables.Variable",
"numpy.zeros_like",
"numpy.any",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.ones_like",
"scipy.special.ndtr",
"numpy.reshape",
"numpy.finfo",
"numpy.diff",
"tensorflow.python.platform.test.main",
"numpy.isnan",
"scipy.special.log_ndtr",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.random.RandomState",
"scipy.stats.norm.pdf",
"numpy.isfinite",
"numpy.sort",
"numpy.testing.assert_array_less",
"numpy.prod",
"tensorflow.python.ops.variables.global_variables_initializer"
],
[
"tensorflow.python.pywrap_tensorflow.TF_GetBuffer",
"numpy.asarray",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.util.compat.as_text",
"tensorflow.python.pywrap_tensorflow.TF_NewBuffer",
"tensorflow.python.pywrap_tensorflow.TF_PRun",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.pywrap_tensorflow.TF_DeleteStatus",
"tensorflow.python.pywrap_tensorflow.TF_Run",
"tensorflow.python.framework.ops.default_session",
"tensorflow.python.pywrap_tensorflow.TF_Reset",
"tensorflow.python.framework.errors.raise_exception_on_not_ok_status",
"tensorflow.python.pywrap_tensorflow.TF_NewSessionOptions",
"tensorflow.python.pywrap_tensorflow.TF_CloseDeprecatedSession",
"tensorflow.python.ops.session_ops.TensorHandle",
"tensorflow.python.util.nest.flatten_dict_items",
"numpy.array",
"tensorflow.python.ops.session_ops._get_handle_deleter",
"tensorflow.python.pywrap_tensorflow.TF_PRunSetup",
"tensorflow.python.ops.session_ops._get_handle_mover",
"tensorflow.python.pywrap_tensorflow.TF_NewDeprecatedSession",
"tensorflow.core.protobuf.config_pb2.GPUOptions",
"tensorflow.python.pywrap_tensorflow.TF_NewStatus",
"tensorflow.python.pywrap_tensorflow.TF_DeleteSessionOptions",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.pywrap_tensorflow.TF_DeleteDeprecatedSession",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.pywrap_tensorflow.TF_DeleteBuffer"
],
[
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.data_flow_ops.dynamic_stitch",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.math_ops.unsorted_segment_sum",
"tensorflow.python.ops.data_flow_ops.dynamic_partition",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.python.framework.ops.device"
],
[
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.training.adagrad.AdagradOptimizer",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.parsing_ops.decode_csv",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.linspace",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.isnan",
"numpy.logspace",
"tensorflow.python.ops.math_ops.betainc",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"numpy.random.randn",
"numpy.zeros_like",
"tensorflow.python.ops.gradients_impl.gradients",
"scipy.special.betainc",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.platform.googletest.GetTempDir",
"tensorflow.python.platform.googletest.main",
"tensorflow.tools.docs.generate_lib.DocGenerator",
"tensorflow.python.platform.resource_loader.get_root_dir_with_all_resources"
],
[
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.FixedLenFeature",
"tensorflow.Variable",
"tensorflow.get_collection",
"tensorflow.parse_example",
"tensorflow.contrib.session_bundle.exporter.generic_signature",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.contrib.session_bundle.exporter.Exporter",
"tensorflow.train.Saver",
"tensorflow.group",
"tensorflow.add_to_collection",
"tensorflow.app.run"
],
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.tensorboard.backend.event_processing.event_accumulator.EventAccumulator",
"tensorflow.tensorboard.backend.event_processing.io_wrapper.ListRecursively",
"tensorflow.python.platform.gfile.IsDirectory"
],
[
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.subsample",
"numpy.arange",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict",
"tensorflow.contrib.slim.python.slim.nets.resnet_v2.resnet_v2",
"tensorflow.contrib.framework.python.ops.arg_scope",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.conv2d_same",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.contrib.layers.conv2d",
"tensorflow.contrib.slim.python.slim.nets.resnet_v2.resnet_v2_block",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.ops.math_ops.range",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.stack_blocks_dense",
"tensorflow.contrib.slim.python.slim.nets.resnet_utils.resnet_arg_scope",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.compat.as_text",
"tensorflow.tensorboard.backend.json_util.Cleanse",
"tensorflow.python.util.compat.as_bytes"
],
[
"tensorflow.contrib.losses.softmax_cross_entropy",
"tensorflow.nn.softmax",
"pandas.Series",
"tensorflow.unstack",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.contrib.framework.get_global_step",
"pandas.DataFrame",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.app.run",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.one_hot",
"tensorflow.argmax",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
vulpicastor/advent-of-code-2021 | [
"12aaf84091604caf88acf3b4f7a118d866c33f5f"
] | [
"src/11.py"
] | [
"#!/usr/bin/env python3\n\n# pylint: disable=unused-import\nimport collections\nimport functools\nimport io\nimport itertools\nimport operator as op\nimport re\nimport timeit\n\nimport numpy as np\nimport aocd\n\nYEAR = 2021\nDAY = 11\n\n\ndef step(grid):\n grid += 1\n flash = np.zeros_like(grid, dtype=bool)\n while np.any(grid[~flash] > 9):\n new_flash = (grid > 9) ^ flash\n grid[:-1, :-1] += new_flash[1:, 1:]\n grid[:-1, :] += new_flash[1:, :]\n grid[:-1, 1:] += new_flash[1:, :-1]\n grid[:, :-1] += new_flash[:, 1:]\n grid[:, 1:] += new_flash[:, :-1]\n grid[1:, :-1] += new_flash[:-1, 1:]\n grid[1:, :] += new_flash[:-1, :]\n grid[1:, 1:] += new_flash[:-1, :-1]\n flash |= new_flash\n grid[flash] = 0\n return flash\n\n\ndef main():\n data = \"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"\n data = aocd.get_data(day=DAY, year=YEAR)\n inlist = np.array([list(map(int, l)) for l in data.split('\\n')])\n print(inlist)\n\n grid = inlist.copy()\n num_flashes = 0\n for i in range(100):\n num_flashes += np.sum(step(grid))\n print(num_flashes)\n answer = num_flashes\n\n aocd.submit(answer, part='a', day=DAY, year=YEAR)\n\n grid = inlist.copy()\n for i in itertools.count(1):\n flash = step(grid)\n if np.all(flash):\n answer = i\n break\n print(answer)\n aocd.submit(answer, part='b', day=DAY, year=YEAR)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.all",
"numpy.zeros_like",
"numpy.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elyase/jack | [
"a4f43a4012a540d55d2e05d8a904e6f8cc3002f1"
] | [
"jack/train_reader.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport logging\nimport math\nimport os\nimport random\nimport shutil\n\nimport tensorflow as tf\n\nfrom jack import readers\nfrom jack.core.tensorflow import TFReader\nfrom jack.eval import evaluate_reader, pretty_print_results\nfrom jack.util.hooks import LossHook, ExamplesPerSecHook, ETAHook\n\nlogger = logging.getLogger(__name__)\n\n\ndef train(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n if isinstance(reader, TFReader):\n train_tensorflow(reader, train_data, test_data, dev_data, configuration, debug)\n else:\n train_pytorch(reader, train_data, test_data, dev_data, configuration, debug)\n\n\ndef train_tensorflow(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n import tensorflow as tf\n seed = configuration.get('seed', 0)\n\n # make everything deterministic\n random.seed(seed)\n tf.set_random_seed(seed)\n\n clip_value = configuration.get('clip_value')\n batch_size = configuration.get('batch_size')\n dev_batch_size = configuration.get('dev_batch_size') or batch_size\n epochs = configuration.get('epochs')\n l2 = configuration.get('l2')\n optimizer = configuration.get('optimizer')\n learning_rate = configuration.get('learning_rate')\n min_learning_rate = configuration.get('min_learning_rate')\n learning_rate_decay = configuration.get('learning_rate_decay')\n log_interval = configuration.get('log_interval')\n validation_interval = configuration.get('validation_interval')\n tensorboard_folder = configuration.get('tensorboard_folder')\n reader_type = configuration.get('reader')\n save_dir = configuration.get('save_dir')\n write_metrics_to = configuration.get('write_metrics_to')\n\n if clip_value != 0.0:\n clip_value = - abs(clip_value), abs(clip_value)\n\n learning_rate = tf.get_variable(\"learning_rate\", initializer=learning_rate, dtype=tf.float32, trainable=False)\n lr_decay_op = learning_rate.assign(tf.maximum(learning_rate_decay * learning_rate, min_learning_rate))\n\n name_to_optimizer = {\n 'gd': tf.train.GradientDescentOptimizer,\n 'adam': tf.train.AdamOptimizer,\n 'adagrad': tf.train.AdagradOptimizer,\n 'adadelta': tf.train.AdadeltaOptimizer,\n 'rmsprop': tf.train.RMSPropOptimizer\n }\n\n if optimizer not in name_to_optimizer:\n raise ValueError('Unknown optimizer: {}'.format(optimizer))\n\n tf_optimizer_class = name_to_optimizer[optimizer]\n tf_optimizer = tf_optimizer_class(learning_rate=learning_rate)\n\n sw = None\n if tensorboard_folder is not None:\n if os.path.exists(tensorboard_folder):\n shutil.rmtree(tensorboard_folder)\n sw = tf.summary.FileWriter(tensorboard_folder)\n\n # Hooks\n iter_interval = 1 if debug else log_interval\n hooks = [LossHook(reader, iter_interval, summary_writer=sw),\n ETAHook(reader, iter_interval, int(math.ceil(len(train_data) / batch_size)), epochs),\n ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]\n\n preferred_metric, best_metric = readers.eval_hooks[reader_type].preferred_metric_and_initial_score()\n\n def side_effect(metrics, prev_metric):\n \"\"\"Returns: a state (in this case a metric) that is used as input for the next call\"\"\"\n if prev_metric is None: # store whole reader only at beginning of training\n reader.store(save_dir)\n m = metrics[preferred_metric]\n if prev_metric is not None and m < prev_metric:\n reader.session.run(lr_decay_op)\n logger.info(\"Decayed learning rate to: %.5f\" % reader.session.run(learning_rate))\n elif m > best_metric[0] and save_dir is not None:\n best_metric[0] = m\n 
reader.model_module.store(os.path.join(save_dir, \"model_module\"))\n logger.info(\"Saving reader to: %s\" % save_dir)\n return m\n\n # this is the standard hook for the reader\n hooks.append(readers.eval_hooks[reader_type](\n reader, dev_data, dev_batch_size, summary_writer=sw, side_effect=side_effect,\n iter_interval=validation_interval,\n epoch_interval=(1 if validation_interval is None else None),\n write_metrics_to=write_metrics_to))\n\n # Train\n reader.train(tf_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,\n l2=l2, clip=clip_value, clip_op=tf.clip_by_value, summary_writer=sw)\n\n # Test final reader\n if dev_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, dev_data, batch_size)\n\n logger.info(\"############### Results on the Dev Set##############\")\n pretty_print_results(result_dict)\n\n if test_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, test_data, batch_size)\n\n logger.info(\"############### Results on the Test Set##############\")\n pretty_print_results(result_dict)\n\n\ndef train_pytorch(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n import torch\n seed = configuration.get('seed')\n\n # make everything deterministic\n random.seed(seed)\n torch.manual_seed(seed)\n\n clip_value = configuration.get('clip_value')\n batch_size = configuration.get('batch_size')\n epochs = configuration.get('epochs')\n l2 = configuration.get('l2')\n optimizer = configuration.get('optimizer')\n learning_rate = configuration.get('learning_rate')\n learning_rate_decay = configuration.get('learning_rate_decay')\n log_interval = configuration.get('log_interval')\n validation_interval = configuration.get('validation_interval')\n tensorboard_folder = configuration.get('tensorboard_folder')\n model = configuration.get('reader')\n save_dir = configuration.get('save_dir')\n write_metrics_to = configuration.get('write_metrics_to')\n\n # need setup here already :(\n reader.setup_from_data(train_data, is_training=True)\n\n if clip_value != 0.0:\n clip_value = - abs(clip_value), abs(clip_value)\n\n name_to_optimizer = {\n 'gd': torch.optim.SGD,\n 'adam': torch.optim.Adam,\n 'adagrad': torch.optim.Adagrad,\n 'adadelta': torch.optim.Adadelta\n }\n\n if optimizer not in name_to_optimizer:\n raise ValueError('Unknown optimizer: {}'.format(optimizer))\n\n torch_optimizer_class = name_to_optimizer[optimizer]\n params = list(reader.model_module.prediction_module.parameters())\n params.extend(reader.model_module.loss_module.parameters())\n\n torch_optimizer = torch_optimizer_class(params, lr=learning_rate)\n\n sw = None\n if tensorboard_folder is not None:\n if os.path.exists(tensorboard_folder):\n shutil.rmtree(tensorboard_folder)\n sw = tf.summary.FileWriter(tensorboard_folder)\n\n # Hooks\n iter_interval = 1 if debug else log_interval\n hooks = [LossHook(reader, iter_interval, summary_writer=sw),\n ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]\n\n preferred_metric, best_metric = readers.eval_hooks[model].preferred_metric_and_initial_score()\n\n def side_effect(metrics, prev_metric):\n \"\"\"Returns: a state (in this case a metric) that is used as input for the next call\"\"\"\n m = metrics[preferred_metric]\n if prev_metric is not None and m < prev_metric:\n for param_group in torch_optimizer.param_groups:\n param_group['lr'] *= learning_rate_decay\n logger.info(\"Decayed learning rate to: %.5f\" % param_group['lr'])\n elif m > 
best_metric[0] and save_dir is not None:\n best_metric[0] = m\n if prev_metric is None: # store whole model only at beginning of training\n reader.store(save_dir)\n else:\n reader.model_module.store(os.path.join(save_dir, \"model_module\"))\n logger.info(\"Saving model to: %s\" % save_dir)\n return m\n\n # this is the standard hook for the model\n hooks.append(readers.eval_hooks[model](\n reader, dev_data, batch_size, summary_writer=sw, side_effect=side_effect,\n iter_interval=validation_interval,\n epoch_interval=(1 if validation_interval is None else None),\n write_metrics_to=write_metrics_to))\n\n # Train\n reader.train(torch_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,\n l2=l2, clip=clip_value)\n\n # Test final model\n if dev_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, dev_data, batch_size)\n\n logger.info(\"############### Results on the Dev Set##############\")\n pretty_print_results(result_dict)\n\n if test_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, test_data, batch_size)\n\n logger.info(\"############### Results on the Test Set##############\")\n pretty_print_results(result_dict)\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.summary.FileWriter",
"torch.manual_seed",
"tensorflow.maximum",
"tensorflow.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
limberc/hypercl | [
"ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807",
"ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807",
"ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807"
] | [
"utils/batchnorm_layer.py",
"utils/init_utils.py",
"mnets/mlp.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2019 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n- **title** :utils/batchnorm_layer.py\n- **author** :ch\n- **contact** :[email protected]\n- **created** :09/02/2019\n- **version** :1.0\n- **python_version** :3.6.8\n\nImplementation of a hypernet compatible batchnorm layer.\n\nThe joint use of batch-normalization and hypernetworks is not straight forward,\nmainly due to the statistics accumulated by the batch-norm operation which\nexpect the weights of the main network to only change slowly. If a hypernetwork\nreplaces the whole set of weights, the statistics previously estimated by the\nbatch-norm layer might be completely off.\n\nTo circumvent this problem, we provide multiple solutions:\n\n - In a continual learning setting with one set of weights per task, we can\n simply estimate and store statistics per task (hence, the batch-norm\n operation has to be conditioned on the task).\n - The statistics are distilled into the hypernetwork. This would require\n the addition of an extra loss term.\n - The statistics can be treated as parameters that are outputted by the\n hypernetwork. In this case, nothing enforces that these \"statistics\"\n behave similar to statistics that would result from a running estimate\n (hence, the resulting operation might have nothing in common with batch-\n norm).\n - Always use the statistics estimated on the current batch.\n\nNote, we also provide the option of turning off the statistics, in which case\nthe statistics will be set to zero mean and unit variance. This is helpful when\ninterpreting batch-normalization as a general form of gain modulation (i.e.,\njust applying a shift and scale to neural activities).\n\"\"\"\nfrom warnings import warn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BatchNormLayer(nn.Module):\n r\"\"\"Hypernetwork-compatible batch-normalization layer.\n\n Note, batch normalization performs the following operation\n\n .. math::\n\n y = \\frac{x - \\mathrm{E}[x]}{\\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\\n \\gamma + \\beta\n\n This class allows to deviate from this standard implementation in order to\n provide the flexibility required when using hypernetworks. Therefore, we\n slightly change the notation to\n\n .. 
math::\n\n y = \\frac{x - m_{\\text{stats}}^{(t)}}{\\sqrt{v_{\\text{stats}}^{(t)} + \\\n \\epsilon}} * \\gamma^{(t)} + \\beta^{(t)}\n\n We use this notation to highlight that the running statistics\n :math:`m_{\\text{stats}}^{(t)}` and :math:`v_{\\text{stats}}^{(t)}` are not\n necessarily estimates resulting from mean and variance computation but might\n be learned parameters (e.g., the outputs of a hypernetwork).\n\n We additionally use the superscript :math:`(t)` to denote that the gain\n :math:`\\gamma`, offset :math:`\\beta` and statistics may be dynamically\n selected based on some external context information.\n\n This class provides the possibility to checkpoint statistics\n :math:`m_{\\text{stats}}^{(t)}` and :math:`v_{\\text{stats}}^{(t)}`, but\n **not** gains and offsets.\n\n .. note::\n If context-dependent gains :math:`\\gamma^{(t)}` and offsets\n :math:`\\beta^{(t)}` are required, then they have to be maintained\n externally, e.g., via a task-conditioned hypernetwork (see\n `this paper`_ for an example) and passed to the :meth:`forward` method.\n\n .. _this paper: https://arxiv.org/abs/1906.00695\n\n Attributes:\n weights: A list of all internal weights of this layer. If all\n weights are assumed to be generated externally, then this\n attribute will be ``None``.\n param_shapes: A list of list of integers. Each list represents the\n shape of a parameter tensor. Note, this attribute is\n independent of the attribute :attr:`weights`, it always comprises\n the shapes of all weight tensors as if the network would be stand-\n alone (i.e., no weights being passed to the :meth:`forward` method).\n Note, unless ``learnable_stats`` is enabled, the layer statistics\n are not considered here.\n hyper_shapes: A list of list of integers. Each list represents the\n shape of a weight tensor that can be passed to the :meth:`forward`\n method. If all weights are maintained internally, then this\n attribute will be ``None``.\n Specifically, this attribute is controlled by the argument\n ``affine``. If ``affine`` is ``True``, this attribute will be\n ``None``. Otherwise this attribute contains the shape of\n :math:`\\gamma` and :math:`\\beta`.\n num_stats: The number :math:`T` of internally managed statistics\n :math:`\\{(m_{\\text{stats}}^{(1)}, v_{\\text{stats}}^{(1)}), \\dots, \\\n (m_{\\text{stats}}^{(T)}, v_{\\text{stats}}^{(T)}) \\}`. This number is\n incremented everytime the method :meth:`checkpoint_stats` is called.\n \"\"\"\n\n def __init__(self, num_features, momentum=0.1, affine=True,\n track_running_stats=True, frozen_stats=False,\n learnable_stats=False):\n r\"\"\"\n Args:\n num_features: See argument ``num_features``, for instance, of class\n :class:`torch.nn.BatchNorm1d`.\n momentum: See argument ``momentum`` of class\n :class:`torch.nn.BatchNorm1d`.\n affine: See argument ``affine`` of class\n :class:`torch.nn.BatchNorm1d`. 
If set to :code:`False`, the\n input activity will simply be \"whitened\" according to the\n applied layer statistics (except if gain :math:`\\gamma` and\n offset :math:`\\beta` are passed to the :meth:`forward` method).\n\n Note, if ``learnable_stats`` is :code:`False`, then setting\n ``affine`` to :code:`False` results in no learnable weights for\n this layer (running stats might still be updated, but not via\n gradient descent).\n\n Note, even if this option is ``False``, one may still pass a\n gain :math:`\\gamma` and offset :math:`\\beta` to the\n :meth:`forward` method.\n track_running_stats: See argument ``track_running_stats`` of class\n :class:`torch.nn.BatchNorm1d`.\n frozen_stats: If ``True``, the layer statistics are frozen at their\n initial values of :math:`\\gamma = 1` and :math:`\\beta = 0`,\n i.e., layer activity will not be whitened.\n\n Note, this option requires ``track_running_stats`` to be set to\n ``False``.\n learnable_stats: If ``True``, the layer statistics are initialized\n as learnable parameters (:code:`requires_grad=True`).\n\n Note, these extra parameters will be maintained internally and\n not added to the :attr:`weights`. Statistics can always be\n maintained externally and passed to the :meth:`forward` method.\n\n Note, this option requires ``track_running_stats`` to be set to\n ``False``.\n \"\"\"\n super(BatchNormLayer, self).__init__()\n\n if learnable_stats:\n # FIXME We need our custom stats computation for this.\n # The running stats updated by `torch.nn.functional.batch_norm` do\n # not allow backpropagation.\n # See here on how they are computed:\n # https://github.com/pytorch/pytorch/blob/96fe2b4ecbbd02143d95f467655a2d697282ac32/aten/src/ATen/native/Normalization.cpp#L137\n raise NotImplementedError('Option \"learnable_stats\" has not been ' +\n 'implemented yet!')\n\n if momentum is None:\n # If one wants to implement this, then please note that the\n # attribute `num_batches_tracked` has to be added. 
Also, note the\n # extra code for computing the momentum value in the forward method\n # of class `_BatchNorm`:\n # https://pytorch.org/docs/stable/_modules/torch/nn/modules/batchnorm.html#_BatchNorm\n raise NotImplementedError('This reimplementation of PyTorch its ' +\n 'batchnorm layer does not support ' +\n 'setting \"momentum\" to None.')\n\n if learnable_stats and track_running_stats:\n raise ValueError('Option \"track_running_stats\" must be set to ' +\n 'False when enabling \"learnable_stats\".')\n\n if frozen_stats and track_running_stats:\n raise ValueError('Option \"track_running_stats\" must be set to ' +\n 'False when enabling \"frozen_stats\".')\n\n self._num_features = num_features\n self._momentum = momentum\n self._affine = affine\n self._track_running_stats = track_running_stats\n self._frozen_stats = frozen_stats\n self._learnable_stats = learnable_stats\n\n self.register_buffer('_num_stats', torch.tensor(0, dtype=torch.long))\n\n self._weights = nn.ParameterList()\n self._param_shapes = [[num_features], [num_features]]\n\n if affine:\n # Gamma\n self.register_parameter('scale', nn.Parameter( \\\n torch.Tensor(num_features), requires_grad=True))\n # Beta\n self.register_parameter('bias', nn.Parameter( \\\n torch.Tensor(num_features), requires_grad=True))\n\n self._weights.append(self.scale)\n self._weights.append(self.bias)\n\n nn.init.ones_(self.scale)\n nn.init.zeros_(self.bias)\n\n elif not learnable_stats:\n self._weights = None\n\n if learnable_stats:\n # Don't forget to add the new params to `self._weights`.\n # Don't forget to add shapes to `self._param_shapes`.\n raise NotImplementedError()\n\n elif track_running_stats or frozen_stats:\n # Note, in case of frozen stats, we just don't update the stats\n # initialized here later on.\n self.checkpoint_stats()\n else:\n mname, vname = self._stats_names(0)\n self.register_buffer(mname, None)\n self.register_buffer(vname, None)\n\n @property\n def weights(self):\n \"\"\"Getter for read-only attribute :attr:`weights`.\n\n Returns:\n A :class:`torch.nn.ParameterList` or ``None``, if no parameters are\n internally maintained.\n \"\"\"\n return self._weights\n\n @property\n def param_shapes(self):\n \"\"\"Getter for read-only attribute :attr:`param_shapes`.\n\n Returns:\n A list of lists of integers.\n \"\"\"\n return self._param_shapes\n\n @property\n def hyper_shapes(self):\n \"\"\"Getter for read-only attribute :attr:`hyper_shapes`.\n\n Returns:\n A list of lists of integers.\n \"\"\"\n # FIXME not implemented attribute. Do we even need the attribute, given\n # that all components are individually passed to the forward method?\n raise NotImplementedError('Not implemented yet!')\n return self._hyper_shapes\n\n @property\n def num_stats(self):\n \"\"\"Getter for read-only attribute :attr:`num_stats`.\n\n Returns:\n (int)\n \"\"\"\n return self._num_stats\n\n def forward(self, inputs, running_mean=None, running_var=None, weight=None,\n bias=None, stats_id=None):\n r\"\"\"Apply batch normalization to given layer activations.\n\n Based on the state if this module (attribute :attr:`training`), the\n configuration of this layer and the parameters currently passed, the\n behavior of this function will be different.\n\n The core of this method still relies on the function\n :func:`torch.nn.functional.batch_norm`. 
In the following we list the\n different behaviors of this method based on the context.\n\n **In training mode:**\n\n We first consider the case that this module is in training mode, i.e.,\n :meth:`torch.nn.Module.train` has been called.\n\n Usually, during training, the running statistics are not used when\n computing the output, instead the statistics computed on the current\n batch are used (denoted by *use batch stats* in the table below).\n However, the batch statistics are typically updated during training\n (denoted by *update running stats* in the table below).\n\n The above described scenario would correspond to passing batch\n statistics to the function :func:`torch.nn.functional.batch_norm` and\n setting the parameter ``training`` to ``True``.\n\n +----------------------+---------------------+-------------------------+\n | **training mode** | **use batch stats** | **update running stats**|\n +----------------------+---------------------+-------------------------+\n | given stats | Yes | Yes |\n +----------------------+---------------------+-------------------------+\n | track running stats | Yes | Yes |\n +----------------------+---------------------+-------------------------+\n | frozen stats | No | No |\n +----------------------+---------------------+-------------------------+\n | learnable stats | Yes | Yes [1]_ |\n +----------------------+---------------------+-------------------------+\n |no track running stats| Yes | No |\n +----------------------+---------------------+-------------------------+\n\n The meaning of each row in this table is as follows:\n\n - **given stats**: External stats are provided via the parameters\n ``running_mean`` and ``running_var``.\n - **track running stats**: If ``track_running_stats`` was set to\n ``True`` in the constructor and no stats were given.\n - **frozen stats**: If ``frozen_stats`` was set to ``True`` in the\n constructor and no stats were given.\n - **learnable stats**: If ``learnable_stats`` was set to ``True`` in\n the constructor and no stats were given.\n - **no track running stats**: If none of the above options apply,\n then the statistics will always be computed from the current batch\n (also in eval mode).\n\n .. note::\n If provided, running stats specified via ``running_mean`` and\n ``running_var`` always have priority.\n\n .. 
[1] We use a custom implementation to update the running statistics,\n that is compatible with backpropagation.\n\n **In evaluation mode:**\n\n We now consider the case that this module is in evaluation mode, i.e.,\n :meth:`torch.nn.Module.eval` has been called.\n\n Here is the same table as above just for the evaluation mode.\n\n +----------------------+---------------------+-------------------------+\n | **evaluation mode** | **use batch stats** | **update running stats**|\n +----------------------+---------------------+-------------------------+\n | track running stats | No | No |\n +----------------------+---------------------+-------------------------+\n | frozen stats | No | No |\n +----------------------+---------------------+-------------------------+\n | learnable stats | No | No |\n +----------------------+---------------------+-------------------------+\n | given stats | No | No |\n +----------------------+---------------------+-------------------------+\n |no track running stats| Yes | No |\n +----------------------+---------------------+-------------------------+\n\n Args:\n inputs: The inputs to the batchnorm layer.\n running_mean (optional): Running mean stats\n :math:`m_{\\text{stats}}`. This option has priority, i.e., any\n internally maintained statistics are ignored if given.\n\n .. note::\n If specified, then ``running_var`` also has to be specified.\n running_var (optional): Similar to option ``running_mean``, but for\n the running variance stats :math:`v_{\\text{stats}}`\n\n .. note::\n If specified, then ``running_mean`` also has to be\n specified.\n weight (optional): The gain factors :math:`\\gamma`. If given, any\n internal gains are ignored. If option ``affine`` was set to\n ``False`` in the constructor and this option remains ``None``,\n then no gains are multiplied to the \"whitened\" inputs.\n bias (optional): The behavior of this option is similar to option\n ``weight``, except that this option represents the offsets\n :math:`\\beta`.\n stats_id: This argument is optional except if multiple running\n stats checkpoints exist (i.e., attribute :attr:`num_stats` is\n greater than 1) and no running stats have been provided to this\n method.\n\n .. note::\n This argument is ignored if running stats have been passed.\n\n Returns:\n The layer activation ``inputs`` after batch-norm has been applied.\n \"\"\"\n assert (running_mean is None and running_var is None or \\\n running_mean is not None and running_var is not None)\n\n if not self._affine:\n if weight is None or bias is None:\n raise ValueError('Layer was generated in non-affine mode. 
' +\n 'Therefore, arguments \"weight\" and \"bias\" ' +\n 'may not be None.')\n\n # No gains given but we have internal gains.\n # Otherwise, if no gains are given we leave `weight` as None.\n if weight is None and self._affine:\n weight = self.scale\n if bias is None and self._affine:\n bias = self.bias\n\n stats_given = running_mean is not None\n\n if (running_mean is None or running_var is None):\n if stats_id is None and self.num_stats > 1:\n raise ValueError('Parameter \"stats_id\" is not defined but ' +\n 'multiple running stats are available.')\n elif self._track_running_stats:\n if stats_id is None:\n stats_id = 0\n assert (stats_id < self.num_stats)\n\n rm, rv = self.get_stats(stats_id)\n\n if running_mean is None:\n running_mean = rm\n if running_var is None:\n running_var = rv\n elif stats_id is not None:\n warn('Parameter \"stats_id\" is ignored since running stats have ' +\n 'been provided.')\n\n momentum = self._momentum\n\n if stats_given or self._track_running_stats:\n return F.batch_norm(inputs, running_mean, running_var,\n weight=weight, bias=bias,\n training=self.training, momentum=momentum)\n\n if self._learnable_stats:\n raise NotImplementedError()\n\n if self._frozen_stats:\n return F.batch_norm(inputs, running_mean, running_var,\n weight=weight, bias=bias, training=False)\n\n # TODO implement scale and shift here. Note, that `running_mean` and\n # `running_var` are always 0 and 1, resp. Therefore, the call to\n # `F.batch_norm` is a waste of computation.\n # ret = inputs\n # if weight is not None:\n # # Multiply `ret` with `weight` such that dimensions are\n # # respected.\n # pass\n # if bias is not None:\n # # Add `bias` to modified `ret` such that dimensions are\n # # respected.\n # pass\n # return ret\n\n else:\n assert (not self._track_running_stats)\n\n # Always compute statistics based on current batch.\n return F.batch_norm(inputs, None, None, weight=weight, bias=bias,\n training=True, momentum=momentum)\n\n def checkpoint_stats(self, device=None):\n \"\"\"Buffers for a new set of running stats will be registered.\n\n Calling this function will also increment the attribute\n :attr:`num_stats`.\n\n Args:\n device (optional): If not provided, the newly created statistics\n will either be moved to the device of the most recent statistics\n or to CPU if no prior statistics exist.\n \"\"\"\n assert (self._track_running_stats or \\\n self._frozen_stats and self._num_stats == 0)\n\n if device is None:\n if self.num_stats > 0:\n mname_old, _ = self._stats_names(self._num_stats - 1)\n device = getattr(self, mname_old).device\n\n if self._learnable_stats:\n raise NotImplementedError()\n\n mname, vname = self._stats_names(self._num_stats)\n self._num_stats += 1\n\n self.register_buffer(mname, torch.zeros(self._num_features,\n device=device))\n self.register_buffer(vname, torch.ones(self._num_features,\n device=device))\n\n def get_stats(self, stats_id=None):\n \"\"\"Get a set of running statistics (means and variances).\n\n Args:\n stats_id (optional): ID of stats. 
If not provided, the most recent\n stats are returned.\n\n Returns:\n (tuple): Tuple containing:\n\n - **running_mean**\n - **running_var**\n \"\"\"\n if stats_id is None:\n stats_id = self.num_stats - 1\n assert (stats_id < self.num_stats)\n\n mname, vname = self._stats_names(stats_id)\n\n running_mean = getattr(self, mname)\n running_var = getattr(self, vname)\n\n return running_mean, running_var\n\n def _stats_names(self, stats_id):\n \"\"\"Get the buffer names for mean and variance statistics depending on\n the ``stats_id``, i.e., the ID of the stats checkpoint.\n\n Args:\n stats_id: ID of stats.\n\n Returns:\n (tuple): Tuple containing:\n\n - **mean_name**\n - **var_name**\n \"\"\"\n mean_name = 'mean_%d' % stats_id\n var_name = 'var_%d' % stats_id\n\n return mean_name, var_name\n\n\nif __name__ == '__main__':\n pass\n",
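The `forward` docstring above reduces every case in its two tables to a call to `torch.nn.functional.batch_norm`. Below is a minimal, self-contained PyTorch sketch (independent of this repository; variable names are illustrative) of the "given stats" row: externally supplied running statistics are the update target, while the current batch statistics drive the normalization when `training=True`.

```python
# Sketch of the "given stats" behaviour described in the forward() docstring:
# with training=True, F.batch_norm normalizes with the *batch* statistics and
# updates the externally supplied running buffers in place.
import torch
import torch.nn.functional as F

num_features = 5
x = torch.randn(8, num_features)

running_mean = torch.zeros(num_features)  # externally maintained stats
running_var = torch.ones(num_features)

y = F.batch_norm(x, running_mean, running_var, weight=None, bias=None,
                 training=True, momentum=0.1)

print(running_mean)   # no longer all zeros -> running stats were updated
print(y.mean(dim=0))  # ~0 per feature     -> batch stats were used

# Evaluation-mode counterpart ("given stats" row of the second table): the
# provided stats are used for normalization and left untouched.
y_eval = F.batch_norm(x, running_mean, running_var, training=False)
```

Checkpointing per-task statistics, as `checkpoint_stats`/`get_stats` above do, then amounts to keeping one such `(running_mean, running_var)` pair per `stats_id`.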
"#!/usr/bin/env python3\n# Copyright 2019 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @title :utils/init_utils.py\n# @author :ch\n# @contact :[email protected]\n# @created :12/20/2019\n# @version :1.0\n# @python_version :3.6.9\n\"\"\"\nHelper functions for weight initialization\n------------------------------------------\n\nThe module :mod:`utils.init_utils` contains helper functions that might be\nuseful for initialization of weights. The functions are somewhat complementary\nto what is already provided in the PyTorch module :mod:`torch.nn.init`.\n\"\"\"\nimport math\n\nimport numpy as np\nimport torch\n\n\ndef xavier_fan_in_(tensor):\n \"\"\"Initialize the given weight tensor with Xavier fan-in init.\n\n Unfortunately, :func:`torch.nn.init.xavier_uniform_` doesn't give\n us the choice to use fan-in init (always uses the harmonic mean).\n Therefore, we provide our own implementation.\n\n Args:\n tensor (torch.Tensor): Weight tensor that will be modified\n (initialized) in-place.\n \"\"\"\n fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(tensor)\n std = 1. / math.sqrt(fan_in)\n # Note, std(Unif(-a, a)) = a / sqrt(3)\n a = math.sqrt(3.0) * std\n\n torch.nn.init._no_grad_uniform_(tensor, -a, a)\n\n\ndef calc_fan_in_and_out(shapes):\n \"\"\"Calculate fan-in and fan-out.\n\n Note:\n This function expects the shapes of an at least 2D tensor.\n\n Args:\n shapes (list): List of integers.\n\n Returns:\n (tuple) Tuple containing:\n\n - **fan_in**\n - **fan_out**\n \"\"\"\n assert len(shapes) > 1\n\n fan_in = shapes[1]\n fan_out = shapes[0]\n\n if len(shapes) > 2:\n receptive_field_size = int(np.prod(shapes[2:]))\n else:\n receptive_field_size = 1\n\n fan_in *= receptive_field_size\n fan_out *= receptive_field_size\n\n return fan_in, fan_out\n\n\nif __name__ == '__main__':\n pass\n",
"#!/usr/bin/env python3\n# Copyright 2019 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n- **title** :mnets/mlp.py\n- **author** :ch\n- **contact** :[email protected]\n- **created** :10/21/2019\n- **version** :1.0\n- **python_version** :3.6.8\n\nImplementation of a fully-connected neural network.\n\nAn example usage is as a main model, that doesn't include any trainable weights.\nInstead, weights are received as additional inputs. For instance, using an\nauxilliary network, a so called hypernetwork, see\n\n Ha et al., \"HyperNetworks\", arXiv, 2016,\n https://arxiv.org/abs/1609.09106\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mnets.mnet_interface import MainNetInterface\nfrom utils.batchnorm_layer import BatchNormLayer\nfrom utils.context_mod_layer import ContextModLayer\nfrom utils.torch_utils import init_params\n\n\nclass MLP(nn.Module, MainNetInterface):\n \"\"\"Implementation of a Multi-Layer Perceptron (MLP).\n\n This is a simple fully-connected network, that receives input vector\n :math:`\\mathbf{x}` and outputs a vector :math:`\\mathbf{y}` of real values.\n\n The output mapping does not include a non-linearity by default, as we wanna\n map to the whole real line (but see argument ``out_fn``).\n\n Args:\n n_in (int): Number of inputs.\n n_out (int): Number of outputs.\n hidden_layers (list): A list of integers, each number denoting the size\n of a hidden layer.\n activation_fn: The nonlinearity used in hidden layers. If ``None``, no\n nonlinearity will be applied.\n use_bias (bool): Whether layers may have bias terms.\n no_weights (bool): If set to ``True``, no trainable parameters will be\n constructed, i.e., weights are assumed to be produced ad-hoc\n by a hypernetwork and passed to the :meth:`forward` method.\n init_weights (optional): This option is for convinience reasons.\n The option expects a list of parameter values that are used to\n initialize the network weights. As such, it provides a\n convinient way of initializing a network with a weight draw\n produced by the hypernetwork.\n\n Note, internal weights (see \n :attr:`mnets.mnet_interface.MainNetInterface.weights`) will be\n affected by this argument only.\n dropout_rate: If ``-1``, no dropout will be applied. Otherwise a number\n between 0 and 1 is expected, denoting the dropout rate of hidden\n layers.\n use_spectral_norm: Use spectral normalization for training.\n use_batch_norm (bool): Whether batch normalization should be used. Will\n be applied before the activation function in all hidden layers.\n bn_track_stats (bool): If batch normalization is used, then this option\n determines whether running statistics are tracked in these\n layers or not (see argument ``track_running_stats`` of class\n :class:`utils.batchnorm_layer.BatchNormLayer`).\n\n If ``False``, then batch statistics are utilized even during\n evaluation. If ``True``, then running stats are tracked. 
When\n using this network in a continual learning scenario with\n different tasks then the running statistics are expected to be\n maintained externally. The argument ``stats_id`` of the method\n :meth:`utils.batchnorm_layer.BatchNormLayer.forward` can be\n provided using the argument ``condition`` of method :meth:`forward`.\n\n Example:\n To maintain the running stats, one can simply iterate over\n all batch norm layers and checkpoint the current running\n stats (e.g., after learning a task when applying a Continual\n learning scenario).\n\n .. code:: python\n\n for bn_layer in net.batchnorm_layers:\n bn_layer.checkpoint_stats()\n distill_bn_stats (bool): If ``True``, then the shapes of the batchnorm\n statistics will be added to the attribute\n :attr:`mnets.mnet_interface.MainNetInterface.\\\nhyper_shapes_distilled` and the current statistics will be returned by the\n method :meth:`distillation_targets`.\n\n Note, this attribute may only be ``True`` if ``bn_track_stats``\n is ``True``.\n use_context_mod (bool): Add context-dependent modulation layers\n :class:`utils.context_mod_layer.ContextModLayer` after the linear\n computation of each layer.\n context_mod_inputs (bool): Whether context-dependent modulation should\n also be applied to network intpus directly. I.e., assume\n :math:`\\mathbf{x}` is the input to the network. Then the first\n network operation would be to modify the input via\n :math:`\\mathbf{x} \\cdot \\mathbf{g} + \\mathbf{s}` using context-\n dependent gain and shift parameters.\n\n Note:\n Argument applies only if ``use_context_mod`` is ``True``.\n no_last_layer_context_mod (bool): If ``True``, context-dependent\n modulation will not be applied to the output layer.\n\n Note:\n Argument applies only if ``use_context_mod`` is ``True``.\n context_mod_no_weights (bool): The weights of the context-mod layers\n (:class:`utils.context_mod_layer.ContextModLayer`) are treated\n independently of the option ``no_weights``.\n This argument can be used to decide whether the context-mod\n parameters (gains and shifts) are maintained internally or\n externally.\n\n Note:\n Check out argument ``weights`` of the :meth:`forward` method\n on how to correctly pass weights to the network that are\n externally maintained.\n context_mod_post_activation (bool): Apply context-mod layers after the\n activation function (``activation_fn``) in hidden layer rather than\n before, which is the default behavior.\n\n Note:\n This option only applies if ``use_context_mod`` is ``True``.\n\n Note:\n This option does not affect argument ``context_mod_inputs``.\n\n Note:\n This option does not affect argument\n ``no_last_layer_context_mod``. 
Hence, if a output-nonlinearity\n is applied through argument ``out_fn``, then context-modulation\n would be applied before this non-linearity.\n context_mod_gain_offset (bool): Activates option ``apply_gain_offset``\n of class :class:`utils.context_mod_layer.ContextModLayer` for all\n context-mod layers that will be instantiated.\n out_fn (optional): If provided, this function will be applied to the\n output neurons of the network.\n\n Warning:\n This changes the interpretation of the output of the\n :meth:`forward` method.\n verbose (bool): Whether to print information (e.g., the number of\n weights) during the construction of the network.\n \"\"\"\n\n def __init__(self, n_in=1, n_out=1, hidden_layers=[10, 10],\n activation_fn=torch.nn.ReLU(), use_bias=True, no_weights=False,\n init_weights=None, dropout_rate=-1, use_spectral_norm=False,\n use_batch_norm=False, bn_track_stats=True,\n distill_bn_stats=False, use_context_mod=False,\n context_mod_inputs=False, no_last_layer_context_mod=False,\n context_mod_no_weights=False,\n context_mod_post_activation=False,\n context_mod_gain_offset=False, out_fn=None, verbose=True):\n # FIXME find a way using super to handle multiple inheritence.\n # super(MainNetwork, self).__init__()\n nn.Module.__init__(self)\n MainNetInterface.__init__(self)\n\n if use_spectral_norm:\n raise NotImplementedError('Spectral normalization not yet ' +\n 'implemented for this network.')\n\n if use_batch_norm and use_context_mod:\n # FIXME Does it make sense to have both enabled?\n # I.e., should we produce a warning or error?\n pass\n\n self._a_fun = activation_fn\n assert (init_weights is None or \\\n (not no_weights or not context_mod_no_weights))\n self._no_weights = no_weights\n self._dropout_rate = dropout_rate\n # self._use_spectral_norm = use_spectral_norm\n self._use_batch_norm = use_batch_norm\n self._bn_track_stats = bn_track_stats\n self._distill_bn_stats = distill_bn_stats and use_batch_norm\n self._use_context_mod = use_context_mod\n self._context_mod_inputs = context_mod_inputs\n self._no_last_layer_context_mod = no_last_layer_context_mod\n self._context_mod_no_weights = context_mod_no_weights\n self._context_mod_post_activation = context_mod_post_activation\n self._context_mod_gain_offset = context_mod_gain_offset\n self._out_fn = out_fn\n\n self._has_bias = use_bias\n self._has_fc_out = True\n # We need to make sure that the last 2 entries of `weights` correspond\n # to the weight matrix and bias vector of the last layer.\n self._mask_fc_out = True\n self._has_linear_out = True if out_fn is None else False\n\n if use_spectral_norm and no_weights:\n raise ValueError('Cannot use spectral norm in a network without ' +\n 'parameters.')\n\n # FIXME make sure that this implementation is correct in all situations\n # (e.g., what to do if weights are passed to the forward method?).\n if use_spectral_norm:\n self._spec_norm = nn.utils.spectral_norm\n else:\n self._spec_norm = lambda x: x # identity\n\n self._param_shapes = []\n self._weights = None if no_weights and context_mod_no_weights \\\n else nn.ParameterList()\n self._hyper_shapes_learned = None \\\n if not no_weights and not context_mod_no_weights else []\n\n if dropout_rate != -1:\n assert (dropout_rate >= 0. 
and dropout_rate <= 1.)\n self._dropout = nn.Dropout(p=dropout_rate)\n\n ### Define and initialize context mod weights.\n self._context_mod_layers = nn.ModuleList() if use_context_mod else None\n self._context_mod_shapes = [] if use_context_mod else None\n\n if use_context_mod:\n cm_ind = 0\n cm_sizes = []\n if context_mod_inputs:\n cm_sizes.append(n_in)\n cm_sizes.extend(hidden_layers)\n if not no_last_layer_context_mod:\n cm_sizes.append(n_out)\n\n for i, n in enumerate(cm_sizes):\n cmod_layer = ContextModLayer(n,\n no_weights=context_mod_no_weights,\n apply_gain_offset=context_mod_gain_offset)\n self._context_mod_layers.append(cmod_layer)\n\n self.param_shapes.extend(cmod_layer.param_shapes)\n self._context_mod_shapes.extend(cmod_layer.param_shapes)\n if context_mod_no_weights:\n self._hyper_shapes_learned.extend(cmod_layer.param_shapes)\n else:\n self._weights.extend(cmod_layer.weights)\n\n # FIXME ugly code. Move initialization somewhere else.\n if not context_mod_no_weights and init_weights is not None:\n assert (len(cmod_layer.weights) == 2)\n for ii in range(2):\n assert (np.all(np.equal( \\\n list(init_weights[cm_ind].shape),\n list(cm_ind.weights[ii].shape))))\n cmod_layer.weights[ii].data = init_weights[cm_ind]\n cm_ind += 1\n\n if init_weights is not None:\n init_weights = init_weights[cm_ind:]\n\n ### Define and initialize batch norm weights.\n self._batchnorm_layers = nn.ModuleList() if use_batch_norm else None\n\n if use_batch_norm:\n if distill_bn_stats:\n self._hyper_shapes_distilled = []\n\n bn_ind = 0\n for i, n in enumerate(hidden_layers):\n bn_layer = BatchNormLayer(n, affine=not no_weights,\n track_running_stats=bn_track_stats)\n self._batchnorm_layers.append(bn_layer)\n self._param_shapes.extend(bn_layer.param_shapes)\n\n if no_weights:\n self._hyper_shapes_learned.extend(bn_layer.param_shapes)\n else:\n self._weights.extend(bn_layer.weights)\n\n if distill_bn_stats:\n self._hyper_shapes_distilled.extend( \\\n [list(p.shape) for p in bn_layer.get_stats(0)])\n\n # FIXME ugly code. Move initialization somewhere else.\n if not no_weights and init_weights is not None:\n assert (len(bn_layer.weights) == 2)\n for ii in range(2):\n assert (np.all(np.equal( \\\n list(init_weights[bn_ind].shape),\n list(bn_layer.weights[ii].shape))))\n bn_layer.weights[ii].data = init_weights[bn_ind]\n bn_ind += 1\n\n if init_weights is not None:\n init_weights = init_weights[bn_ind:]\n\n # Compute shapes of linear layers.\n linear_shapes = MLP.weight_shapes(n_in=n_in, n_out=n_out,\n hidden_layers=hidden_layers, use_bias=use_bias)\n self._param_shapes.extend(linear_shapes)\n\n num_weights = MainNetInterface.shapes_to_num_weights(self._param_shapes)\n\n if verbose:\n if use_context_mod:\n cm_num_weights = 0\n for cm_layer in self._context_mod_layers:\n cm_num_weights += MainNetInterface.shapes_to_num_weights( \\\n cm_layer.param_shapes)\n\n print('Creating an MLP with %d weights' % num_weights\n + (' (including %d weights associated with-' % cm_num_weights\n + 'context modulation)' if use_context_mod else '')\n + '.'\n + (' The network uses dropout.' if dropout_rate != -1 else '')\n + (' The network uses batchnorm.' 
if use_batch_norm else ''))\n\n self._layer_weight_tensors = nn.ParameterList()\n self._layer_bias_vectors = nn.ParameterList()\n\n if no_weights:\n self._hyper_shapes_learned.extend(linear_shapes)\n self._is_properly_setup()\n return\n\n ### Define and initialize linear weights.\n for i, dims in enumerate(linear_shapes):\n self._weights.append(nn.Parameter(torch.Tensor(*dims),\n requires_grad=True))\n if len(dims) == 1:\n self._layer_bias_vectors.append(self._weights[-1])\n else:\n self._layer_weight_tensors.append(self._weights[-1])\n\n if init_weights is not None:\n assert (len(init_weights) == len(linear_shapes))\n for i in range(len(init_weights)):\n assert (np.all(np.equal(list(init_weights[i].shape),\n linear_shapes[i])))\n if use_bias:\n if i % 2 == 0:\n self._layer_weight_tensors[i // 2].data = init_weights[i]\n else:\n self._layer_bias_vectors[i // 2].data = init_weights[i]\n else:\n self._layer_weight_tensors[i].data = init_weights[i]\n else:\n for i in range(len(self._layer_weight_tensors)):\n if use_bias:\n init_params(self._layer_weight_tensors[i],\n self._layer_bias_vectors[i])\n else:\n init_params(self._layer_weight_tensors[i])\n\n self._is_properly_setup()\n\n def forward(self, x, weights=None, distilled_params=None, condition=None):\n \"\"\"Compute the output :math:`y` of this network given the input\n :math:`x`.\n\n Args:\n (....): See docstring of method\n :meth:`mnets.mnet_interface.MainNetInterface.forward`. We\n provide some more specific information below.\n weights (list or dict): If a list of parameter tensors is given and\n context modulation is used (see argument ``use_context_mod`` in\n constructor), then these parameters are interpreted as context-\n modulation parameters if the length of ``weights`` equals\n :code:`2*len(net.context_mod_layers)`. Otherwise, the length is\n expected to be equal to the length of the attribute\n :attr:`mnets.mnet_interface.MainNetInterface.param_shapes`.\n\n Alternatively, a dictionary can be passed with the possible\n keywords ``internal_weights`` and ``mod_weights``. Each keyword\n is expected to map onto a list of tensors.\n The keyword ``internal_weights`` refers to all weights of this\n network except for the weights of the context-modulation layers.\n The keyword ``mod_weights``, on the other hand, refers\n specifically to the weights of the context-modulation layers.\n It is not necessary to specify both keywords.\n distilled_params: Will be passed as ``running_mean`` and\n ``running_var`` arguments of method\n :meth:`utils.batchnorm_layer.BatchNormLayer.forward` if\n batch normalization is used.\n condition (optional, int or dict): If ``int`` is provided, then this\n argument will be passed as argument ``stats_id`` to the method\n :meth:`utils.batchnorm_layer.BatchNormLayer.forward` if\n batch normalization is used.\n\n If a ``dict`` is provided instead, the following keywords are\n allowed:\n\n - ``bn_stats_id``: Will be handled as ``stats_id`` of the\n batchnorm layers as described above.\n - ``cmod_ckpt_id``: Will be passed as argument ``ckpt_id``\n to the method\n :meth:`utils.context_mod_layer.ContextModLayer.forward`.\n\n Returns:\n (tuple): Tuple containing:\n\n - **y**: The output of the network.\n - **h_y** (optional): If ``out_fn`` was specified in the\n constructor, then this value will be returned. 
It is the last\n hidden activation (before the ``out_fn`` has been applied).\n \"\"\"\n if ((not self._use_context_mod and self._no_weights) or \\\n (self._no_weights or self._context_mod_no_weights)) and \\\n weights is None:\n raise Exception('Network was generated without weights. ' +\n 'Hence, \"weights\" option may not be None.')\n\n ############################################\n ### Extract which weights should be used ###\n ############################################\n # I.e., are we using internally maintained weights or externally given\n # ones or are we even mixing between these groups.\n n_cm = 0 if self.context_mod_layers is None else \\\n 2 * len(self.context_mod_layers)\n\n if weights is None:\n weights = self.weights\n\n if self._use_context_mod:\n cm_weights = weights[:n_cm]\n int_weights = weights[n_cm:]\n else:\n int_weights = weights\n else:\n int_weights = None\n cm_weights = None\n\n if isinstance(weights, dict):\n assert ('internal_weights' in weights.keys() or \\\n 'mod_weights' in weights.keys())\n if 'internal_weights' in weights.keys():\n int_weights = weights['internal_weights']\n if 'mod_weights' in weights.keys():\n cm_weights = weights['mod_weights']\n else:\n if self._use_context_mod and \\\n len(weights) == n_cm:\n cm_weights = weights\n else:\n assert (len(weights) == len(self.param_shapes))\n if self._use_context_mod:\n cm_weights = weights[:n_cm]\n int_weights = weights[n_cm:]\n else:\n int_weights = weights\n\n if self._use_context_mod and cm_weights is None:\n if self._context_mod_no_weights:\n raise Exception('Network was generated without weights ' +\n 'for context-mod layers. Hence, they must be passed ' +\n 'via the \"weights\" option.')\n cm_weights = self.weights[:n_cm]\n if int_weights is None:\n if self._no_weights:\n raise Exception('Network was generated without internal ' +\n 'weights. 
Hence, they must be passed via the ' +\n '\"weights\" option.')\n if self._context_mod_no_weights:\n int_weights = self.weights\n else:\n int_weights = self.weights[n_cm:]\n\n # Note, context-mod weights might have different shapes, as they\n # may be parametrized on a per-sample basis.\n if self._use_context_mod:\n assert (len(cm_weights) == len(self._context_mod_shapes))\n int_shapes = self.param_shapes[n_cm:]\n assert (len(int_weights) == len(int_shapes))\n for i, s in enumerate(int_shapes):\n assert (np.all(np.equal(s, list(int_weights[i].shape))))\n\n cm_ind = 0\n bn_ind = 0\n\n if self._use_batch_norm:\n n_bn = 2 * len(self.batchnorm_layers)\n bn_weights = int_weights[:n_bn]\n layer_weights = int_weights[n_bn:]\n else:\n layer_weights = int_weights\n\n w_weights = []\n b_weights = []\n for i, p in enumerate(layer_weights):\n if self.has_bias and i % 2 == 1:\n b_weights.append(p)\n else:\n w_weights.append(p)\n\n ########################\n ### Parse condition ###\n #######################\n\n bn_cond = None\n cmod_cond = None\n\n if condition is not None:\n if isinstance(condition, dict):\n assert ('bn_stats_id' in condition.keys() or \\\n 'cmod_ckpt_id' in condition.keys())\n if 'bn_stats_id' in condition.keys():\n bn_cond = condition['bn_stats_id']\n if 'cmod_ckpt_id' in condition.keys():\n cmod_cond = condition['cmod_ckpt_id']\n else:\n bn_cond = condition\n\n ######################################\n ### Select batchnorm running stats ###\n ######################################\n if self._use_batch_norm:\n nn = len(self._batchnorm_layers)\n running_means = [None] * nn\n running_vars = [None] * nn\n\n if distilled_params is not None:\n if not self._distill_bn_stats:\n raise ValueError('Argument \"distilled_params\" can only be ' +\n 'provided if the return value of ' +\n 'method \"distillation_targets()\" is not None.')\n shapes = self.hyper_shapes_distilled\n assert (len(distilled_params) == len(shapes))\n for i, s in enumerate(shapes):\n assert (np.all(np.equal(s, list(distilled_params[i].shape))))\n\n # Extract batchnorm stats from distilled_params\n for i in range(0, len(distilled_params), 2):\n running_means[i // 2] = distilled_params[i]\n running_vars[i // 2] = distilled_params[i + 1]\n\n elif self._use_batch_norm and self._bn_track_stats and \\\n bn_cond is None:\n for i, bn_layer in enumerate(self._batchnorm_layers):\n running_means[i], running_vars[i] = bn_layer.get_stats()\n\n ###########################\n ### Forward Computation ###\n ###########################\n hidden = x\n\n # Context-dependent modulation of inputs directly.\n if self._use_context_mod and self._context_mod_inputs:\n hidden = self._context_mod_layers[cm_ind].forward(hidden,\n weights=cm_weights[2 * cm_ind:2 * cm_ind + 2],\n ckpt_id=cmod_cond)\n cm_ind += 1\n\n for l in range(len(w_weights)):\n W = w_weights[l]\n if self.has_bias:\n b = b_weights[l]\n else:\n b = None\n\n # Linear layer.\n hidden = self._spec_norm(F.linear(hidden, W, bias=b))\n\n # Only for hidden layers.\n if l < len(w_weights) - 1:\n # Context-dependent modulation (pre-activation).\n if self._use_context_mod and \\\n not self._context_mod_post_activation:\n hidden = self._context_mod_layers[cm_ind].forward(hidden,\n weights=cm_weights[2 * cm_ind:2 * cm_ind + 2],\n ckpt_id=cmod_cond)\n cm_ind += 1\n\n # Batch norm\n if self._use_batch_norm:\n hidden = self._batchnorm_layers[bn_ind].forward(hidden,\n running_mean=running_means[bn_ind],\n running_var=running_vars[bn_ind],\n weight=bn_weights[2 * bn_ind],\n bias=bn_weights[2 * bn_ind + 
1], stats_id=bn_cond)\n bn_ind += 1\n\n # Dropout\n if self._dropout_rate != -1:\n hidden = self._dropout(hidden)\n\n # Non-linearity\n if self._a_fun is not None:\n hidden = self._a_fun(hidden)\n\n # Context-dependent modulation (post-activation).\n if self._use_context_mod and self._context_mod_post_activation:\n hidden = self._context_mod_layers[cm_ind].forward(hidden,\n weights=cm_weights[2 * cm_ind:2 * cm_ind + 2],\n ckpt_id=cmod_cond)\n cm_ind += 1\n\n # Context-dependent modulation in output layer.\n if self._use_context_mod and not self._no_last_layer_context_mod:\n hidden = self._context_mod_layers[cm_ind].forward(hidden,\n weights=cm_weights[2 * cm_ind:2 * cm_ind + 2],\n ckpt_id=cmod_cond)\n\n if self._out_fn is not None:\n return self._out_fn(hidden), hidden\n\n return hidden\n\n def distillation_targets(self):\n \"\"\"Targets to be distilled after training.\n\n See docstring of abstract super method\n :meth:`mnets.mnet_interface.MainNetInterface.distillation_targets`.\n\n This method will return the current batch statistics of all batch\n normalization layers if ``distill_bn_stats`` and ``use_batch_norm``\n was set to ``True`` in the constructor.\n\n Returns:\n The target tensors corresponding to the shapes specified in\n attribute :attr:`hyper_shapes_distilled`.\n \"\"\"\n if self.hyper_shapes_distilled is None:\n return None\n\n ret = []\n for bn_layer in self._batchnorm_layers:\n ret.extend(bn_layer.get_stats())\n\n return ret\n\n @staticmethod\n def weight_shapes(n_in=1, n_out=1, hidden_layers=[10, 10], use_bias=True):\n \"\"\"Compute the tensor shapes of all parameters in a fully-connected\n network.\n\n Args:\n n_in: Number of inputs.\n n_out: Number of output units.\n hidden_layers: A list of ints, each number denoting the size of a\n hidden layer.\n use_bias: Whether the FC layers should have biases.\n\n Returns:\n A list of list of integers, denoting the shapes of the individual\n parameter tensors.\n \"\"\"\n shapes = []\n\n prev_dim = n_in\n layer_out_sizes = hidden_layers + [n_out]\n for i, size in enumerate(layer_out_sizes):\n shapes.append([size, prev_dim])\n if use_bias:\n shapes.append([size])\n prev_dim = size\n\n return shapes\n\n\nif __name__ == '__main__':\n pass\n"
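With `no_weights=True`, the MLP above owns no parameters; a weight list matching `param_shapes` (e.g., produced by a hypernetwork in the sense of Ha et al.) must be passed to `forward`. Running the class itself requires this repository's `mnets`/`utils` packages, so here is a self-contained plain-PyTorch sketch of the same pattern (helper names are hypothetical): shapes built the way `MLP.weight_shapes` builds them, and a forward pass that consumes the external tensors via `F.linear`, as `MLP.forward` does.

```python
# Self-contained sketch of a "main network without own weights":
# shape list as in MLP.weight_shapes, forward pass via F.linear.
import torch
import torch.nn.functional as F

def weight_shapes(n_in, n_out, hidden_layers=(10, 10), use_bias=True):
    shapes, prev = [], n_in
    for size in list(hidden_layers) + [n_out]:
        shapes.append([size, prev])
        if use_bias:
            shapes.append([size])
        prev = size
    return shapes

def mlp_forward(x, weights, use_bias=True, act=torch.relu):
    # `weights` is the flat list [W_0, b_0, W_1, b_1, ..., W_out, b_out].
    step = 2 if use_bias else 1
    layers = [weights[i:i + step] for i in range(0, len(weights), step)]
    for i, layer in enumerate(layers):
        W = layer[0]
        b = layer[1] if use_bias else None
        x = F.linear(x, W, bias=b)
        if i < len(layers) - 1:  # hidden layers only, as in MLP.forward
            x = act(x)
    return x

shapes = weight_shapes(n_in=4, n_out=2, hidden_layers=(16, 16))
# Stand-in for a hypernetwork: random tensors with the required shapes.
external_weights = [torch.randn(*s) for s in shapes]
y = mlp_forward(torch.randn(8, 4), external_weights)
print(y.shape)  # torch.Size([8, 2])
```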
] | [
[
"torch.nn.functional.batch_norm",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.tensor",
"torch.nn.init.ones_",
"torch.nn.ParameterList",
"torch.nn.init.zeros_"
],
[
"torch.nn.init._no_grad_uniform_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"numpy.prod"
],
[
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
calebchoo/modulabs | [
"10fbaf0581700641fc9b38b1bd722044bfb7c638",
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"e8aa19bd4fbcbbaeacb1f4f753e6c4f15dee1d9c"
] | [
"tensorflow/contrib/learn/python/learn/estimators/linear.py",
"tensorflow/python/ops/nn.py",
"tensorflow/contrib/learn/python/learn/datasets/mnist.py",
"tensorflow/contrib/learn/python/learn/tests/dataframe/transform_test.py",
"tensorflow/contrib/learn/python/learn/graph_actions.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Linear Estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined\nfrom tensorflow.contrib.learn.python.learn.estimators import sdca_optimizer\nfrom tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# TODO(b/29580537): Replace with @changing decorator.\ndef _changing(feature_columns):\n if feature_columns is not None:\n return\n logging.warn(\n \"Change warning: `feature_columns` will be required after 2016-08-01.\\n\"\n \"Instructions for updating:\\n\"\n \"Pass `tf.contrib.learn.infer_real_valued_columns_from_input(x)` or\"\n \" `tf.contrib.learn.infer_real_valued_columns_from_input_fn(input_fn)`\"\n \" as `feature_columns`, where `x` or `input_fn` is your argument to\"\n \" `fit`, `evaluate`, or `predict`.\")\n\n\nclass LinearClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):\n \"\"\"Linear classifier model.\n\n Train a linear model to classify instances into one of multiple possible\n classes. 
When number of possible classes is 2, this is binary classification.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n\n # Estimator using the default optimizer.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation])\n\n # Or estimator using the FTRL optimizer with regularization.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation],\n optimizer=tf.train.FtrlOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator using the SDCAOptimizer.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation],\n optimizer=tf.contrib.learn.SDCAOptimizer(\n example_id_column='example_id',\n symmetric_l2_regularization=2.0\n ))\n\n # Input builders\n def input_fn_train: # returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n - if `feature_columns` is `None`, then `input` must contains only real\n valued `Tensor`.\n \"\"\"\n\n def __init__(self,\n feature_columns=None,\n model_dir=None,\n n_classes=2,\n weight_column_name=None,\n optimizer=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n config=None):\n \"\"\"Construct a `LinearClassifier` estimator object.\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc.\n n_classes: number of target classes. Default is binary classification.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: The optimizer used to train the model. If specified, it should\n be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,\n the Ftrl optimizer will be used.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. 
Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n A `LinearClassifier` estimator.\n \"\"\"\n _changing(feature_columns)\n super(LinearClassifier, self).__init__(\n model_dir=model_dir,\n n_classes=n_classes,\n weight_column_name=weight_column_name,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n config=config)\n self._feature_columns_inferred = False\n\n # TODO(b/29580537): Remove feature_columns inference.\n def _validate_linear_feature_columns(self, features):\n if self._linear_feature_columns is None:\n self._linear_feature_columns = layers.infer_real_valued_columns(features)\n self._feature_columns_inferred = True\n elif self._feature_columns_inferred:\n this_dict = {c.name: c for c in self._linear_feature_columns}\n that_dict = {\n c.name: c for c in layers.infer_real_valued_columns(features)\n }\n if this_dict != that_dict:\n raise ValueError(\n \"Feature columns, expected %s, got %s.\", (this_dict, that_dict))\n\n def _get_train_ops(self, features, targets):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):\n return super(LinearClassifier, self)._get_train_ops(features, targets)\n\n # SDCA currently supports binary classification only.\n if self._target_column.num_label_columns > 2:\n raise ValueError(\n \"SDCA does not currently support multi-class classification.\")\n global_step = contrib_variables.get_global_step()\n assert global_step\n\n logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=self._linear_feature_columns,\n num_outputs=self._target_column.num_label_columns,\n weight_collections=[self._linear_weight_collection],\n scope=\"linear\")\n with ops.control_dependencies([self._centered_bias()]):\n loss = self._target_column.loss(logits, targets, features)\n logging_ops.scalar_summary(\"loss\", loss)\n\n train_ops = self._linear_optimizer.get_train_step(\n self._linear_feature_columns, self._target_column.weight_column_name,\n \"logistic_loss\", features, targets, columns_to_variables, global_step)\n\n return train_ops, loss\n\n def _get_eval_ops(self, features, targets, metrics=None):\n self._validate_linear_feature_columns(features)\n return super(LinearClassifier, self)._get_eval_ops(\n features, targets, metrics)\n\n def _get_predict_ops(self, features):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n return super(LinearClassifier, self)._get_predict_ops(features)\n\n @property\n def weights_(self):\n return self.linear_weights_\n\n @property\n def bias_(self):\n return self.linear_bias_\n\n\nclass LinearRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):\n \"\"\"Linear regressor model.\n\n Train a linear regression model to predict target variable value given\n observation of feature values.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n\n estimator = LinearRegressor(\n feature_columns=[occupation, education_x_occupation])\n\n # Input builders\n def input_fn_train: # 
returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a KeyError:\n\n * if `weight_column_name` is not `None`:\n key=weight_column_name, value=a `Tensor`\n * for column in `feature_columns`:\n - if isinstance(column, `SparseColumn`):\n key=column.name, value=a `SparseTensor`\n - if isinstance(column, `RealValuedColumn`):\n key=column.name, value=a `Tensor`\n - if `feature_columns` is `None`:\n input must contains only real valued `Tensor`.\n \"\"\"\n\n def __init__(self,\n feature_columns=None,\n model_dir=None,\n weight_column_name=None,\n optimizer=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n target_dimension=1,\n config=None):\n \"\"\"Construct a `LinearRegressor` estimator object.\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph, etc.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Ftrl optimizer.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n target_dimension: dimension of the target for multilabels.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n A `LinearRegressor` estimator.\n \"\"\"\n _changing(feature_columns)\n super(LinearRegressor, self).__init__(\n model_dir=model_dir,\n weight_column_name=weight_column_name,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n target_dimension=target_dimension,\n config=config)\n self._feature_columns_inferred = False\n\n # TODO(b/29580537): Remove feature_columns inference.\n def _validate_linear_feature_columns(self, features):\n if self._linear_feature_columns is None:\n self._linear_feature_columns = layers.infer_real_valued_columns(features)\n self._feature_columns_inferred = True\n elif self._feature_columns_inferred:\n this_dict = {c.name: c for c in self._linear_feature_columns}\n that_dict = {\n c.name: c for c in layers.infer_real_valued_columns(features)\n }\n if this_dict != that_dict:\n raise ValueError(\n \"Feature columns, expected %s, got %s.\", (this_dict, that_dict))\n\n def _get_train_ops(self, features, targets):\n \"\"\"See base class.\"\"\"\n if isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):\n raise ValueError(\"SDCAOptimizer does not currently support regression.\")\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_train_ops(features, targets)\n\n def _get_eval_ops(self, features, targets, metrics=None):\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_eval_ops(\n features, targets, metrics)\n\n def _get_predict_ops(self, 
features):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_predict_ops(features)\n\n @property\n def weights_(self):\n return self.linear_weights_\n\n @property\n def bias_(self):\n return self.linear_bias_\n\n\n# TensorFlowLinearRegressor and TensorFlowLinearClassifier are deprecated.\nclass TensorFlowLinearRegressor(DeprecatedMixin, LinearRegressor,\n _sklearn.RegressorMixin):\n pass\n\n\nclass TensorFlowLinearClassifier(DeprecatedMixin, LinearClassifier,\n _sklearn.ClassifierMixin):\n pass\n\n\nTensorFlowRegressor = TensorFlowLinearRegressor\nTensorFlowClassifier = TensorFlowLinearClassifier\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n# pylint: disable=unused-import,g-bad-import-order\n\"\"\"## Activation Functions\n\nThe activation ops provide different types of nonlinearities for use in neural\nnetworks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`,\n`softplus`, and `softsign`), continuous but not everywhere differentiable\nfunctions (`relu`, `relu6`, and `relu_x`), and random regularization\n(`dropout`).\n\nAll activation ops apply componentwise, and produce a tensor of the same\nshape as the input tensor.\n\n@@relu\n@@relu6\n@@elu\n@@softplus\n@@softsign\n@@dropout\n@@bias_add\n@@sigmoid\n@@tanh\n\n## Convolution\n\nThe convolution ops sweep a 2-D filter over a batch of images, applying the\nfilter to each window of each image of the appropriate size. The different\nops trade off between generic vs. specific filters:\n\n* `conv2d`: Arbitrary filters that can mix channels together.\n* `depthwise_conv2d`: Filters that operate on each channel independently.\n* `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter.\n\nNote that although these ops are called \"convolution\", they are strictly\nspeaking \"cross-correlation\" since the filter is combined with an input window\nwithout reversing the filter. For details, see [the properties of\ncross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties).\n\nThe filter is applied to image patches of the same size as the filter and\nstrided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies\nthe filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the\nfilter to every other image patch in each dimension, etc.\n\nIgnoring channels for the moment, and assume that the 4-D `input` has shape\n`[batch, in_height, in_width, ...]` and the 4-D `filter` has shape\n`[filter_height, filter_width, ...]`, then the spatial semantics of the\nconvolution ops are as follows: first, according to the padding scheme chosen\nas `'SAME'` or `'VALID'`, the output size and the padding pixels are computed.\nFor the `'SAME'` padding, the output height and width are computed as:\n\n out_height = ceil(float(in_height) / float(strides[1]))\n out_width = ceil(float(in_width) / float(strides[2]))\n\nand the padding on the top and left are computed as:\n\n pad_along_height = ((out_height - 1) * strides[1] +\n filter_height - in_height)\n pad_along_width = ((out_width - 1) * strides[2] +\n filter_width - in_width)\n pad_top = pad_along_height / 2\n pad_left = pad_along_width / 2\n\nNote that the division by 2 means that there might be cases when the padding on\nboth sides (top vs bottom, right vs left) are off by one. In this case, the\nbottom and right sides always get the one additional padded pixel. For example,\nwhen `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the\nbottom. 
Note that this is different from existing libraries such as cuDNN and\nCaffe, which explicitly specify the number of padded pixels and always pad the\nsame number of pixels on both sides.\n\nFor the `'VALID`' padding, the output height and width are computed as:\n\n out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))\n out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))\n\nand the padding values are always zero. The output is then computed as\n\n output[b, i, j, :] =\n sum_{di, dj} input[b, strides[1] * i + di - pad_top,\n strides[2] * j + dj - pad_left, ...] *\n filter[di, dj, ...]\n\nwhere any value outside the original input image region are considered zero (\ni.e. we pad zero values around the border of the image).\n\nSince `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these\nvectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new\nvectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]`\nis multiplied by a vector `filter[di, dj, k]`, and all the vectors are\nconcatenated.\n\n@@conv2d\n@@depthwise_conv2d\n@@separable_conv2d\n@@atrous_conv2d\n@@conv2d_transpose\n@@conv3d\n\n## Pooling\n\nThe pooling ops sweep a rectangular window over the input tensor, computing a\nreduction operation for each window (average, max, or max with argmax). Each\npooling op uses rectangular windows of size `ksize` separated by offset\n`strides`. For example, if `strides` is all ones every window is used, if\n`strides` is all twos every other window is used in each dimension, etc.\n\nIn detail, the output is\n\n output[i] = reduce(value[strides * i:strides * i + ksize])\n\nwhere the indices also take into consideration the padding values. Please refer\nto the `Convolution` section for details about the padding calculation.\n\n@@avg_pool\n@@max_pool\n@@max_pool_with_argmax\n@@avg_pool3d\n@@max_pool3d\n\n## Morphological filtering\n\nMorphological operators are non-linear filters used in image processing.\n\n[Greyscale morphological dilation]\n(https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart\nof standard sum-product convolution:\n\n output[b, y, x, c] =\n max_{dy, dx} input[b,\n strides[1] * y + rates[1] * dy,\n strides[2] * x + rates[2] * dx,\n c] +\n filter[dy, dx, c]\n\nThe `filter` is usually called structuring function. Max-pooling is a special\ncase of greyscale morphological dilation when the filter assumes all-zero\nvalues (a.k.a. flat structuring function).\n\n[Greyscale morphological erosion]\n(https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart\nof standard sum-product convolution:\n\n output[b, y, x, c] =\n min_{dy, dx} input[b,\n strides[1] * y - rates[1] * dy,\n strides[2] * x - rates[2] * dx,\n c] -\n filter[dy, dx, c]\n\nDilation and erosion are dual to each other. The dilation of the input signal\n`f` by the structuring signal `g` is equal to the negation of the erosion of\n`-f` by the reflected `g`, and vice versa.\n\nStriding and padding is carried out in exactly the same way as in standard\nconvolution. 
Please refer to the `Convolution` section for details.\n\n@@dilation2d\n@@erosion2d\n\n## Normalization\n\nNormalization is useful to prevent neurons from saturating when inputs may\nhave varying scale, and to aid generalization.\n\n@@l2_normalize\n@@local_response_normalization\n@@sufficient_statistics\n@@normalize_moments\n@@moments\n\n## Losses\n\nThe loss ops measure error between two tensors, or between a tensor and zero.\nThese can be used for measuring accuracy of a network in a regression task\nor for regularization purposes (weight decay).\n\n@@l2_loss\n\n## Classification\n\nTensorFlow provides several operations that help you perform classification.\n\n@@sigmoid_cross_entropy_with_logits\n@@softmax\n@@log_softmax\n@@softmax_cross_entropy_with_logits\n@@sparse_softmax_cross_entropy_with_logits\n@@weighted_cross_entropy_with_logits\n\n## Embeddings\n\nTensorFlow provides library support for looking up values in embedding\ntensors.\n\n@@embedding_lookup\n@@embedding_lookup_sparse\n\n## Recurrent Neural Networks\n\nTensorFlow provides a number of methods for constructing Recurrent\nNeural Networks. Most accept an `RNNCell`-subclassed object\n(see the documentation for `tf.nn.rnn_cell`).\n\n@@dynamic_rnn\n@@rnn\n@@state_saving_rnn\n@@bidirectional_rnn\n\n## Conectionist Temporal Classification (CTC)\n\n@@ctc_loss\n@@ctc_greedy_decoder\n@@ctc_beam_search_decoder\n\n## Evaluation\n\nThe evaluation ops are useful for measuring the performance of a network.\nSince they are nondifferentiable, they are typically used at evaluation time.\n\n@@top_k\n@@in_top_k\n\n## Candidate Sampling\n\nDo you want to train a multiclass or multilabel model with thousands\nor millions of output classes (for example, a language model with a\nlarge vocabulary)? Training with a full Softmax is slow in this case,\nsince all of the classes are evaluated for every training example.\nCandidate Sampling training algorithms can speed up your step times by\nonly considering a small randomly-chosen subset of contrastive classes\n(called candidates) for each batch of training examples.\n\nSee our [Candidate Sampling Algorithms Reference]\n(../../extras/candidate_sampling.pdf)\n\n### Sampled Loss Functions\n\nTensorFlow provides the following sampled loss functions for faster training.\n\n@@nce_loss\n@@sampled_softmax_loss\n\n### Candidate Samplers\n\nTensorFlow provides the following samplers for randomly sampling candidate\nclasses when using one of the sampled loss functions above.\n\n@@uniform_candidate_sampler\n@@log_uniform_candidate_sampler\n@@learned_unigram_candidate_sampler\n@@fixed_unigram_candidate_sampler\n\n### Miscellaneous candidate sampling utilities\n\n@@compute_accidental_hits\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import candidate_sampling_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import numerics\nfrom tensorflow.python.ops 
import random_ops\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import seq2seq\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops.math_ops import sigmoid\nfrom tensorflow.python.ops.math_ops import tanh\nfrom tensorflow.python.util.all_util import make_all\n\n# Bring more nn-associated functionality into this package.\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.ctc_ops import *\nfrom tensorflow.python.ops.nn_ops import *\nfrom tensorflow.python.ops.candidate_sampling_ops import *\nfrom tensorflow.python.ops.embedding_ops import *\nfrom tensorflow.python.ops.rnn import *\n# pylint: enable=wildcard-import\n\n\ndef sigmoid_cross_entropy_with_logits(logits, targets, name=None):\n \"\"\"Computes sigmoid cross entropy given `logits`.\n\n Measures the probability error in discrete classification tasks in which each\n class is independent and not mutually exclusive. For instance, one could\n perform multilabel classification where a picture can contain both an elephant\n and a dog at the same time.\n\n For brevity, let `x = logits`, `z = targets`. The logistic loss is\n\n z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))\n = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))\n = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))\n = (1 - z) * x + log(1 + exp(-x))\n = x - x * z + log(1 + exp(-x))\n\n For x < 0, to avoid overflow in exp(-x), we reformulate the above\n\n x - x * z + log(1 + exp(-x))\n = log(exp(x)) - x * z + log(1 + exp(-x))\n = - x * z + log(1 + exp(x))\n\n Hence, to ensure stability and avoid overflow, the implementation uses this\n equivalent formulation\n\n max(x, 0) - x * z + log(1 + exp(-abs(x)))\n\n `logits` and `targets` must have the same type and shape.\n\n Args:\n logits: A `Tensor` of type `float32` or `float64`.\n targets: A `Tensor` of the same type and shape as `logits`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of the same shape as `logits` with the componentwise\n logistic losses.\n\n Raises:\n ValueError: If `logits` and `targets` do not have the same shape.\n \"\"\"\n with ops.op_scope([logits, targets], name, \"logistic_loss\") as name:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n targets = ops.convert_to_tensor(targets, name=\"targets\")\n try:\n targets.get_shape().merge_with(logits.get_shape())\n except ValueError:\n raise ValueError(\n \"logits and targets must have the same shape (%s vs %s)\"\n % (logits.get_shape(), targets.get_shape()))\n\n # The logistic loss formula from above is\n # x - x * z + log(1 + exp(-x))\n # For x < 0, a more numerically stable formula is\n # -x * z + log(1 + exp(x))\n # Note that these two expressions can be combined into the following:\n # max(x, 0) - x * z + log(1 + exp(-abs(x)))\n # To allow computing gradients at zero, we define custom versions of max and\n # abs functions.\n zeros = array_ops.zeros_like(logits, dtype=logits.dtype)\n cond = (logits >= zeros)\n relu_logits = math_ops.select(cond, logits, zeros)\n neg_abs_logits = math_ops.select(cond, -logits, logits)\n return math_ops.add(relu_logits - logits * targets,\n math_ops.log(1 + math_ops.exp(neg_abs_logits)),\n name=name)\n\n\ndef weighted_cross_entropy_with_logits(logits, targets, pos_weight,\n name=None):\n \"\"\"Computes a weighted cross entropy.\n\n This is like 
`sigmoid_cross_entropy_with_logits()` except that `pos_weight`,\n allows one to trade off recall and precision by up- or down-weighting the\n cost of a positive error relative to a negative error.\n\n The usual cross-entropy cost is defined as:\n\n targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))\n\n The argument `pos_weight` is used as a multiplier for the positive targets:\n\n targets * -log(sigmoid(logits)) * pos_weight +\n (1 - targets) * -log(1 - sigmoid(logits))\n\n For brevity, let `x = logits`, `z = targets`, `q = pos_weight`.\n The loss is:\n\n qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))\n = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))\n = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))\n = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))\n = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))\n\n Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,\n the implementation uses\n\n (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))\n\n `logits` and `targets` must have the same type and shape.\n\n Args:\n logits: A `Tensor` of type `float32` or `float64`.\n targets: A `Tensor` of the same type and shape as `logits`.\n pos_weight: A coefficient to use on the positive examples.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of the same shape as `logits` with the componentwise\n weightedlogistic losses.\n\n Raises:\n ValueError: If `logits` and `targets` do not have the same shape.\n \"\"\"\n with ops.op_scope([logits, targets], name, \"logistic_loss\") as name:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n targets = ops.convert_to_tensor(targets, name=\"targets\")\n try:\n targets.get_shape().merge_with(logits.get_shape())\n except ValueError:\n raise ValueError(\n \"logits and targets must have the same shape (%s vs %s)\"\n % (logits.get_shape(), targets.get_shape()))\n\n # The logistic loss formula from above is\n # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))\n # For x < 0, a more numerically stable formula is\n # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x\n # To avoid branching, we use the combined version\n # (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))\n log_weight = 1 + (pos_weight - 1) * targets\n return math_ops.add(\n (1 - targets) * logits,\n log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))) +\n nn_ops.relu(-logits)),\n name=name)\n\n\ndef relu_layer(x, weights, biases, name=None):\n \"\"\"Computes Relu(x * weight + biases).\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). 
If not specified\n \"nn_relu_layer\" is used.\n\n Returns:\n A 2-D Tensor computing relu(matmul(x, weights) + biases).\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.op_scope([x, weights, biases], name, \"relu_layer\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)\n return nn_ops.relu(xw_plus_b, name=name)\n\n\ndef l2_normalize(x, dim, epsilon=1e-12, name=None):\n \"\"\"Normalizes along dimension `dim` using an L2 norm.\n\n For a 1-D tensor with `dim = 0`, computes\n\n output = x / sqrt(max(sum(x**2), epsilon))\n\n For `x` with more dimensions, independently normalizes each 1-D slice along\n dimension `dim`.\n\n Args:\n x: A `Tensor`.\n dim: Dimension along which to normalize.\n epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the\n divisor if `norm < sqrt(epsilon)`.\n name: A name for this operation (optional).\n\n Returns:\n A `Tensor` with the same shape as `x`.\n \"\"\"\n with ops.op_scope([x], name, \"l2_normalize\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True)\n x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))\n return math_ops.mul(x, x_inv_norm, name=name)\n\n\ndef zero_fraction(value, name=None):\n \"\"\"Returns the fraction of zeros in `value`.\n\n If `value` is empty, the result is `nan`.\n\n This is useful in summaries to measure and report sparsity. For example,\n\n z = tf.Relu(...)\n summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))\n\n Args:\n value: A tensor of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n The fraction of zeros in `value`, with type `float32`.\n \"\"\"\n with ops.op_scope([value], name, \"zero_fraction\"):\n value = ops.convert_to_tensor(value, name=\"value\")\n zero = constant_op.constant(0, dtype=value.dtype, name=\"zero\")\n return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero),\n dtypes.float32))\n\n\ndef depthwise_conv2d(input, filter, strides, padding, name=None):\n \"\"\"Depthwise 2-D convolution.\n\n Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\n and a filter tensor of shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`\n containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`\n applies a different filter to each input channel (expanding from 1 channel\n to `channel_multiplier` channels for each), then concatenates the results\n together. The output has `in_channels * channel_multiplier` channels.\n\n In detail,\n\n output[b, i, j, k * channel_multiplier + q] =\n sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n filter[di, dj, k, q]\n\n Must have `strides[0] = strides[3] = 1`. For the most common case of the\n same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.\n\n Args:\n input: 4-D with shape `[batch, in_height, in_width, in_channels]`.\n filter: 4-D with shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`.\n strides: 1-D of size 4. The stride of the sliding window for each\n dimension of `input`.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)\n name: A name for this operation (optional).\n\n Returns:\n A 4-D `Tensor` of shape\n `[batch, out_height, out_width, in_channels * channel_multiplier].`\n \"\"\"\n with ops.op_scope([input, filter], name, \"depthwise\") as name:\n input = ops.convert_to_tensor(input, name=\"tensor_in\")\n filter = ops.convert_to_tensor(filter, name=\"filter_in\")\n # A shape is required to statically compute the number of separable filters.\n if filter.get_shape().ndims is not None:\n assert len(filter.get_shape()) == 4\n in_channels = filter.get_shape()[2]\n # Sanity checks, if shape information is available for the inputs.\n if input.get_shape().ndims is not None:\n assert len(input.get_shape()) == 4\n assert input.get_shape()[3] == in_channels, (\n \"Mismatched input depth %d and number of depthwise filters %d.\" % (\n input.get_shape()[3].value, in_channels))\n else:\n assert input.get_shape().ndims is not None, (\n \"Either tensor must provide static shape information.\")\n assert input.get_shape().ndims == 4\n in_channels = input.get_shape()[3]\n\n if in_channels == 1:\n return nn_ops.conv2d(input, filter, strides, padding, name=name)\n else:\n return nn_ops.depthwise_conv2d_native(input, filter, strides, padding,\n name=name)\n\n\ndef separable_conv2d(input, depthwise_filter, pointwise_filter, strides,\n padding,\n name=None):\n \"\"\"2-D convolution with separable filters.\n\n Performs a depthwise convolution that acts separately on channels followed by\n a pointwise convolution that mixes channels. Note that this is separability\n between dimensions `[1, 2]` and `3`, not spatial separability between\n dimensions `1` and `2`.\n\n In detail,\n\n output[b, i, j, k] = sum_{di, dj, q, r]\n input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n depthwise_filter[di, dj, q, r] *\n pointwise_filter[0, 0, q * channel_multiplier + r, k]\n\n `strides` controls the strides for the depthwise convolution only, since\n the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have\n `strides[0] = strides[3] = 1`. For the most common case of the same\n horizontal and vertical strides, `strides = [1, stride, stride, 1]`.\n\n Args:\n input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.\n depthwise_filter: 4-D `Tensor` with shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`.\n Contains `in_channels` convolutional filters of depth 1.\n pointwise_filter: 4-D `Tensor` with shape\n `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise\n filter to mix channels after `depthwise_filter` has convolved spatially.\n strides: 1-D of size 4. The strides for the depthwise convolution for\n each dimension of `input`.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)\n name: A name for this operation (optional).\n\n Returns:\n A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.\n\n Raises:\n ValueError: If channel_multiplier * in_channels > out_channels,\n which means that the separable convolution is overparameterized.\n \"\"\"\n with ops.op_scope([input, depthwise_filter, pointwise_filter],\n name, \"separable_conv2d\") as name:\n input = ops.convert_to_tensor(input, name=\"tensor_in\")\n depthwise_filter = ops.convert_to_tensor(depthwise_filter,\n name=\"depthwise_filter\")\n pointwise_filter = ops.convert_to_tensor(pointwise_filter,\n name=\"pointwise_filter\")\n\n if pointwise_filter.get_shape().ndims is not None:\n assert len(pointwise_filter.get_shape()) == 4\n assert pointwise_filter.get_shape()[0] == 1\n assert pointwise_filter.get_shape()[1] == 1\n if depthwise_filter.get_shape().ndims and input.get_shape().ndims:\n channel_multiplier = depthwise_filter.get_shape()[3]\n in_channels = input.get_shape()[3]\n out_channels = pointwise_filter.get_shape()[3]\n if channel_multiplier * in_channels > out_channels:\n raise ValueError(\n (\"Refusing to perform an overparameterized separable \"\n \"convolution: channel_multiplier * in_channels = \"\n \"%d * %d = %d > %d = out_channels\" %\n (channel_multiplier, in_channels,\n channel_multiplier * in_channels, out_channels)))\n # The layout of the ops in the graph are expected to be as follows:\n # depthwise_conv2d // Conv2D op corresponding to native deptwise conv.\n # separable_conv2d // Conv2D op corresponding to the pointwise conv.\n depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides,\n padding, name=\"depthwise\")\n return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1],\n padding=\"VALID\", name=name)\n\n\ndef sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):\n \"\"\"Calculate the sufficient statistics for the mean and variance of `x`.\n\n These sufficient statistics are computed using the one pass algorithm on\n an input that's optionally shifted. See:\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data\n\n Args:\n x: A `Tensor`.\n axes: Array of ints. Axes along which to compute mean and variance.\n shift: A `Tensor` containing the value by which to shift the data for\n numerical stability, or `None` if no shift is to be performed. 
A shift\n close to the true mean provides the most numerically stable results.\n keep_dims: produce statistics with the same dimensionality as the input.\n name: Name used to scope the operations that compute the sufficient stats.\n\n Returns:\n Four `Tensor` objects of the same type as `x`:\n * the count (number of elements to average over).\n * the (possibly shifted) sum of the elements in the array.\n * the (possibly shifted) sum of squares of the elements in the array.\n * the shift by which the mean must be corrected or None if `shift` is None.\n \"\"\"\n with ops.op_scope([x, axes, shift], name, \"sufficient_statistics\"):\n x = ops.convert_to_tensor(x, name=\"x\")\n x_shape = x.get_shape()\n if x_shape.is_fully_defined():\n counts = 1\n m_shape = []\n for d in xrange(x_shape.ndims):\n dim = x_shape[d].value\n if d in set(axes):\n counts *= dim\n dim = 1\n m_shape.append(dim)\n counts = constant_op.constant(counts, dtype=x.dtype)\n else: # shape needs to be inferred at runtime.\n x_shape = array_ops.shape(x)\n select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape),\n True, False)\n m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape),\n x_shape)\n counts = math_ops.cast(\n math_ops.reduce_prod(x_shape / m_shape),\n x.dtype,\n name=\"count\")\n if shift is not None:\n shift = ops.convert_to_tensor(shift, name=\"shift\")\n m_ss = math_ops.sub(x, shift)\n v_ss = math_ops.squared_difference(x, shift)\n else: # no shift.\n m_ss = x\n v_ss = math_ops.square(x)\n m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name=\"mean_ss\")\n v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name=\"var_ss\")\n return counts, m_ss, v_ss, shift\n\n\ndef normalize_moments(counts, mean_ss, variance_ss, shift, name=None):\n \"\"\"Calculate the mean and variance of based on the sufficient statistics.\n\n Args:\n counts: A `Tensor` containing a the total count of the data (one value).\n mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly\n shifted) sum of the elements to average over.\n variance_ss: A `Tensor` containing the variance sufficient statistics: the\n (possibly shifted) squared sum of the data to compute the variance over.\n shift: A `Tensor` containing the value by which the data is shifted for\n numerical stability, or `None` if no shift was performed.\n name: Name used to scope the operations that compute the moments.\n\n Returns:\n Two `Tensor` objects: `mean` and `variance`.\n \"\"\"\n with ops.op_scope([counts, mean_ss, variance_ss, shift], name, \"normalize\"):\n divisor = math_ops.inv(counts, name=\"divisor\")\n if shift is not None:\n shifted_mean = math_ops.mul(mean_ss, divisor, name=\"shifted_mean\")\n mean = math_ops.add(shifted_mean, shift, name=\"mean\")\n else: # no shift.\n shifted_mean = math_ops.mul(mean_ss, divisor, name=\"mean\")\n mean = shifted_mean\n variance = math_ops.sub(\n math_ops.mul(variance_ss, divisor),\n math_ops.square(shifted_mean),\n name=\"variance\")\n return (mean, variance)\n\n\ndef moments(x, axes, shift=None, name=None, keep_dims=False):\n \"\"\"Calculate the mean and variance of `x`.\n\n The mean and variance are calculated by aggregating the contents of `x`\n across `axes`. 
If `x` is 1-D and `axes = [0]` this is just the mean\n and variance of a vector.\n\n When using these moments for batch normalization (see\n `tf.nn.batch_normalization`):\n * for so-called \"global normalization\", used with convolutional filters with\n shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.\n * for simple batch normalization pass `axes=[0]` (batch only).\n\n Args:\n x: A `Tensor`.\n axes: array of ints. Axes along which to compute mean and\n variance.\n shift: A `Tensor` containing the value by which to shift the data for\n numerical stability, or `None` if no shift is to be performed. A shift\n close to the true mean provides the most numerically stable results.\n keep_dims: produce moments with the same dimensionality as the input.\n name: Name used to scope the operations that compute the moments.\n\n Returns:\n Two `Tensor` objects: `mean` and `variance`.\n \"\"\"\n with ops.op_scope([x, axes, shift], name, \"moments\"):\n # The dynamic range of fp16 is too limited to support the collection of\n # sufficient statistics. As a workaround we simply perform the operations\n # on 32-bit floats before converting the mean and variance back to fp16\n y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x\n counts, m_ss, v_ss, shift = sufficient_statistics(y,\n axes,\n shift=shift,\n keep_dims=keep_dims,\n name=name)\n with ops.control_dependencies([counts, m_ss, v_ss]):\n mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name)\n if x.dtype == dtypes.float16:\n return (math_ops.cast(mean, dtypes.float16), math_ops.cast(\n variance, dtypes.float16))\n else:\n return (mean, variance)\n\n\ndef batch_normalization(x,\n mean,\n variance,\n offset,\n scale,\n variance_epsilon,\n name=None):\n \"\"\"Batch normalization.\n\n As described in http://arxiv.org/abs/1502.03167.\n Normalizes a tensor by `mean` and `variance`, and applies (optionally) a\n `scale` \\\\\\\\(\\gamma\\\\\\\\) to it, as well as an `offset` \\\\\\\\(\\\\beta\\\\\\\\):\n\n \\\\\\\\(\\\\frac{\\gamma(x-\\mu)}{\\sigma}+\\\\beta\\\\\\\\)\n\n `mean`, `variance`, `offset` and `scale` are all expected to be of one of two\n shapes:\n * In all generality, they can have the same number of dimensions as the\n input `x`, with identical sizes as `x` for the dimensions that are not\n normalized over (the 'depth' dimension(s)), and dimension 1 for the\n others which are being normalized over.\n `mean` and `variance` in this case would typically be the outputs of\n `tf.nn.moments(..., keep_dims=True)` during training, or running averages\n thereof during inference.\n * In the common case where the 'depth' dimension is the last dimension in\n the input tensor `x`, they may be one dimensional tensors of the same\n size as the 'depth' dimension.\n This is the case for example for the common `[batch, depth]` layout of\n fully-connected layers, and `[batch, height, width, depth]` for\n convolutions.\n `mean` and `variance` in this case would typically be the outputs of\n `tf.nn.moments(..., keep_dims=False)` during training, or running averages\n thereof during inference.\n\n Args:\n x: Input `Tensor` of arbitrary dimensionality.\n mean: A mean `Tensor`.\n variance: A variance `Tensor`.\n offset: An offset `Tensor`, often denoted \\\\\\\\(\\\\beta\\\\\\\\) in equations, or\n None. If present, will be added to the normalized tensor.\n scale: A scale `Tensor`, often denoted \\\\\\\\(\\gamma\\\\\\\\) in equations, or\n `None`. 
If present, the scale is applied to the normalized tensor.\n variance_epsilon: A small float number to avoid dividing by 0.\n name: A name for this operation (optional).\n\n Returns:\n the normalized, scaled, offset tensor.\n \"\"\"\n with ops.op_scope([x, mean, variance, scale, offset], name, \"batchnorm\"):\n inv = math_ops.rsqrt(variance + variance_epsilon)\n if scale is not None:\n inv *= scale\n return x * inv + (\n offset - mean * inv if offset is not None else -mean * inv)\n\n\ndef batch_norm_with_global_normalization(t,\n m,\n v,\n beta,\n gamma,\n variance_epsilon,\n scale_after_normalization,\n name=None):\n \"\"\"Batch normalization.\n\n This op is deprecated. See `tf.nn.batch_normalization`.\n\n Args:\n t: A 4D input Tensor.\n m: A 1D mean Tensor with size matching the last dimension of t.\n This is the first output from tf.nn.moments,\n or a saved moving average thereof.\n v: A 1D variance Tensor with size matching the last dimension of t.\n This is the second output from tf.nn.moments,\n or a saved moving average thereof.\n beta: A 1D beta Tensor with size matching the last dimension of t.\n An offset to be added to the normalized tensor.\n gamma: A 1D gamma Tensor with size matching the last dimension of t.\n If \"scale_after_normalization\" is true, this tensor will be multiplied\n with the normalized tensor.\n variance_epsilon: A small float number to avoid dividing by 0.\n scale_after_normalization: A bool indicating whether the resulted tensor\n needs to be multiplied with gamma.\n name: A name for this operation (optional).\n\n Returns:\n A batch-normalized `t`.\n \"\"\"\n return batch_normalization(t, m, v, beta, gamma if scale_after_normalization\n else None, variance_epsilon, name)\n\n\ndef _sum_rows(x):\n \"\"\"Returns a vector summing up each row of the matrix x.\"\"\"\n # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is\n # a matrix. The gradient of _sum_rows(x) is more efficient than\n # reduce_sum(x, 1)'s gradient in today's implementation. Therefore,\n # we use _sum_rows(x) in the nce_loss() computation since the loss\n # is mostly used for training.\n cols = array_ops.shape(x)[1]\n ones_shape = array_ops.pack([cols, 1])\n ones = array_ops.ones(ones_shape, x.dtype)\n return array_ops.reshape(math_ops.matmul(x, ones), [-1])\n\n\ndef _compute_sampled_logits(weights, biases, inputs, labels, num_sampled,\n num_classes, num_true=1,\n sampled_values=None,\n subtract_log_q=True,\n remove_accidental_hits=False,\n partition_strategy=\"mod\",\n name=None):\n \"\"\"Helper function for nce_loss and sampled_softmax_loss functions.\n\n Computes sampled output training logits and labels suitable for implementing\n e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see\n sampled_softmax_loss).\n\n Note: In the case where num_true > 1, we assign to each target class\n the target probability 1 / num_true so that the target probabilities\n sum to 1 per-example.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n `[num_classes, dim]`. The (possibly-partitioned) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes. 
Note that this format differs from\n the `labels` argument of `nn.softmax_cross_entropy_with_logits`.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n subtract_log_q: A `bool`. whether to subtract the log expected count of\n the labels in the sample to get the logits of the true labels.\n Default is True. Turn off for Negative Sampling.\n remove_accidental_hits: A `bool`. whether to remove \"accidental hits\"\n where a sampled class equals one of the target classes. Default is\n False.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `\"div\"` and `\"mod\"` are supported.\n Default is `\"mod\"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n Returns:\n out_logits, out_labels: `Tensor` objects each with shape\n `[batch_size, num_true + num_sampled]`, for passing to either\n `nn.sigmoid_cross_entropy_with_logits` (NCE) or\n `nn.softmax_cross_entropy_with_logits` (sampled softmax).\n \"\"\"\n\n if not isinstance(weights, list):\n weights = [weights]\n\n with ops.op_scope(\n weights + [biases, inputs, labels], name, \"compute_sampled_logits\"):\n if labels.dtype != dtypes.int64:\n labels = math_ops.cast(labels, dtypes.int64)\n labels_flat = array_ops.reshape(labels, [-1])\n\n # Sample the negative labels.\n # sampled shape: [num_sampled] tensor\n # true_expected_count shape = [batch_size, 1] tensor\n # sampled_expected_count shape = [num_sampled] tensor\n if sampled_values is None:\n sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(\n true_classes=labels,\n num_true=num_true,\n num_sampled=num_sampled,\n unique=True,\n range_max=num_classes)\n # NOTE: pylint cannot tell that 'sampled_values' is a sequence\n # pylint: disable=unpacking-non-sequence\n sampled, true_expected_count, sampled_expected_count = sampled_values\n # pylint: enable=unpacking-non-sequence\n\n # labels_flat is a [batch_size * num_true] tensor\n # sampled is a [num_sampled] int tensor\n all_ids = array_ops.concat(0, [labels_flat, sampled])\n\n # weights shape is [num_classes, dim]\n all_w = embedding_ops.embedding_lookup(\n weights, all_ids, partition_strategy=partition_strategy)\n all_b = embedding_ops.embedding_lookup(biases, all_ids)\n # true_w shape is [batch_size * num_true, dim]\n # true_b is a [batch_size * num_true] tensor\n true_w = array_ops.slice(\n all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1]))\n true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))\n\n # inputs shape is [batch_size, dim]\n # true_w shape is [batch_size * num_true, dim]\n # row_wise_dots is [batch_size, num_true, dim]\n dim = array_ops.shape(true_w)[1:2]\n new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])\n row_wise_dots = math_ops.mul(\n array_ops.expand_dims(inputs, 1),\n array_ops.reshape(true_w, new_true_w_shape))\n # We want the row-wise dot plus biases which yields a\n # [batch_size, num_true] tensor of true_logits.\n dots_as_matrix = array_ops.reshape(row_wise_dots,\n array_ops.concat(0, [[-1], dim]))\n true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])\n true_b = 
array_ops.reshape(true_b, [-1, num_true])\n true_logits += true_b\n\n # Lookup weights and biases for sampled labels.\n # sampled_w shape is [num_sampled, dim]\n # sampled_b is a [num_sampled] float tensor\n sampled_w = array_ops.slice(\n all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])\n sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])\n\n # inputs has shape [batch_size, dim]\n # sampled_w has shape [num_sampled, dim]\n # sampled_b has shape [num_sampled]\n # Apply X*W'+B, which yields [batch_size, num_sampled]\n sampled_logits = math_ops.matmul(inputs,\n sampled_w,\n transpose_b=True) + sampled_b\n\n if remove_accidental_hits:\n acc_hits = candidate_sampling_ops.compute_accidental_hits(\n labels, sampled, num_true=num_true)\n acc_indices, acc_ids, acc_weights = acc_hits\n\n # This is how SparseToDense expects the indices.\n acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])\n acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(\n acc_ids, dtypes.int32), [-1, 1])\n sparse_indices = array_ops.concat(\n 1, [acc_indices_2d, acc_ids_2d_int32], \"sparse_indices\")\n # Create sampled_logits_shape = [batch_size, num_sampled]\n sampled_logits_shape = array_ops.concat(\n 0,\n [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])\n if sampled_logits.dtype != acc_weights.dtype:\n acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)\n sampled_logits += sparse_ops.sparse_to_dense(\n sparse_indices, sampled_logits_shape, acc_weights,\n default_value=0.0, validate_indices=False)\n\n if subtract_log_q:\n # Subtract log of Q(l), prior probability that l appears in sampled.\n true_logits -= math_ops.log(true_expected_count)\n sampled_logits -= math_ops.log(sampled_expected_count)\n\n # Construct output logits and labels. The true labels/logits start at col 0.\n out_logits = array_ops.concat(1, [true_logits, sampled_logits])\n # true_logits is a float tensor, ones_like(true_logits) is a float tensor\n # of ones. We then divide by num_true to ensure the per-example labels sum\n # to 1.0, i.e. form a proper probability distribution.\n out_labels = array_ops.concat(\n 1, [array_ops.ones_like(true_logits) / num_true,\n array_ops.zeros_like(sampled_logits)])\n\n return out_logits, out_labels\n\n\ndef nce_loss(weights, biases, inputs, labels, num_sampled, num_classes,\n num_true=1,\n sampled_values=None,\n remove_accidental_hits=False,\n partition_strategy=\"mod\",\n name=\"nce_loss\"):\n \"\"\"Computes and returns the noise-contrastive estimation training loss.\n\n See [Noise-contrastive estimation: A new estimation principle for\n unnormalized statistical models]\n (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).\n Also see our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf)\n\n Note: In the case where `num_true` > 1, we assign to each target class\n the target probability 1 / `num_true` so that the target probabilities\n sum to 1 per-example.\n\n Note: It would be useful to allow a variable number of target classes per\n example. We hope to provide this functionality in a future release.\n For now, if you have a variable number of target classes, you can pad them\n out to a constant number by either repeating them or by padding\n with an otherwise unused class.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n [num_classes, dim]. 
The (possibly-partitioned) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n remove_accidental_hits: A `bool`. Whether to remove \"accidental hits\"\n where a sampled class equals one of the target classes. If set to\n `True`, this is a \"Sampled Logistic\" loss instead of NCE, and we are\n learning to generate log-odds instead of log probabilities. See\n our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf).\n Default is False.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `\"div\"` and `\"mod\"` are supported.\n Default is `\"mod\"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n\n Returns:\n A `batch_size` 1-D tensor of per-example NCE losses.\n \"\"\"\n logits, labels = _compute_sampled_logits(\n weights, biases, inputs, labels, num_sampled, num_classes,\n num_true=num_true,\n sampled_values=sampled_values,\n subtract_log_q=True,\n remove_accidental_hits=remove_accidental_hits,\n partition_strategy=partition_strategy,\n name=name)\n sampled_losses = sigmoid_cross_entropy_with_logits(logits,\n labels,\n name=\"sampled_losses\")\n # sampled_losses is batch_size x {true_loss, sampled_losses...}\n # We sum out true and sampled losses.\n return _sum_rows(sampled_losses)\n\n\ndef sampled_softmax_loss(weights, biases, inputs, labels, num_sampled,\n num_classes, num_true=1,\n sampled_values=None,\n remove_accidental_hits=True,\n partition_strategy=\"mod\",\n name=\"sampled_softmax_loss\"):\n \"\"\"Computes and returns the sampled softmax training loss.\n\n This is a faster way to train a softmax classifier over a huge number of\n classes.\n\n This operation is for training only. It is generally an underestimate of\n the full softmax loss.\n\n At inference time, you can compute full softmax probabilities with the\n expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`.\n\n See our [Candidate Sampling Algorithms Reference]\n (../../extras/candidate_sampling.pdf)\n\n Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)\n ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.\n\n Args:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape\n [num_classes, dim]. The (possibly-sharded) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward\n activations of the input network.\n labels: A `Tensor` of type `int64` and shape `[batch_size,\n num_true]`. The target classes. Note that this format differs from\n the `labels` argument of `nn.softmax_cross_entropy_with_logits`.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. 
The number of possible classes.\n num_true: An `int`. The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n remove_accidental_hits: A `bool`. whether to remove \"accidental hits\"\n where a sampled class equals one of the target classes. Default is\n True.\n partition_strategy: A string specifying the partitioning strategy, relevant\n if `len(weights) > 1`. Currently `\"div\"` and `\"mod\"` are supported.\n Default is `\"mod\"`. See `tf.nn.embedding_lookup` for more details.\n name: A name for the operation (optional).\n\n Returns:\n A `batch_size` 1-D tensor of per-example sampled softmax losses.\n\n \"\"\"\n logits, labels = _compute_sampled_logits(\n weights, biases, inputs, labels, num_sampled, num_classes,\n num_true=num_true,\n sampled_values=sampled_values,\n subtract_log_q=True,\n remove_accidental_hits=remove_accidental_hits,\n partition_strategy=partition_strategy,\n name=name)\n sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels)\n # sampled_losses is a [batch_size] tensor.\n return sampled_losses\n\n\n# TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn.\n__all__ = make_all(__name__)\n__all__.append(\"zero_fraction\") # documented in training.py\n\n# Modules whitelisted for reference through tf.nn.\n# TODO(cwhipkey): migrate callers to use the submodule directly.\n__all__.extend([\"nn_ops\", \"rnn_cell\", \"seq2seq\"])\n\n# Symbols whitelisted for export without documentation.\n# TODO(cwhipkey): review these and move to contrib or expose through\n# documentation.\n__all__.extend([\n \"all_candidate_sampler\",\n \"batch_norm_with_global_normalization\",\n \"batch_normalization\",\n \"conv2d_backprop_filter\",\n \"conv2d_backprop_input\",\n \"depthwise_conv2d_native\",\n \"lrn\",\n \"relu_layer\",\n \"xw_plus_b\",\n])\n",
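The numerically stable logistic-loss rewrite documented in `sigmoid_cross_entropy_with_logits` and `weighted_cross_entropy_with_logits` above can be sanity-checked outside the graph. The sketch below is plain NumPy with illustrative helper names (not part of this module); it confirms that the `max(x, 0) - x * z + log(1 + exp(-abs(x)))` form agrees with the naive formula for moderate logits and stays finite where `exp(-x)` would overflow.

# NumPy sketch of the stable logistic-loss formulations described above.
import numpy as np

def naive_logistic_loss(x, z):
  # x - x * z + log(1 + exp(-x)); overflows when x is very negative.
  return x - x * z + np.log1p(np.exp(-x))

def stable_logistic_loss(x, z):
  # max(x, 0) - x * z + log(1 + exp(-abs(x))); finite for any x.
  return np.maximum(x, 0.0) - x * z + np.log1p(np.exp(-np.abs(x)))

def naive_weighted_loss(x, z, q):
  sig = 1.0 / (1.0 + np.exp(-x))
  return q * z * -np.log(sig) + (1.0 - z) * -np.log(1.0 - sig)

def stable_weighted_loss(x, z, q):
  # (1 - z) * x + (1 + (q - 1) * z) * (log(1 + exp(-abs(x))) + max(-x, 0))
  log_weight = 1.0 + (q - 1.0) * z
  return (1.0 - z) * x + log_weight * (
      np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0.0))

x = np.array([-4.0, -0.5, 0.0, 0.5, 4.0])
z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
assert np.allclose(naive_logistic_loss(x, z), stable_logistic_loss(x, z))
assert np.allclose(naive_weighted_loss(x, z, 2.0),
                   stable_weighted_loss(x, z, 2.0))

# The stable forms stay finite where exp(-x) would overflow float64.
big, targets = np.array([-1000.0, 1000.0]), np.array([1.0, 0.0])
print(stable_logistic_loss(big, targets))        # finite: [1000., 1000.]
print(stable_weighted_loss(big, targets, 2.0))   # finite: [2000., 1000.]
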
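`l2_normalize` above is, per its docstring, just `x / sqrt(max(sum(x**2), epsilon))` along one dimension. A minimal NumPy rendering of that definition (the helper name is illustrative):

# NumPy rendering of the l2_normalize definition above (illustrative only).
import numpy as np

def l2_normalize_np(x, dim, epsilon=1e-12):
  square_sum = np.sum(np.square(x), axis=dim, keepdims=True)
  return x * (1.0 / np.sqrt(np.maximum(square_sum, epsilon)))

v = np.array([[3.0, 4.0], [0.0, 0.0]])
print(l2_normalize_np(v, dim=1))  # first row -> [0.6, 0.8]; zero row stays 0
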
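The index formula in the `depthwise_conv2d` docstring can be spelled out as explicit loops. The reference below is a deliberately slow NumPy sketch restricted to stride 1 and 'VALID' padding, included only to illustrate the `k * channel_multiplier + q` output layout; it is not how the native fused kernel is implemented.

# Direct (slow) NumPy rendering of the depthwise_conv2d index formula above,
# restricted to stride 1 and 'VALID' padding for brevity.
import numpy as np

def depthwise_conv2d_reference(inp, filt):
  batch, in_h, in_w, in_c = inp.shape
  f_h, f_w, _, mult = filt.shape
  out = np.zeros((batch, in_h - f_h + 1, in_w - f_w + 1, in_c * mult))
  for b in range(batch):
    for i in range(out.shape[1]):
      for j in range(out.shape[2]):
        for k in range(in_c):
          for q in range(mult):
            patch = inp[b, i:i + f_h, j:j + f_w, k]
            out[b, i, j, k * mult + q] = np.sum(patch * filt[:, :, k, q])
  return out

rng = np.random.RandomState(2)
x = rng.randn(1, 5, 5, 3)
w = rng.randn(2, 2, 3, 2)
print(depthwise_conv2d_reference(x, w).shape)  # (1, 4, 4, 6)
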
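The shifted one-pass statistics used by `sufficient_statistics` and `normalize_moments` can likewise be checked numerically. The sketch below mirrors the same recipe (count, shifted sum, shifted sum of squares, then the divisor/shift correction) in NumPy and compares the result against `np.mean`/`np.var`; the function names are illustrative only.

# NumPy sketch of the shifted sufficient-statistics computation above.
import numpy as np

def sufficient_statistics_np(x, axes, shift):
  counts = np.prod([x.shape[a] for a in axes])
  m_ss = np.sum(x - shift, axis=tuple(axes))
  v_ss = np.sum(np.square(x - shift), axis=tuple(axes))
  return counts, m_ss, v_ss

def normalize_moments_np(counts, m_ss, v_ss, shift):
  divisor = 1.0 / counts
  shifted_mean = m_ss * divisor
  mean = shifted_mean + shift
  variance = v_ss * divisor - np.square(shifted_mean)
  return mean, variance

x = np.random.RandomState(0).randn(4, 3) + 100.0
counts, m_ss, v_ss = sufficient_statistics_np(x, axes=[0], shift=100.0)
mean, variance = normalize_moments_np(counts, m_ss, v_ss, shift=100.0)
assert np.allclose(mean, x.mean(axis=0))
assert np.allclose(variance, x.var(axis=0))
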
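`batch_normalization` above folds the whole computation into `x * inv + (offset - mean * inv)` with `inv = rsqrt(variance + epsilon)`, optionally scaled by `gamma`. The NumPy sketch below (illustrative names) checks that this fused form matches the textbook `gamma * (x - mean) / sqrt(variance + epsilon) + beta`.

# NumPy sketch of the fused batch_normalization expression used above.
import numpy as np

def batch_normalization_np(x, mean, variance, offset, scale, eps):
  inv = 1.0 / np.sqrt(variance + eps)
  if scale is not None:
    inv = inv * scale
  return x * inv + ((offset - mean * inv) if offset is not None
                    else -mean * inv)

rng = np.random.RandomState(1)
x = rng.randn(8, 4)
mean, variance = x.mean(axis=0), x.var(axis=0)
gamma, beta, eps = rng.rand(4) + 0.5, rng.randn(4), 1e-3

fused = batch_normalization_np(x, mean, variance, beta, gamma, eps)
textbook = gamma * (x - mean) / np.sqrt(variance + eps) + beta
assert np.allclose(fused, textbook)
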
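For the candidate-sampling losses defined above, two details are worth making concrete: the docstring's note that full softmax probabilities at inference time are `softmax(matmul(inputs, transpose(weights)) + biases)`, and the label layout built by `_compute_sampled_logits`, where the `num_true` true columns each carry `1 / num_true` and the sampled columns carry zero. A small NumPy sketch with made-up shapes:

# NumPy sketch of the inference-time full softmax and of the label layout
# produced by _compute_sampled_logits (shapes and names are illustrative).
import numpy as np

rng = np.random.RandomState(3)
batch_size, dim, num_classes = 4, 8, 10
num_true, num_sampled = 2, 3

inputs = rng.randn(batch_size, dim)
weights = rng.randn(num_classes, dim)
biases = rng.randn(num_classes)

# Full softmax used at inference time.
logits = inputs.dot(weights.T) + biases
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
assert np.allclose(probs.sum(axis=1), 1.0)

# Label layout for the sampled loss: true columns share mass 1/num_true,
# sampled columns get 0, so each row is a proper distribution.
out_labels = np.concatenate(
    [np.full((batch_size, num_true), 1.0 / num_true),
     np.zeros((batch_size, num_sampled))], axis=1)
assert np.allclose(out_labels.sum(axis=1), 1.0)
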
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions for downloading and reading MNIST data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\n\nimport numpy\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.contrib.learn.python.learn.datasets import base\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.platform import gfile\n\nSOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'\n\n\ndef _read32(bytestream):\n dt = numpy.dtype(numpy.uint32).newbyteorder('>')\n return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef extract_images(filename):\n \"\"\"Extract the images into a 4D uint8 numpy array [index, y, x, depth].\"\"\"\n print('Extracting', filename)\n with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data\n\n\ndef dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\ndef extract_labels(filename, one_hot=False, num_classes=10):\n \"\"\"Extract the labels into a 1D uint8 numpy array [index].\"\"\"\n print('Extracting', filename)\n with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels\n\n\nclass DataSet(object):\n\n def __init__(self,\n images,\n labels,\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=True):\n \"\"\"Construct a DataSet.\n one_hot arg is used only if fake_data is true. 
`dtype` can be either\n `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into\n `[0, 1]`.\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' %\n dtype)\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert images.shape[0] == labels.shape[0], (\n 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\n self._num_examples = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n if reshape:\n assert images.shape[3] == 1\n images = images.reshape(images.shape[0],\n images.shape[1] * images.shape[2])\n if dtype == dtypes.float32:\n # Convert from [0, 255] -> [0.0, 1.0].\n images = images.astype(numpy.float32)\n images = numpy.multiply(images, 1.0 / 255.0)\n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n\n @property\n def images(self):\n return self._images\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, fake_data=False):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]\n\n\ndef read_data_sets(train_dir,\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=True):\n if fake_data:\n\n def fake():\n return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)\n\n train = fake()\n validation = fake()\n test = fake()\n return base.Datasets(train=train, validation=validation, test=test)\n\n TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\n TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\n TEST_IMAGES = 't10k-images-idx3-ubyte.gz'\n TEST_LABELS = 't10k-labels-idx1-ubyte.gz'\n VALIDATION_SIZE = 5000\n\n local_file = base.maybe_download(TRAIN_IMAGES, train_dir,\n SOURCE_URL + TRAIN_IMAGES)\n train_images = extract_images(local_file)\n\n local_file = base.maybe_download(TRAIN_LABELS, train_dir,\n SOURCE_URL + TRAIN_LABELS)\n train_labels = extract_labels(local_file, one_hot=one_hot)\n\n local_file = base.maybe_download(TEST_IMAGES, train_dir,\n SOURCE_URL + TEST_IMAGES)\n test_images = extract_images(local_file)\n\n local_file = base.maybe_download(TEST_LABELS, train_dir,\n SOURCE_URL + TEST_LABELS)\n test_labels = extract_labels(local_file, one_hot=one_hot)\n\n validation_images = train_images[:VALIDATION_SIZE]\n validation_labels = train_labels[:VALIDATION_SIZE]\n train_images = train_images[VALIDATION_SIZE:]\n train_labels = train_labels[VALIDATION_SIZE:]\n\n train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)\n 
validation = DataSet(validation_images,\n validation_labels,\n dtype=dtype,\n reshape=reshape)\n test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)\n\n return base.Datasets(train=train, validation=validation, test=test)\n\n\ndef load_mnist():\n return read_data_sets('MNIST_data')\n",
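A quick way to exercise the `DataSet` batching and epoch bookkeeping defined above, without downloading the real MNIST files, is to hand it synthetic arrays in the expected `[index, rows, cols, 1]` layout. This sketch assumes the `DataSet` class from the file above; the fake data itself is arbitrary.

# Sketch: drive the DataSet class above with synthetic data instead of the
# real MNIST files, just to illustrate next_batch() and epoch accounting.
import numpy

fake_images = numpy.zeros((100, 28, 28, 1), dtype=numpy.uint8)
fake_labels = numpy.arange(100) % 10

ds = DataSet(fake_images, fake_labels)  # defaults: dtype=float32, reshape=True
print(ds.images.shape)   # (100, 784), pixel values rescaled into [0.0, 1.0]
print(ds.num_examples)   # 100

for _ in range(5):
  batch_images, batch_labels = ds.next_batch(30)
  print(batch_images.shape, batch_labels.shape, ds.epochs_completed)
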
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests of the Transform class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.learn.python import learn\nfrom tensorflow.contrib.learn.python.learn.dataframe.transform import _make_list_of_series\nfrom tensorflow.contrib.learn.python.learn.tests.dataframe import mocks\n\n\nclass TransformTest(tf.test.TestCase):\n \"\"\"Tests of the Transform class.\"\"\"\n\n def test_make_list_of_column(self):\n col1 = mocks.MockSeries(\"foo\", [])\n col2 = mocks.MockSeries(\"bar\", [])\n\n self.assertEqual([], _make_list_of_series(None))\n self.assertEqual([col1], _make_list_of_series(col1))\n self.assertEqual([col1], _make_list_of_series([col1]))\n self.assertEqual([col1, col2], _make_list_of_series([col1, col2]))\n self.assertEqual([col1, col2], _make_list_of_series((col1, col2)))\n\n def test_cache(self):\n z = mocks.MockSeries(\"foobar\", [])\n t = mocks.MockTwoOutputTransform(\"thb\", \"nth\", \"snt\")\n cache = {}\n t.apply_transform([z], cache)\n self.assertEqual(2, len(cache))\n\n expected_keys = [\n \"MockTransform(\"\n \"{'param_one': 'thb', 'param_three': 'snt', 'param_two': 'nth'})\"\n \"(foobar)[out1]\",\n \"MockTransform(\"\n \"{'param_one': 'thb', 'param_three': 'snt', 'param_two': 'nth'})\"\n \"(foobar)[out2]\"]\n\n self.assertEqual(expected_keys, sorted(cache.keys()))\n\n def test_parameters(self):\n t = mocks.MockTwoOutputTransform(\"a\", \"b\", \"c\")\n self.assertEqual({\"param_one\": \"a\", \"param_three\": \"c\", \"param_two\": \"b\"},\n t.parameters())\n\n def test_parameters_inherited_combined(self):\n t = mocks.MockTwoOutputTransform(\"thb\", \"nth\", \"snt\")\n\n expected = {\"param_one\": \"thb\", \"param_two\": \"nth\", \"param_three\": \"snt\"}\n self.assertEqual(expected, t.parameters())\n\n def test_return_type(self):\n t = mocks.MockTwoOutputTransform(\"a\", \"b\", \"c\")\n\n rt = t.return_type\n self.assertEqual(\"ReturnType\", rt.__name__)\n self.assertEqual((\"out1\", \"out2\"), rt._fields)\n\n def test_call(self):\n t = mocks.MockTwoOutputTransform(\"a\", \"b\", \"c\")\n # MockTwoOutputTransform has input valency 1\n input1 = mocks.MockSeries(\"foobar\", [])\n out1, out2 = t([input1]) # pylint: disable=not-callable\n\n self.assertEqual(learn.TransformedSeries, type(out1))\n # self.assertEqual(out1.transform, t)\n # self.assertEqual(out1.output_name, \"output1\")\n\n self.assertEqual(learn.TransformedSeries, type(out2))\n # self.assertEqual(out2.transform, t)\n # self.assertEqual(out2.output_name, \"output2\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"High level operations on graphs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport sys\nimport threading\nimport time\n\nimport numpy as np\n\nfrom six import reraise\n\nfrom tensorflow.contrib.framework.python.ops import ops as contrib_ops\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn import monitors as monitors_lib\nfrom tensorflow.contrib.learn.python.learn.utils import checkpoints\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import queue_runner\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training import session_manager as session_manager_lib\nfrom tensorflow.python.training import summary_io\nfrom tensorflow.python.training import supervisor as tf_supervisor\n\n# Singleton for SummaryWriter per logdir folder.\n_SUMMARY_WRITERS = {}\n\n# Lock protecting _SUMMARY_WRITERS\n_summary_writer_lock = threading.Lock()\n\n\ndef clear_summary_writers():\n \"\"\"Clear cached summary writers. 
Currently only used for unit tests.\"\"\"\n _summary_writer_lock.acquire()\n _SUMMARY_WRITERS.clear()\n _summary_writer_lock.release()\n\n\ndef get_summary_writer(logdir):\n \"\"\"Returns single SummaryWriter per logdir in current run.\n\n Args:\n logdir: str, folder to write summaries.\n\n Returns:\n Existing `SummaryWriter` object or new one if never wrote to given\n directory.\n \"\"\"\n _summary_writer_lock.acquire()\n if logdir not in _SUMMARY_WRITERS:\n _SUMMARY_WRITERS[logdir] = summary_io.SummaryWriter(\n logdir, graph=ops.get_default_graph())\n _summary_writer_lock.release()\n return _SUMMARY_WRITERS[logdir]\n\n\nclass NanLossDuringTrainingError(RuntimeError):\n\n def __str__(self):\n return 'NaN loss during training.'\n\n\ndef _make_saver(graph, keep_checkpoint_max=5):\n vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)\n if vars_to_save:\n return tf_saver.Saver(vars_to_save,\n sharded=True,\n max_to_keep=keep_checkpoint_max)\n else:\n return None\n\n\ndef _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):\n logging.info('Loading model from checkpoint: %s.', checkpoint_path)\n assert gfile.Glob(checkpoint_path)\n saver = saver or _make_saver(graph)\n if saver:\n saver.restore(session, checkpoint_path)\n else:\n logging.info('No variables found in graph, not creating Saver() object.')\n\n\ndef _run_with_monitors(session, step, tensors, feed_dict, monitors):\n \"\"\"Runs session for given tensors with monitor callbacks.\"\"\"\n for monitor in monitors:\n tensors += monitor.step_begin(step)\n tensors = list(set(tensors))\n\n outputs = session.run(tensors, feed_dict=feed_dict)\n outputs = dict(zip(\n [t.name if isinstance(t, ops.Tensor) else t for t in tensors],\n outputs))\n\n should_stop = False\n for monitor in monitors:\n induce_stop = monitor.step_end(step, outputs)\n should_stop = should_stop or induce_stop\n return outputs, should_stop\n\n\n# TODO(wicke): switch to forced named kwargs\ndef train(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor=None,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n log_every_steps=10,\n supervisor_is_chief=True,\n supervisor_master='',\n supervisor_save_model_secs=600,\n keep_checkpoint_max=5,\n supervisor_save_summaries_steps=100,\n feed_fn=None,\n steps=None,\n fail_on_nan_loss=True,\n monitors=None,\n max_steps=None):\n \"\"\"Train a model.\n\n Given `graph`, a directory to write outputs to (`output_dir`), and some ops,\n run a training loop. The given `train_op` performs one step of training on the\n model. The `loss_op` represents the objective function of the training. It is\n expected to increment the `global_step_tensor`, a scalar integer tensor\n counting training steps. This function uses `Supervisor` to initialize the\n graph (from a checkpoint if one is available in `output_dir`), write summaries\n defined in the graph, and write regular checkpoints as defined by\n `supervisor_save_model_secs`.\n\n Training continues until `global_step_tensor` evaluates to `max_steps`, or, if\n `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the\n program is terminated with exit code 1.\n\n Args:\n graph: A graph to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A directory to write outputs to.\n train_op: An op that performs one training step when run.\n loss_op: A scalar loss tensor.\n global_step_tensor: A tensor representing the global step. 
If none is given,\n one is extracted from the graph using the same logic as in `Supervisor`.\n init_op: An op that initializes the graph. If `None`, use `Supervisor`'s\n default.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated.\n init_fn: Optional callable passed to Supervisor to initialize the model.\n log_every_steps: Output logs regularly. The logs contain timing data and the\n current loss.\n supervisor_is_chief: Whether the current process is the chief supervisor in\n charge of restoring the model and running standard services.\n supervisor_master: The master string to use when preparing the session.\n supervisor_save_model_secs: Save a checkpoint every\n `supervisor_save_model_secs` seconds when training.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. This is simply passed as the max_to_keep\n arg to tf.Saver constructor.\n supervisor_save_summaries_steps: Save summaries every\n `supervisor_save_summaries_steps` seconds when training.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n steps: Trains for this many steps (e.g. current global step + `steps`).\n fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`\n evaluates to `NaN`. If false, continue training as if nothing happened.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever. Two calls fit(steps=100) means 200 training iterations.\n On the other hand two calls of fit(max_steps=100) means, second call\n will not do any iteration since first call did all 100 steps.\n\n Returns:\n The final loss value.\n\n Raises:\n ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`\n is not provided. See `tf.contrib.framework.get_global_step` for how we\n look up the latter if not provided explicitly.\n NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever\n evaluates to `NaN`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n while True:\n try:\n return _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps)\n except errors.AbortedError:\n # Happens when PS restarts, keep training.\n logging.warning('Training got Aborted error. Keep training.')\n\n\ndef _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps):\n \"\"\"See train.\"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' 
% output_dir)\n if train_op is None:\n raise ValueError('Missing train_op.')\n if loss_op is None:\n raise ValueError('Missing loss_op.')\n\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n if global_step_tensor is None:\n raise ValueError('No \"global_step\" was provided or found in the graph.')\n\n # Get current step.\n try:\n start_step = checkpoints.load_variable(\n output_dir, global_step_tensor.name)\n except (errors.NotFoundError, ValueError):\n start_step = 0\n\n summary_writer = (get_summary_writer(output_dir)\n if supervisor_is_chief else None)\n\n # TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.\n if not supervisor_is_chief:\n # monitors should run only on the chief.\n monitors = []\n elif not monitors:\n monitors = monitors_lib.get_default_monitors(\n loss_op=loss_op,\n summary_op=logging_ops.get_summary_op(),\n save_summary_steps=supervisor_save_summaries_steps,\n summary_writer=summary_writer)\n\n if max_steps is None:\n max_steps = (start_step + steps) if steps else None\n # Start monitors, can create graph parts.\n for monitor in monitors:\n monitor.begin(max_steps=max_steps)\n\n supervisor = tf_supervisor.Supervisor(\n graph,\n init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,\n init_feed_dict=init_feed_dict,\n is_chief=supervisor_is_chief,\n logdir=output_dir,\n saver=_make_saver(graph, keep_checkpoint_max),\n global_step=global_step_tensor,\n summary_op=None,\n summary_writer=summary_writer,\n save_model_secs=supervisor_save_model_secs,\n init_fn=init_fn)\n session = supervisor.PrepareSession(master=supervisor_master,\n start_standard_services=True)\n supervisor.StartQueueRunners(session)\n\n with session:\n get_current_step = lambda: session.run(global_step_tensor)\n\n start_step = get_current_step()\n last_step = start_step\n last_log_step = start_step\n loss_value = None\n logging.info('Training steps [%d,%s)', last_step, 'inf'\n if max_steps is None else str(max_steps))\n\n excinfo = None\n try:\n while not supervisor.ShouldStop() and (\n (max_steps is None) or (last_step < max_steps)):\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n\n outputs, should_stop = _run_with_monitors(\n session, last_step + 1, [train_op, loss_op], feed_dict, monitors)\n\n loss_value = outputs[loss_op.name]\n if np.isnan(loss_value):\n failure_message = 'Model diverged with loss = NaN.'\n if fail_on_nan_loss:\n logging.error(failure_message)\n raise NanLossDuringTrainingError()\n else:\n logging.warning(failure_message)\n\n if should_stop:\n break\n\n this_step = get_current_step()\n\n if this_step <= last_step:\n logging.error(\n 'Global step was not incremented by train op at step %s'\n ': new step %d', last_step, this_step)\n\n last_step = this_step\n is_last_step = (max_steps is not None) and (last_step >= max_steps)\n if is_last_step or (last_step - last_log_step >= log_every_steps):\n logging.info(\n 'training step %d, loss = %.5f (%.3f sec/batch).',\n last_step, loss_value, float(time.time() - start_time))\n last_log_step = last_step\n except errors.OutOfRangeError as e:\n logging.warn('Got exception during tf.learn training loop possibly '\n 'due to exhausted input queue %s.', e)\n except StopIteration:\n logging.info('Exhausted input iterarator.')\n except BaseException as e: # pylint: disable=broad-except\n # Hold on to any other exceptions while we try recording a final\n # checkpoint and summary.\n excinfo = sys.exc_info()\n 
finally:\n try:\n # Call supervisor.Stop() from within a try block because it re-raises\n # exceptions thrown by the supervised threads.\n supervisor.Stop(close_summary_writer=False)\n\n # Save one last checkpoint and summaries\n # TODO(wicke): This should be handled by Supervisor\n\n # In case we encountered an exception in the try block before we updated\n # last_step, update it here (again).\n last_step = get_current_step()\n if supervisor_is_chief:\n ckpt_path = supervisor.save_path\n logging.info('Saving checkpoint for step %d to checkpoint: %s.',\n last_step, ckpt_path)\n supervisor.saver.save(session, ckpt_path, global_step=last_step)\n\n # Finish monitors.\n for monitor in monitors:\n monitor.end()\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '\n 'due to exhausted input queue. Note: summary_op is not '\n 'expected to trigger dequeues. %s.', e)\n except BaseException as e: # pylint: disable=broad-except\n # If we don't already have an exception to re-raise, raise this one.\n if not excinfo:\n raise\n # Otherwise, log this one and raise the other in the finally block.\n logging.error('Got exception during tf.learn final checkpoint %s.', e)\n finally:\n if excinfo:\n reraise(*excinfo)\n return loss_value\n\n\ndef _get_first_op_from_collection(collection_name):\n elements = ops.get_collection(collection_name)\n if elements:\n return elements[0]\n return None\n\n\ndef _get_saver():\n \"\"\"Lazy init and return saver.\"\"\"\n saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)\n if saver is None and variables.all_variables():\n saver = tf_saver.Saver()\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver)\n return saver\n\n\ndef _get_ready_op():\n ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)\n if ready_op is None:\n ready_op = variables.report_uninitialized_variables()\n ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n return ready_op\n\n\ndef _get_local_init_op():\n local_init_op = _get_first_op_from_collection(\n ops.GraphKeys.LOCAL_INIT_OP)\n if local_init_op is None:\n op_list = [variables.initialize_local_variables(),\n data_flow_ops.initialize_all_tables()]\n if op_list:\n local_init_op = control_flow_ops.group(*op_list)\n ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n return local_init_op\n\n\ndef _eval_results_to_str(eval_results):\n return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items())\n\n\ndef _write_summary_results(output_dir, eval_results, current_global_step):\n \"\"\"Writes eval results into summary file in given dir.\"\"\"\n logging.info('Saving evaluation summary for %d step: %s', current_global_step,\n _eval_results_to_str(eval_results))\n summary_writer = get_summary_writer(output_dir)\n summary = summary_pb2.Summary()\n for key in eval_results:\n if eval_results[key] is None:\n continue\n value = summary.value.add()\n value.tag = key\n if (isinstance(eval_results[key], np.float32) or\n isinstance(eval_results[key], float)):\n value.simple_value = float(eval_results[key])\n summary_writer.add_summary(summary, current_global_step)\n summary_writer.flush()\n\n\ndef evaluate(graph,\n output_dir,\n checkpoint_path,\n eval_dict,\n update_op=None,\n global_step_tensor=None,\n supervisor_master='',\n log_every_steps=10,\n feed_fn=None,\n max_steps=None):\n \"\"\"Evaluate a model loaded from a checkpoint.\n\n Given `graph`, a directory to 
write summaries to (`output_dir`), a checkpoint\n to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval\n loop for `max_steps` steps, or until an exception (generally, an\n end-of-input signal from a reader operation) is raised from running\n `eval_dict`.\n\n In each step of evaluation, all tensors in the `eval_dict` are evaluated, and\n every `log_every_steps` steps, they are logged. At the very end of evaluation,\n a summary is evaluated (finding the summary ops using `Supervisor`'s logic)\n and written to `output_dir`.\n\n Args:\n graph: A `Graph` to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A string containing the directory to write a summary to.\n checkpoint_path: A string containing the path to a checkpoint to restore.\n Can be `None` if the graph doesn't require loading any variables.\n eval_dict: A `dict` mapping string names to tensors to evaluate. It is\n evaluated in every logging step. The result of the final evaluation is\n returned. If `update_op` is None, then it's evaluated in every step. If\n `max_steps` is `None`, this should depend on a reader that will raise an\n end-of-inupt exception when the inputs are exhausted.\n update_op: A `Tensor` which is run in every step.\n global_step_tensor: A `Variable` containing the global step. If `None`,\n one is extracted from the graph using the same logic as in `Supervisor`.\n Used to place eval summaries on training curves.\n supervisor_master: The master string to use when preparing the session.\n log_every_steps: Integer. Output logs every `log_every_steps` evaluation\n steps. The logs contain the `eval_dict` and timing information.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n max_steps: Integer. Evaluate `eval_dict` this many times.\n\n Returns:\n A tuple `(eval_results, global_step)`:\n eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)\n that are the result of running eval_dict in the last step. `None` if no\n eval steps were run.\n global_step: The global step this evaluation corresponds to.\n\n Raises:\n ValueError: if `output_dir` is empty.\n \"\"\"\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' 
% output_dir)\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n\n # Create or get summary op, global_step and saver.\n saver = _get_saver()\n local_init_op = _get_local_init_op()\n ready_op = _get_ready_op()\n\n session_manager = session_manager_lib.SessionManager(\n local_init_op=local_init_op,\n ready_op=ready_op)\n session, initialized = session_manager.recover_session(\n master=supervisor_master,\n saver=saver,\n checkpoint_dir=checkpoint_path)\n\n # Start queue runners.\n coord = coordinator.Coordinator()\n threads = queue_runner.start_queue_runners(session, coord)\n\n with session:\n if not initialized:\n logging.warning('Failed to initialize from %s.', checkpoint_path)\n # TODO(ipolosukhin): This should be failing, but old code relies on that.\n session.run(variables.initialize_all_variables())\n if checkpoint_path:\n _restore_from_checkpoint(session, graph, checkpoint_path, saver)\n\n current_global_step = session.run(global_step_tensor)\n eval_results = None\n # TODO(amodei): Fix this to run through the eval set exactly once.\n step = 0\n eval_step = None\n feed_dict = None\n logging.info('Eval steps [%d,%s) for training step %d.', step,\n 'inf' if max_steps is None\n else str(max_steps), current_global_step)\n try:\n try:\n while (max_steps is None) or (step < max_steps):\n step += 1\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n if update_op is not None:\n session.run(update_op, feed_dict=feed_dict)\n else:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n\n # TODO(wicke): We should assert that the global step hasn't changed.\n if step % log_every_steps == 0:\n if eval_step is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n duration = time.time() - start_time\n logging.info('Results after %d steps (%.3f sec/batch): %s.',\n step, float(duration),\n _eval_results_to_str(eval_results))\n finally:\n if eval_results is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n # Stop session first, before queue runners.\n session.close()\n\n # Stop queue runners.\n try:\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n except (RuntimeError, errors.CancelledError) as e:\n logging.warning('Coordinator didn\\'t stop cleanly: %s', e)\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n if max_steps is None:\n logging.info('Input queue is exhausted.')\n else:\n logging.warn('Input queue is exhausted: %s.', e)\n # catch StopIteration which is thrown is DataReader is out of data.\n except StopIteration as e:\n if max_steps is None:\n logging.info('Input iterator is exhausted.')\n else:\n logging.warn('Input iterator is exhausted: %s.', e)\n\n # Save summaries for this evaluation.\n _write_summary_results(output_dir, eval_results, current_global_step)\n\n return eval_results, current_global_step\n\n\ndef run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):\n \"\"\"Run `output_dict` tensors `n` times, with the same `feed_dict` each run.\n\n Args:\n output_dict: A `dict` mapping string names to tensors to run. 
Must all be\n from the same graph.\n feed_dict: `dict` of input values to feed each run.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n n: Number of times to repeat.\n\n Returns:\n A list of `n` `dict` objects, each containing values read from `output_dict`\n tensors.\n \"\"\"\n return run_feeds(\n output_dict=output_dict,\n feed_dicts=itertools.repeat(feed_dict, n),\n restore_checkpoint_path=restore_checkpoint_path)\n\n\n# TODO(ptucker): Add save_checkpoint_path.\ndef run_feeds(output_dict, feed_dicts, restore_checkpoint_path=None):\n \"\"\"Run `output_dict` tensors with each input in `feed_dicts`.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dicts: Iterable of `dict` objects of input values to feed.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n\n Returns:\n A list of dicts of values read from `output_dict` tensors, one item in the\n list for each item in `feed_dicts`. Keys are the same as `output_dict`,\n values are the results read from the corresponding `Tensor` in\n `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n if not output_dict:\n raise ValueError('output_dict is invalid: %s.' % output_dict)\n if not feed_dicts:\n raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)\n\n graph = contrib_ops.get_graph_from_inputs(output_dict.values())\n\n with graph.as_default() as g:\n with tf_session.Session('') as session:\n if restore_checkpoint_path:\n _restore_from_checkpoint(session, g, restore_checkpoint_path)\n else:\n session.run(variables.initialize_all_variables())\n session.run(variables.initialize_local_variables())\n session.run(data_flow_ops.initialize_all_tables())\n coord = coordinator.Coordinator()\n threads = None\n try:\n threads = queue_runner.start_queue_runners(session, coord=coord)\n return [session.run(output_dict, f) for f in feed_dicts]\n finally:\n coord.request_stop()\n if threads:\n coord.join(threads, stop_grace_period_secs=120)\n\n\ndef infer(restore_checkpoint_path, output_dict, feed_dict=None):\n \"\"\"Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dict: `dict` object mapping `Tensor` objects to input values to feed.\n\n Returns:\n Dict of values read from `output_dict` tensors. Keys are the same as\n `output_dict`, values are the results read from the corresponding `Tensor`\n in `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n return run_feeds(output_dict=output_dict,\n feed_dicts=[feed_dict] if feed_dict is not None else [None],\n restore_checkpoint_path=restore_checkpoint_path)[0]\n"
] | [
[
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.contrib.layers.infer_real_valued_columns",
"tensorflow.python.ops.logging_ops.scalar_summary",
"tensorflow.contrib.layers.weighted_sum_from_feature_columns"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.ops.math_ops.rsqrt",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.math_ops.select",
"tensorflow.python.ops.math_ops.inv",
"tensorflow.python.ops.math_ops.sub",
"tensorflow.python.ops.candidate_sampling_ops.log_uniform_candidate_sampler",
"tensorflow.python.ops.candidate_sampling_ops.compute_accidental_hits",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.nn_ops.softmax_cross_entropy_with_logits",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.sparse_ops.sparse_to_dense",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.array_ops.pack",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.nn_ops.depthwise_conv2d_native",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.mul",
"tensorflow.python.ops.math_ops.squared_difference",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.nn_ops.relu",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.all_util.make_all",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.multiply",
"numpy.arange",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"numpy.dtype",
"numpy.random.shuffle",
"numpy.frombuffer",
"tensorflow.python.framework.dtypes.as_dtype",
"numpy.zeros",
"tensorflow.python.platform.gfile.Open",
"tensorflow.contrib.learn.python.learn.datasets.base.maybe_download"
],
[
"tensorflow.contrib.learn.python.learn.tests.dataframe.mocks.MockSeries",
"tensorflow.contrib.learn.python.learn.tests.dataframe.mocks.MockTwoOutputTransform",
"tensorflow.test.main",
"tensorflow.contrib.learn.python.learn.dataframe.transform._make_list_of_series"
],
[
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.contrib.learn.python.learn.utils.checkpoints.load_variable",
"tensorflow.python.training.queue_runner.start_queue_runners",
"tensorflow.python.ops.logging_ops.get_summary_op",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.contrib.framework.python.ops.variables.assert_or_get_global_step",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.session_manager.SessionManager",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.data_flow_ops.initialize_all_tables",
"tensorflow.python.ops.variables.initialize_local_variables",
"tensorflow.python.ops.variables.all_variables",
"numpy.isnan",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.variables.initialize_all_variables",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.core.framework.summary_pb2.Summary",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.training.saver.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"0.12",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
peri044/TRTorch | [
"62c9830b24552651abbff611515114cbcaca8b7b"
] | [
"py/trtorch/_compile_spec.py"
] | [
"from typing import List, Dict, Any\nimport torch\nimport trtorch._C\nfrom trtorch import _types\n\n\ndef _supported_input_size_type(input_size: Any) -> bool:\n if isinstance(input_size, torch.Size):\n return True\n elif isinstance(input_size, tuple):\n return True\n elif isinstance(input_size, list):\n return True\n else:\n raise TypeError(\n \"Input sizes for inputs are required to be a List, tuple or torch.Size or a Dict of three sizes (min, opt, max), found type: \"\n + str(type(input_size)))\n\n\ndef _parse_input_ranges(input_sizes: List) -> List:\n\n if any(not isinstance(i, dict) and not _supported_input_size_type(i) for i in input_sizes):\n raise KeyError(\"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict\")\n\n parsed_input_sizes = []\n for i in input_sizes:\n if isinstance(i, dict):\n if all(k in i for k in [\"min\", \"opt\", \"min\"]):\n in_range = trtorch._C.InputRange()\n in_range.min = i[\"min\"]\n in_range.opt = i[\"opt\"]\n in_range.max = i[\"max\"]\n parsed_input_sizes.append(in_range)\n\n elif \"opt\" in i:\n in_range = trtorch._C.InputRange()\n in_range.min = i[\"opt\"]\n in_range.opt = i[\"opt\"]\n in_range.max = i[\"opt\"]\n parsed_input_sizes.append(in_range)\n\n else:\n raise KeyError(\n \"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict\")\n\n elif isinstance(i, list):\n in_range = trtorch._C.InputRange()\n in_range.min = i\n in_range.opt = i\n in_range.max = i\n parsed_input_sizes.append(in_range)\n\n elif isinstance(i, tuple):\n in_range = trtorch._C.InputRange()\n in_range.min = list(i)\n in_range.opt = list(i)\n in_range.max = list(i)\n parsed_input_sizes.append(in_range)\n\n return parsed_input_sizes\n\n\ndef _parse_op_precision(precision: Any) -> _types.dtype:\n if isinstance(precision, torch.dtype):\n if precision == torch.int8:\n return _types.dtype.int8\n elif precision == torch.half:\n return _types.dtype.half\n elif precision == torch.float:\n return _types.dtype.float\n else:\n raise TypeError(\"Provided an unsupported dtype as operating precision (support: int8, half, float), got: \" +\n str(precision))\n\n elif isinstance(precision, _types.DataTypes):\n return precision\n\n else:\n raise TypeError(\"Op precision type needs to be specified with a torch.dtype or a trtorch.dtype, got: \" +\n str(type(precision)))\n\n\ndef _parse_device_type(device: Any) -> _types.DeviceType:\n if isinstance(device, torch.device):\n if device.type == 'cuda':\n return _types.DeviceType.gpu\n else:\n ValueError(\"Got a device type other than GPU or DLA (type: \" + str(device.type) + \")\")\n elif isinstance(device, _types.DeviceType):\n return device\n elif isinstance(device, str):\n if device == \"gpu\" or device == \"GPU\":\n return _types.DeviceType.gpu\n elif device == \"dla\" or device == \"DLA\":\n return _types.DeviceType.dla\n else:\n ValueError(\"Got a device type other than GPU or DLA (type: \" + str(device) + \")\")\n else:\n raise TypeError(\"Device specification must be of type torch.device, string or trtorch.DeviceType, but got: \" +\n str(type(device)))\n\n\ndef _parse_compile_spec(compile_spec: Dict[str, Any]) -> trtorch._C.CompileSpec:\n info = trtorch._C.CompileSpec()\n if \"input_shapes\" not in compile_spec:\n raise KeyError(\n \"Input shapes for inputs are required as a List, provided as either a static sizes or a range of three sizes (min, opt, max) as Dict\"\n )\n\n info.input_ranges = _parse_input_ranges(compile_spec[\"input_shapes\"])\n\n if \"op_precision\" in 
compile_spec:\n info.op_precision = _parse_op_precision(compile_spec[\"op_precision\"])\n\n if \"refit\" in compile_spec:\n assert isinstance(compile_spec[\"refit\"], bool)\n info.refit = compile_spec[\"refit\"]\n\n if \"debug\" in compile_spec:\n assert isinstance(compile_spec[\"debug\"], bool)\n info.debug = compile_spec[\"debug\"]\n\n if \"strict_types\" in compile_spec:\n assert isinstance(compile_spec[\"strict_types\"], bool)\n info.strict_types = compile_spec[\"strict_types\"]\n\n if \"allow_gpu_fallback\" in compile_spec:\n assert isinstance(compile_spec[\"allow_gpu_fallback\"], bool)\n info.allow_gpu_fallback = compile_spec[\"allow_gpu_fallback\"]\n\n if \"device_type\" in compile_spec:\n info.device = _parse_device_type(compile_spec[\"device_type\"])\n\n if \"capability\" in compile_spec:\n assert isinstance(compile_spec[\"capability\"], _types.EngineCapability)\n info.capability = compile_spec[\"capability\"]\n\n if \"num_min_timing_iters\" in compile_spec:\n assert type(compile_spec[\"num_min_timing_iters\"]) is int\n info.num_min_timing_iters = compile_spec[\"num_min_timing_iters\"]\n\n if \"num_avg_timing_iters\" in compile_spec:\n assert type(compile_spec[\"num_avg_timing_iters\"]) is int\n info.num_avg_timing_iters = compile_spec[\"num_avg_timing_iters\"]\n\n if \"workspace_size\" in compile_spec:\n assert type(compile_spec[\"workspace_size\"]) is int\n info.workspace_size = compile_spec[\"workspace_size\"]\n\n if \"max_batch_size\" in compile_spec:\n assert type(compile_spec[\"max_batch_size\"]) is int\n info.max_batch_size = compile_spec[\"max_batch_size\"]\n\n return info\n\n\ndef TensorRTCompileSpec(compile_spec: Dict[str, Any]):\n \"\"\"\n Utility to create a formated spec dictionary for using the PyTorch TensorRT backend\n\n Args:\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional. Entries for each method to be compiled.\n\n .. code-block:: py\n\n CompileSpec = {\n \"forward\" : trtorch.TensorRTCompileSpec({\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": False, # enable refit\n \"debug\": False, # enable debuggable engine\n \"strict_types\": False, # kernels should strictly run in operating precision\n \"allow_gpu_fallback\": True, # (DLA only) Allow layers unsupported on DLA to run on GPU\n \"device\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n })\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.classes.tensorrt.CompileSpec: List of methods and formated spec objects to be provided to ``torch._C._jit_to_tensorrt``\n \"\"\"\n\n parsed_spec = _parse_compile_spec(compile_spec)\n\n backend_spec = torch.classes.tensorrt.CompileSpec()\n\n for i in parsed_spec.input_ranges:\n ir = torch.classes.tensorrt.InputRange()\n ir.set_min(i.min)\n ir.set_opt(i.opt)\n ir.set_max(i.max)\n backend_spec.append_input_range(ir)\n\n backend_spec.set_op_precision(int(parsed_spec.op_precision))\n backend_spec.set_refit(parsed_spec.refit)\n backend_spec.set_debug(parsed_spec.debug)\n backend_spec.set_refit(parsed_spec.refit)\n backend_spec.set_strict_types(parsed_spec.strict_types)\n backend_spec.set_allow_gpu_fallback(parsed_spec.allow_gpu_fallback)\n backend_spec.set_device(int(parsed_spec.device))\n backend_spec.set_capability(int(parsed_spec.capability))\n backend_spec.set_num_min_timing_iters(parsed_spec.num_min_timing_iters)\n backend_spec.set_num_avg_timing_iters(parsed_spec.num_avg_timing_iters)\n backend_spec.set_workspace_size(parsed_spec.workspace_size)\n backend_spec.set_max_batch_size(parsed_spec.max_batch_size)\n\n return backend_spec\n"
] | [
[
"torch.classes.tensorrt.InputRange",
"torch.classes.tensorrt.CompileSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brnor/dipl | [
"db516610aecffb10825e899fb5aa9f2902093b6e"
] | [
"gym_puyopuyo/test-feedforward-smallenv.py"
] | [
"from __future__ import print_function\n\nimport os\nimport pickle\nimport time\n\nfrom gym_puyopuyo import register\nimport gym\nimport numpy as np\n\nimport neat\nimport visualize\n\npiece_shape = (3, 2)\nDRAW_NETS = False\nNUM_COLORS = 3.0 # 3 colors in the small env mode\n# TODO: could probably read color number from observation data\nfn_results = \"feedforward-small\"\n\ndef multiplyMatrices(pieces, field, norm = True):\n pieces = pieces.astype(np.float64)\n field = field.astype(np.float64)\n pieces_sum = np.zeros(piece_shape)\n field_sum = np.zeros(field[0].shape)\n for i in range(0, len(pieces)):\n pieces[i] = np.multiply(pieces[i], i + 1)\n if(norm):\n pieces[i] /= NUM_COLORS\n pieces_sum += pieces[i]\n for i in range(0, len(field)):\n field[i] = np.multiply(field[i], i + 1)\n if(norm):\n field[i] /= NUM_COLORS\n field_sum += field[i]\n \n return pieces_sum, field_sum\n\ndef run():\n with open(\"results/winner-pickle-\"+fn_results, 'rb') as f:\n c = pickle.load(f)\n \n print('loaded genome:')\n print(c)\n\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward-small')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_path)\n\n net = neat.nn.FeedForwardNetwork.create(c, config)\n register()\n env = gym.make(\"PuyoPuyoEndlessSmall-v2\")\n done = False\n ob = env.reset()\n count = 0\n total_reward = 0\n\n while True:\n env.render()\n #input()\n time.sleep(0.5)\n pieces_sum, field_sum = multiplyMatrices(ob[0], ob[1])\n next_piece = pieces_sum[0]\n \n inp_piece = np.ndarray.flatten(next_piece)\n inp_field = np.ndarray.flatten(field_sum)\n inputs = np.hstack([inp_piece, inp_field])\n \n nn_output = net.activate(inputs)\n action = np.argmax(nn_output)\n #print(nn_output)\n #nn_output = int(round(nn_output[0] * NUM_ACTIONS))\n #print(nn_output)\n #input()\n \n ob, rew, done, info = env.step(action)\n \n total_reward += rew\n count += 1\n \n if done:\n break\n\n print(\"Game played for \", count, \" turns.\")\n print(\"Total score: \", total_reward)\n\n if DRAW_NETS:\n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\".net\")\n \n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\"-enabled.net\",\n show_disabled=False)\n \n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\"-pruned.net\",\n show_disabled=False, prune_unused=True)\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"numpy.hstack",
"numpy.multiply",
"numpy.ndarray.flatten",
"numpy.argmax",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kumi123/pytorch-learning | [
"29f5b4d53f4e72b95b3fab979b1bc496ef23674c"
] | [
"chapter9_Computer-Vision/Deep-Dream/util.py"
] | [
"import PIL.Image\nfrom io import BytesIO\nfrom IPython.display import clear_output, Image, display\nimport numpy as np\n\n\ndef showarray(a, fmt='jpeg'):\n a = np.uint8(np.clip(a, 0, 255))\n f = BytesIO()\n PIL.Image.fromarray(a).save(f, fmt)\n display(Image(data=f.getvalue()))\n\n\ndef showtensor(a):\n mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])\n std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])\n inp = a[0, :, :, :]\n inp = inp.transpose(1, 2, 0)\n inp = std * inp + mean\n inp *= 255\n showarray(inp)\n clear_output(wait=True)\n"
] | [
[
"numpy.array",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TopCoder2K/mdetr | [
"aedfd63f550ae36d1477484c489a2aa438d10aa3"
] | [
"datasets/vqa_v2.py"
] | [
"# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved\n\"\"\"\nCOCO dataset which returns image_id for evaluation.\n\nMostly copy-paste from https://github.com/ashkamath/mdetr/blob/main/datasets/gqa.py\n\"\"\"\nimport json\nfrom pathlib import Path\n\nimport torch\nimport torchvision\nfrom transformers import RobertaTokenizerFast\n\nfrom .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms\n\nclass VQAv2Detection(ModulatedDetection):\n pass\n\nclass VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):\n super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)\n with open(ann_folder / \"vqa2_answer2id.json\", \"r\") as f:\n self.answer2id = json.load(f)\n with open(ann_folder / \"vqa2_answer2id_by_type.json\", \"r\") as f:\n self.answer2id_by_type = json.load(f)\n self.type2id = {\"yes/no\": 0, \"number\": 1, \"other\": 2}\n\n def __getitem__(self, idx):\n img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)\n image_id = self.ids[idx]\n coco_img = self.coco.loadImgs(image_id)[0]\n caption = coco_img[\"caption\"]\n dataset_name = coco_img[\"dataset_name\"]\n questionId = coco_img[\"questionId\"]\n target = {\"image_id\": image_id, \"annotations\": target, \"caption\": caption}\n img, target = self.prepare(img, target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n target[\"dataset_name\"] = dataset_name\n target[\"questionId\"] = questionId\n\n if coco_img[\"answer\"] not in self.answer2id:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n\n target[\"answer\"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)\n target[\"answer_type\"] = torch.as_tensor(self.type2id[coco_img[\"answer_type\"]], dtype=torch.long)\n\n # util.misc.collate_fn requires to put 'answer' before every type of answer in target\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"yes/no\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_yes/no\"] = torch.as_tensor(\n self.answer2id_by_type[\"yes/no\"][answer] if coco_img[\"answer_type\"] == \"yes/no\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"number\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_number\"] = torch.as_tensor(\n self.answer2id_by_type[\"number\"][answer] if coco_img[\"answer_type\"] == \"number\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"other\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_other\"] = torch.as_tensor(\n self.answer2id_by_type[\"other\"][answer] if coco_img[\"answer_type\"] == \"other\" else -100,\n dtype=torch.long,\n )\n\n return img, target\n\n\ndef build(image_set, args):\n # TODO: img or all?\n img_dir = Path(args.coco_img_path)\n assert img_dir.exists(), f\"provided COCO img path {img_dir} does not exist\"\n\n tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)\n\n if args.do_qa:\n # Для vqa2 это не нужно:\n # assert args.vqa2_split_type is not None\n\n if image_set == \"train\":\n datasets = []\n for imset in [\"train\", \"minival\"]:\n ann_file = Path(args.vqa2_ann_path) / 
f\"finetune_vqa2_{imset}.json\"\n\n datasets.append(\n VQAv2QuestionAnswering(\n img_dir / \"train2014\" if imset == \"train\" else img_dir / \"val2014\",\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n )\n\n return torch.utils.data.ConcatDataset(datasets)\n elif image_set == \"val\":\n # TODO: правильный ли ann_file?\n ann_file = Path(args.vqa2_ann_path) / f\"finetune_vqa2_minival.json\"\n\n return VQAv2QuestionAnswering(\n img_dir / \"val2014\",\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n elif image_set in [\"test\", \"testdev\", \"trainval\"]:\n ann_file = Path(args.vqa2_ann_path) / f\"finetune_vqa2_{image_set}.json\"\n\n return VQAv2QuestionAnswering(\n img_dir / \"test2015\",\n ann_file,\n transforms=make_coco_transforms(\"val\", cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n\n else:\n assert False, f\"Unknown image set {image_set}\"\n"
] | [
[
"torch.utils.data.ConcatDataset",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imanolperez/optimal-double-execution | [
"b380087765925043b01fe2f1066e5e2d1d850cf9"
] | [
"src/data/gbm.py"
] | [
"import numpy as np\nfrom .base import Price\n\nclass GBM(Price):\n \"\"\"Brownian motion.\"\"\"\n\n def __init__(self, T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1.,\n drift1=0., drift2=0., n=100):\n self.sigma1 = sigma1\n self.sigma2 = sigma2\n self.drift1 = drift1\n self.drift2 = drift2\n self.n = n\n self.s1 = s1\n self.s2 = s2\n self.T = T\n\n def generate(self):\n dt1 = self.sigma1 ** 2 * self.T / self.n\n dt2 = self.sigma2 ** 2 * self.T / self.n\n\n bm1 = np.r_[[0.], np.sqrt(dt1) * np.random.randn(self.n - 1).cumsum()]\n bm2 = np.r_[[0.], np.sqrt(dt2) * np.random.randn(self.n - 1).cumsum()]\n\n path = np.c_[np.linspace(0, self.T, self.n), bm1, bm2]\n path[:, 1] = np.exp((self.drift1 - self.sigma1 ** 2 / 2.) * path[:, 0] + self.sigma1 * path[:, 1])\n path[:, 2] = np.exp((self.drift2 - self.sigma2 ** 2 / 2.) * path[:, 0] + self.sigma2 * path[:, 2])\n\n path[:, 1] *= self.s1\n path[:, 2] *= self.s2\n\n\n return path\n"
] | [
[
"numpy.exp",
"numpy.random.randn",
"numpy.sqrt",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zarppy/MUREIL_2014 | [
"25ba16554ce8f614b9337e0fffce75da3fa259a4"
] | [
"generator/txmultigeneratormultisite.py"
] | [
"#\r\n#\r\n# Copyright (C) University of Melbourne 2013\r\n#\r\n#\r\n#\r\n#Permission is hereby granted, free of charge, to any person obtaining a copy\r\n#of this software and associated documentation files (the \"Software\"), to deal\r\n#in the Software without restriction, including without limitation the rights\r\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n#copies of the Software, and to permit persons to whom the Software is\r\n#furnished to do so, subject to the following conditions:\r\n#\r\n#The above copyright notice and this permission notice shall be included in all\r\n#copies or substantial portions of the Software.\r\n#\r\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n#SOFTWARE.\r\n#\r\n#\r\n\r\n\"\"\"Module subclassing TxMultiGeneratorBase that provides an implementation for\r\nmulti-site generators. \r\n\"\"\"\r\n\r\nfrom tools import mureilexception, mureilbuilder\r\nimport copy\r\nimport numpy\r\nfrom generator import txmultigeneratorbase\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass TxMultiGeneratorMultiSite(txmultigeneratorbase.TxMultiGeneratorBase):\r\n \"\"\"Module subclassing TxMultiGeneratorBase that provides an implementation of\r\n state_handle and related handling functions for multi-site generators. \r\n \r\n The 'capacity' term in state_handle is implemented as a dict with one item per site. \r\n Each site item is a list of tuples containing (site_index,build_period,decommissioning_period),\r\n describing the set of installed capacity. \r\n \"\"\"\r\n \r\n def __init__(self):\r\n \"\"\"Initialise as for the base class, and also initialise the params_to_site map.\r\n \"\"\"\r\n \r\n txmultigeneratorbase.TxMultiGeneratorBase.__init__(self)\r\n\r\n # params_to_site maps the index in the params list to the site indices.\r\n self.params_to_site = []\r\n \r\n\r\n def get_config_spec(self):\r\n \"\"\"Return a list of tuples of format (name, conversion function, default),\r\n e.g. ('capex', float, 2.0). Put None if no conversion required, or if no\r\n default value, e.g. 
('name', None, None)\r\n\r\n Configuration:\r\n time_period_yrs: float - the length of the time period in years\r\n time_scale_up_mult: float - the value to multiply non-discounted items,\r\n such as carbon emissions, by to account for a shorter dataset than the\r\n calculation period length.\r\n variable_cost_mult: as for time_scale_up_mult, but may include a factor for\r\n cost discounting.\r\n\r\n size: float, optional - relates param to new capacity\r\n\r\n carbon_price_m: float - carbon price in $M/tonne\r\n \r\n startup_data_name: string, optional - the name of the data array that contains\r\n data on startup capacities.\r\n startup_data_string: string, optional - a python format data array suitable for \r\n input into set_startup_state, all on a single line.\r\n\r\n params_to_site_data_name: string, optional - the name of the data array that\r\n contains a list of how the input params list maps to site indices.\r\n params_to_site_data_string: list of integers, optional - the site indices, \r\n listed separated by spaces, defining the site index corresponding to \r\n each optimisation param, in order.\r\n\r\n vom: float, default 0 - variable operating and maintenance cost, in $/MWh, same for all sites\r\n\r\n capital_cost: float, default 0 - cost in $M per MW for new capacity.\r\n install_cost: float, default 0 - cost in $M per site, when site has an\r\n installation from this generator for the first time.\r\n\r\n decommissioning_cost: float, optional (default 0) - cost in $M per MW for \r\n decommissioning.\r\n lifetime_yrs: float, default 20 - the time in years that new capacity lasts\r\n \"\"\"\r\n return txmultigeneratorbase.TxMultiGeneratorBase.get_config_spec(self) + [\r\n ('variable_cost_mult', float, 1.0),\r\n ('time_scale_up_mult', float, 1.0),\r\n ('carbon_price_m', float, 0.0),\r\n ('startup_data_name', None, ''),\r\n ('startup_data_string', mureilbuilder.python_eval, 'None'),\r\n ('params_to_site_data_name', None, ''),\r\n ('params_to_site_data_string', mureilbuilder.make_int_list, ''),\r\n ('decommissioning_cost', float, 0),\r\n ('vom', float, 0),\r\n ('capital_cost', float, 0),\r\n ('install_cost', float, 0),\r\n ('time_period_yrs', float, None),\r\n ('lifetime_yrs', float, 20),\r\n ('size', float, 1.0),\r\n ('start_min_param', int, 1e20),\r\n ('start_max_param', int, 1e20),\r\n ('timestep_hrs', float, None)\r\n ]\r\n\r\n\r\n def complete_configuration_pre_expand(self):\r\n \"\"\"Complete the configuration prior to expanding the\r\n period configs. 
\r\n \r\n This implementation checks that the lifetime_yrs is a multiple\r\n of time_period_yrs, and sets the startup state and params_to_site from the\r\n configuration strings.\r\n \"\"\"\r\n \r\n time_period_yrs = self.config['time_period_yrs']\r\n lifetime_yrs = self.config['lifetime_yrs']\r\n error = None\r\n if isinstance(lifetime_yrs, dict):\r\n for value in lifetime_yrs.itervalues():\r\n div = value / time_period_yrs\r\n if not (float(int(div)) == div):\r\n error = value\r\n else:\r\n div = lifetime_yrs / time_period_yrs\r\n if not (float(int(div)) == div):\r\n error = lifetime_yrs\r\n \r\n if error is not None:\r\n msg = ('In section ' + self.config['section'] + ', lifetime_yrs = ' +\r\n str(error) + ' which is required to be a multiple of time_period_yrs of ' +\r\n str(time_period_yrs))\r\n raise mureilexception.ConfigException(msg, {})\r\n\r\n # Set the startup state and the params to site from the configuration strings.\r\n if self.config['startup_data_string'] is not None:\r\n self.set_startup_state(self.config['startup_data_string'])\r\n \r\n if len(self.config['params_to_site_data_string']) > 0:\r\n self.params_to_site = self.config['params_to_site_data_string']\r\n \r\n\r\n def get_data_types(self):\r\n \"\"\"Return a list of keys for each type of\r\n data required, for example ts_wind, ts_demand.\r\n \r\n Outputs:\r\n data_type: list of strings - each a key name \r\n describing the data required for this generator.\r\n \"\"\"\r\n \r\n data_types = []\r\n \r\n if len(self.config['startup_data_name']) > 0:\r\n data_types.append(self.config['startup_data_name'])\r\n\r\n if len(self.config['params_to_site_data_name']) > 0:\r\n data_types.append(self.config['params_to_site_data_name'])\r\n \r\n return data_types\r\n \r\n \r\n def set_data(self, data):\r\n \"\"\"Set the data dict with the data series required\r\n for the generator.\r\n\r\n This implementation looks for the data types:\r\n self.config['startup_data_name']: Interpets this into\r\n the startup state, using the set_startup_state function.\r\n\r\n self.config['params_to_site_data_name']: Sets self.params_to_site\r\n to this.\r\n \r\n Inputs:\r\n data: dict - with keys matching those requested by\r\n get_data_types. \r\n \"\"\"\r\n startup_data_name = self.config['startup_data_name']\r\n if (len(startup_data_name) > 0) and (startup_data_name in data):\r\n self.set_startup_state(data[startup_data_name])\r\n\r\n params_to_site_name = self.config['params_to_site_data_name']\r\n if (len(params_to_site_name) > 0) and (params_to_site_name in data):\r\n self.params_to_site = data[params_to_site_name]\r\n\r\n \r\n def set_startup_state(self, startup_data):\r\n \"\"\"Set the startup state from the data provided. Sets \r\n self.startup_state from this.\r\n \r\n Inputs:\r\n startup_data: An array of generators * 4:\r\n [[site_index, capacity, build_date, decommissioning_period],\r\n ...]\r\n \"\"\"\r\n\r\n # Check if the startup data is empty. 
If so, just return.\r\n if len(startup_data) == 0:\r\n return\r\n\r\n # Find out which build periods are covered.\r\n startup_data = numpy.array(startup_data)\r\n if not (len(startup_data.shape) == 2):\r\n raise mureilexception.ConfigException('startup data array for module ' +\r\n self.config['section'] + ' is not rectangular.', {})\r\n \r\n if not (startup_data.shape[1] == 4):\r\n raise mureilexception.ConfigException('startup data array for module ' +\r\n self.config['section'] + ' shape ' + str(startup_data.shape) + \r\n ' but (n, 4) is required.', {})\r\n\r\n self.extra_periods = map(int, \r\n (list(set(startup_data[:,2].tolist() + self.extra_periods))))\r\n self.extra_periods.sort()\r\n\r\n # And insert each existing generator into the starting state.\r\n cap_list = self.startup_state['capacity']\r\n hist_list = self.startup_state['history']\r\n\r\n for i in range(startup_data.shape[0]):\r\n site_index = int(startup_data[i, 0])\r\n new_cap = startup_data[i, 1]\r\n period = int(startup_data[i, 2])\r\n decomm_date = int(startup_data[i, 3])\r\n\r\n new_entry = (new_cap, period, decomm_date)\r\n if decomm_date < self.run_periods[0]:\r\n logger.warning('Model in section ' + self.config['section'] +\r\n ' adds startup capacity decommissioned at end of ' + decomm_date +\r\n ' but the first run period is ' + self.run_periods[0] + \r\n ' so it has been removed from the startup state.')\r\n if site_index not in hist_list:\r\n hist_list[site_index] = []\r\n hist_list[site_index].append(new_entry)\r\n else:\r\n new_entry = (new_cap, period, decomm_date)\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n cap_list[site_index].append(new_entry)\r\n\r\n\r\n def get_param_count(self):\r\n \"\"\"Return the number of parameters that this generator,\r\n as configured, requires to be optimised, per time period.\r\n \r\n Outputs:\r\n param_count: non-negative integer - the number of\r\n parameters required per time period.\r\n \"\"\"\r\n\r\n return len(self.params_to_site)\r\n \r\n \r\n def get_param_starts(self):\r\n \"\"\"Return two nested lists - one for min, one max, for starting values for the\r\n params. 
Must be either [[]] or [len(run_periods),param_count].\r\n \r\n Outputs:\r\n min_start_list: list of param integers, or [[]]\r\n max_start_list: list of param integers, or [[]]\r\n \"\"\"\r\n \r\n param_count = self.get_param_count()\r\n period_count = len(self.run_periods)\r\n \r\n if param_count > 0:\r\n if (self.config['start_min_param'] == 1e20):\r\n start_mins = [[]]\r\n else:\r\n start_mins = (numpy.ones((period_count, param_count)) * self.config['start_min_param']).tolist() \r\n\r\n if (self.config['start_max_param'] == 1e20):\r\n start_maxs = [[]]\r\n else:\r\n start_maxs = (numpy.ones((period_count, param_count)) * self.config['start_max_param']).tolist() \r\n else:\r\n start_mins = [[]]\r\n start_maxs = [[]]\r\n \r\n return start_mins, start_maxs\r\n \r\n \r\n def update_state_new_period_list(self, state_handle, period, new_capacity):\r\n \"\"\"Implements update_state_new_period_list as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \"\"\"\r\n\r\n state_handle['curr_period'] = period\r\n\r\n cap_list = state_handle['capacity'] \r\n\r\n for site_index, new_cap, decomm_date in new_capacity:\r\n site_index = int(site_index)\r\n \r\n new_entry = (new_cap, period, int(decomm_date))\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n\r\n cap_list[site_index].append(new_entry)\r\n\r\n return None\r\n\r\n\r\n def update_state_new_period_params(self, state_handle, period, new_params):\r\n \"\"\"Implements update_state_new_period_params as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \r\n Filters any negative new_params values to 0.\r\n \"\"\"\r\n \r\n state_handle['curr_period'] = period\r\n curr_conf = self.period_configs[period]\r\n decomm_date = int(curr_conf['lifetime_yrs'] - curr_conf['time_period_yrs'] + period)\r\n \r\n cap_list = state_handle['capacity'] \r\n\r\n new_cap = numpy.array(new_params).clip(0) * curr_conf['size']\r\n\r\n for i in (numpy.nonzero(new_cap)[0]):\r\n site_index = self.params_to_site[i]\r\n new_entry = (new_cap[i], period, decomm_date)\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n\r\n cap_list[site_index].append(new_entry)\r\n\r\n return None\r\n \r\n \r\n def calculate_update_decommission(self, state_handle):\r\n \"\"\"Implements update_decommission as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \"\"\"\r\n period = state_handle['curr_period']\r\n cap_list = state_handle['capacity']\r\n hist_list = state_handle['history']\r\n \r\n total_cost = 0.0\r\n sites = []\r\n cost = []\r\n decommissioned = []\r\n fully_decommissioned = []\r\n \r\n decomm_cost = self.period_configs[period]['decommissioning_cost']\r\n\r\n for site, site_caps in cap_list.iteritems():\r\n \r\n decomm = [tup for tup in site_caps if (tup[2] == period)]\r\n\r\n if len(decomm) > 0:\r\n sites.append(site)\r\n decom_cap = sum([tup[0] for tup in decomm])\r\n decommissioned.append(decom_cap)\r\n this_cost = decom_cap * decomm_cost\r\n cost.append(this_cost)\r\n total_cost += this_cost\r\n\r\n # add the decommissioned capacity to the 'history' list\r\n if not site in hist_list:\r\n hist_list[site] = []\r\n hist_list[site] += decomm\r\n \r\n # and rebuild the list of what's left\r\n # note that the expression in here is the complement of that to compute\r\n # decomm above.\r\n new_list = [tup for tup in site_caps if not (tup[2] == period)]\r\n \r\n # if all capacity is gone 
from this site\r\n if len(new_list) == 0:\r\n fully_decommissioned.append(site)\r\n else:\r\n cap_list[site] = new_list\r\n \r\n for site in fully_decommissioned:\r\n del cap_list[site]\r\n \r\n return total_cost, zip(sites, decommissioned, cost)\r\n \r\n \r\n def calculate_new_capacity_cost(self, state_handle):\r\n \"\"\"Implements calculate_new_capacity_cost as defined in TxMultiGeneratorBase,\r\n for the state_handle format for this multi-site implementation. Calculates\r\n the cost as a simple multiple of the new capacity size.\r\n \"\"\"\r\n \r\n period = state_handle['curr_period']\r\n cap_list = state_handle['capacity']\r\n hist_list = state_handle['history']\r\n \r\n total_cost = 0.0\r\n sites = []\r\n cost = []\r\n new_capacity = []\r\n \r\n for site, value in cap_list.iteritems():\r\n try:\r\n hist = hist_list[site]\r\n except KeyError:\r\n hist = []\r\n\r\n this_cost, new_cap = self.calculate_capital_cost_site(\r\n (value, hist), period, site)\r\n\r\n if new_cap > 0:\r\n sites.append(site)\r\n new_capacity.append(new_cap)\r\n cost.append(this_cost)\r\n total_cost += this_cost\r\n \r\n return total_cost, zip(sites, new_capacity, cost)\r\n\r\n \r\n def calculate_capital_cost_site(self, site_data, period, site):\r\n \"\"\"\"Calculate the incremental capital cost incurred in this \r\n period by the new capacity, for this site.\r\n \r\n This is a useful function for generators to override to implement\r\n cost functions that depend on the existing installed capacity. \r\n\r\n This function charges a per-MW cost plus an install figure if all\r\n the current capacity is new, and the site has not been used before\r\n for this type of generator.\r\n \r\n Inputs: \r\n site_data: a pair of lists - (current_capacity, history), each \r\n a list of tuples of (capacity, build, decom) from the\r\n state_handle.\r\n period: the current period, an integer\r\n site: the site index\r\n \r\n Outputs:\r\n cost: the cost in $M of this new capacity\r\n new_capacity: the total new capacity installed at this site\r\n \"\"\"\r\n \r\n new_cap_list = [tup[0] for tup in site_data[0] if (tup[1] == period)] \r\n new_cap = sum(new_cap_list)\r\n\r\n capacity_cost = self.period_configs[period]['capital_cost']\r\n this_cost = new_cap * capacity_cost\r\n\r\n install_cost = self.period_configs[period]['install_cost']\r\n if install_cost > 0:\r\n # check if all the current capacity is new\r\n if len(new_cap_list) == len(site_data[0]):\r\n # and check if the site has been used before, ever\r\n if len(site_data[1]) == 0:\r\n # the site is new, so charge the 'install' as well\r\n this_cost += install_cost\r\n \r\n return this_cost, new_cap \r\n \r\n \r\n def get_capacity(self, state_handle):\r\n \"\"\"Implement the get_capacity function as defined in TxMultiGeneratorBase, for this\r\n multi-site implementation.\r\n \"\"\"\r\n\r\n index_list = self.get_site_indices(state_handle)\r\n cap_list = state_handle['capacity']\r\n \r\n capacity = []\r\n\r\n for site in index_list:\r\n capacity.append(sum([tup[0] for tup in cap_list[site]]))\r\n \r\n return capacity\r\n\r\n \r\n def get_site_indices(self, state_handle):\r\n \"\"\"Implement the get_site_indices function as defined in TxMultiGeneratorBase, for this\r\n multi-site implementation.\r\n \"\"\"\r\n \r\n site_indices = state_handle['capacity'].keys()\r\n site_indices.sort()\r\n \r\n return site_indices\r\n\r\n\r\n def calculate_time_period_simple(self, state_handle, period, new_params, \r\n supply_request, full_results=False):\r\n \"\"\"Implement 
calculate_time_period_simple as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n \r\n curr_config = self.period_configs[period]\r\n\r\n # Update the state and get the calculations for each site\r\n self.update_state_new_period_params(state_handle, period, new_params)\r\n site_indices = self.get_site_indices(state_handle)\r\n capital_cost, new_capacity = self.calculate_new_capacity_cost(state_handle)\r\n supply_list, variable_cost_list, carbon_emissions_list, other_list = ( \r\n self.calculate_outputs_and_costs(state_handle, supply_request))\r\n\r\n if full_results:\r\n capacity = self.get_capacity(state_handle)\r\n\r\n # Compute the total supply\r\n supply = numpy.sum(supply_list, axis=0)\r\n \r\n # Compute the total variable costs, including carbon cost, for the timeseries, scaled up\r\n cost = ((numpy.sum(variable_cost_list, axis=0) + \r\n (numpy.sum(carbon_emissions_list, axis=0) * curr_config['carbon_price_m'])) * (\r\n curr_config['variable_cost_mult']))\r\n \r\n # Do the decommissioning\r\n decomm_cost, decommissioned = self.calculate_update_decommission(state_handle)\r\n\r\n # Add the capital and decommissioning costs\r\n cost += decomm_cost\r\n cost += capital_cost\r\n\r\n if not full_results:\r\n return site_indices, cost, supply\r\n\r\n if full_results:\r\n results = {}\r\n results['site_indices'] = site_indices\r\n results['cost'] = cost\r\n results['aggregate_supply'] = supply\r\n results['capacity'] = capacity\r\n results['decommissioned'] = decommissioned\r\n results['new_capacity'] = new_capacity\r\n results['supply'] = supply_list\r\n results['variable_cost_period'] = variable_cost_list * curr_config['variable_cost_mult']\r\n results['carbon_emissions_period'] = (carbon_emissions_list * \r\n curr_config['time_scale_up_mult'])\r\n results['total_supply_period'] = (curr_config['time_scale_up_mult'] * numpy.sum(supply) *\r\n curr_config['timestep_hrs'])\r\n results['other'] = other_list\r\n results['desc_string'] = self.get_simple_desc_string(results, state_handle)\r\n\r\n return site_indices, cost, supply, results\r\n \r\n\r\n def calculate_time_period_full(self, state_handle, period, new_params, supply_request, \r\n max_supply=[], price=[], make_string=False, do_decommissioning=True):\r\n \"\"\"Implement calculate_time_period_full as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n \r\n results = {}\r\n self.update_state_new_period_params(state_handle, period, new_params)\r\n results['site_indices'] = self.get_site_indices(state_handle)\r\n results['capacity'] = self.get_capacity(state_handle)\r\n dummy, results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)\r\n results['supply'], results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))\r\n if do_decommissioning:\r\n dummy, results['decommissioned'] = (\r\n self.calculate_update_decommissioning(state_handle))\r\n else:\r\n results['decommissioned'] = []\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n \r\n return results\r\n\r\n\r\n def recalculate_time_period_full(self, state_handle, results, supply_request, max_supply=[], price=[], make_string=False):\r\n \"\"\"Implement recalculate_time_period_full as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n\r\n results['supply'], results['variable_cost_ts'], 
results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n return results\r\n else:\r\n return results \r\n\r\n\r\n def calculate_costs_from_schedule_and_finalise(self, state_handle, schedule, make_string=False): \r\n \"\"\"Calculate the costs, given the schedule from the dispatcher.\r\n Finalise the decommissioning for that period.\r\n This assumes that update_state_new_period_params has been called previously,\r\n and the offer quantities have been determined for the active sites.\r\n \r\n Inputs:\r\n state_handle: \r\n as for calculate_time_period_full in txmultigeneratorbase.py\r\n schedule: a set of timeseries for each active site, as previously\r\n listed in the call to get_offers_* \r\n \r\n Outputs:\r\n as for calculate_time_period_full in txmultigeneratorbase.py\r\n \"\"\"\r\n results = {}\r\n site_indices = self.get_site_indices(state_handle)\r\n results['site_indices'] = site_indices\r\n results['capacity'] = self.get_capacity(state_handle)\r\n results['new_capacity_total_cost'], results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)\r\n results['supply'] = schedule\r\n results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_variable_costs(state_handle, site_indices, schedule))\r\n results['decomm_total_cost'], results['decommissioned'] = (\r\n self.calculate_update_decommission(state_handle))\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n \r\n return results\r\n "
] | [
[
"numpy.array",
"numpy.sum",
"numpy.nonzero",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CartoDB/cartoframes | [
"7c7392be5d15d0472ff428546c4791ed1a3842b0"
] | [
"cartoframes/data/observatory/catalog/variable.py"
] | [
"import pandas as pd\n\nfrom .entity import CatalogEntity\nfrom .repository.dataset_repo import get_dataset_repo\nfrom .repository.variable_repo import get_variable_repo\nfrom .repository.constants import VARIABLE_FILTER\nfrom .summary import variable_describe, head, tail, counts, quantiles, top_values, histogram\n\n\n_DESCRIPTION_LENGTH_LIMIT = 50\n\n\nclass Variable(CatalogEntity):\n \"\"\"This class represents a :py:class:`Variable <cartoframes.data.observatory.Variable>`\n of datasets in the :py:class:`Catalog <cartoframes.data.observatory.Catalog>`.\n\n Variables contain column names, description, data type, aggregation method, and some other metadata that is\n useful to understand the underlying data inside a :obj:`Dataset`\n\n Examples:\n List the variables of a :py:class:`Dataset <cartoframes.data.observatory.Dataset>`\n in combination with nested filters (categories, countries, etc.)\n\n >>> dataset = Dataset.get('mbi_retail_turn_705247a')\n >>> dataset.variables\n [<Variable.get('RT_CI_95050c10')> #'Retail Turnover: index (country eq.100)', ...]\n\n \"\"\"\n _entity_repo = get_variable_repo()\n\n @property\n def datasets(self):\n \"\"\"Get the list of datasets related to this variable.\n\n Returns:\n :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>` List of Dataset instances.\n\n Raises:\n CatalogError: if there's a problem when connecting to the catalog or no datasets are found.\n\n \"\"\"\n return get_dataset_repo().get_all({VARIABLE_FILTER: self.id})\n\n @property\n def name(self):\n \"\"\"Name of this variable.\"\"\"\n return self.data['name']\n\n @property\n def description(self):\n \"\"\"Description of this variable.\"\"\"\n return self.data['description']\n\n @property\n def column_name(self):\n \"\"\"Column name of the actual table related to the variable in the :obj:`Dataset`.\"\"\"\n return self.data['column_name']\n\n @property\n def db_type(self):\n \"\"\"Type in the database.\n\n Returns:\n str\n\n Examples: INTEGER, STRING, FLOAT, GEOGRAPHY, JSON, BOOL, etc.\n\n \"\"\"\n return self.data['db_type']\n\n @property\n def dataset(self):\n \"\"\"ID of the :obj:`Dataset` to which this variable belongs.\"\"\"\n return self.data['dataset_id']\n\n @property\n def agg_method(self):\n \"\"\"Text representing a description of the aggregation method used to compute the values in this `Variable`\"\"\"\n return self.data['agg_method']\n\n @property\n def variable_group(self):\n \"\"\"If any, ID of the variable group to which this variable belongs.\"\"\"\n return self.data['variable_group_id']\n\n @property\n def summary(self):\n \"\"\"JSON object with extra metadata that summarizes different properties of this variable.\"\"\"\n return self.data['summary_json']\n\n @property\n def project_name(self):\n project, _, _, _ = self.id.split('.')\n return project\n\n @property\n def schema_name(self):\n _, schema, _, _ = self.id.split('.')\n return schema\n\n @property\n def dataset_name(self):\n _, _, dataset, _ = self.id.split('.')\n return dataset\n\n def describe(self, autoformat=True):\n \"\"\"Shows a summary of the actual stats of the variable (column) of the dataset.\n Some of the stats provided per variable are: avg, max, min, sum, range,\n stdev, q1, q3, median and interquartile_range\n\n Args:\n autoformat (boolean): set automatic format for values. Default is True.\n\n Example:\n\n .. 
code::\n\n # avg average value\n # max max value\n # min min value\n # sum sum of all values\n # range\n # stdev standard deviation\n # q1 first quantile\n # q3 third quantile\n # median median value\n # interquartile_range\n\n \"\"\"\n FLOAT_FORMAT = 'display.float_format'\n\n if autoformat:\n pd.set_option(FLOAT_FORMAT, lambda x: '%.3f' % x)\n\n data = self.data['summary_json']\n return variable_describe(data)\n\n def head(self):\n \"\"\"Returns a sample of the 10 first values of the variable data.\n\n For the cases of datasets with a content fewer than 10 rows\n (i.e. zip codes of small countries), this method won't return anything\n\n \"\"\"\n data = self.data['summary_json']\n return head(self.__class__, data)\n\n def tail(self):\n \"\"\"Returns a sample of the 10 last values of the variable data.\n\n For the cases of datasets with a content fewer than 10 rows\n (i.e. zip codes of small countries), this method won't return anything\n\n \"\"\"\n data = self.data['summary_json']\n return tail(self.__class__, data)\n\n def counts(self):\n \"\"\"Returns a summary of different counts over the actual variable values.\n\n Example:\n\n .. code::\n\n # all total number of values\n # null total number of null values\n # zero number of zero-valued entries\n # extreme number of values 3stdev outside the interquartile range\n # distinct number of distinct (unique) entries\n # outliers number of outliers (outside 1.5stdev the interquartile range\n # zero_percent percent of values that are zero\n # distinct_percent percent of values that are distinct\n\n \"\"\"\n data = self.data['summary_json']\n return counts(data)\n\n def quantiles(self):\n \"\"\"Returns the quantiles of the variable data.\"\"\"\n data = self.data['summary_json']\n return quantiles(data)\n\n def top_values(self):\n \"\"\"Returns information about the top values of the variable data.\"\"\"\n data = self.data['summary_json']\n return top_values(data)\n\n def histogram(self):\n \"\"\"Plots an histogram with the variable data.\"\"\"\n data = self.data['summary_json']\n return histogram(data)\n\n def __repr__(self):\n descr = self.description\n\n if descr and len(descr) > _DESCRIPTION_LENGTH_LIMIT:\n descr = descr[0:_DESCRIPTION_LENGTH_LIMIT] + '...'\n\n return \"<{classname}.get('{entity_id}')> #'{descr}'\" \\\n .format(classname=self.__class__.__name__, entity_id=self._get_print_id(), descr=descr)\n"
] | [
[
"pandas.set_option"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PepSalehi/algorithms | [
"1c20f57185e6324aa840ccff98e69764b4213131",
"1c20f57185e6324aa840ccff98e69764b4213131",
"1c20f57185e6324aa840ccff98e69764b4213131",
"1c20f57185e6324aa840ccff98e69764b4213131",
"1c20f57185e6324aa840ccff98e69764b4213131"
] | [
"ML/tf-cifar-10/cifar10_input.py",
"ML/50-mlps/29-keras-cnn-big-filters/main.py",
"language-word-detection/lstm_recognizer.py",
"ML/rl/qtest.py",
"ML/50-mlps/23-keras-cnn-skip-connection/main.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\nIMAGE_SIZE = 24\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 10\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\n\ndef read_cifar10(filename_queue):\n \"\"\"Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n label_bytes = 1 # 2 for CIFAR-100\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. 
No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(\n tf.strided_slice(record_bytes, [0], [label_bytes], [1]), tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(\n tf.strided_slice(record_bytes, [label_bytes],\n [label_bytes + image_bytes], [1]),\n [result.depth, result.height, result.width])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef distorted_inputs(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network. 
Note the many random\n # distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order their operation.\n distorted_image = tf.image.random_brightness(distorted_image,\n max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image,\n lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(distorted_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef inputs(eval_data, data_dir, batch_size):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n width, height)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(resized_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n if eval_data:\n read_input.label.set_shape((1,))\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=False)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party modules\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\nfrom keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Input, Activation, Add\nimport numpy as np\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Model\nfrom keras.regularizers import l1\n\n# internal modules\nimport hasy_tools\n\n# Load the data\ndata = hasy_tools.load_data()\ndatasets = ['train', 'test']\n\n# One-Hot encoding\nfor dataset in datasets:\n key = 'y_' + dataset\n data[key] = np.eye(hasy_tools.n_classes)[data[key].squeeze()]\n\n# Preprocessing\nfor dataset in datasets:\n key = 'x_' + dataset\n data[key] = hasy_tools.preprocess(data[key])\n\n# Generate Validation Data\nsplit = train_test_split(data['x_train'], data['y_train'],\n test_size=0.20,\n random_state=0,\n stratify=data['y_train'])\ndata['x_train'], data['x_val'], data['y_train'], data['y_val'] = split\ndatasets.append('val')\n\n# Define the model\ninput_ = Input(shape=(hasy_tools.WIDTH, hasy_tools.HEIGHT, 1))\nx = input_\nx = Conv2D(16, (17, 17), padding='same',\n kernel_initializer='he_uniform')(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 16x16\nx = Conv2D(32, (9, 9), padding='same',\n kernel_initializer='he_uniform')(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 8x8\nx = Conv2D(64, (5, 5), padding='same',\n kernel_initializer='he_uniform')(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 4x4\nx = Conv2D(128, (3, 3), padding='same',\n kernel_initializer='he_uniform')(x)\nx = Flatten()(x) # Adjust for FCN\nx = Dense(512, kernel_regularizer=l1(0.01))(x)\nx = Dropout(0.50)(x)\nx = Activation('relu')(x)\nx = Dense(hasy_tools.n_classes)(x)\nx = Activation('softmax')(x)\nmodel = Model(inputs=input_, outputs=x)\n\n# Compile model\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n# Fit the model\ncsv_logger = CSVLogger('log.csv', append=True, separator=';')\ncheckpointer = ModelCheckpoint(filepath='checkpoint.h5',\n verbose=1,\n period=10,\n save_best_only=True)\nmodel.fit(data['x_train'], data['y_train'],\n validation_data=(data['x_val'], data['y_val']),\n epochs=500,\n batch_size=128,\n callbacks=[csv_logger, checkpointer])\n\n# Serialize model\nmodel.save('model.h5')\n\n# evaluate the model\nscores = model.evaluate(data['x_test'], data['y_test'])\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n",
"#!/usr/bin/env python\n\n\"\"\"Try to classify if a word is part of the English language or not.\"\"\"\n\n# https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py\n\nimport logging\nimport sys\nimport os\n\nimport numpy as np\nfrom numpy.random import random_sample\nfrom itertools import permutations\n\n# ML stuff\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Dropout\nfrom sklearn.utils import shuffle\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.DEBUG,\n stream=sys.stdout)\n\n\ndef weighted_values(values, probabilities, size):\n \"\"\"\n Get values with some probability.\n\n Parameters\n ----------\n values : list\n probabilities : list\n size : int\n\n Returns\n -------\n list of values of length size\n Each element i is with probability p (of probabilites) value v of\n values\n \"\"\"\n bins = np.add.accumulate(probabilities)\n return values[np.digitize(random_sample(size), bins)]\n\n\ndef get_wordstats(words, verbose=True):\n \"\"\"Get statistics about words.\"\"\"\n wordstats = {}\n total_letters = 0\n for word in words:\n for letter in word:\n if letter in wordstats:\n wordstats[letter] += 1\n else:\n wordstats[letter] = 1\n total_letters += 1\n values, probabilities = zip(*wordstats.items())\n values = np.array(values)\n probabilities = [float(count) / total_letters for count in probabilities]\n probabilities = np.array(probabilities)\n if verbose:\n probs = sorted(zip(list(values), list(probabilities)),\n reverse=True,\n key=lambda n: n[1])\n for letter, p in probs[:10]:\n print(\"{letter}: {p}\".format(letter=letter, p=p))\n\n return values, probabilities\n\n\ndef generate_word(values, probabilities, length):\n \"\"\"Generate a word of length.\"\"\"\n return \"\".join(weighted_values(values, probabilities, length))\n\n\ndef get_data():\n \"\"\"Get the data to train a recurrent neural network.\"\"\"\n data = []\n with open('words2.txt') as f:\n words = f.read().splitlines()\n for word in words:\n data.append((word, True))\n wordlengths = [float(len(word)) for word in words]\n max_length = int(max(wordlengths))\n wordlength_dist = {}\n for word in words:\n if len(word) in wordlength_dist:\n wordlength_dist[len(word)] += 1\n else:\n wordlength_dist[len(word)] = 1\n\n # Get data about words\n print(wordlength_dist)\n values, probabilities = zip(*wordlength_dist.items())\n values = list(values)\n probabilities = [float(count)/len(words) for count in probabilities]\n print(\"max word length: %i\" % max_length)\n print(\"Mean word length: %0.2f\" % np.mean(wordlengths, dtype=float))\n print(\"median word length: %i\" % np.median(wordlengths))\n print(\"std word length: %0.2f\" % np.std(wordlengths, dtype=float))\n\n # Generate non-words\n missing = len(words)\n rounds = 0\n while missing > 0:\n rounds += 1\n print(\"Round {round} (missing: {missing})\".format(round=rounds,\n missing=missing))\n wordlength_sampling = weighted_values(np.array(values),\n np.array(probabilities),\n missing)\n missing = 0\n letters, letter_probabilities = get_wordstats(words)\n word_set = set(words)\n\n for wordlength in wordlength_sampling:\n pseudo_word = generate_word(letters,\n letter_probabilities,\n wordlength)\n if pseudo_word in word_set:\n for permutation in permutations(pseudo_word):\n if permutation not in word_set:\n word_set.add(pseudo_word)\n data.append((word, False))\n continue\n else:\n missing += 
1\n else:\n word_set.add(pseudo_word)\n data.append((word, False))\n\n print(data[:10])\n print(\"Letters: %s\" % str(letters))\n\n # Transform the data to the required format\n input_enc = LabelEncoder()\n input_enc.fit(letters)\n output_enc = LabelEncoder() # OneHotEncoder(sparse=False)\n output_enc.fit([False, True])\n print(input_enc.transform(list(\"egg\")))\n\n print('Vectorization...')\n word_data = shuffle(data)\n x = np.zeros((len(word_data), max_length, len(letters)),\n dtype=np.bool)\n y = np.zeros((len(word_data), 2), dtype=np.bool)\n for i, dataitem in enumerate(word_data):\n word, label = dataitem\n for t, char in enumerate(word):\n x[i, t, input_enc.transform(char)] = 1\n y[i, output_enc.transform(label)] = 1\n return {'X': x,\n 'y': y,\n 'letters': letters,\n 'input_enc': input_enc,\n 'max_length': max_length}\n\n\ndef input_transform(word, max_length, letters, input_enc):\n \"\"\"Transform a word to the required format.\"\"\"\n x = np.zeros((1, max_length, len(letters)), dtype=np.bool)\n for t, char in enumerate(word):\n x[0, t, input_enc.transform(char)] = 1\n return x\n\n\ndef get_model(letters, max_length):\n \"\"\"Create a LSTM model.\"\"\"\n logging.info(\"Create model\")\n input_dim = len(letters)\n logging.info(\"input_dim=%i\", input_dim)\n model = Sequential()\n model.add(LSTM(8,\n return_sequences=True,\n input_shape=(max_length, len(letters))))\n model.add(Dropout(0.2))\n model.add(LSTM(8, return_sequences=False))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\ndata = get_data()\nprint(data['y'])\n\nif not os.path.isfile('model_a.yml'):\n logging.info(\"Training / Test split\")\n X_train, X_test, y_train, y_test = train_test_split(data['X'],\n data['y'],\n test_size=0.33,\n random_state=42)\n\n # Create the model and fit it to the data\n model = get_model(data['letters'], data['max_length'])\n\n logging.info(\"Fit model to data\")\n model.fit(X_train,\n y_train,\n nb_epoch=10, batch_size=32,\n verbose=2)\n yaml_string = model.to_yaml()\n with open('model_a.yml', 'w') as f:\n f.write(yaml_string)\n model.save_weights('model_a_weights.h5')\n loss_and_metrics = model.evaluate(X_test, y_test, batch_size=32)\n print(loss_and_metrics)\nelse:\n logging.info(\"Load stored model.\")\n from keras.models import model_from_yaml\n with open('model_a.yml') as f:\n yaml_string = f.read()\n model = model_from_yaml(yaml_string)\n model.load_weights('model_a_weights.h5')\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nfor word in [\"a\", \"is\", \"eggplant\", \"water\", \"ice\", \"rztglinxx\"]:\n # print(model.predict_classes(input_transform(\"eggplant\",\n # data['max_length'],\n # data['letters'],\n # data['input_enc']),\n # batch_size=1))\n print(word)\n in_ = input_transform(word,\n data['max_length'],\n data['letters'],\n data['input_enc'])\n print(model.predict_proba(in_,\n batch_size=1))\n",
"# -*- coding: utf-8 -*-\n# ref: https://gym.openai.com/evaluations/eval_1lfzNKEHS9GA7nNWE73w\n\nimport numpy as np\nimport gym\nfrom gym import wrappers\n\n# Q learning params\nALPHA = 0.1 # learning rate\nGAMMA = 0.99 # reward discount\nLEARNING_COUNT = 100000\nTEST_COUNT = 10000\n\nTURN_LIMIT = 100\nIS_MONITOR = True\n\nclass Agent:\n def __init__(self, env):\n self.env = env\n self.episode_reward = 0.0\n self.q_val = np.zeros(16 * 4).reshape(16, 4).astype(np.float32)\n\n def learn(self):\n # one episode learning\n state = self.env.reset()\n #self.env.render()\n \n for t in range(TURN_LIMIT):\n act = self.env.action_space.sample() # random\n next_state, reward, done, info = self.env.step(act)\n q_next_max = np.max(self.q_val[next_state])\n # Q <- Q + a(Q' - Q)\n # <=> Q <- (1-a)Q + a(Q')\n self.q_val[state][act] = (1 - ALPHA) * self.q_val[state][act]\\\n + ALPHA * (reward + GAMMA * q_next_max)\n \n #self.env.render()\n if done:\n return reward\n else:\n state = next_state\n\n def test(self):\n state = self.env.reset()\n for t in range(TURN_LIMIT):\n act = np.argmax(self.q_val[state])\n next_state, reward, done, info = self.env.step(act)\n if done:\n return reward\n else:\n state = next_state\n return 0.0 # over limit\n\ndef main():\n env = gym.make(\"FrozenLake-v0\")\n if IS_MONITOR:\n env = wrappers.Monitor(env, './FrozenLake-v0')\n agent = Agent(env)\n\n print(\"###### LEARNING #####\")\n reward_total = 0.0\n for i in range(LEARNING_COUNT):\n reward_total += agent.learn()\n print(\"episodes : {}\".format(LEARNING_COUNT))\n print(\"total reward : {}\".format(reward_total))\n print(\"average reward: {:.2f}\".format(reward_total / LEARNING_COUNT))\n print(\"Q Value :{}\".format(agent.q_val))\n\n print(\"###### TEST #####\")\n reward_total = 0.0\n for i in range(TEST_COUNT):\n reward_total += agent.test()\n print(\"episodes : {}\".format(TEST_COUNT))\n print(\"total reward : {}\".format(reward_total))\n print(\"average reward: {:.2f}\".format(reward_total / TEST_COUNT))\n\nif __name__ == \"__main__\":\n main()",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party modules\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\nfrom keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Input, Activation, Add\nimport numpy as np\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Model\nfrom keras.regularizers import l1\n\n# internal modules\nimport hasy_tools\n\n# Load the data\ndata = hasy_tools.load_data()\ndatasets = ['train', 'test']\n\n# One-Hot encoding\nfor dataset in datasets:\n key = 'y_' + dataset\n data[key] = np.eye(hasy_tools.n_classes)[data[key].squeeze()]\n\n# Preprocessing\nfor dataset in datasets:\n key = 'x_' + dataset\n data[key] = hasy_tools.preprocess(data[key])\n\n# Generate Validation Data\nsplit = train_test_split(data['x_train'], data['y_train'],\n test_size=0.20,\n random_state=0,\n stratify=data['y_train'])\ndata['x_train'], data['x_val'], data['y_train'], data['y_val'] = split\ndatasets.append('val')\n\n\ndef skip_layer_conv(x, nb_layers=16):\n x1 = Conv2D(nb_layers, (3, 3), padding='same')(x)\n x1 = Activation('relu')(x1)\n x2 = Conv2D(nb_layers, (3, 3), padding='same')(x1)\n x2 = Activation('relu')(x2)\n x3 = Add()([x1, x2])\n return x3\n\n\ndef skip_layer(x, nb_layers=16):\n x1 = Dense(nb_layers, kernel_regularizer=l1(0.01))(x)\n x1 = Activation('relu')(x1)\n x2 = Dense(nb_layers, kernel_regularizer=l1(0.01))(x1)\n x2 = Activation('relu')(x2)\n x3 = Add()([x1, x2])\n return x3\n\n# Define the model\ninput_ = Input(shape=(hasy_tools.WIDTH, hasy_tools.HEIGHT, 1))\nx = input_\nx = Conv2D(16, (3, 3), padding='same',\n kernel_initializer='he_uniform')(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 16x16\nx = skip_layer_conv(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 8x8\nx = skip_layer_conv(x)\nx = MaxPooling2D(pool_size=(2, 2))(x) # 4x4\nx = skip_layer_conv(x)\nx = skip_layer_conv(x, 32)\nx = Flatten()(x) # Adjust for FCN\nx = Dense(512, kernel_regularizer=l1(0.01))(x)\nx = Activation('relu')(x)\nx = Dense(hasy_tools.n_classes)(x)\nx = Activation('softmax')(x)\nmodel = Model(inputs=input_, outputs=x)\n\n# Compile model\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n# Fit the model\ncsv_logger = CSVLogger('log.csv', append=True, separator=';')\ncheckpointer = ModelCheckpoint(filepath='checkpoint.h5',\n verbose=1,\n period=10,\n save_best_only=True)\nmodel.fit(data['x_train'], data['y_train'],\n validation_data=(data['x_val'], data['y_val']),\n epochs=500,\n batch_size=128,\n callbacks=[csv_logger, checkpointer])\n\n# Serialize model\nmodel.save('model.h5')\n\n# evaluate the model\nscores = model.evaluate(data['x_test'], data['y_test'])\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n"
] | [
[
"tensorflow.strided_slice",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.image.random_brightness",
"tensorflow.transpose",
"tensorflow.image.random_flip_left_right",
"tensorflow.image.random_contrast",
"tensorflow.gfile.Exists",
"tensorflow.summary.image",
"tensorflow.decode_raw",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.FixedLengthRecordReader",
"tensorflow.random_crop",
"tensorflow.train.string_input_producer",
"tensorflow.image.per_image_standardization",
"tensorflow.train.batch",
"tensorflow.train.shuffle_batch"
],
[
"numpy.eye",
"sklearn.model_selection.train_test_split"
],
[
"sklearn.cross_validation.train_test_split",
"sklearn.utils.shuffle",
"numpy.median",
"numpy.random.random_sample",
"numpy.std",
"numpy.mean",
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"numpy.add.accumulate"
],
[
"numpy.max",
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.eye",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hypergravity/bopy | [
"90cf5bf695c4ae4f53d9a9bec7cdc9ba16994267"
] | [
"bopy/spec/lamost.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\nAuthor\n------\nBo Zhang\n\nEmail\n-----\[email protected]\n\nCreated on\n----------\n- Fri Jul 3 13:13:06 2015 read_spectrum\n\nModifications\n-------------\n- Fri Nov 20 10:16:59 2015 reformatting code\n- Sun Feb 28 14:39:16 2016 migrated to bopy.spec.lamost\n- Fri Jul 15 16:08:00 2016 migrate read_spectrum to read_spectrum.py\n\n\nAims\n----\n- generate LAMOST spectra file name/path\n\n\"\"\"\n\n# from __future__ import print_function\nimport os\nimport numpy as np\n# from astropy.io import fits\n# from astropy.table import Table, Column\n\n\ndef lamost_filepath(planid, mjd, spid, fiberid, dirpath=\"\", extname=\".fits\"):\n \"\"\" generate file path of a LAMOST spectrum\n\n Parameters\n ----------\n planid: string\n planid\n\n mjd: 5-digit integer\n mjd (use lmjd rather than mjd for DR3 and after!)\n\n spid: 2-digit integer\n spid, the number of the spectrogragh\n\n fiberid: 3-digit integer\n fiberid\n\n dirpath: string\n the root directory for storing spectra.\n\n Returns\n --------\n filepath: string\n the path of root dir of directory (prefix).\n if un-specified, return file name.\n\n \"\"\"\n\n # pre-processing: strip\n if np.isscalar(planid):\n planid = planid.strip()\n else:\n planid = [_.strip() for _ in planid]\n\n if dirpath == \"\" or dirpath is None:\n # return file name\n if np.isscalar(mjd):\n # if only input one item\n return \"spec-%05d-%s_sp%02d-%03d%s\" \\\n % (mjd, planid, spid, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"spec-%05d-%s_sp%02d-%03d%s\" %\n (mjd[i], planid[i], spid[i], fiberid[i], extname)\n for i in range(len(mjd))])\n else:\n # return file path\n if not dirpath[-1] == os.path.sep:\n dirpath += os.path.sep\n\n if np.isscalar(mjd):\n # if only input one item\n return \"%s%s%sspec-%05d-%s_sp%02d-%03d%s\" \\\n % (dirpath, planid, os.path.sep,\n mjd, planid, spid, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"%s%s%sspec-%05d-%s_sp%02d-%03d%s\" %\n (dirpath, planid[i], os.path.sep, mjd[i],\n planid[i], spid[i], fiberid[i], extname)\n for i in range(len(mjd))])\n\n\ndef _test_lamost_filepath():\n \"\"\"test function **lamost_filepath**\n \"\"\"\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/\"))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/pool\"))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/pool/\"))\n\n\ndef sdss_filepath(plate, mjd, fiberid, dirpath=\"\", extname=\".fits\"):\n \"\"\" generate file path of a LAMOST spectrum\n\n Parameters\n ----------\n plate: string\n plate\n\n mjd: 5-digit integer\n mjd (use lmjd rather than mjd for DR3 and after!)\n\n fiberid: 4-digit integer\n fiberid\n\n dirpath: string\n the root directory for storing spectra.\n\n extname: string\n in case that users want to synthesize other data format\n\n Returns\n --------\n filepath: string\n the path of root dir of directory (prefix).\n if un-specified, return file name.\n\n \"\"\"\n\n if dirpath == \"\" or dirpath is None:\n # return file name\n if np.isscalar(mjd):\n # if only input one item\n return \"spec-%04d-%05d-%04d%s\" % (plate, mjd, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"spec-%04d-%05d-%04d%s\" %\n (plate[i], mjd[i], fiberid[i], extname)\n for i in range(len(mjd))])\n else:\n # return file path\n if not dirpath[-1] == os.path.sep:\n dirpath += os.path.sep\n\n if np.isscalar(mjd):\n # if only input one item\n return 
\"%s%04d%sspec-%04d-%05d-%04d%s\" \\\n % (dirpath, plate, os.path.sep,\n plate, mjd, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"%s%04d%sspec-%04d-%05d-%04d%s\" %\n (dirpath, plate[i], os.path.sep, plate[i],\n mjd[i], fiberid[i], extname)\n for i in range(len(mjd))])\n\n\ndef _test_sdss_filepath():\n print(sdss_filepath(2238, 52059, 1, \"/\"))\n\n\nif __name__ == \"__main__\":\n print(\"\")\n print(\"@Cham: start to test the module ...\")\n print(\"\")\n print(\"@Cham: testing \"\"lamost_filepath\"\" ...\")\n _test_lamost_filepath()\n _test_sdss_filepath()\n print(\"@Cham: OK\")\n"
] | [
[
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aleaf/flopy | [
"a5777a4d4a745e473110a167c69603ac4ad3106c",
"a5777a4d4a745e473110a167c69603ac4ad3106c"
] | [
"flopy/export/netcdf.py",
"flopy/utils/util_list.py"
] | [
"import os\nimport platform\nimport socket\nimport copy\nimport json\nimport numpy as np\nfrom datetime import datetime\nimport time\nfrom .metadata import acdd\nimport flopy\n\n# globals\nFILLVALUE = -99999.9\nITMUNI = {\n 0: \"undefined\",\n 1: \"seconds\",\n 2: \"minutes\",\n 3: \"hours\",\n 4: \"days\",\n 5: \"years\",\n}\nPRECISION_STRS = [\"f4\", \"f8\", \"i4\"]\n\nSTANDARD_VARS = [\"longitude\", \"latitude\", \"layer\", \"elevation\", \"time\"]\n\npath = os.path.split(__file__)[0]\nwith open(path + \"/longnames.json\") as f:\n NC_LONG_NAMES = json.load(f)\n\n\nclass Logger(object):\n \"\"\"\n Basic class for logging events during the linear analysis calculations\n if filename is passed, then an file handle is opened\n\n Parameters\n ----------\n filename : bool or string\n if string, it is the log file to write. If a bool, then log is\n written to the screen. echo (bool): a flag to force screen output\n\n Attributes\n ----------\n items : dict\n tracks when something is started. If a log entry is\n not in items, then it is treated as a new entry with the string\n being the key and the datetime as the value. If a log entry is\n in items, then the end time and delta time are written and\n the item is popped from the keys\n\n \"\"\"\n\n def __init__(self, filename, echo=False):\n self.items = {}\n self.echo = bool(echo)\n if filename == True:\n self.echo = True\n self.filename = None\n elif filename:\n self.f = open(filename, \"w\", 0) # unbuffered\n self.t = datetime.now()\n self.log(\"opening \" + str(filename) + \" for logging\")\n else:\n self.filename = None\n\n def log(self, phrase):\n \"\"\"\n log something that happened\n\n Parameters\n ----------\n phrase : str\n the thing that happened\n\n \"\"\"\n pass\n t = datetime.now()\n if phrase in self.items.keys():\n s = (\n str(t)\n + \" finished: \"\n + str(phrase)\n + \", took: \"\n + str(t - self.items[phrase])\n + \"\\n\"\n )\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n self.items.pop(phrase)\n else:\n s = str(t) + \" starting: \" + str(phrase) + \"\\n\"\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n self.items[phrase] = copy.deepcopy(t)\n\n def warn(self, message):\n \"\"\"\n Write a warning to the log file\n\n Parameters\n ----------\n message : str\n the warning text\n\n \"\"\"\n s = str(datetime.now()) + \" WARNING: \" + message + \"\\n\"\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n return\n\n\nclass NetCdf(object):\n \"\"\"\n Support for writing a netCDF4 compliant file from a flopy model\n\n Parameters\n ----------\n output_filename : str\n Name of the .nc file to write\n model : flopy model instance\n time_values : the entries for the time dimension\n if not None, the constructor will initialize\n the file. If None, the perlen array of ModflowDis\n will be used\n z_positive : str ('up' or 'down')\n Positive direction of vertical coordinates written to NetCDF file.\n (default 'down')\n verbose : if True, stdout is verbose. If str, then a log file\n is written to the verbose file\n forgive : what to do if a duplicate variable name is being created. If\n True, then the newly requested var is skipped. 
If False, then\n an exception is raised.\n **kwargs : keyword arguments\n modelgrid : flopy.discretization.Grid instance\n user supplied model grid which will be used in lieu of the model\n object modelgrid for netcdf production\n\n Notes\n -----\n This class relies heavily on the grid and modeltime objects,\n including these attributes: lenuni, itmuni, start_datetime, and proj4.\n Make sure these attributes have meaningful values.\n\n \"\"\"\n\n def __init__(\n self,\n output_filename,\n model,\n time_values=None,\n z_positive=\"up\",\n verbose=None,\n prj=None,\n logger=None,\n forgive=False,\n **kwargs\n ):\n\n assert output_filename.lower().endswith(\".nc\")\n if verbose is None:\n verbose = model.verbose\n if logger is not None:\n self.logger = logger\n else:\n self.logger = Logger(verbose)\n self.var_attr_dict = {}\n self.log = self.logger.log\n if os.path.exists(output_filename):\n self.logger.warn(\"removing existing nc file: \" + output_filename)\n os.remove(output_filename)\n self.output_filename = output_filename\n\n self.forgive = bool(forgive)\n\n self.model = model\n self.model_grid = model.modelgrid\n if \"modelgrid\" in kwargs:\n self.model_grid = kwargs.pop(\"modelgrid\")\n self.model_time = model.modeltime\n if prj is not None:\n self.model_grid.proj4 = prj\n if self.model_grid.grid_type == \"structured\":\n self.dimension_names = (\"layer\", \"y\", \"x\")\n STANDARD_VARS.extend([\"delc\", \"delr\"])\n # elif self.model_grid.grid_type == 'vertex':\n # self.dimension_names = ('layer', 'ncpl')\n else:\n raise Exception(\n \"Grid type {} not supported.\".format(self.model_grid.grid_type)\n )\n self.shape = self.model_grid.shape\n\n try:\n import dateutil.parser\n except:\n print(\n \"python-dateutil is not installed\\n\"\n + \"try pip install python-dateutil\"\n )\n return\n\n self.start_datetime = self._dt_str(\n dateutil.parser.parse(self.model_time.start_datetime)\n )\n self.logger.warn(\"start datetime:{0}\".format(str(self.start_datetime)))\n\n proj4_str = self.model_grid.proj4\n if proj4_str is None:\n proj4_str = \"epsg:4326\"\n self.log(\n \"Warning: model has no coordinate reference system specified. 
\"\n \"Using default proj4 string: {}\".format(proj4_str)\n )\n self.proj4_str = proj4_str\n self.grid_units = self.model_grid.units\n self.z_positive = z_positive\n if self.grid_units is None:\n self.grid_units = \"undefined\"\n assert self.grid_units in [\"feet\", \"meters\", \"undefined\"], (\n \"unsupported length units: \" + self.grid_units\n )\n\n self.time_units = self.model_time.time_units\n\n # this gives us confidence that every NetCdf instance\n # has the same attributes\n self.log(\"initializing attributes\")\n self._initialize_attributes()\n self.log(\"initializing attributes\")\n\n self.time_values_arg = time_values\n\n self.log(\"initializing file\")\n self.initialize_file(time_values=self.time_values_arg)\n self.log(\"initializing file\")\n\n def __add__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] + other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] + other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__add__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __sub__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] - other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] - other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__sub__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __mul__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] * other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] * other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__mul__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __div__(self, other):\n return self.__truediv__(other)\n\n def __truediv__(self, other):\n new_net = NetCdf.zeros_like(self)\n with np.errstate(invalid=\"ignore\"):\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] / other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:]\n / other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__sub__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def append(self, other, suffix=\"_1\"):\n assert isinstance(other, NetCdf) or isinstance(other, dict)\n if isinstance(other, NetCdf):\n for vname in other.var_attr_dict.keys():\n attrs = other.var_attr_dict[vname].copy()\n var = other.nc.variables[vname]\n new_vname = vname\n\n if vname in self.nc.variables.keys():\n if vname not in STANDARD_VARS:\n new_vname = vname + suffix\n if \"long_name\" in attrs:\n attrs[\"long_name\"] += \" \" + suffix\n else:\n continue\n assert (\n 
new_vname not in self.nc.variables.keys()\n ), \"var already exists:{0} in {1}\".format(\n new_vname, \",\".join(self.nc.variables.keys())\n )\n attrs[\"max\"] = var[:].max()\n attrs[\"min\"] = var[:].min()\n new_var = self.create_variable(\n new_vname, attrs, var.dtype, dimensions=var.dimensions\n )\n new_var[:] = var[:]\n else:\n for vname, array in other.items():\n vname_norm = self.normalize_name(vname)\n assert (\n vname_norm in self.nc.variables.keys()\n ), \"dict var not in \" \"self.vars:{0}-->\".format(\n vname\n ) + \",\".join(\n self.nc.variables.keys()\n )\n\n new_vname = vname_norm + suffix\n assert new_vname not in self.nc.variables.keys()\n attrs = self.var_attr_dict[vname_norm].copy()\n attrs[\"max\"] = np.nanmax(array)\n attrs[\"min\"] = np.nanmin(array)\n attrs[\"name\"] = new_vname\n attrs[\"long_name\"] = attrs[\"long_name\"] + \" \" + suffix\n var = self.nc.variables[vname_norm]\n # assert var.shape == array.shape,\\\n # \"{0} shape ({1}) doesn't make array shape ({2})\".\\\n # format(new_vname,str(var.shape),str(array.shape))\n new_var = self.create_variable(\n new_vname, attrs, var.dtype, dimensions=var.dimensions\n )\n try:\n new_var[:] = array\n except:\n new_var[:, 0] = array\n\n return\n\n def copy(self, output_filename):\n new_net = NetCdf.zeros_like(self, output_filename=output_filename)\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = self.nc.variables[vname][:]\n return new_net\n\n @classmethod\n def zeros_like(\n cls, other, output_filename=None, verbose=None, logger=None\n ):\n new_net = NetCdf.empty_like(\n other,\n output_filename=output_filename,\n verbose=verbose,\n logger=logger,\n )\n # add the vars to the instance\n for vname in other.var_attr_dict.keys():\n if new_net.nc.variables.get(vname) is not None:\n new_net.logger.warn(\n \"variable {0} already defined, skipping\".format(vname)\n )\n continue\n new_net.log(\"adding variable {0}\".format(vname))\n var = other.nc.variables[vname]\n data = var[:]\n try:\n mask = data.mask\n data = np.array(data)\n except:\n mask = None\n new_data = np.zeros_like(data)\n new_data[mask] = FILLVALUE\n new_var = new_net.create_variable(\n vname,\n other.var_attr_dict[vname],\n var.dtype,\n dimensions=var.dimensions,\n )\n new_var[:] = new_data\n new_net.log(\"adding variable {0}\".format(vname))\n global_attrs = {}\n for attr in other.nc.ncattrs():\n if attr not in new_net.nc.ncattrs():\n global_attrs[attr] = other.nc[attr]\n new_net.add_global_attributes(global_attrs)\n return new_net\n\n @classmethod\n def empty_like(\n cls, other, output_filename=None, verbose=None, logger=None\n ):\n if output_filename is None:\n output_filename = (\n str(time.mktime(datetime.now().timetuple())) + \".nc\"\n )\n\n while os.path.exists(output_filename):\n print(\"{}...already exists\".format(output_filename))\n output_filename = (\n str(time.mktime(datetime.now().timetuple())) + \".nc\"\n )\n print(\n \"creating temporary netcdf file...\"\n + \"{}\".format(output_filename)\n )\n\n new_net = cls(\n output_filename,\n other.model,\n time_values=other.time_values_arg,\n verbose=verbose,\n logger=logger,\n )\n return new_net\n\n def difference(\n self, other, minuend=\"self\", mask_zero_diff=True, onlydiff=True\n ):\n \"\"\"\n make a new NetCDF instance that is the difference with another\n netcdf file\n\n Parameters\n ----------\n other : either an str filename of a netcdf file or\n a netCDF4 instance\n\n minuend : (optional) the order of the difference operation.\n Default is self (e.g. self - other). 
Can be \"self\" or \"other\"\n\n mask_zero_diff : bool flag to mask differences that are zero. If\n True, positions in the difference array that are zero will be set\n to self.fillvalue\n\n only_diff : bool flag to only add non-zero diffs to output file\n\n Returns\n -------\n net NetCDF instance\n\n Notes\n -----\n assumes the current NetCDF instance has been populated. The\n variable names and dimensions between the two files must match\n exactly. The name of the new .nc file is\n <self.output_filename>.diff.nc. The masks from both self and\n other are carried through to the new instance\n\n \"\"\"\n\n assert self.nc is not None, (\n \"can't call difference() if nc \" + \"hasn't been populated\"\n )\n try:\n import netCDF4\n except Exception as e:\n mess = \"error import netCDF4: {0}\".format(str(e))\n self.logger.warn(mess)\n raise Exception(mess)\n\n if isinstance(other, str):\n assert os.path.exists(\n other\n ), \"filename 'other' not found:\" + \"{0}\".format(other)\n other = netCDF4.Dataset(other, \"r\")\n\n assert isinstance(other, netCDF4.Dataset)\n\n # check for similar variables\n self_vars = set(self.nc.variables.keys())\n other_vars = set(other.variables)\n diff = self_vars.symmetric_difference(other_vars)\n if len(diff) > 0:\n self.logger.warn(\n \"variables are not the same between the two \"\n + \"nc files: \"\n + \",\".join(diff)\n )\n return\n\n # check for similar dimensions\n self_dimens = self.nc.dimensions\n other_dimens = other.dimensions\n for d in self_dimens.keys():\n if d not in other_dimens:\n self.logger.warn(\"missing dimension in other:{0}\".format(d))\n return\n if len(self_dimens[d]) != len(other_dimens[d]):\n self.logger.warn(\n \"dimension not consistent: \"\n + \"{0}:{1}\".format(self_dimens[d], other_dimens[d])\n )\n return\n # should be good to go\n time_values = self.nc.variables.get(\"time\")[:]\n new_net = NetCdf(\n self.output_filename.replace(\".nc\", \".diff.nc\"),\n self.model,\n time_values=time_values,\n )\n # add the vars to the instance\n for vname in self_vars:\n if (\n vname not in self.var_attr_dict\n or new_net.nc.variables.get(vname) is not None\n ):\n self.logger.warn(\"skipping variable: {0}\".format(vname))\n continue\n self.log(\"processing variable {0}\".format(vname))\n s_var = self.nc.variables[vname]\n o_var = other.variables[vname]\n s_data = s_var[:]\n o_data = o_var[:]\n o_mask, s_mask = None, None\n\n # keep the masks to apply later\n if isinstance(s_data, np.ma.MaskedArray):\n self.logger.warn(\"masked array for {0}\".format(vname))\n s_mask = s_data.mask\n s_data = np.array(s_data)\n s_data[s_mask] = 0.0\n else:\n np.nan_to_num(s_data)\n\n if isinstance(o_data, np.ma.MaskedArray):\n o_mask = o_data.mask\n o_data = np.array(o_data)\n o_data[o_mask] = 0.0\n else:\n np.nan_to_num(o_data)\n\n # difference with self\n if minuend.lower() == \"self\":\n d_data = s_data - o_data\n elif minuend.lower() == \"other\":\n d_data = o_data - s_data\n else:\n mess = \"unrecognized minuend {0}\".format(minuend)\n self.logger.warn(mess)\n raise Exception(mess)\n\n # check for non-zero diffs\n if onlydiff and d_data.sum() == 0.0:\n self.logger.warn(\n \"var {0} has zero differences, skipping...\".format(vname)\n )\n continue\n\n self.logger.warn(\n \"resetting diff attrs max,min:{0},{1}\".format(\n d_data.min(), d_data.max()\n )\n )\n attrs = self.var_attr_dict[vname].copy()\n attrs[\"max\"] = np.nanmax(d_data)\n attrs[\"min\"] = np.nanmin(d_data)\n # reapply masks\n if s_mask is not None:\n self.log(\"applying self mask\")\n s_mask[d_data 
!= 0.0] = False\n d_data[s_mask] = FILLVALUE\n self.log(\"applying self mask\")\n if o_mask is not None:\n self.log(\"applying other mask\")\n o_mask[d_data != 0.0] = False\n d_data[o_mask] = FILLVALUE\n self.log(\"applying other mask\")\n\n d_data[np.isnan(d_data)] = FILLVALUE\n if mask_zero_diff:\n d_data[np.where(d_data == 0.0)] = FILLVALUE\n\n var = new_net.create_variable(\n vname, attrs, s_var.dtype, dimensions=s_var.dimensions\n )\n\n var[:] = d_data\n self.log(\"processing variable {0}\".format(vname))\n\n def _dt_str(self, dt):\n \"\"\" for datetime to string for year < 1900\n \"\"\"\n dt_str = \"{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02}Z\".format(\n dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second\n )\n return dt_str\n\n def write(self):\n \"\"\"write the nc object to disk\"\"\"\n self.log(\"writing nc file\")\n assert (\n self.nc is not None\n ), \"netcdf.write() error: nc file not initialized\"\n\n # write any new attributes that have been set since\n # initializing the file\n for k, v in self.global_attributes.items():\n try:\n if self.nc.attributes.get(k) is not None:\n self.nc.setncattr(k, v)\n except Exception:\n self.logger.warn(\n \"error setting global attribute {0}\".format(k)\n )\n\n self.nc.sync()\n self.nc.close()\n self.log(\"writing nc file\")\n\n def _initialize_attributes(self):\n \"\"\"private method to initial the attributes\n of the NetCdf instance\n \"\"\"\n assert (\n \"nc\" not in self.__dict__.keys()\n ), \"NetCdf._initialize_attributes() error: nc attribute already set\"\n\n self.nc_epsg_str = \"epsg:4326\"\n self.nc_crs_longname = \"http://www.opengis.net/def/crs/EPSG/0/4326\"\n self.nc_semi_major = float(6378137.0)\n self.nc_inverse_flat = float(298.257223563)\n\n self.global_attributes = {}\n self.global_attributes[\"namefile\"] = self.model.namefile\n self.global_attributes[\"model_ws\"] = self.model.model_ws\n self.global_attributes[\"exe_name\"] = self.model.exe_name\n self.global_attributes[\"modflow_version\"] = self.model.version\n\n self.global_attributes[\"create_hostname\"] = socket.gethostname()\n self.global_attributes[\"create_platform\"] = platform.system()\n self.global_attributes[\"create_directory\"] = os.getcwd()\n\n htol, rtol = -999, -999\n try:\n htol, rtol = self.model.solver_tols()\n except Exception as e:\n self.logger.warn(\n \"unable to get solver tolerances:\" + \"{0}\".format(str(e))\n )\n self.global_attributes[\"solver_head_tolerance\"] = htol\n self.global_attributes[\"solver_flux_tolerance\"] = rtol\n spatial_attribs = {\n \"xll\": self.model_grid.xoffset,\n \"yll\": self.model_grid.yoffset,\n \"rotation\": self.model_grid.angrot,\n \"proj4_str\": self.model_grid.proj4,\n }\n for n, v in spatial_attribs.items():\n self.global_attributes[\"flopy_sr_\" + n] = v\n self.global_attributes[\n \"start_datetime\"\n ] = self.model_time.start_datetime\n\n self.fillvalue = FILLVALUE\n\n # initialize attributes\n self.grid_crs = None\n self.zs = None\n self.ys = None\n self.xs = None\n self.nc = None\n\n def initialize_geometry(self):\n \"\"\" initialize the geometric information\n needed for the netcdf file\n \"\"\"\n try:\n import pyproj\n except ImportError as e:\n raise ImportError(\n \"NetCdf error importing pyproj module:\\n\" + str(e)\n )\n from distutils.version import LooseVersion\n\n # Check if using newer pyproj version conventions\n pyproj220 = LooseVersion(pyproj.__version__) >= LooseVersion(\"2.2.0\")\n\n proj4_str = self.proj4_str\n print(\"initialize_geometry::proj4_str = {}\".format(proj4_str))\n\n 
self.log(\"building grid crs using proj4 string: {}\".format(proj4_str))\n if pyproj220:\n self.grid_crs = pyproj.CRS(proj4_str)\n else:\n self.grid_crs = pyproj.Proj(proj4_str, preserve_units=True)\n\n print(\"initialize_geometry::self.grid_crs = {}\".format(self.grid_crs))\n\n vmin, vmax = self.model_grid.botm.min(), self.model_grid.top.max()\n if self.z_positive == \"down\":\n vmin, vmax = vmax, vmin\n else:\n self.zs = self.model_grid.xyzcellcenters[2].copy()\n\n ys = self.model_grid.xyzcellcenters[1].copy()\n xs = self.model_grid.xyzcellcenters[0].copy()\n\n # Transform to a known CRS\n if pyproj220:\n nc_crs = pyproj.CRS(self.nc_epsg_str)\n self.transformer = pyproj.Transformer.from_crs(\n self.grid_crs, nc_crs, always_xy=True\n )\n else:\n nc_crs = pyproj.Proj(self.nc_epsg_str)\n self.transformer = None\n\n print(\"initialize_geometry::nc_crs = {}\".format(nc_crs))\n\n if pyproj220:\n print(\n \"transforming coordinates using = {}\".format(self.transformer)\n )\n\n self.log(\"projecting grid cell center arrays\")\n if pyproj220:\n self.xs, self.ys = self.transformer.transform(xs, ys)\n else:\n self.xs, self.ys = pyproj.transform(self.grid_crs, nc_crs, xs, ys)\n\n # get transformed bounds and record to check against ScienceBase later\n xmin, xmax, ymin, ymax = self.model_grid.extent\n bbox = np.array(\n [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]\n )\n if pyproj220:\n x, y = self.transformer.transform(*bbox.transpose())\n else:\n x, y = pyproj.transform(self.grid_crs, nc_crs, *bbox.transpose())\n self.bounds = x.min(), y.min(), x.max(), y.max()\n self.vbounds = vmin, vmax\n\n def initialize_file(self, time_values=None):\n \"\"\"\n initialize the netcdf instance, including global attributes,\n dimensions, and grid information\n\n Parameters\n ----------\n\n time_values : list of times to use as time dimension\n entries. 
If none, then use the times in\n self.model.dis.perlen and self.start_datetime\n\n \"\"\"\n if self.nc is not None:\n raise Exception(\"nc file already initialized\")\n\n if self.grid_crs is None:\n self.log(\"initializing geometry\")\n self.initialize_geometry()\n self.log(\"initializing geometry\")\n try:\n import netCDF4\n except Exception as e:\n self.logger.warn(\"error importing netCDF module\")\n msg = \"NetCdf error importing netCDF4 module:\\n\" + str(e)\n raise Exception(msg)\n\n # open the file for writing\n try:\n self.nc = netCDF4.Dataset(self.output_filename, \"w\")\n except Exception as e:\n msg = \"error creating netcdf dataset:\\n{}\".format(str(e))\n raise Exception(msg)\n\n # write some attributes\n self.log(\"setting standard attributes\")\n\n self.nc.setncattr(\n \"Conventions\",\n \"CF-1.6, ACDD-1.3, flopy {}\".format(flopy.__version__),\n )\n self.nc.setncattr(\n \"date_created\", datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:00Z\")\n )\n self.nc.setncattr(\n \"geospatial_vertical_positive\", \"{}\".format(self.z_positive)\n )\n min_vertical = np.min(self.zs)\n max_vertical = np.max(self.zs)\n self.nc.setncattr(\"geospatial_vertical_min\", min_vertical)\n self.nc.setncattr(\"geospatial_vertical_max\", max_vertical)\n self.nc.setncattr(\"geospatial_vertical_resolution\", \"variable\")\n self.nc.setncattr(\"featureType\", \"Grid\")\n for k, v in self.global_attributes.items():\n try:\n self.nc.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting global attribute {0}\".format(k)\n )\n self.global_attributes = {}\n self.log(\"setting standard attributes\")\n\n # spatial dimensions\n self.log(\"creating dimensions\")\n # time\n if time_values is None:\n time_values = np.cumsum(self.model_time.perlen)\n self.nc.createDimension(\"time\", len(time_values))\n for name, length in zip(self.dimension_names, self.shape):\n self.nc.createDimension(name, length)\n self.log(\"creating dimensions\")\n\n self.log(\"setting CRS info\")\n # Metadata variables\n crs = self.nc.createVariable(\"crs\", \"i4\")\n crs.long_name = self.nc_crs_longname\n crs.epsg_code = self.nc_epsg_str\n crs.semi_major_axis = self.nc_semi_major\n crs.inverse_flattening = self.nc_inverse_flat\n self.log(\"setting CRS info\")\n\n attribs = {\n \"units\": \"{} since {}\".format(\n self.time_units, self.start_datetime\n ),\n \"standard_name\": \"time\",\n \"long_name\": NC_LONG_NAMES.get(\"time\", \"time\"),\n \"calendar\": \"gregorian\",\n \"_CoordinateAxisType\": \"Time\",\n }\n time = self.create_variable(\n \"time\", attribs, precision_str=\"f8\", dimensions=(\"time\",)\n )\n self.logger.warn(\"time_values:{0}\".format(str(time_values)))\n time[:] = np.asarray(time_values)\n\n # Elevation\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"elevation\",\n \"long_name\": NC_LONG_NAMES.get(\"elevation\", \"elevation\"),\n \"axis\": \"Z\",\n \"valid_min\": min_vertical,\n \"valid_max\": max_vertical,\n \"positive\": self.z_positive,\n }\n elev = self.create_variable(\n \"elevation\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names,\n )\n elev[:] = self.zs\n\n # Longitude\n attribs = {\n \"units\": \"degrees_east\",\n \"standard_name\": \"longitude\",\n \"long_name\": NC_LONG_NAMES.get(\"longitude\", \"longitude\"),\n \"axis\": \"X\",\n \"_CoordinateAxisType\": \"Lon\",\n }\n lon = self.create_variable(\n \"longitude\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n lon[:] = self.xs\n self.log(\"creating longitude var\")\n\n # 
Latitude\n self.log(\"creating latitude var\")\n attribs = {\n \"units\": \"degrees_north\",\n \"standard_name\": \"latitude\",\n \"long_name\": NC_LONG_NAMES.get(\"latitude\", \"latitude\"),\n \"axis\": \"Y\",\n \"_CoordinateAxisType\": \"Lat\",\n }\n lat = self.create_variable(\n \"latitude\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n lat[:] = self.ys\n\n # x\n self.log(\"creating x var\")\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"projection_x_coordinate\",\n \"long_name\": NC_LONG_NAMES.get(\"x\", \"x coordinate of projection\"),\n \"axis\": \"X\",\n }\n x = self.create_variable(\n \"x_proj\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n x[:] = self.model_grid.xyzcellcenters[0]\n\n # y\n self.log(\"creating y var\")\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"projection_y_coordinate\",\n \"long_name\": NC_LONG_NAMES.get(\"y\", \"y coordinate of projection\"),\n \"axis\": \"Y\",\n }\n y = self.create_variable(\n \"y_proj\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n y[:] = self.model_grid.xyzcellcenters[1]\n\n # grid mapping variable\n crs = flopy.utils.reference.crs(\n prj=self.model_grid.prj, epsg=self.model_grid.epsg\n )\n attribs = crs.grid_mapping_attribs\n if attribs is not None:\n self.log(\"creating grid mapping variable\")\n self.create_variable(\n attribs[\"grid_mapping_name\"], attribs, precision_str=\"f8\"\n )\n\n # layer\n self.log(\"creating layer var\")\n attribs = {\n \"units\": \"\",\n \"standard_name\": \"layer\",\n \"long_name\": NC_LONG_NAMES.get(\"layer\", \"layer\"),\n \"positive\": \"down\",\n \"axis\": \"Z\",\n }\n lay = self.create_variable(\"layer\", attribs, dimensions=(\"layer\",))\n lay[:] = np.arange(0, self.shape[0])\n self.log(\"creating layer var\")\n\n if self.model_grid.grid_type == \"structured\":\n # delc\n attribs = {\n \"units\": self.model_grid.units.strip(\"s\"),\n \"long_name\": NC_LONG_NAMES.get(\n \"delc\", \"Model grid cell spacing along a column\"\n ),\n }\n delc = self.create_variable(\"delc\", attribs, dimensions=(\"y\",))\n delc[:] = self.model_grid.delc[::-1]\n if self.model_grid.angrot != 0:\n delc.comments = (\n \"This is the row spacing that applied to the UNROTATED grid. \"\n + \"This grid HAS been rotated before being saved to NetCDF. \"\n + \"To compute the unrotated grid, use the origin point and this array.\"\n )\n\n # delr\n attribs = {\n \"units\": self.model_grid.units.strip(\"s\"),\n \"long_name\": NC_LONG_NAMES.get(\n \"delr\", \"Model grid cell spacing along a row\"\n ),\n }\n delr = self.create_variable(\"delr\", attribs, dimensions=(\"x\",))\n delr[:] = self.model_grid.delr[::-1]\n if self.model_grid.angrot != 0:\n delr.comments = (\n \"This is the col spacing that applied to the UNROTATED grid. \"\n + \"This grid HAS been rotated before being saved to NetCDF. 
\"\n + \"To compute the unrotated grid, use the origin point and this array.\"\n )\n # else:\n # vertices\n # attribs = {\"units\": self.model_grid.lenuni.strip('s'),\n # \"long_name\": NC_LONG_NAMES.get(\"vertices\",\n # \"List of vertices used in the model by cell\"),\n # }\n # vertices = self.create_variable('vertices', attribs, dimensions=('ncpl',))\n # vertices[:] = self.model_grid.vertices\n\n # Workaround for CF/CDM.\n # http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/\n # reference/StandardCoordinateTransforms.html\n # \"explicit_field\"\n exp = self.nc.createVariable(\"VerticalTransform\", \"S1\")\n exp.transform_name = \"explicit_field\"\n exp.existingDataField = \"elevation\"\n exp._CoordinateTransformType = \"vertical\"\n exp._CoordinateAxes = \"layer\"\n return\n\n def initialize_group(\n self,\n group=\"timeseries\",\n dimensions=(\"time\",),\n attributes=None,\n dimension_data=None,\n ):\n \"\"\"\n Method to initialize a new group within a netcdf file. This group\n can have independent dimensions from the global dimensions\n\n Parameters:\n ----------\n name : str\n name of the netcdf group\n dimensions : tuple\n data dimension names for group\n dimension_shape : tuple\n tuple of data dimension lengths\n attributes : dict\n nested dictionary of {dimension : {attributes}} for each netcdf\n group dimension\n dimension_data : dict\n dictionary of {dimension : [data]} for each netcdf group dimension\n\n \"\"\"\n if attributes is None:\n attributes = {}\n\n if dimension_data is None:\n dimension_data = {}\n\n if self.nc is None:\n self.initialize_file()\n\n if group in self.nc.groups:\n raise AttributeError(\"{} group already initialized\".format(group))\n\n self.log(\"creating netcdf group {}\".format(group))\n self.nc.createGroup(group)\n self.log(\"{} group created\".format(group))\n\n self.log(\"creating {} group dimensions\".format(group))\n for dim in dimensions:\n if dim == \"time\":\n if \"time\" not in dimension_data:\n time_values = np.cumsum(self.model_time.perlen)\n else:\n time_values = dimension_data[\"time\"]\n\n self.nc.groups[group].createDimension(dim, len(time_values))\n\n else:\n if dim not in dimension_data:\n raise AssertionError(\n \"{} information must be supplied \"\n \"to dimension data\".format(dim)\n )\n else:\n\n self.nc.groups[group].createDimension(\n dim, len(dimension_data[dim])\n )\n\n self.log(\"created {} group dimensions\".format(group))\n\n dim_names = tuple([i for i in dimensions if i != \"time\"])\n for dim in dimensions:\n if dim.lower() == \"time\":\n if \"time\" not in attributes:\n unit_value = \"{} since {}\".format(\n self.time_units, self.start_datetime\n )\n attribs = {\n \"units\": unit_value,\n \"standard_name\": \"time\",\n \"long_name\": NC_LONG_NAMES.get(\"time\", \"time\"),\n \"calendar\": \"gregorian\",\n \"Axis\": \"Y\",\n \"_CoordinateAxisType\": \"Time\",\n }\n else:\n attribs = attributes[\"time\"]\n\n time = self.create_group_variable(\n group,\n \"time\",\n attribs,\n precision_str=\"f8\",\n dimensions=(\"time\",),\n )\n\n time[:] = np.asarray(time_values)\n\n elif dim.lower() == \"zone\":\n if \"zone\" not in attributes:\n attribs = {\n \"units\": \"N/A\",\n \"standard_name\": \"zone\",\n \"long_name\": \"zonebudget zone\",\n \"Axis\": \"X\",\n \"_CoordinateAxisType\": \"Zone\",\n }\n\n else:\n attribs = attributes[\"zone\"]\n\n zone = self.create_group_variable(\n group,\n \"zone\",\n attribs,\n precision_str=\"i4\",\n dimensions=(\"zone\",),\n )\n zone[:] = np.asarray(dimension_data[\"zone\"])\n\n 
else:\n attribs = attributes[dim]\n var = self.create_group_variable(\n group,\n dim,\n attribs,\n precision_str=\"f8\",\n dimensions=dim_names,\n )\n var[:] = np.asarray(dimension_data[dim])\n\n @staticmethod\n def normalize_name(name):\n return name.replace(\".\", \"_\").replace(\" \", \"_\").replace(\"-\", \"_\")\n\n def create_group_variable(\n self, group, name, attributes, precision_str, dimensions=(\"time\",)\n ):\n \"\"\"\n Create a new group variable in the netcdf object\n\n Parameters\n ----------\n name : str\n the name of the variable\n attributes : dict\n attributes to add to the new variable\n precision_str : str\n netcdf-compliant string. e.g. f4\n dimensions : tuple\n which dimensions the variable applies to\n default : (\"time\",\"layer\",\"x\",\"y\")\n group : str\n which netcdf group the variable goes in\n default : None which creates the variable in root\n\n Returns\n -------\n nc variable\n\n Raises\n ------\n AssertionError if precision_str not right\n AssertionError if variable name already in netcdf object\n AssertionError if one of more dimensions do not exist\n\n \"\"\"\n name = self.normalize_name(name)\n\n if (\n name in STANDARD_VARS\n and name in self.nc.groups[group].variables.keys()\n ):\n return\n\n if name in self.nc.groups[group].variables.keys():\n if self.forgive:\n self.logger.warn(\n \"skipping duplicate {} group variable: {}\".format(\n group, name\n )\n )\n return\n else:\n raise Exception(\n \"duplicate {} group variable name: {}\".format(group, name)\n )\n\n self.log(\"creating group {} variable: {}\".format(group, name))\n\n if precision_str not in PRECISION_STRS:\n raise AssertionError(\n \"netcdf.create_variable() error: precision \"\n \"string {} not in {}\".format(precision_str, PRECISION_STRS)\n )\n\n if group not in self.nc.groups:\n raise AssertionError(\n \"netcdf group `{}` must be created before \"\n \"variables can be added to it\".format(group)\n )\n\n self.var_attr_dict[\"{}/{}\".format(group, name)] = attributes\n\n var = self.nc.groups[group].createVariable(\n name,\n precision_str,\n dimensions,\n fill_value=self.fillvalue,\n zlib=True,\n )\n\n for k, v in attributes.items():\n try:\n var.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting attribute\"\n + \"{} for group {} variable {}\".format(k, group, name)\n )\n self.log(\"creating group {} variable: {}\".format(group, name))\n\n return var\n\n def create_variable(\n self,\n name,\n attributes,\n precision_str=\"f4\",\n dimensions=(\"time\", \"layer\"),\n group=None,\n ):\n \"\"\"\n Create a new variable in the netcdf object\n\n Parameters\n ----------\n name : str\n the name of the variable\n attributes : dict\n attributes to add to the new variable\n precision_str : str\n netcdf-compliant string. e.g. 
f4\n dimensions : tuple\n which dimensions the variable applies to\n default : (\"time\",\"layer\",\"x\",\"y\")\n group : str\n which netcdf group the variable goes in\n default : None which creates the variable in root\n\n Returns\n -------\n nc variable\n\n Raises\n ------\n AssertionError if precision_str not right\n AssertionError if variable name already in netcdf object\n AssertionError if one of more dimensions do not exist\n\n \"\"\"\n # Normalize variable name\n name = self.normalize_name(name)\n # if this is a core var like a dimension...\n # long_name = attributes.pop(\"long_name\",name)\n if name in STANDARD_VARS and name in self.nc.variables.keys():\n return\n if (\n name not in self.var_attr_dict.keys()\n and name in self.nc.variables.keys()\n ):\n if self.forgive:\n self.logger.warn(\n \"skipping duplicate variable: {0}\".format(name)\n )\n return\n else:\n raise Exception(\"duplicate variable name: {0}\".format(name))\n if name in self.nc.variables.keys():\n raise Exception(\"duplicate variable name: {0}\".format(name))\n\n self.log(\"creating variable: \" + str(name))\n assert (\n precision_str in PRECISION_STRS\n ), \"netcdf.create_variable() error: precision string {0} not in {1}\".format(\n precision_str, PRECISION_STRS\n )\n\n if self.nc is None:\n self.initialize_file()\n\n # check that the requested dimension exists and\n # build up the chuck sizes\n # chunks = []\n # for dimension in dimensions:\n # assert self.nc.dimensions.get(dimension) is not None, \\\n # \"netcdf.create_variable() dimension not found:\" + dimension\n # chunk = self.chunks[dimension]\n # assert chunk is not None, \\\n # \"netcdf.create_variable() chunk size of {0} is None in self.chunks\". \\\n # format(dimension)\n # chunks.append(chunk)\n\n self.var_attr_dict[name] = attributes\n\n var = self.nc.createVariable(\n name,\n precision_str,\n dimensions,\n fill_value=self.fillvalue,\n zlib=True,\n ) # ,\n # chunksizes=tuple(chunks))\n for k, v in attributes.items():\n try:\n var.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting attribute\"\n + \"{0} for variable {1}\".format(k, name)\n )\n self.log(\"creating variable: \" + str(name))\n return var\n\n def add_global_attributes(self, attr_dict):\n \"\"\" add global attribute to an initialized file\n\n Parameters\n ----------\n attr_dict : dict(attribute name, attribute value)\n\n Returns\n -------\n None\n\n Raises\n ------\n Exception of self.nc is None (initialize_file()\n has not been called)\n\n \"\"\"\n if self.nc is None:\n # self.initialize_file()\n mess = (\n \"NetCDF.add_global_attributes() should only \"\n + \"be called after the file has been initialized\"\n )\n self.logger.warn(mess)\n raise Exception(mess)\n\n self.log(\"setting global attributes\")\n self.nc.setncatts(attr_dict)\n self.log(\"setting global attributes\")\n\n def add_sciencebase_metadata(self, id, check=True):\n \"\"\"Add metadata from ScienceBase using the\n flopy.export.metadata.acdd class.\n\n Returns\n -------\n metadata : flopy.export.metadata.acdd object\n \"\"\"\n md = acdd(id, model=self.model)\n if md.sb is not None:\n if check:\n self._check_vs_sciencebase(md)\n # get set of public attributes\n attr = {n for n in dir(md) if \"_\" not in n[0]}\n # skip some convenience attributes\n skip = {\n \"bounds\",\n \"creator\",\n \"sb\",\n \"xmlroot\",\n \"time_coverage\",\n \"get_sciencebase_xml_metadata\",\n \"get_sciencebase_metadata\",\n }\n towrite = sorted(list(attr.difference(skip)))\n for k in towrite:\n v = md.__getattribute__(k)\n if v is not 
None:\n # convert everything to strings\n if not isinstance(v, str):\n if isinstance(v, list):\n v = \",\".join(v)\n else:\n v = str(v)\n self.global_attributes[k] = v\n self.nc.setncattr(k, v)\n self.write()\n return md\n\n def _check_vs_sciencebase(self, md):\n \"\"\"Check that model bounds read from flopy are consistent with those in ScienceBase.\"\"\"\n xmin, ymin, xmax, ymax = self.bounds\n tol = 1e-5\n assert md.geospatial_lon_min - xmin < tol\n assert md.geospatial_lon_max - xmax < tol\n assert md.geospatial_lat_min - ymin < tol\n assert md.geospatial_lat_max - ymax < tol\n assert md.geospatial_vertical_min - self.vbounds[0] < tol\n assert md.geospatial_vertical_max - self.vbounds[1] < tol\n\n def get_longnames_from_docstrings(self, outfile=\"longnames.json\"):\n \"\"\"\n This is experimental.\n\n Scrape Flopy module docstrings and return docstrings for parameters\n included in the list of variables added to NetCdf object. Create\n a dictionary of longnames keyed by the NetCdf variable names; make each\n longname from the first sentence of the docstring for that parameter.\n\n One major limitation is that variables from mflists often aren't described\n in the docstrings.\n \"\"\"\n\n def startstop(ds):\n \"\"\"Get just the Parameters section of the docstring.\"\"\"\n start, stop = 0, -1\n for i, l in enumerate(ds):\n if \"Parameters\" in l and \"----\" in ds[i + 1]:\n start = i + 2\n if l.strip() in [\"Attributes\", \"Methods\", \"Returns\", \"Notes\"]:\n stop = i - 1\n break\n if i >= start and \"----\" in l:\n stop = i - 2\n break\n return start, stop\n\n def get_entries(ds):\n \"\"\"Parse docstring entries into dictionary.\"\"\"\n stuff = {}\n k = None\n for line in ds:\n if (\n len(line) >= 5\n and line[:4] == \" \" * 4\n and line[4] != \" \"\n and \":\" in line\n ):\n k = line.split(\":\")[0].strip()\n stuff[k] = \"\"\n # lines with parameter descriptions\n elif k is not None and len(line) > 10: # avoid orphans\n stuff[k] += line.strip() + \" \"\n return stuff\n\n # get a list of the flopy classes\n # packages = inspect.getmembers(flopy.modflow, inspect.isclass)\n packages = [(pp.name[0], pp) for pp in self.model.packagelist]\n # get a list of the NetCDF variables\n attr = [v.split(\"_\")[-1] for v in self.nc.variables]\n\n # parse docstrings to get long names\n longnames = {}\n for pkg in packages:\n # parse the docstring\n obj = pkg[-1]\n ds = obj.__doc__.split(\"\\n\")\n start, stop = startstop(ds)\n txt = ds[start:stop]\n if stop - start > 0:\n params = get_entries(txt)\n for k, v in params.items():\n if k in attr:\n longnames[k] = v.split(\". \")[0]\n\n # add in any variables that weren't found\n for var in attr:\n if var not in longnames.keys():\n longnames[var] = \"\"\n with open(outfile, \"w\") as output:\n json.dump(longnames, output, sort_keys=True, indent=2)\n return longnames\n",
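The flopy NetCdf class in the string above is normally driven through flopy's export plumbing rather than instantiated by hand. Below is a minimal, hedged sketch of that workflow; it assumes flopy with its netCDF4 and pyproj dependencies installed, and the name file, workspace and output paths ("freyberg.nam", "model_dir", "freyberg.nc") are placeholders, not names taken from this dataset row.

import flopy

# Load an existing MODFLOW model (placeholder name file and workspace).
ml = flopy.modflow.Modflow.load("freyberg.nam", model_ws="model_dir", check=False)

# Exporting to a *.nc path routes through flopy.export.utils and the NetCdf
# class shown above: initialize_geometry() projects cell centers to EPSG:4326,
# initialize_file() writes the CF/ACDD attributes and dimensions, and package
# arrays are written through create_variable().
nc = ml.export("freyberg.nc")

# Extra global attributes can still be added before the file is flushed.
nc.add_global_attributes({"summary": "example export"})
nc.write()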
"\"\"\"\nutil_list module. Contains the mflist class.\n This classes encapsulates modflow-style list inputs away\n from the individual packages. The end-user should not need to\n instantiate this class directly.\n\n some more info\n\n\"\"\"\nfrom __future__ import division, print_function\n\nimport os\nimport warnings\nimport numpy as np\nfrom ..datbase import DataInterface, DataListInterface, DataType\nfrom ..utils.recarray_utils import create_empty_recarray\n\ntry:\n from numpy.lib import NumpyVersion\n\n numpy114 = NumpyVersion(np.__version__) >= \"1.14.0\"\nexcept ImportError:\n numpy114 = False\n\n\nclass MfList(DataInterface, DataListInterface):\n \"\"\"\n a generic object for handling transient boundary condition lists\n\n Parameters\n ----------\n package : package object\n The package object (of type :class:`flopy.pakbase.Package`) to which\n this MfList will be added.\n data : varies\n the data of the transient list (optional). (the default is None)\n\n Attributes\n ----------\n mxact : int\n the max number of active bc for any stress period\n\n Methods\n -------\n add_record(kper,index,value) : None\n add a record to stress period kper at index location\n write_transient(f) : None\n write the transient sequence to the model input file f\n check_kij() : None\n checks for boundaries outside of model domain - issues warnings only\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n \"\"\"\n\n def __init__(\n self,\n package,\n data=None,\n dtype=None,\n model=None,\n list_free_format=None,\n binary=False,\n ):\n\n if isinstance(data, MfList):\n for attr in data.__dict__.items():\n setattr(self, attr[0], attr[1])\n if model is None:\n self._model = package.parent\n else:\n self._model = model\n self._package = package\n return\n\n self._package = package\n if model is None:\n self._model = package.parent\n else:\n self._model = model\n if dtype is None:\n assert isinstance(self.package.dtype, np.dtype)\n self.__dtype = self.package.dtype\n else:\n self.__dtype = dtype\n self.__binary = binary\n self.__vtype = {}\n self.__data = {}\n if data is not None:\n self.__cast_data(data)\n self.__df = None\n if list_free_format is None:\n if package.parent.version == \"mf2k\":\n list_free_format = False\n self.list_free_format = list_free_format\n return\n\n @property\n def name(self):\n return self.package.name\n\n @property\n def mg(self):\n return self._model.modelgrid\n\n @property\n def sr(self):\n return self.mg.sr\n\n @property\n def model(self):\n return self._model\n\n @property\n def package(self):\n return self._package\n\n @property\n def data_type(self):\n return DataType.transientlist\n\n @property\n def plotable(self):\n return True\n\n def get_empty(self, ncell=0):\n d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)\n return d\n\n def export(self, f, **kwargs):\n from flopy import export\n\n return export.utils.mflist_export(f, self, **kwargs)\n\n def append(self, other):\n \"\"\" append the recarrays from one MfList to another\n Parameters\n ----------\n other: variable: an item that can be cast in to an MfList\n that corresponds with self\n Returns\n -------\n dict of {kper:recarray}\n \"\"\"\n if not isinstance(other, MfList):\n other = MfList(\n self.package,\n data=other,\n dtype=self.dtype,\n model=self._model,\n list_free_format=self.list_free_format,\n )\n msg = (\n \"MfList.append(): other arg must be \"\n + \"MfList or dict, not {0}\".format(type(other))\n )\n assert isinstance(other, MfList), msg\n\n other_kpers = 
list(other.data.keys())\n other_kpers.sort()\n\n self_kpers = list(self.data.keys())\n self_kpers.sort()\n\n new_dict = {}\n for kper in range(self._model.nper):\n other_data = other[kper].copy()\n self_data = self[kper].copy()\n\n other_len = other_data.shape[0]\n self_len = self_data.shape[0]\n\n if (other_len == 0 and self_len == 0) or (\n kper not in self_kpers and kper not in other_kpers\n ):\n continue\n elif self_len == 0:\n new_dict[kper] = other_data\n elif other_len == 0:\n new_dict[kper] = self_data\n else:\n new_len = other_data.shape[0] + self_data.shape[0]\n new_data = np.recarray(new_len, dtype=self.dtype)\n new_data[:self_len] = self_data\n new_data[self_len : self_len + other_len] = other_data\n new_dict[kper] = new_data\n\n return new_dict\n\n def drop(self, fields):\n \"\"\"drop fields from an MfList\n\n Parameters\n ----------\n fields : list or set of field names to drop\n\n Returns\n -------\n dropped : MfList without the dropped fields\n \"\"\"\n if not isinstance(fields, list):\n fields = [fields]\n names = [n for n in self.dtype.names if n not in fields]\n dtype = np.dtype(\n [(k, d) for k, d in self.dtype.descr if k not in fields]\n )\n spd = {}\n for k, v in self.data.items():\n # because np 1.9 doesn't support indexing by list of columns\n newarr = np.array([self.data[k][n] for n in names]).transpose()\n newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(\n np.recarray\n )\n for n in dtype.names:\n newarr[n] = self.data[k][n]\n spd[k] = newarr\n return MfList(self.package, spd, dtype=dtype)\n\n @property\n def data(self):\n return self.__data\n\n @property\n def df(self):\n if self.__df is None:\n self.__df = self.get_dataframe()\n return self.__df\n\n @property\n def vtype(self):\n return self.__vtype\n\n @property\n def dtype(self):\n return self.__dtype\n\n # Get the itmp for a given kper\n def get_itmp(self, kper):\n if kper not in list(self.__data.keys()):\n return None\n if self.__vtype[kper] is None:\n return -1\n # If an external file, have to load it\n if self.__vtype[kper] == str:\n return self.__fromfile(self.__data[kper]).shape[0]\n if self.__vtype[kper] == np.recarray:\n return self.__data[kper].shape[0]\n # If not any of the above, it must be an int\n return self.__data[kper]\n\n @property\n def mxact(self):\n mxact = 0\n for kper in list(self.__data.keys()):\n mxact = max(mxact, self.get_itmp(kper))\n return mxact\n\n @property\n def fmt_string(self):\n \"\"\"Returns a C-style fmt string for numpy savetxt that corresponds to\n the dtype\"\"\"\n if self.list_free_format is not None:\n use_free = self.list_free_format\n else:\n use_free = True\n if self.package.parent.has_package(\"bas6\"):\n use_free = self.package.parent.bas6.ifrefm\n # mt3d list data is fixed format\n if \"mt3d\" in self.package.parent.version.lower():\n use_free = False\n fmts = []\n for field in self.dtype.descr:\n vtype = field[1][1].lower()\n if vtype in (\"i\", \"b\"):\n if use_free:\n fmts.append(\"%9d\")\n else:\n fmts.append(\"%10d\")\n elif vtype == \"f\":\n if use_free:\n if numpy114:\n # Use numpy's floating-point formatter (Dragon4)\n fmts.append(\"%15s\")\n else:\n fmts.append(\"%15.7E\")\n else:\n fmts.append(\"%10G\")\n elif vtype == \"o\":\n if use_free:\n fmts.append(\"%9s\")\n else:\n fmts.append(\"%10s\")\n elif vtype == \"s\":\n msg = (\n \"MfList.fmt_string error: 'str' type found in dtype. 
\"\n \"This gives unpredictable results when \"\n \"recarray to file - change to 'object' type\"\n )\n raise TypeError(msg)\n else:\n raise TypeError(\n \"MfList.fmt_string error: unknown vtype in \"\n \"field: {}\".format(field)\n )\n if use_free:\n fmt_string = \" \" + \" \".join(fmts)\n else:\n fmt_string = \"\".join(fmts)\n return fmt_string\n\n # Private method to cast the data argument\n # Should only be called by the constructor\n def __cast_data(self, data):\n # If data is a list, then all we can do is try to cast it to\n # an ndarray, then cast again to a recarray\n if isinstance(data, list):\n # warnings.warn(\"MfList casting list to array\")\n try:\n data = np.array(data)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list to ndarray: \" + str(e)\n )\n\n # If data is a dict, the we have to assume it is keyed on kper\n if isinstance(data, dict):\n if not list(data.keys()):\n raise Exception(\"MfList error: data dict is empty\")\n for kper, d in data.items():\n try:\n kper = int(kper)\n except Exception as e:\n raise Exception(\n \"MfList error: data dict key \"\n + \"{0:s} not integer: \".format(kper)\n + str(type(kper))\n + \"\\n\"\n + str(e)\n )\n # Same as before, just try...\n if isinstance(d, list):\n # warnings.warn(\"MfList: casting list to array at \" +\\\n # \"kper {0:d}\".format(kper))\n try:\n d = np.array(d)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list \"\n + \"to ndarray: \"\n + str(e)\n )\n\n # super hack - sick of recarrays already\n # if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):\n # d = d.view(np.recarray)\n\n if isinstance(d, np.recarray):\n self.__cast_recarray(kper, d)\n elif isinstance(d, np.ndarray):\n self.__cast_ndarray(kper, d)\n elif isinstance(d, int):\n self.__cast_int(kper, d)\n elif isinstance(d, str):\n self.__cast_str(kper, d)\n elif d is None:\n self.__data[kper] = -1\n self.__vtype[kper] = None\n else:\n raise Exception(\n \"MfList error: unsupported data type: \"\n + str(type(d))\n + \" at kper \"\n + \"{0:d}\".format(kper)\n )\n\n # A single recarray - same MfList for all stress periods\n elif isinstance(data, np.recarray):\n self.__cast_recarray(0, data)\n # A single ndarray\n elif isinstance(data, np.ndarray):\n self.__cast_ndarray(0, data)\n # A single filename\n elif isinstance(data, str):\n self.__cast_str(0, data)\n else:\n raise Exception(\n \"MfList error: unsupported data type: \" + str(type(data))\n )\n\n def __cast_str(self, kper, d):\n # If d is a string, assume it is a filename and check that it exists\n assert os.path.exists(d), (\n \"MfList error: dict filename (string) '\"\n + d\n + \"' value for \"\n + \"kper {0:d} not found\".format(kper)\n )\n self.__data[kper] = d\n self.__vtype[kper] = str\n\n def __cast_int(self, kper, d):\n # If d is an integer, then it must be 0 or -1\n if d > 0:\n raise Exception(\n \"MfList error: dict integer value for \"\n \"kper {0:10d} must be 0 or -1, \"\n \"not {1:10d}\".format(kper, d)\n )\n if d == 0:\n self.__data[kper] = 0\n self.__vtype[kper] = None\n else:\n self.__data[kper] = -1\n self.__vtype[kper] = None\n\n def __cast_recarray(self, kper, d):\n assert d.dtype == self.__dtype, (\n \"MfList error: recarray dtype: \"\n + str(d.dtype)\n + \" doesn't match \"\n + \"self dtype: \"\n + str(self.dtype)\n )\n self.__data[kper] = d\n self.__vtype[kper] = np.recarray\n\n def __cast_ndarray(self, kper, d):\n d = np.atleast_2d(d)\n if d.dtype != self.__dtype:\n assert d.shape[1] == len(self.dtype), (\n \"MfList error: ndarray \"\n + 
\"shape \"\n + str(d.shape)\n + \" doesn't match dtype \"\n + \"len: \"\n + str(len(self.dtype))\n )\n # warnings.warn(\"MfList: ndarray dtype does not match self \" +\\\n # \"dtype, trying to cast\")\n try:\n self.__data[kper] = np.core.records.fromarrays(\n d.transpose(), dtype=self.dtype\n )\n except Exception as e:\n raise Exception(\n \"MfList error: casting ndarray to recarray: \" + str(e)\n )\n self.__vtype[kper] = np.recarray\n\n def get_dataframe(self, squeeze=True):\n \"\"\"\n Cast recarrays for stress periods into single\n dataframe containing all stress periods.\n\n Parameters\n ----------\n squeeze : bool\n Reduce number of columns in dataframe to only include\n stress periods where a variable changes.\n\n Returns\n -------\n df : dataframe\n Dataframe of shape nrow = ncells, ncol = nvar x nper. If\n the squeeze option is chosen, nper is the number of\n stress periods where at least one cells is different,\n otherwise it is equal to the number of keys in MfList.data.\n\n Notes\n -----\n Requires pandas.\n\n \"\"\"\n try:\n import pandas as pd\n except Exception as e:\n msg = \"MfList.get_dataframe() requires pandas\"\n raise ImportError(msg)\n\n # make a dataframe of all data for all stress periods\n names = [\"k\", \"i\", \"j\"]\n if \"MNW2\" in self.package.name:\n names += [\"wellid\"]\n\n # find relevant variable names\n # may have to iterate over the first stress period\n for per in range(self._model.nper):\n if hasattr(self.data[per], \"dtype\"):\n varnames = list(\n [n for n in self.data[per].dtype.names if n not in names]\n )\n break\n\n # create list of dataframes for each stress period\n # each with index of k, i, j\n dfs = []\n for per in self.data.keys():\n recs = self.data[per]\n if recs is None or len(recs) == 0:\n # add an empty dataframe if a stress period is\n # empty (e.g. 
no pumping during a predevelopment\n # period)\n columns = names + list(\n [\"{}{}\".format(c, per) for c in varnames]\n )\n dfi = pd.DataFrame(data=None, columns=columns)\n dfi = dfi.set_index(names)\n else:\n dfi = pd.DataFrame.from_records(recs)\n dfg = dfi.groupby(names)\n count = dfg[varnames[0]].count().rename(\"n\")\n if (count > 1).values.any():\n print(\n \"Duplicated list entry locations aggregated \"\n \"for kper {}\".format(per)\n )\n for kij in count[count > 1].index.values:\n print(\" (k,i,j) {}\".format(kij))\n dfi = dfg.sum() # aggregate\n dfi.columns = list([\"{}{}\".format(c, per) for c in varnames])\n dfs.append(dfi)\n df = pd.concat(dfs, axis=1)\n if squeeze:\n keep = []\n for var in varnames:\n diffcols = list([n for n in df.columns if var in n])\n diff = df[diffcols].fillna(0).diff(axis=1)\n diff[\n \"{}0\".format(var)\n ] = 1 # always return the first stress period\n changed = diff.sum(axis=0) != 0\n keep.append(df.loc[:, changed.index[changed]])\n df = pd.concat(keep, axis=1)\n df = df.reset_index()\n df.insert(len(names), \"node\", df.i * self._model.ncol + df.j)\n return df\n\n def add_record(self, kper, index, values):\n # Add a record to possible already set list for a given kper\n # index is a list of k,i,j or nodes.\n # values is a list of floats.\n # The length of index + values must be equal to the number of names\n # in dtype\n assert len(index) + len(values) == len(self.dtype), (\n \"MfList.add_record() error: length of index arg +\"\n + \"length of value arg != length of self dtype\"\n )\n # If we already have something for this kper, then add to it\n if kper in list(self.__data.keys()):\n if self.vtype[kper] == int:\n # If a 0 or -1, reset\n self.__data[kper] = self.get_empty(1)\n self.__vtype[kper] = np.recarray\n elif self.vtype[kper] == str:\n # If filename, load into recarray\n d = self.__fromfile(self.data[kper])\n d.resize(d.shape[0], d.shape[1])\n self.__data[kper] = d\n self.__vtype[kper] = np.recarray\n elif self.vtype[kper] == np.recarray:\n # Extend the recarray\n self.__data[kper] = np.append(\n self.__data[kper], self.get_empty(1)\n )\n else:\n self.__data[kper] = self.get_empty(1)\n self.__vtype[kper] = np.recarray\n rec = list(index)\n rec.extend(list(values))\n try:\n self.__data[kper][-1] = tuple(rec)\n except Exception as e:\n raise Exception(\n \"MfList.add_record() error: adding record to \"\n + \"recarray: \"\n + str(e)\n )\n\n def __getitem__(self, kper):\n # Get the recarray for a given kper\n # If the data entry for kper is a string,\n # return the corresponding recarray,\n # but don't reset the value in the data dict\n # assert kper in list(self.data.keys()), \"MfList.__getitem__() kper \" + \\\n # str(kper) + \" not in data.keys()\"\n try:\n kper = int(kper)\n except Exception as e:\n raise Exception(\n \"MfList error: _getitem__() passed invalid kper index:\"\n + str(kper)\n )\n if kper not in list(self.data.keys()):\n if kper == 0:\n return self.get_empty()\n else:\n return self.data[self.__find_last_kper(kper)]\n if self.vtype[kper] == int:\n if self.data[kper] == 0:\n return self.get_empty()\n else:\n return self.data[self.__find_last_kper(kper)]\n if self.vtype[kper] == str:\n return self.__fromfile(self.data[kper])\n if self.vtype[kper] == np.recarray:\n return self.data[kper]\n\n def __setitem__(self, kper, data):\n if kper in list(self.__data.keys()):\n if self._model.verbose:\n print(\"removing existing data for kper={}\".format(kper))\n self.data.pop(kper)\n # If data is a list, then all we can do is try to cast it 
to\n # an ndarray, then cast again to a recarray\n if isinstance(data, list):\n # warnings.warn(\"MfList casting list to array\")\n try:\n data = np.array(data)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list to ndarray: \" + str(e)\n )\n # cast data\n if isinstance(data, int):\n self.__cast_int(kper, data)\n elif isinstance(data, np.recarray):\n self.__cast_recarray(kper, data)\n # A single ndarray\n elif isinstance(data, np.ndarray):\n self.__cast_ndarray(kper, data)\n # A single filename\n elif isinstance(data, str):\n self.__cast_str(kper, data)\n else:\n raise Exception(\n \"MfList error: unsupported data type: \" + str(type(data))\n )\n\n # raise NotImplementedError(\"MfList.__setitem__() not implemented\")\n\n def __fromfile(self, f):\n # d = np.fromfile(f,dtype=self.dtype,count=count)\n try:\n d = np.genfromtxt(f, dtype=self.dtype)\n except Exception as e:\n raise Exception(\n \"MfList.__fromfile() error reading recarray \"\n + \"from file \"\n + str(e)\n )\n return d\n\n def get_filenames(self):\n kpers = list(self.data.keys())\n kpers.sort()\n filenames = []\n first = kpers[0]\n for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):\n # Fill missing early kpers with 0\n if kper < first:\n itmp = 0\n kper_vtype = int\n elif kper in kpers:\n kper_vtype = self.__vtype[kper]\n\n if (\n self._model.array_free_format\n and self._model.external_path is not None\n ):\n # py_filepath = ''\n # py_filepath = os.path.join(py_filepath,\n # self._model.external_path)\n filename = self.package.name[0] + \"_{0:04d}.dat\".format(kper)\n filenames.append(filename)\n return filenames\n\n def get_filename(self, kper):\n ext = \"dat\"\n if self.binary:\n ext = \"bin\"\n return self.package.name[0] + \"_{0:04d}.{1}\".format(kper, ext)\n\n @property\n def binary(self):\n return bool(self.__binary)\n\n def write_transient(self, f, single_per=None, forceInternal=False):\n # forceInternal overrides isExternal (set below) for cases where\n # external arrays are not supported (oh hello MNW1!)\n # write the transient sequence described by the data dict\n nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()\n assert hasattr(f, \"read\"), (\n \"MfList.write() error: \" + \"f argument must be a file handle\"\n )\n kpers = list(self.data.keys())\n kpers.sort()\n first = kpers[0]\n if single_per is None:\n loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))\n else:\n if not isinstance(single_per, list):\n single_per = [single_per]\n loop_over_kpers = single_per\n\n for kper in loop_over_kpers:\n # Fill missing early kpers with 0\n if kper < first:\n itmp = 0\n kper_vtype = int\n elif kper in kpers:\n kper_data = self.__data[kper]\n kper_vtype = self.__vtype[kper]\n if kper_vtype == str:\n if not self._model.array_free_format:\n kper_data = self.__fromfile(kper_data)\n kper_vtype = np.recarray\n itmp = self.get_itmp(kper)\n if kper_vtype == np.recarray:\n itmp = kper_data.shape[0]\n elif (kper_vtype == int) or (kper_vtype is None):\n itmp = kper_data\n # Fill late missing kpers with -1\n else:\n itmp = -1\n kper_vtype = int\n\n f.write(\n \" {0:9d} {1:9d} # stress period {2:d}\\n\".format(\n itmp, 0, kper + 1\n )\n )\n\n isExternal = False\n if (\n self._model.array_free_format\n and self._model.external_path is not None\n and forceInternal is False\n ):\n isExternal = True\n if self.__binary:\n isExternal = True\n if isExternal:\n if kper_vtype == np.recarray:\n py_filepath = \"\"\n if self._model.model_ws is not None:\n py_filepath = self._model.model_ws\n if 
self._model.external_path is not None:\n py_filepath = os.path.join(\n py_filepath, self._model.external_path\n )\n filename = self.get_filename(kper)\n py_filepath = os.path.join(py_filepath, filename)\n model_filepath = filename\n if self._model.external_path is not None:\n model_filepath = os.path.join(\n self._model.external_path, filename\n )\n self.__tofile(py_filepath, kper_data)\n kper_vtype = str\n kper_data = model_filepath\n\n if kper_vtype == np.recarray:\n name = f.name\n if self.__binary or not numpy114:\n f.close()\n # switch file append mode to binary\n with open(name, \"ab+\") as f:\n self.__tofile(f, kper_data)\n # continue back to non-binary\n f = open(name, \"a\")\n else:\n self.__tofile(f, kper_data)\n elif kper_vtype == str:\n f.write(\" open/close \" + kper_data)\n if self.__binary:\n f.write(\" (BINARY)\")\n f.write(\"\\n\")\n\n def __tofile(self, f, data):\n # Write the recarray (data) to the file (or file handle) f\n assert isinstance(data, np.recarray), (\n \"MfList.__tofile() data arg \" + \"not a recarray\"\n )\n\n # Add one to the kij indices\n lnames = [name.lower() for name in self.dtype.names]\n # --make copy of data for multiple calls\n d = data.copy()\n for idx in [\"k\", \"i\", \"j\", \"node\"]:\n if idx in lnames:\n d[idx] += 1\n if self.__binary:\n dtype2 = []\n for name in self.dtype.names:\n dtype2.append((name, np.float32))\n dtype2 = np.dtype(dtype2)\n d = np.array(d, dtype=dtype2)\n d.tofile(f)\n else:\n np.savetxt(f, d, fmt=self.fmt_string, delimiter=\"\")\n\n def check_kij(self):\n names = self.dtype.names\n if (\"k\" not in names) or (\"i\" not in names) or (\"j\" not in names):\n warnings.warn(\n \"MfList.check_kij(): index fieldnames 'k,i,j' \"\n + \"not found in self.dtype names: \"\n + str(names)\n )\n return\n nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()\n if nl == 0:\n warnings.warn(\n \"MfList.check_kij(): unable to get dis info from \" + \"model\"\n )\n return\n for kper in list(self.data.keys()):\n out_idx = []\n data = self[kper]\n if data is not None:\n k = data[\"k\"]\n k_idx = np.where(np.logical_or(k < 0, k >= nl))\n if k_idx[0].shape[0] > 0:\n out_idx.extend(list(k_idx[0]))\n i = data[\"i\"]\n i_idx = np.where(np.logical_or(i < 0, i >= nr))\n if i_idx[0].shape[0] > 0:\n out_idx.extend(list(i_idx[0]))\n j = data[\"j\"]\n j_idx = np.where(np.logical_or(j < 0, j >= nc))\n if j_idx[0].shape[0]:\n out_idx.extend(list(j_idx[0]))\n\n if len(out_idx) > 0:\n warn_str = (\n \"MfList.check_kij(): warning the following \"\n + \"indices are out of bounds in kper \"\n + str(kper)\n + \":\\n\"\n )\n for idx in out_idx:\n d = data[idx]\n warn_str += \" {0:9d} {1:9d} {2:9d}\\n\".format(\n d[\"k\"] + 1, d[\"i\"] + 1, d[\"j\"] + 1\n )\n warnings.warn(warn_str)\n\n def __find_last_kper(self, kper):\n kpers = list(self.data.keys())\n kpers.sort()\n last = 0\n for kkper in kpers[::-1]:\n # if this entry is valid\n if self.vtype[kkper] != int or self.data[kkper] != -1:\n last = kkper\n if kkper <= kper:\n break\n return kkper\n\n def get_indices(self):\n \"\"\"\n a helper function for plotting - get all unique indices\n \"\"\"\n names = self.dtype.names\n lnames = []\n [lnames.append(name.lower()) for name in names]\n if \"k\" not in lnames or \"j\" not in lnames:\n raise NotImplementedError(\"MfList.get_indices requires kij\")\n kpers = list(self.data.keys())\n kpers.sort()\n indices = []\n for i, kper in enumerate(kpers):\n kper_vtype = self.__vtype[kper]\n if (kper_vtype != int) or (kper_vtype is not None):\n d = self.data[kper]\n if not 
indices:\n indices = list(zip(d[\"k\"], d[\"i\"], d[\"j\"]))\n else:\n new_indices = list(zip(d[\"k\"], d[\"i\"], d[\"j\"]))\n for ni in new_indices:\n if ni not in indices:\n indices.append(ni)\n return indices\n\n def attribute_by_kper(self, attr, function=np.mean, idx_val=None):\n assert attr in self.dtype.names\n if idx_val is not None:\n assert idx_val[0] in self.dtype.names\n kpers = list(self.data.keys())\n kpers.sort()\n values = []\n for kper in range(0, max(self._model.nper, max(kpers))):\n\n if kper < min(kpers):\n values.append(0)\n elif kper > max(kpers) or kper not in kpers:\n values.append(values[-1])\n else:\n kper_data = self.__data[kper]\n if idx_val is not None:\n kper_data = kper_data[\n np.where(kper_data[idx_val[0]] == idx_val[1])\n ]\n # kper_vtype = self.__vtype[kper]\n v = function(kper_data[attr])\n values.append(v)\n return values\n\n def plot(\n self,\n key=None,\n names=None,\n kper=0,\n filename_base=None,\n file_extension=None,\n mflay=None,\n **kwargs\n ):\n \"\"\"\n Plot stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. 
Otherwise\n a list of matplotlib.pyplot.axis is returned.\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.stress_period_data.plot(ml.wel, kper=1)\n\n \"\"\"\n\n from flopy.plot import PlotUtilities\n\n axes = PlotUtilities._plot_mflist_helper(\n self,\n key=key,\n names=names,\n kper=kper,\n filename_base=filename_base,\n file_extension=file_extension,\n mflay=mflay,\n **kwargs\n )\n\n return axes\n\n def to_shapefile(self, filename, kper=None):\n \"\"\"\n Export stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n filename : str\n Shapefile name to write\n kper : int\n MODFLOW zero-based stress period number to return. (default is None)\n\n Returns\n ----------\n None\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.to_shapefile('test_hk.shp', kper=1)\n \"\"\"\n import warnings\n\n warnings.warn(\n \"Deprecation warning: to_shapefile() is deprecated. use .export()\"\n )\n\n # if self.sr is None:\n # raise Exception(\"MfList.to_shapefile: SpatialReference not set\")\n # import flopy.utils.flopy_io as fio\n # if kper is None:\n # keys = self.data.keys()\n # keys.sort()\n # else:\n # keys = [kper]\n # array_dict = {}\n # for kk in keys:\n # arrays = self.to_array(kk)\n # for name, array in arrays.items():\n # for k in range(array.shape[0]):\n # #aname = name+\"{0:03d}_{1:02d}\".format(kk, k)\n # n = fio.shape_attr_name(name, length=4)\n # aname = \"{}{:03d}{:03d}\".format(n, k+1, int(kk)+1)\n # array_dict[aname] = array[k]\n # fio.write_grid_shapefile(filename, self.sr, array_dict)\n self.export(filename, kper=kper)\n\n def to_array(self, kper=0, mask=False):\n \"\"\"\n Convert stress period boundary condition (MfList) data for a\n specified stress period to a 3-D numpy array\n\n Parameters\n ----------\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n mask : boolean\n return array with np.NaN instead of zero\n Returns\n ----------\n out : dict of numpy.ndarrays\n Dictionary of 3-D numpy arrays containing the stress period data for\n a selected stress period. 
The dictionary keys are the MfList dtype\n names for the stress period data ('cond', 'flux', 'bhead', etc.).\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> v = ml.wel.stress_period_data.to_array(kper=1)\n\n \"\"\"\n i0 = 3\n unstructured = False\n if \"inode\" in self.dtype.names:\n raise NotImplementedError()\n\n if \"node\" in self.dtype.names:\n if \"i\" not in self.dtype.names and \"j\" not in self.dtype.names:\n i0 = 1\n unstructured = True\n\n arrays = {}\n for name in self.dtype.names[i0:]:\n if not self.dtype.fields[name][0] == object:\n if unstructured:\n arr = np.zeros((self._model.nlay * self._model.ncpl,))\n else:\n arr = np.zeros(\n (self._model.nlay, self._model.nrow, self._model.ncol)\n )\n arrays[name] = arr.copy()\n\n # if this kper is not found\n if kper not in self.data.keys():\n kpers = list(self.data.keys())\n kpers.sort()\n # if this kper is before the first entry,\n # (maybe) mask and return\n if kper < kpers[0]:\n if mask:\n for name, arr in arrays.items():\n arrays[name][:] = np.NaN\n return arrays\n # find the last kper\n else:\n kper = self.__find_last_kper(kper)\n\n sarr = self.data[kper]\n\n if np.isscalar(sarr):\n # if there are no entries for this kper\n if sarr == 0:\n if mask:\n for name, arr in arrays.items():\n arrays[name][:] = np.NaN\n return arrays\n else:\n raise Exception(\"MfList: something bad happened\")\n\n for name, arr in arrays.items():\n if unstructured:\n cnt = np.zeros(\n (self._model.nlay * self._model.ncpl,), dtype=np.float\n )\n else:\n cnt = np.zeros(\n (self._model.nlay, self._model.nrow, self._model.ncol),\n dtype=np.float,\n )\n # print(name,kper)\n for rec in sarr:\n if unstructured:\n arr[rec[\"node\"]] += rec[name]\n cnt[rec[\"node\"]] += 1.0\n else:\n arr[rec[\"k\"], rec[\"i\"], rec[\"j\"]] += rec[name]\n cnt[rec[\"k\"], rec[\"i\"], rec[\"j\"]] += 1.0\n # average keys that should not be added\n if name not in (\"cond\", \"flux\"):\n idx = cnt > 0.0\n arr[idx] /= cnt[idx]\n if mask:\n arr = np.ma.masked_where(cnt == 0.0, arr)\n arr[cnt == 0.0] = np.NaN\n\n arrays[name] = arr.copy()\n # elif mask:\n # for name, arr in arrays.items():\n # arrays[name][:] = np.NaN\n return arrays\n\n @property\n def masked_4D_arrays(self):\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n # initialize these big arrays\n m4ds = {}\n for name, array in arrays.items():\n m4d = np.zeros(\n (\n self._model.nper,\n self._model.nlay,\n self._model.nrow,\n self._model.ncol,\n )\n )\n m4d[0, :, :, :] = array\n m4ds[name] = m4d\n for kper in range(1, self._model.nper):\n arrays = self.to_array(kper=kper, mask=True)\n for name, array in arrays.items():\n m4ds[name][kper, :, :, :] = array\n return m4ds\n\n def masked_4D_arrays_itr(self):\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n # initialize these big arrays\n for name, array in arrays.items():\n m4d = np.zeros(\n (\n self._model.nper,\n self._model.nlay,\n self._model.nrow,\n self._model.ncol,\n )\n )\n m4d[0, :, :, :] = array\n for kper in range(1, self._model.nper):\n arrays = self.to_array(kper=kper, mask=True)\n for tname, array in arrays.items():\n if tname == name:\n m4d[kper, :, :, :] = array\n yield name, m4d\n\n @property\n def array(self):\n return self.masked_4D_arrays\n\n @classmethod\n def from_4d(cls, model, pak_name, m4ds):\n \"\"\"construct an MfList instance from a dict of\n (attribute_name,masked 4D ndarray\n Parameters\n ----------\n model : mbase 
derived type\n pak_name : str package name (e.g GHB)\n m4ds : {attribute name:4d masked numpy.ndarray}\n Returns\n -------\n MfList instance\n \"\"\"\n sp_data = MfList.masked4D_arrays_to_stress_period_data(\n model.get_package(pak_name).get_default_dtype(), m4ds\n )\n return cls(model.get_package(pak_name), data=sp_data)\n\n @staticmethod\n def masked4D_arrays_to_stress_period_data(dtype, m4ds):\n \"\"\" convert a dictionary of 4-dim masked arrays to\n a stress_period_data style dict of recarray\n Parameters\n ----------\n dtype : numpy dtype\n\n m4ds : dict {name:masked numpy 4-dim ndarray}\n Returns\n -------\n dict {kper:recarray}\n \"\"\"\n assert isinstance(m4ds, dict)\n for name, m4d in m4ds.items():\n assert isinstance(m4d, np.ndarray)\n assert name in dtype.names\n assert m4d.ndim == 4\n keys = list(m4ds.keys())\n\n for i1, key1 in enumerate(keys):\n a1 = np.isnan(m4ds[key1])\n for i2, key2 in enumerate(keys[i1:]):\n a2 = np.isnan(m4ds[key2])\n if not np.array_equal(a1, a2):\n raise Exception(\n \"Transient2d error: masking not equal\"\n + \" for {0} and {1}\".format(key1, key2)\n )\n\n sp_data = {}\n for kper in range(m4d.shape[0]):\n vals = {}\n for name, m4d in m4ds.items():\n arr = m4d[kper, :, :, :]\n isnan = np.argwhere(~np.isnan(arr))\n v = []\n for k, i, j in isnan:\n v.append(arr[k, i, j])\n vals[name] = v\n kk = isnan[:, 0]\n ii = isnan[:, 1]\n jj = isnan[:, 2]\n\n spd = np.recarray(shape=isnan.shape[0], dtype=dtype)\n spd[\"i\"] = ii\n spd[\"k\"] = kk\n spd[\"j\"] = jj\n for n, v in vals.items():\n spd[n] = v\n sp_data[kper] = spd\n return sp_data\n"
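MfList itself is rarely constructed directly; it backs the stress_period_data attribute of list-based flopy packages. The following is a small illustrative sketch under that assumption, using a throwaway model ("demo") and made-up well records; get_dataframe() additionally requires pandas.

import flopy

ml = flopy.modflow.Modflow("demo", model_ws=".")
dis = flopy.modflow.ModflowDis(ml, nlay=1, nrow=10, ncol=10, nper=3)

# A {kper: list of [k, i, j, flux]} dict is cast by MfList.__cast_data() into
# per-stress-period recarrays; kper 1 is not given, so __getitem__ falls back
# to the last defined kper via __find_last_kper().
wel_spd = {0: [[0, 4, 4, -100.0]], 2: [[0, 4, 4, -50.0]]}
wel = flopy.modflow.ModflowWel(ml, stress_period_data=wel_spd)

print(wel.stress_period_data.mxact)            # max active records in any kper
df = wel.stress_period_data.get_dataframe()    # one column per variable and kper
flux0 = wel.stress_period_data.to_array(kper=0)["flux"]  # 3-D array for kper 0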
] | [
[
"numpy.nanmax",
"numpy.min",
"numpy.asarray",
"numpy.arange",
"numpy.isnan",
"numpy.nanmin",
"numpy.cumsum",
"numpy.nan_to_num",
"numpy.max",
"numpy.zeros_like",
"numpy.isscalar",
"numpy.errstate",
"numpy.array",
"numpy.where"
],
[
"pandas.concat",
"numpy.array_equal",
"numpy.isnan",
"numpy.ma.masked_where",
"numpy.dtype",
"numpy.genfromtxt",
"pandas.DataFrame",
"numpy.atleast_2d",
"pandas.DataFrame.from_records",
"numpy.logical_or",
"numpy.isscalar",
"numpy.savetxt",
"numpy.lib.NumpyVersion",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.recarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Karol-G/nnUNet | [
"a30bdbd64254c94c515ee03617173eb217eea505",
"a30bdbd64254c94c515ee03617173eb217eea505",
"a30bdbd64254c94c515ee03617173eb217eea505"
] | [
"i3Deep/merge_labels.py",
"nnunet/training/loss_functions/dice_loss.py",
"nnunet/network_architecture/P_Net.py"
] | [
"import numpy as np\r\nfrom i3Deep import utils\r\nfrom tqdm import tqdm\r\nimport os\r\n\r\n\r\n# name = \"KGU-53317EB91645\"\r\n# load_mask = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/mask.nii.gz\"\r\n# load_label_table = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/label_table.txt\"\r\n# save_mask = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/mask2.nii.gz\"\r\nload_path = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\"\r\n\r\n\r\ndef rename(case_path):\r\n filenames = utils.load_filenames(case_path + \"/\", extensions=None)\r\n for filename in filenames:\r\n name = os.path.basename(filename)\r\n if \"label\" in name and \".nii.gz\" in name:\r\n os.rename(filename, case_path + \"/mask.nii.gz\")\r\n elif \".txt\" in name:\r\n os.rename(filename, case_path + \"/label_table.txt\")\r\n elif \".nii.gz\" in name:\r\n os.rename(filename, case_path + \"/image.nii.gz\")\r\n\r\n\r\ndef get_labels(load_label_table):\r\n with open(load_label_table) as f:\r\n label_table = f.readlines()\r\n label_table = np.asarray(label_table)\r\n\r\n ggo = []\r\n cons = []\r\n pe = []\r\n for line in label_table:\r\n label = line.split()[0]\r\n if label.isnumeric():\r\n if \"Background\" in line or \"background\" in line:\r\n continue\r\n infection = line.split(\"_\")[1]\r\n keywords = [\"ggo\", \"gg\"]\r\n if any(x in infection.lower() for x in keywords):\r\n ggo.append(int(label))\r\n keywords = [\"cons\", \"cns\", \"con\", \"cos\", \"co\"]\r\n if any(x in infection.lower() for x in keywords):\r\n cons.append(int(label))\r\n keywords = [\"pe\", \"pes\"]\r\n if any(x in infection.lower() for x in keywords):\r\n pe.append(int(label))\r\n return ggo, cons, pe\r\n\r\n\r\ndef merge_labels(load_mask, save_mask, load_label_table):\r\n mask, affine, spacing, header = utils.load_nifty(load_mask)\r\n mask = mask.astype(int)\r\n ggo, cons, pe = get_labels(load_label_table)\r\n\r\n for label in tqdm(np.concatenate((ggo, cons, pe), axis=0), disable=True):\r\n mask[mask == label] = -label\r\n\r\n for label in tqdm(ggo, disable=True):\r\n mask[mask == -label] = 1\r\n\r\n for label in tqdm(cons, disable=True):\r\n mask[mask == -label] = 2\r\n\r\n for label in tqdm(pe, disable=True):\r\n mask[mask == -label] = 3\r\n\r\n mask = np.rint(mask)\r\n mask = mask.astype(int)\r\n\r\n utils.save_nifty(save_mask, mask, affine, spacing, header)\r\n\r\ndef round_mask(filename):\r\n mask, affine, spacing, header = utils.load_nifty(filename)\r\n mask = np.rint(mask)\r\n mask = mask.astype(int)\r\n utils.save_nifty(filename, mask, affine, spacing, header)\r\n\r\ndef tmp2(filename):\r\n mask, affine, spacing, header = utils.load_nifty(filename)\r\n print(mask[46-1][155-1][116-1])\r\n\r\n\r\nif __name__ == '__main__':\r\n # filenames = utils.load_filenames(load_path, extensions=None)\r\n # for filename in tqdm(filenames):\r\n # if os.path.isfile(filename + \"/mask2.nii.gz\"):\r\n # continue\r\n # rename(filename)\r\n # load_mask = filename + \"/mask.nii.gz\"\r\n # save_mask = filename + \"/mask2.nii.gz\"\r\n # load_label_table = filename + \"/label_table.txt\"\r\n # merge_labels(load_mask, save_mask, load_label_table)\r\n\r\n # for filename in tqdm(filenames):\r\n # old_mask = filename + \"/mask.nii.gz\"\r\n # new_mask = filename + \"/mask2.nii.gz\"\r\n # label_table = filename + \"/label_table.txt\"\r\n # if os.path.exists(new_mask):\r\n # os.remove(old_mask)\r\n # os.rename(new_mask, old_mask)\r\n # os.remove(label_table)\r\n\r\n # filenames = 
utils.load_filenames(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task79_frankfurt3/labelsTr/\", extensions=None)\r\n # for filename in tqdm(filenames):\r\n # mask, affine, spacing, header = utils.load_nifty(filename)\r\n # mask = np.rint(mask)\r\n # mask = mask.astype(np.uint8)\r\n # utils.save_nifty(filename, mask, affine, spacing, header)\r\n\r\n # filename = \"/gris/gris-f/homelv/kgotkows/datasets/covid19/UK_Frankfurt3/KGU-E9EC0F06F1D6/mask.nii.gz\"\r\n # mask, affine, spacing, header = utils.load_nifty(filename)\r\n # mask[mask == 5] = 2\r\n # mask[mask == 6] = 2\r\n # utils.save_nifty(filename, mask, affine, spacing, header)\r\n #tmp(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task077_frankfurt3Guided/imagesTr/0001_0001.nii.gz\")\r\n tmp2(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task77_frankfurt3Guided/tmp/900.nii.gz\")",
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport torch\nfrom nnunet.training.loss_functions.TopK_loss import TopKLoss\nfrom nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom nnunet.utilities.tensor_utilities import sum_tensor\nfrom torch import nn\nimport numpy as np\n\n\nclass GDL(nn.Module):\n def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,\n square=False, square_volumes=False):\n \"\"\"\n square_volumes will square the weight term. The paper recommends square_volumes=True; I don't (just an intuition)\n \"\"\"\n super(GDL, self).__init__()\n\n self.square_volumes = square_volumes\n self.square = square\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n\n def forward(self, x, y, loss_mask=None):\n shp_x = x.shape\n shp_y = y.shape\n\n if self.batch_dice:\n axes = [0] + list(range(2, len(shp_x)))\n else:\n axes = list(range(2, len(shp_x)))\n\n if len(shp_x) != len(shp_y):\n y = y.view((shp_y[0], 1, *shp_y[1:]))\n\n if all([i == j for i, j in zip(x.shape, y.shape)]):\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = y\n else:\n gt = y.long()\n y_onehot = torch.zeros(shp_x)\n if x.device.type == \"cuda\":\n y_onehot = y_onehot.cuda(x.device.index)\n y_onehot.scatter_(1, gt, 1)\n\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n if not self.do_bg:\n x = x[:, 1:]\n y_onehot = y_onehot[:, 1:]\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)\n\n # GDL weight computation, we use 1/V\n volumes = sum_tensor(y_onehot, axes) + 1e-6 # add some eps to prevent div by zero\n\n if self.square_volumes:\n volumes = volumes ** 2\n\n # apply weights\n tp = tp / volumes\n fp = fp / volumes\n fn = fn / volumes\n\n # sum over classes\n if self.batch_dice:\n axis = 0\n else:\n axis = 1\n\n tp = tp.sum(axis, keepdim=False)\n fp = fp.sum(axis, keepdim=False)\n fn = fn.sum(axis, keepdim=False)\n\n # compute dice\n dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)\n\n dc = dc.mean()\n\n return -dc\n\n\ndef get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):\n \"\"\"\n net_output must be (b, c, x, y(, z)))\n gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))\n if mask is provided it must have shape (b, 1, x, y(, z)))\n :param net_output:\n :param gt:\n :param axes: can be (, ) = no summation\n :param mask: mask must be 1 for valid pixels and 0 for invalid pixels\n :param square: if True then fp, tp and fn will be squared before summation\n :return:\n \"\"\"\n if axes is None:\n axes = tuple(range(2, len(net_output.size())))\n\n shp_x = net_output.shape\n shp_y = gt.shape\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n gt = gt.view((shp_y[0], 
1, *shp_y[1:]))\n\n if all([i == j for i, j in zip(net_output.shape, gt.shape)]):\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = gt\n else:\n gt = gt.long()\n y_onehot = torch.zeros(shp_x)\n if net_output.device.type == \"cuda\":\n y_onehot = y_onehot.cuda(net_output.device.index)\n y_onehot.scatter_(1, gt, 1)\n\n tp = net_output * y_onehot\n fp = net_output * (1 - y_onehot)\n fn = (1 - net_output) * y_onehot\n tn = (1 - net_output) * (1 - y_onehot)\n\n if mask is not None:\n tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)\n fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)\n fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)\n tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)\n\n if square:\n tp = tp ** 2\n fp = fp ** 2\n fn = fn ** 2\n tn = tn ** 2\n\n if len(axes) > 0:\n tp = sum_tensor(tp, axes, keepdim=False)\n fp = sum_tensor(fp, axes, keepdim=False)\n fn = sum_tensor(fn, axes, keepdim=False)\n tn = sum_tensor(tn, axes, keepdim=False)\n\n return tp, fp, fn, tn\n\n\nclass SoftDiceLoss(nn.Module):\n def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):\n \"\"\"\n \"\"\"\n super(SoftDiceLoss, self).__init__()\n\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n\n def forward(self, x, y, loss_mask=None):\n shp_x = x.shape\n\n if self.batch_dice:\n axes = [0] + list(range(2, len(shp_x)))\n else:\n axes = list(range(2, len(shp_x)))\n\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)\n\n nominator = 2 * tp + self.smooth\n denominator = 2 * tp + fp + fn + self.smooth\n\n dc = nominator / (denominator + 1e-8)\n\n if not self.do_bg:\n if self.batch_dice:\n dc = dc[1:]\n else:\n dc = dc[:, 1:]\n dc = dc.mean()\n\n return -dc\n\n\nclass MCCLoss(nn.Module):\n def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0):\n \"\"\"\n based on matthews correlation coefficient\n https://en.wikipedia.org/wiki/Matthews_correlation_coefficient\n\n Does not work. Really unstable. 
F this.\n \"\"\"\n super(MCCLoss, self).__init__()\n\n self.smooth = smooth\n self.do_bg = do_bg\n self.batch_mcc = batch_mcc\n self.apply_nonlin = apply_nonlin\n\n def forward(self, x, y, loss_mask=None):\n shp_x = x.shape\n voxels = np.prod(shp_x[2:])\n\n if self.batch_mcc:\n axes = [0] + list(range(2, len(shp_x)))\n else:\n axes = list(range(2, len(shp_x)))\n\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)\n tp /= voxels\n fp /= voxels\n fn /= voxels\n tn /= voxels\n\n nominator = tp * tn - fp * fn + self.smooth\n denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth\n\n mcc = nominator / denominator\n\n if not self.do_bg:\n if self.batch_mcc:\n mcc = mcc[1:]\n else:\n mcc = mcc[:, 1:]\n mcc = mcc.mean()\n\n return -mcc\n\n\nclass SoftDiceLossSquared(nn.Module):\n def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):\n \"\"\"\n squares the terms in the denominator as proposed by Milletari et al.\n \"\"\"\n super(SoftDiceLossSquared, self).__init__()\n\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n\n def forward(self, x, y, loss_mask=None):\n shp_x = x.shape\n shp_y = y.shape\n\n if self.batch_dice:\n axes = [0] + list(range(2, len(shp_x)))\n else:\n axes = list(range(2, len(shp_x)))\n\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n y = y.view((shp_y[0], 1, *shp_y[1:]))\n\n if all([i == j for i, j in zip(x.shape, y.shape)]):\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = y\n else:\n y = y.long()\n y_onehot = torch.zeros(shp_x)\n if x.device.type == \"cuda\":\n y_onehot = y_onehot.cuda(x.device.index)\n y_onehot.scatter_(1, y, 1).float()\n\n intersect = x * y_onehot\n # values in the denominator get smoothed\n denominator = x ** 2 + y_onehot ** 2\n\n # aggregation was previously done in get_tp_fp_fn, but needs to be done here now (needs to be done after\n # squaring)\n intersect = sum_tensor(intersect, axes, False) + self.smooth\n denominator = sum_tensor(denominator, axes, False) + self.smooth\n\n dc = 2 * intersect / denominator\n\n if not self.do_bg:\n if self.batch_dice:\n dc = dc[1:]\n else:\n dc = dc[:, 1:]\n dc = dc.mean()\n\n return -dc\n\n\nclass DC_and_CE_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate=\"sum\", square_dice=False, weight_ce=1, weight_dice=1,\n log_dice=False, ignore_label=None):\n \"\"\"\n CAREFUL. Weights for CE and Dice do not need to sum to one. 
You can set whatever you want.\n :param soft_dice_kwargs:\n :param ce_kwargs:\n :param aggregate:\n :param square_dice:\n :param weight_ce:\n :param weight_dice:\n \"\"\"\n super(DC_and_CE_loss, self).__init__()\n if ignore_label is not None:\n assert not square_dice, 'not implemented'\n ce_kwargs['reduction'] = 'none'\n self.log_dice = log_dice\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.aggregate = aggregate\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n\n self.ignore_label = ignore_label\n\n if not square_dice:\n self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n else:\n self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n\n def forward(self, net_output, target):\n \"\"\"\n target must be b, c, x, y(, z) with c=1\n :param net_output:\n :param target:\n :return:\n \"\"\"\n if self.ignore_label is not None:\n assert target.shape[1] == 1, 'not implemented for one hot encoding'\n mask = target != self.ignore_label\n target[~mask] = 0\n mask = mask.float()\n else:\n mask = None\n\n dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0\n if self.log_dice:\n dc_loss = -torch.log(-dc_loss)\n\n ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0\n if self.ignore_label is not None:\n ce_loss *= mask[:, 0]\n ce_loss = ce_loss.sum() / mask.sum()\n\n if self.aggregate == \"sum\":\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n else:\n raise NotImplementedError(\"nah son\") # reserved for other stuff (later)\n return result\n\n\nclass ATM_and_DC_and_CE_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate=\"sum\", square_dice=False, weight_ce=1, weight_dice=1, weight_atm=0.5,\n log_dice=False, ignore_label=None):\n \"\"\"\n CAREFUL. Weights for CE and Dice do not need to sum to one. 
You can set whatever you want.\n :param soft_dice_kwargs:\n :param ce_kwargs:\n :param aggregate:\n :param square_dice:\n :param weight_ce:\n :param weight_dice:\n \"\"\"\n super(ATM_and_DC_and_CE_loss, self).__init__()\n if ignore_label is not None:\n assert not square_dice, 'not implemented'\n ce_kwargs['reduction'] = 'none'\n self.log_dice = log_dice\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.aggregate = aggregate\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n self.atm = ATM(apply_nonlin=softmax_helper, weight_atm=weight_atm)\n\n self.ignore_label = ignore_label\n\n if not square_dice:\n self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n else:\n self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n\n def forward(self, net_output, target):\n \"\"\"\n target must be b, c, x, y(, z) with c=1\n :param net_output:\n :param target:\n :return:\n \"\"\"\n if self.ignore_label is not None:\n assert target.shape[1] == 1, 'not implemented for one hot encoding'\n mask = target != self.ignore_label\n target[~mask] = 0\n mask = mask.float()\n else:\n mask = None\n\n net_output = net_output * self.atm(net_output, target)\n dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0\n if self.log_dice:\n dc_loss = -torch.log(-dc_loss)\n\n ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0\n if self.ignore_label is not None:\n ce_loss *= mask[:, 0]\n ce_loss = ce_loss.sum() / mask.sum()\n\n if self.aggregate == \"sum\":\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n else:\n raise NotImplementedError(\"nah son\") # reserved for other stuff (later)\n return result\n\n\nclass ATM(nn.Module):\n def __init__(self, apply_nonlin=None, weight_atm=0.5):\n \"\"\"\n \"\"\"\n super(ATM, self).__init__()\n\n self.apply_nonlin = apply_nonlin\n self.weight_atm = weight_atm\n\n def forward(self, x, y):\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n atm = torch.exp((x-y)/self.weight_atm)\n\n return atm\n\n\nclass DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate=\"sum\"):\n \"\"\"\n DO NOT APPLY NONLINEARITY IN YOUR NETWORK!\n\n THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY\n :param soft_dice_kwargs:\n :param bce_kwargs:\n :param aggregate:\n \"\"\"\n super(DC_and_BCE_loss, self).__init__()\n\n self.aggregate = aggregate\n self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)\n self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)\n\n def forward(self, net_output, target):\n ce_loss = self.ce(net_output, target)\n dc_loss = self.dc(net_output, target)\n\n if self.aggregate == \"sum\":\n result = ce_loss + dc_loss\n else:\n raise NotImplementedError(\"nah son\") # reserved for other stuff (later)\n\n return result\n\n\nclass GDL_and_CE_loss(nn.Module):\n def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate=\"sum\"):\n super(GDL_and_CE_loss, self).__init__()\n self.aggregate = aggregate\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n self.dc = GDL(softmax_helper, **gdl_dice_kwargs)\n\n def forward(self, net_output, target):\n dc_loss = self.dc(net_output, target)\n ce_loss = self.ce(net_output, target)\n if self.aggregate == \"sum\":\n result = ce_loss + dc_loss\n else:\n raise NotImplementedError(\"nah son\") # reserved for other stuff (later)\n return result\n\n\nclass DC_and_topk_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate=\"sum\", square_dice=False):\n 
super(DC_and_topk_loss, self).__init__()\n self.aggregate = aggregate\n self.ce = TopKLoss(**ce_kwargs)\n if not square_dice:\n self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n else:\n self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)\n\n def forward(self, net_output, target):\n dc_loss = self.dc(net_output, target)\n ce_loss = self.ce(net_output, target)\n if self.aggregate == \"sum\":\n result = ce_loss + dc_loss\n else:\n raise NotImplementedError(\"nah son\") # reserved for other stuff (later?)\n return result\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom nnunet.network_architecture.neural_network import SegmentationNetwork\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom nnunet.network_architecture.initialization import InitWeights_He\n\nclass P_Net(SegmentationNetwork):\n def __init__(self, patch_size, in_channels=2, out_channels=32, num_classes=2, weightInitializer=InitWeights_He(1e-2), deep_supervision=False, conv_op=None): # or out_channels = 16/64\n super(P_Net, self).__init__()\n\n self.conv_op = conv_op\n self.num_classes = num_classes\n self.do_ds = False\n # self.patch_size = [128, 128, 128] # patch_size.tolist()\n # self.original_size = patch_size.tolist()\n self.patch_size = patch_size.tolist()\n\n self.block1 = nn.Sequential(\n nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1), # or kernel_size=[3, 3, 3]\n nn.ReLU(),\n )\n self.block2 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=2),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=2), # or kernel_size=[3, 3, 3]\n nn.ReLU(),\n )\n self.block3 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3),\n nn.ReLU(),\n )\n self.block4 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4),\n nn.ReLU(),\n )\n self.block5 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5),\n nn.ReLU(),\n )\n self.block6 = nn.Sequential(\n nn.Conv3d(in_channels=int(out_channels/4)*5, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=num_classes, kernel_size=3, stride=1, padding=0, dilation=1),\n # nn.ReLU(),\n )\n\n self.compress1 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress2 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress3 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n 
nn.ReLU(),\n )\n self.compress4 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress5 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n\n self.upsample1 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False) # [96, 160, 160]\n self.upsample2 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample3 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample4 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample5 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample6 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n\n self.apply(weightInitializer)\n\n def forward(self, x):\n # x = F.interpolate(x, self.patch_size, mode=\"trilinear\", align_corners=False)\n x = self.block1(x)\n compress1 = self.compress1(x)\n x = self.block2(x)\n compress2 = self.compress2(x)\n x = self.block3(x)\n compress3 = self.compress3(x)\n x = self.block4(x)\n compress4 = self.compress4(x)\n x = self.block5(x)\n compress5 = self.compress5(x)\n compress1 = self.upsample1(compress1)\n compress2 = self.upsample2(compress2)\n compress3 = self.upsample3(compress3)\n compress4 = self.upsample4(compress4)\n compress5 = self.upsample5(compress5)\n x = torch.cat((compress1, compress2, compress3, compress4, compress5), dim=1)\n x = self.block6(x)\n x = self.upsample6(x)\n # x = softmax_helper(x)\n return x\n\n def compute_approx_vram_consumption(self):\n return 715000000\n\n\nclass P_Net2(SegmentationNetwork):\n def __init__(self, patch_size, in_channels=2, out_channels=32, num_classes=2, weightInitializer=InitWeights_He(1e-2), deep_supervision=False, conv_op=None): # or out_channels = 16/64\n super(P_Net2, self).__init__()\n\n self.conv_op = conv_op\n self.num_classes = num_classes\n self.do_ds = False\n self.patch_size = [128, 128, 128] # patch_size.tolist()\n self.original_size = patch_size.tolist()\n # self.patch_size = patch_size.tolist()\n\n self.block1 = nn.Sequential(\n nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1), # or kernel_size=[3, 3, 3]\n nn.ReLU(),\n )\n self.block2 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=2),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=2), # or kernel_size=[3, 3, 3]\n nn.ReLU(),\n )\n self.block3 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=3),\n nn.ReLU(),\n )\n self.block4 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, 
out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=4),\n nn.ReLU(),\n )\n self.block5 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5),\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=5),\n nn.ReLU(),\n )\n self.block6 = nn.Sequential(\n nn.Conv3d(in_channels=int(out_channels/4)*5, out_channels=out_channels, kernel_size=3, stride=1, padding=0, dilation=1), # or kernel_size=[3, 3, 1]\n nn.ReLU(),\n nn.Conv3d(in_channels=out_channels, out_channels=num_classes, kernel_size=3, stride=1, padding=0, dilation=1),\n # nn.ReLU(),\n )\n\n self.compress1 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress2 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress3 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress4 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n self.compress5 = nn.Sequential(\n nn.Conv3d(in_channels=out_channels, out_channels=int(out_channels/4), kernel_size=1, stride=1, padding=0, dilation=1),\n nn.ReLU(),\n )\n\n self.upsample1 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False) # [96, 160, 160]\n self.upsample2 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample3 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample4 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample5 = nn.Upsample(size=self.patch_size, mode='trilinear', align_corners=False)\n self.upsample6 = nn.Upsample(size=self.original_size, mode='trilinear', align_corners=False)\n\n self.apply(weightInitializer)\n\n def forward(self, x):\n x = F.interpolate(x, self.patch_size, mode=\"trilinear\", align_corners=False)\n x = self.block1(x)\n compress1 = self.compress1(x)\n x = self.block2(x)\n compress2 = self.compress2(x)\n x = self.block3(x)\n compress3 = self.compress3(x)\n x = self.block4(x)\n compress4 = self.compress4(x)\n x = self.block5(x)\n compress5 = self.compress5(x)\n compress1 = self.upsample1(compress1)\n compress2 = self.upsample2(compress2)\n compress3 = self.upsample3(compress3)\n compress4 = self.upsample4(compress4)\n compress5 = self.upsample5(compress5)\n x = torch.cat((compress1, compress2, compress3, compress4, compress5), dim=1)\n x = self.block6(x)\n x = self.upsample6(x)\n # x = softmax_helper(x)\n return x\n\n def compute_approx_vram_consumption(self):\n return 715000000\n\n"
] | [
[
"numpy.asarray",
"numpy.rint",
"numpy.concatenate"
],
[
"torch.zeros",
"torch.exp",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.log",
"numpy.prod",
"torch.unbind"
],
[
"torch.cat",
"torch.nn.Conv3d",
"torch.nn.Upsample",
"torch.nn.functional.interpolate",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
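The entry above serializes nnU-Net-style compound segmentation losses (GDL, SoftDiceLoss, DC_and_CE_loss, ...) alongside the dilated P_Net architecture. As a minimal illustration of the soft-Dice term those losses share, the sketch below reproduces the 2*TP / (2*TP + FP + FN) form on toy tensors in plain PyTorch; the function name, toy shapes and smoothing constant are illustrative assumptions and are not taken from the repository.

import torch

def soft_dice_loss(logits, target, smooth=1.0):
    # Illustrative sketch (not the repository's API): softmax over channels,
    # one-hot the label map, then return -mean(dice) to match the sign
    # convention used by SoftDiceLoss in the entry above.
    probs = torch.softmax(logits, dim=1)                     # (b, c, x, y, z)
    onehot = torch.zeros_like(probs).scatter_(1, target, 1)  # target: (b, 1, x, y, z), int64
    axes = tuple(range(2, probs.dim()))
    tp = (probs * onehot).sum(axes)
    fp = (probs * (1 - onehot)).sum(axes)
    fn = ((1 - probs) * onehot).sum(axes)
    dice = (2 * tp + smooth) / (2 * tp + fp + fn + smooth)
    return -dice.mean()

logits = torch.randn(2, 3, 8, 8, 8)              # toy batch: 2 samples, 3 classes, 8x8x8 patch
labels = torch.randint(0, 3, (2, 1, 8, 8, 8))    # label map with a channel dimension of 1
print(soft_dice_loss(logits, labels))

A perfect prediction drives this term towards -1 and a completely wrong one towards 0, which is why DC_and_CE_loss in the entry can simply add it to the non-negative cross-entropy term.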
pnkraemer/differentiable_likelihoods | [
"a07876dbf8fcd4aa14bf36bd3e98e06ea10d2a94",
"a07876dbf8fcd4aa14bf36bd3e98e06ea10d2a94"
] | [
"difflikelihoods/sampling.py",
"tests/test_covariance.py"
] | [
"\"\"\"\nsampling.py\n\nWe sample Metropolis-Hastings:\n * Random walk proposals\n * Langevin proposals\n * Langevin proposals with preconditioning\n * Hamiltonian MC\n * Hamiltonian MC with preconditioning\n\nNOTE:\n The functionality of this module is restricted to log-densities,\n i.e. densities of the form p(s) = exp(-E(s)). We work with E(s) only.\n The reason is that in Bayesian inference, evaluations of exp(-E(s))\n are too instable in a numerical sense. \n\"\"\"\n\nimport collections\nfrom abc import ABC, abstractmethod\nimport numpy as np\nfrom difflikelihoods import logdensity\n\n\ndef metropolishastings_rw(logpdf, nsamps, initstate, pwidth, ninits):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n random walk proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf)\n rwmh = RandomWalkMH(logdens)\n return rwmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_lang(logpdf, loggrad, nsamps, initstate, pwidth, ninits):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n Langevin dynamics proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad)\n langmh = LangevinMH(logdens)\n return langmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_plang(\n logpdf, loggrad, loghess, nsamps, initstate, pwidth, ninits\n):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n Riemannian (preconditioned) Langevin dynamics proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad, loghess)\n plangmh = PrecondLangevinMH(logdens)\n return plangmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_ham(\n logpdf, loggrad, nsamps, initstate, stepsize, nsteps, ninits\n):\n \"\"\"\n Convenience function for Hamiltonian MCMC.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad)\n hmc = HamiltonianMC(logdens, nsteps)\n return hmc.sample_nd(nsamps, initstate, stepsize, ninits)\n\n\ndef metropolishastings_pham(\n logpdf, loggrad, loghess, nsamps, initstate, stepsize, nsteps, ninits\n):\n \"\"\"\n Convenience function for preconditioned Hamiltonian MCMC.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad, loghess)\n phmc = PrecondHamiltonianMC(logdens, nsteps)\n return phmc.sample_nd(nsamps, initstate, stepsize, ninits)\n\n\n# Convenience data structure.\nMCMCState = collections.namedtuple(\"MCMCState\", \"state logdens loggrad loghess\")\n\n\nclass MetropolisHastings(ABC):\n \"\"\"\n Abstract Metropolis-Hastings class. 
Contains everything but the\n proposal kernels.\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n Initialise MH sampler with a log-density function.\n\n Args:\n logdens: LogDensity object, evaluations of a negative log-\n density and derivatives\n \"\"\"\n self.logdens = logdens\n\n def sample_nd(self, nsamps, init_state, pwidth, ninits=None, *optional):\n \"\"\"\n \"\"\"\n assert init_state_is_array(\n init_state\n ), \"Please enter a (d,) dimensional initial state\"\n states, logprobs = np.zeros((nsamps, len(init_state))), np.zeros(nsamps)\n accepted = 0\n if ninits is None:\n ninits = 0\n currstate = self.evaluate_logdens(init_state)\n states[0], logprobs[0] = currstate.state, currstate.logdens\n for idx in range(1, nsamps):\n if idx < ninits:\n proposal, corrfact = self.generate_proposal(currstate, pwidth)\n else:\n proposal, corrfact = self.generate_proposal(currstate, 0.2 * pwidth)\n currstate, is_accept = self.accept_or_reject(\n currstate, proposal, corrfact, idx, ninits\n )\n states[idx], logprobs[idx] = (\n currstate.state.copy(),\n currstate.logdens.copy(),\n )\n if idx >= ninits:\n accepted = accepted + int(is_accept)\n ratio = accepted / nsamps\n return states, logprobs, ratio\n\n def evaluate_logdens(self, loc):\n \"\"\"\n \"\"\"\n logdenseval = self.logdens.eval(loc)\n if self.logdens.has_gradient:\n gradeval = self.logdens.gradeval(loc)\n else:\n gradeval = 0\n if self.logdens.has_hessian:\n hesseval = self.logdens.hesseval(loc)\n else:\n hesseval = 0\n return MCMCState(\n state=loc, logdens=logdenseval, loggrad=gradeval, loghess=hesseval\n )\n\n def accept_or_reject(self, currstate, proposal, corrfact, idx, ninits):\n \"\"\"\n \"\"\"\n logaccprob = self.get_logaccprob(currstate, proposal, corrfact, idx, ninits)\n if logaccprob < 0 or logaccprob < -np.log(np.random.rand()):\n state = proposal\n is_accept = True\n else:\n state = currstate\n is_accept = False\n return state, is_accept\n\n def get_logaccprob(self, currstate, proposal, corrfact, idx, ninits):\n \"\"\"\n Returns NEGATIVE log acceptance probability, i.e.\n corrected proposal - corrected currstate\n \"\"\"\n if idx < ninits:\n corrfact = -corrfact\n return (corrfact) + (proposal.logdens - currstate.logdens)\n\n @abstractmethod\n def generate_proposal(self, *args):\n \"\"\"\n \"\"\"\n pass\n\n\ndef init_state_is_array(init_state):\n \"\"\"\n Checks whether init_state is compliant with an Nd algorithm.\n That is, whether init_state is an (d,) np.ndarray.\n \"\"\"\n assert isinstance(init_state, np.ndarray), \"Please enter init_state of shape (d,)\"\n assert len(init_state.shape) == 1, \"Please enter init_state of shape (d,)\"\n return True\n\n\nclass RandomWalkMH(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_randomwalk(currstate.state, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = 0\n return proposal, corrfact\n\n def sample_randomwalk(self, mean, var):\n \"\"\"\n \"\"\"\n return mean + np.sqrt(var) * np.random.randn(len(mean))\n\n\nclass LangevinMH(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_langevin(currstate, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)\n 
return proposal, corrfact\n\n def sample_langevin(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n noise = np.random.randn(len(currstate.state))\n return (\n currstate.state - pwidth * currstate.loggrad + np.sqrt(2 * pwidth) * noise\n )\n\n def compute_corrfact_langevin(self, currstate, proposal, pwidth):\n \"\"\"\n \"\"\"\n lognomin = self.kernel_langevin(currstate, proposal, pwidth)\n logdenom = self.kernel_langevin(proposal, currstate, pwidth)\n return lognomin - logdenom\n\n def kernel_langevin(self, state1, state2, pwidth):\n \"\"\"\n \"\"\"\n state2_dyn = state2.state - pwidth * state2.loggrad\n dist = np.linalg.norm(state1.state - state2_dyn) ** 2\n return 0.5 * dist / (2 * pwidth)\n\n\nclass PrecondLangevinMH(MetropolisHastings):\n \"\"\"\n Preconditioning with (inverse) Hessian.\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n precondeval returns M (and not M^{-1}) as used in Cald&Gir\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_langevin(currstate, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)\n return proposal, corrfact\n\n def sample_langevin(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n noise = np.random.multivariate_normal(\n np.zeros(len(currstate.loghess)), np.linalg.inv(currstate.loghess)\n )\n prec_dyn = np.linalg.solve(currstate.loghess, currstate.loggrad)\n return currstate.state - pwidth * prec_dyn + np.sqrt(2 * pwidth) * noise\n\n def compute_corrfact_langevin(self, currstate, proposal, pwidth):\n \"\"\"\n \"\"\"\n lognomin = self.kernel_langevin(currstate, proposal, pwidth)\n logdenom = self.kernel_langevin(proposal, currstate, pwidth)\n return lognomin - logdenom\n\n def kernel_langevin(self, state1, state2, pwidth):\n \"\"\"\n \"\"\"\n prec_dyn = np.linalg.solve(state2.loghess, state2.loggrad)\n state2_dyn = state2.state - pwidth * prec_dyn\n difference = state1.state - state2_dyn\n return 0.5 * difference.dot(np.dot(state2.loghess, difference)) / (2 * pwidth)\n\n\nclass HamiltonianMC(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens, nsteps):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n self.nsteps = nsteps\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n pwidth is used as stepsize for self.nsteps leapfrog steps.\n\n The correction factor is the quotient of the hamiltonian terms.\n \"\"\"\n momentum = np.random.multivariate_normal(\n np.zeros(len(currstate.state)), np.eye(len(currstate.state))\n )\n # hamilt = self.evaluate_hamiltonian(momentum, currstate)\n momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)\n # prop_hamilt = self.evaluate_hamiltonian(momentum_new, proposal)\n corrfact = self.get_corrfact(momentum, momentum_new)\n return proposal, corrfact\n\n def leapfrog_dynamics(self, momentum, currstate, pwidth):\n \"\"\"\n \"\"\"\n proposal = currstate\n for idx in range(self.nsteps):\n momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)\n return momentum, proposal\n\n def compute_next_lfstep(self, momentum, proposal, pwidth):\n \"\"\"\n \"\"\"\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n pstate = proposal.state + pwidth * momentum\n proposal = self.evaluate_logdens(pstate)\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n return momentum, proposal\n\n def get_corrfact(self, mom_new, mom):\n \"\"\"\n \"\"\"\n return 0.5 * (mom_new.T @ mom_new - mom.T @ 
mom)\n\n\nclass PrecondHamiltonianMC(MetropolisHastings):\n \"\"\"\n In fact, the true name would be either\n * Riemannian-Gaussian HMC: if the preconditioner depends on the state\n * Euclidean-Gaussian HMC: if the preconditioner is constant\n [Girolami and Calderhead, 2011; Betancourt, 2018]\n \"\"\"\n\n def __init__(self, logdens, nsteps):\n \"\"\"\n evalprecond returns M (and not M^{-1}) as used in Cald&Gir.\n M is the Hessian\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n self.nsteps = nsteps\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n pwidth is used as stepsize for self.nsteps leapfrog steps.\n\n The correction factor is the quotient of the hamiltonian terms.\n \"\"\"\n momentum = np.random.multivariate_normal(\n np.zeros(len(currstate.state)), currstate.loghess\n )\n momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)\n corrfact = self.get_corrfact(momentum, momentum_new, currstate, proposal)\n return proposal, corrfact\n\n def leapfrog_dynamics(self, momentum, currstate, pwidth):\n \"\"\"\n \"\"\"\n proposal = currstate\n for idx in range(self.nsteps):\n momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)\n return momentum, proposal\n\n def compute_next_lfstep(self, momentum, proposal, pwidth):\n \"\"\"\n \"\"\"\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n pstate = proposal.state + pwidth * np.linalg.solve(proposal.loghess, momentum)\n proposal = self.evaluate_logdens(pstate)\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n return momentum, proposal\n\n def get_corrfact(self, mom, mom_new, currstate, proposal):\n \"\"\"\n \"\"\"\n return 0.5 * (\n mom_new.T @ np.linalg.solve(proposal.loghess, mom_new)\n + np.log(np.linalg.det(proposal.loghess))\n - mom.T @ np.linalg.solve(currstate.loghess, mom)\n - np.log(np.linalg.det(currstate.loghess))\n )\n",
"# coding=utf-8\n\"\"\"\ntest_covariance.py\n\nTest whether covariance functions\n 1) complain about incorrectly shaped inputs\n 2) have expected diagonal values of symmetric matrices\n 3) interpolate with an expected rate\n\"\"\"\nimport numpy as np\nimport unittest\nfrom difflikelihoods import covariance as cov\n\n\nclass TestCovLinAlg():\n \"\"\"\n Test whether covariance functions complain about\n incorrectly shaped inputs.\n \"\"\"\n def test_complains_about_floats(self):\n \"\"\"Putting floats into covariance functions should raise AssertionErrors\"\"\"\n pt1 = np.random.rand()\n pt2 = np.random.rand()\n correct_input_shape = np.random.rand(1, 1)\n with self.assertRaises(AssertionError):\n self.cov(pt1, pt2)\n with self.assertRaises(AssertionError):\n self.cov(correct_input_shape, pt2)\n with self.assertRaises(AssertionError):\n self.cov(pt1, correct_input_shape)\n\n def test_complains_about_arrays_1d(self):\n \"\"\"Putting 1d-arrays into covariance functions should raise AssertionErrors\"\"\"\n pt1 = np.random.rand(3)\n pt2 = np.random.rand(3)\n correct_input_shape = np.random.rand(3, 1)\n with self.assertRaises(AssertionError):\n self.cov(pt1, pt2)\n with self.assertRaises(AssertionError):\n self.cov(correct_input_shape, pt2)\n with self.assertRaises(AssertionError):\n self.cov(pt1, correct_input_shape)\n\n def test_complains_about_arrays_3d(self):\n \"\"\"Putting 3d-arrays into covariance functions should raise AssertionErrors\"\"\"\n pt1 = np.random.rand(3, 1, 1)\n pt2 = np.random.rand(3, 1, 1)\n correct_input_shape = np.random.rand(3, 1)\n with self.assertRaises(AssertionError):\n self.cov(pt1, pt2)\n with self.assertRaises(AssertionError):\n self.cov(correct_input_shape, pt2)\n with self.assertRaises(AssertionError):\n self.cov(pt1, correct_input_shape)\n\n def test_complains_about_inconsistent_dimensions(self):\n \"\"\"\n Putting ptsets of different dimension into covariance functions\n should raise AssertionErrors\n \"\"\"\n ptset1 = np.random.rand(20, 3)\n ptset2 = np.random.rand(21, 2)\n with self.assertRaises(AssertionError):\n self.cov(ptset1, ptset2)\n\n def test_allows_different_numbers_of_points(self):\n \"\"\"\n Putting ptsets of different size but same dimension into\n covariance functions should be allowed\n \"\"\"\n ptset1 = np.random.rand(6, 2)\n ptset2 = np.random.rand(17, 2)\n covmat = self.cov(ptset1, ptset2)\n self.assertEqual(covmat.shape[0], 6)\n self.assertEqual(covmat.shape[1], 17)\n covmat_trans = self.cov(ptset2, ptset1)\n self.assertEqual(covmat_trans.shape[1], 6)\n self.assertEqual(covmat_trans.shape[0], 17)\n\nclass TestCovInterpolate():\n \"\"\"\n Compares interpolation error to some expected value\n \"\"\"\n def test_can_it_interpolate_as_well_as_expected(self):\n \"\"\"\n Interpolates a function f(x)=2x**2+1 on 10 points and\n compares error to some expected value\n \"\"\"\n good_ptset = np.linspace(0, 1)\n good_ptset = good_ptset.reshape((len(good_ptset), 1))\n covmat = self.cov(good_ptset, good_ptset)\n rhs = 2*good_ptset**2 + 1 # interpolate f(x) = 2x^2 + 1\n coeff = np.linalg.solve(covmat, rhs)\n evalptset = np.linspace(0, 1, 500)\n evalptset = evalptset.reshape((len(evalptset), 1))\n evalcovmat = self.cov(evalptset, good_ptset)\n approx = evalcovmat.dot(coeff)\n error = np.linalg.norm(approx - (2*evalptset**2 + 1)) / 500.\n self.assertLess(error, self.expected_interpolation_error)\n\n\n\n\nclass TestMaternFamily(TestCovInterpolate, TestCovLinAlg):\n \"\"\"\n Matern family covariances: Gaussian, Exponential, Matern\n For all Matern family 
covariances we test linear\n algebra and interpolation, hence we collect these tests here.\n\n Covariance kernels from this family have a 1 on the diagonal.\n \"\"\"\n def test_symmetric_ptsets_give_expected_diagonal_value(self):\n \"\"\"Using the same pointset for both inputs should give a 1 on the diagonal\"\"\"\n ptset = np.random.rand(5, 1)\n covmat_large = self.cov(ptset, ptset)\n edv = 1.0\n error_large = np.linalg.norm(np.diag(covmat_large) - edv * np.ones(5))\n self.assertLess(error_large, np.finfo(float).eps)\n covmat_small = self.cov(ptset.T, ptset.T)\n error_small = np.linalg.norm(np.diag(covmat_small) - edv * np.ones(1))\n self.assertLess(error_small, np.finfo(float).eps)\n\n\n\nclass TestGaussCov(TestMaternFamily, unittest.TestCase):\n \"\"\"\n Carry out all tests with Gaussian covariance kernel\n \"\"\"\n def setUp(self):\n \"\"\"Set up parameters\"\"\"\n self.cov = cov.gausscov\n self.expected_interpolation_error = 1e-7\n\n\nclass TestExpCov(TestMaternFamily, unittest.TestCase):\n \"\"\"\n Carry out all tests with exponential covariance kernel\n \"\"\"\n def setUp(self):\n \"\"\"Set up parameters\"\"\"\n self.cov = cov.expcov\n self.expected_interpolation_error = 1e-5\n\n\nclass TestMaternCov(TestMaternFamily, unittest.TestCase):\n \"\"\"\n Carry out all tests with Matern covariance kernel\n \"\"\"\n def setUp(self):\n \"\"\"Set up parameters\"\"\"\n self.cov = cov.materncov\n self.expected_interpolation_error = 1e-5\n\n\n\nclass TestIBMAuxiliary(unittest.TestCase):\n\n def test_is_timeseries(self):\n good_ptset = np.random.rand(10)\n self.assertEqual(cov.ptset_is_timeseries(good_ptset), True)\n\n def test_is_timeseries_fails_wrong_input(self):\n bad_ptset2d = np.random.rand(20, 2)\n bad_ptset1d = np.random.rand(20, 1)\n with self.assertRaises(AssertionError):\n cov.ptset_is_timeseries(bad_ptset2d)\n with self.assertRaises(AssertionError):\n cov.ptset_is_timeseries(bad_ptset1d)\n\n def test_is_timeseries_fails_negative(self):\n neg_ptset = -1*np.random.rand(10)\n with self.assertRaises(AssertionError):\n cov.ptset_is_timeseries(neg_ptset)\n\n def test_create_aligned_copies_pass(self):\n ptset1 = np.random.rand(10)\n ptset2 = np.random.rand(14)\n copies1, copies2 = cov.create_aligned_copies(ptset1, ptset2)\n self.assertEqual(copies1.shape[0], 10)\n self.assertEqual(copies1.shape[1], 14)\n self.assertEqual(copies1.shape, copies2.shape)\n\n\nclass TestIBMFamily(unittest.TestCase):\n \"\"\"\n Test Family of IBM Covariances: BM, IBM, IBM_CovD, ...\n and associated functions.\n \"\"\"\n def test_ibm_covd_q3(self):\n \"\"\"\n Compute output for k(1, 2) and k(2, 1) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_12 = sig**2 * 49.0 / 720.0\n manual_21 = sig**2 * 111.0 / 720.0\n res_12 = cov.ibm_covd_q3(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_12, manual_12, places=15)\n res_21 = cov.ibm_covd_q3(t2, t1, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_21, manual_21, places=15)\n\n def test_ibm_covd_q3_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones((1, 1))\n correct = np.ones(1)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q3(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q3(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q3(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q3(wrong2, correct)\n\n\n def 
test_ibm_covd_q2(self):\n \"\"\"\n Compute output for k(1, 2) and k(2, 1) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_12 = sig**2 * 7.0 / 24.0\n manual_21 = sig**2 * 17.0 / 24.0\n res_12 = cov.ibm_covd_q2(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_12, manual_12, places=15)\n res_21 = cov.ibm_covd_q2(t2, t1, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_21, manual_21, places=15)\n\n def test_ibm_covd_q2_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q2(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q2(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q2(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q2(wrong2, correct)\n\n\n def test_ibm_covd_q1(self):\n \"\"\"\n Compute output for k(1, 2) and k(2, 1) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_12 = sig**2 * 1./2.\n manual_21 = sig**2 * 3./2.\n res_12 = cov.ibm_covd_q1(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_12, manual_12, places=15)\n res_21 = cov.ibm_covd_q1(t2, t1, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res_21, manual_21, places=15)\n\n def test_ibm_covd_q1_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q1(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q1(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q1(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibm_covd_q1(wrong2, correct)\n\n def test_q_in_valid_range(self):\n \"\"\"\n Check q_in_valid_range() for q=1,2,3 (True)\n and for q=0,4 (Error)\n \"\"\"\n self.assertEqual(cov.q_in_valid_range(1), True)\n self.assertEqual(cov.q_in_valid_range(2), True)\n self.assertEqual(cov.q_in_valid_range(3), True)\n with self.assertRaises(AssertionError):\n cov.q_in_valid_range(0)\n with self.assertRaises(AssertionError):\n cov.q_in_valid_range(4)\n\n def test_ibm_covd(self):\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_12_q1 = sig**2 * 1./2.\n manual_12_q2 = sig**2 * 7.0 / 24.0\n manual_12_q3 = sig**2 * 49.0 / 720.0\n res_12_q1 = cov.ibm_covd(t1, t2, diffconst=sig, q=1)[0, 0]\n res_12_q2 = cov.ibm_covd(t1, t2, diffconst=sig, q=2)[0, 0]\n res_12_q3 = cov.ibm_covd(t1, t2, diffconst=sig, q=3)[0, 0]\n self.assertAlmostEqual(res_12_q1, manual_12_q1, places=15)\n self.assertAlmostEqual(res_12_q2, manual_12_q2, places=15)\n self.assertAlmostEqual(res_12_q3, manual_12_q3, places=15)\n\n\n def test_ibmcov_q3(self):\n \"\"\"\n Compute output for k(1, 2) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual = sig**2 * (1./252. 
+ 27./720.)\n res = cov.ibmcov_q3(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res, manual, places=15)\n\n \n def test_test_ibmcov_q3_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.ibmcov_q3(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q3(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q3(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q3(wrong2, correct)\n\n\n def test_ibmcov_q2(self):\n \"\"\"\n Compute output for k(1, 2) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual = sig**2 * (1./20. + 5./24.)\n res = cov.ibmcov_q2(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res, manual, places=15)\n\n \n def test_test_ibmcov_q2_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.ibmcov_q2(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q2(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q2(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q2(wrong2, correct)\n\n\n def test_ibmcov_q1(self):\n \"\"\"\n Compute output for k(1, 2) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual = sig**2 * (1./3. + 1./2.)\n res = cov.ibmcov_q1(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res, manual, places=15)\n\n \n def test_test_ibmcov_q1_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.ibmcov_q1(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q1(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q1(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.ibmcov_q1(wrong2, correct)\n\n\n def test_ibmcov(self):\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_q1 = sig**2 * (1./3. + 1./2.)\n manual_q2 = sig**2 * (1./20. + 5./24.)\n manual_q3 = sig**2 * (1./252. 
+ 27./720.)\n res_q1 = cov.ibmcov(t1, t2, diffconst=sig, q=1)[0, 0]\n res_q2 = cov.ibmcov(t1, t2, diffconst=sig, q=2)[0, 0]\n res_q3 = cov.ibmcov(t1, t2, diffconst=sig, q=3)[0, 0]\n self.assertAlmostEqual(res_q1, manual_q1, places=15)\n self.assertAlmostEqual(res_q2, manual_q2, places=15)\n self.assertAlmostEqual(res_q3, manual_q3, places=15)\n\n\n def test_bmcov(self):\n \"\"\"\n Compute output for k(1, 2) manually and assert it coincides with output.\n \"\"\"\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual = sig**2 * 1.\n res = cov.bmcov(t1, t2, diffconst=sig)[0, 0]\n self.assertAlmostEqual(res, manual, places=15)\n\n \n def test_bmcov_fails_wrong_input(self):\n \"\"\"\n Raise AssertionError for inputs of shape (N,) and (N, 2).\n \"\"\"\n wrong1 = 1*np.ones((1, 2))\n wrong2 = 2*np.ones(1)\n correct = np.ones((1, 1))\n with self.assertRaises(AssertionError):\n cov.bmcov(correct, wrong1)\n with self.assertRaises(AssertionError):\n cov.bmcov(wrong1, correct)\n with self.assertRaises(AssertionError):\n cov.bmcov(correct, wrong2)\n with self.assertRaises(AssertionError):\n cov.bmcov(wrong2, correct)\n\n\n def test_ibm_dcovd(self):\n t1 = 1*np.ones(1)\n t2 = 2*np.ones(1)\n sig = 1.2345\n manual_q1 = sig**2 * 1.0\n manual_q2 = sig**2 * (1./3. + 1./2.)\n manual_q3 = sig**2 * (1./20. + 5./24.)\n res_q1 = cov.ibm_dcovd(t1, t2, diffconst=sig, q=1)[0, 0]\n res_q2 = cov.ibm_dcovd(t1, t2, diffconst=sig, q=2)[0, 0]\n res_q3 = cov.ibm_dcovd(t1, t2, diffconst=sig, q=3)[0, 0]\n self.assertAlmostEqual(res_q1, manual_q1, places=15)\n self.assertAlmostEqual(res_q2, manual_q2, places=15)\n self.assertAlmostEqual(res_q3, manual_q3, places=15)\n\n"
] | [
[
"numpy.dot",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.linalg.det",
"numpy.random.rand",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.linalg.solve",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.ones",
"numpy.finfo",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
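The sampling.py entry above works exclusively with negative log-densities E(s) = -log p(s) rather than with p(s) itself. As a compact illustration of the random-walk Metropolis-Hastings accept/reject rule expressed in terms of E(s), the sketch below targets a standard normal with NumPy; the helper names (neg_log_density, random_walk_mh) and the chosen step width are illustrative assumptions, not the repository's API.

import numpy as np

def neg_log_density(x):
    # E(x) = -log p(x) for a standard normal, up to an additive constant.
    return 0.5 * float(x @ x)

def random_walk_mh(nsamps, init_state, pwidth, rng=None):
    # Tiny random-walk Metropolis-Hastings loop on a negative log-density.
    rng = np.random.default_rng(0) if rng is None else rng
    state = np.asarray(init_state, dtype=float)
    energy = neg_log_density(state)
    samples = np.zeros((nsamps, state.size))
    for idx in range(nsamps):
        proposal = state + np.sqrt(pwidth) * rng.standard_normal(state.size)
        prop_energy = neg_log_density(proposal)
        # Accept with probability min(1, exp(energy - prop_energy)).
        if np.log(rng.uniform()) < energy - prop_energy:
            state, energy = proposal, prop_energy
        samples[idx] = state
    return samples

samples = random_walk_mh(5000, np.zeros(2), pwidth=0.5)
print(samples.mean(axis=0), samples.std(axis=0))  # roughly [0, 0] and [1, 1]

Because the random-walk proposal is symmetric, the Hastings correction cancels and the rule reduces to comparing the two energies, mirroring the corrfact = 0 branch of RandomWalkMH in the entry.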
ChristophReich1996/kornia | [
"35f955b46e8015da1cb9faa28c6943ec2b09cc2a",
"35f955b46e8015da1cb9faa28c6943ec2b09cc2a",
"35f955b46e8015da1cb9faa28c6943ec2b09cc2a",
"35f955b46e8015da1cb9faa28c6943ec2b09cc2a"
] | [
"test/augmentation/test_random_generator.py",
"kornia/geometry/conversions.py",
"test/color/test_hls.py",
"test/feature/test_affine_shape_estimator.py"
] | [
"import pytest\nimport torch\nfrom torch.testing import assert_allclose\n\nfrom kornia.augmentation.random_generator import (\n random_prob_generator,\n random_color_jitter_generator,\n random_perspective_generator,\n random_affine_generator,\n random_rotation_generator,\n random_crop_generator,\n random_crop_size_generator,\n random_rectangles_params_generator,\n center_crop_generator,\n random_motion_blur_generator,\n random_solarize_generator,\n random_posterize_generator,\n random_sharpness_generator,\n random_mixup_generator,\n random_cutmix_generator,\n)\n\n\nclass RandomGeneratorBaseTests():\n\n def test_valid_param_combinations(self, device, dtype):\n raise NotImplementedError\n\n def test_invalid_param_combinations(self, device, dtype):\n raise NotImplementedError\n\n def test_random_gen(self, device, dtype):\n raise NotImplementedError\n\n def test_same_on_batch(self, device, dtype):\n raise NotImplementedError\n\n\nclass TestRandomProbGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('p', [0., 0.5, 1.])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, p, batch_size, same_on_batch, device, dtype):\n random_prob_generator(batch_size=batch_size, p=p, same_on_batch=same_on_batch)\n\n @pytest.mark.parametrize(\n 'p',\n [\n # Should be failed if p > 1. or p < 0.\n (-1.),\n (2.)\n ]\n )\n def test_invalid_param_combinations(self, p, device, dtype):\n with pytest.raises(Exception):\n random_prob_generator(batch_size=8, p=p)\n\n @pytest.mark.parametrize(\n 'p,expected',\n [(0., [False] * 8), (0.5, [False, False, True, False, True, False, True, False]), (1., [True] * 8)]\n )\n def test_random_gen(self, p, expected, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_prob_generator(batch_size=batch_size, p=p)\n assert (res == torch.tensor(expected)).long().sum() == batch_size\n\n @pytest.mark.parametrize(\"seed,expected\", [\n (42, [False] * 8),\n (0, [True] * 8),\n ])\n def test_same_on_batch(self, seed, expected, device, dtype):\n torch.manual_seed(seed)\n batch_size = 8\n res = random_prob_generator(batch_size=batch_size, p=.5, same_on_batch=True)\n assert (res == torch.tensor(expected)).long().sum() == batch_size\n\n\nclass TestColorJitterGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('brightness', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('contrast', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('saturation', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('hue', [None, torch.tensor([-0.1, 0.1])])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, brightness, contrast, saturation, hue, batch_size, same_on_batch, device, dtype\n ):\n random_color_jitter_generator(\n batch_size,\n brightness.to(device=device, dtype=dtype) if brightness is not None else None,\n contrast.to(device=device, dtype=dtype) if contrast is not None else None,\n saturation.to(device=device, dtype=dtype) if saturation is not None else None,\n hue.to(device=device, dtype=dtype) if hue is not None else None, same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'brightness,contrast,saturation,hue',\n [\n # Should be failed if value out of bounds or tensor.shape != [1, 2]\n (torch.tensor([-1., 2.]), None, None, None),\n (torch.tensor([0., 3.]), None, None, None),\n (torch.tensor(0.), None, None, None),\n (torch.tensor([0.]), 
None, None, None),\n (torch.tensor([0., 1., 2.]), None, None, None),\n (None, torch.tensor([-1., 2.]), None, None),\n (None, torch.tensor(0.), None, None),\n (None, torch.tensor([0.]), None, None),\n (None, torch.tensor([0., 1., 2.]), None, None),\n (None, None, torch.tensor([-1., 2.]), None),\n (None, None, torch.tensor(0.), None),\n (None, None, torch.tensor([0.]), None),\n (None, None, torch.tensor([0., 1., 2.]), None),\n (None, None, None, torch.tensor([-1., 0.])),\n (None, None, None, torch.tensor([0, 1.])),\n (None, None, None, torch.tensor(0.)),\n (None, None, None, torch.tensor([0.])),\n (None, None, None, torch.tensor([0., 1., 2.])),\n ]\n )\n def test_invalid_param_combinations(self, brightness, contrast, saturation, hue, device, dtype):\n with pytest.raises(Exception):\n random_color_jitter_generator(\n 8,\n brightness.to(device=device, dtype=dtype) if brightness is not None else None,\n contrast.to(device=device, dtype=dtype) if contrast is not None else None,\n saturation.to(device=device, dtype=dtype) if saturation is not None else None,\n hue.to(device=device, dtype=dtype) if hue is not None else None\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n jitter_params = random_color_jitter_generator(\n batch_size,\n brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),\n contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),\n hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype)\n )\n\n expected_jitter_params = {\n 'brightness_factor': torch.tensor(\n [1.1529, 1.1660, 0.9531, 1.1837, 0.9562, 1.0404, 0.9026, 1.1175], device=device, dtype=dtype\n ),\n 'contrast_factor': torch.tensor(\n [1.2645, 0.7799, 1.2608, 1.0561, 1.2216, 1.0406, 1.1447, 0.9576], device=device, dtype=dtype\n ),\n 'hue_factor': torch.tensor(\n [0.0771, 0.0148, -0.0467, 0.0255, -0.0461, -0.0117, -0.0406, 0.0663], device=device, dtype=dtype\n ),\n 'saturation_factor': torch.tensor(\n [0.6843, 0.8156, 0.8871, 0.7595, 1.0378, 0.6049, 1.3612, 0.6602], device=device, dtype=dtype\n ),\n 'order': torch.tensor([3, 2, 0, 1], device=device, dtype=dtype)\n }\n\n assert set(list(jitter_params.keys())) == set([\n 'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order']), \\\n \"Redundant keys found apart from \\\n 'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order'\"\n\n assert_allclose(\n jitter_params['brightness_factor'], expected_jitter_params['brightness_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(\n jitter_params['contrast_factor'], expected_jitter_params['contrast_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(jitter_params['hue_factor'], expected_jitter_params['hue_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(\n jitter_params['saturation_factor'], expected_jitter_params['saturation_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(jitter_params['order'].to(dtype), expected_jitter_params['order'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n jitter_params = random_color_jitter_generator(\n batch_size,\n brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),\n contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),\n hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype),\n same_on_batch=True\n )\n\n expected_res = {\n 'brightness_factor': 
torch.tensor([1.1529] * batch_size, device=device, dtype=dtype),\n 'contrast_factor': torch.tensor([1.2490] * batch_size, device=device, dtype=dtype),\n 'hue_factor': torch.tensor([-0.0234] * batch_size, device=device, dtype=dtype),\n 'saturation_factor': torch.tensor([1.3674] * batch_size, device=device, dtype=dtype),\n 'order': torch.tensor([2, 3, 0, 1], device=device, dtype=dtype)\n }\n\n assert_allclose(jitter_params['brightness_factor'], expected_res['brightness_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['contrast_factor'], expected_res['contrast_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['hue_factor'], expected_res['hue_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['saturation_factor'], expected_res['saturation_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['order'].to(dtype), expected_res['order'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomPerspectiveGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('height,width', [(200, 200)])\n @pytest.mark.parametrize('distortion_scale', [torch.tensor(0.), torch.tensor(0.5), torch.tensor(1.)])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, height, width, distortion_scale, batch_size, same_on_batch, device, dtype):\n random_perspective_generator(\n batch_size=8,\n height=height,\n width=width,\n distortion_scale=distortion_scale.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,distortion_scale',\n [\n # Should be failed if distortion_scale > 1. or distortion_scale < 0.\n (-100, 100, torch.tensor(0.5)),\n (100, -100, torch.tensor(0.5)),\n (100, 100, torch.tensor(-0.5)),\n (100, 100, torch.tensor(1.5)),\n (100, 100, torch.tensor([0., 0.5])),\n ]\n )\n def test_invalid_param_combinations(self, height, width, distortion_scale, device, dtype):\n with pytest.raises(Exception):\n random_perspective_generator(\n batch_size=8,\n height=height,\n width=width,\n distortion_scale=distortion_scale.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_perspective_generator(batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype))\n expected = dict(\n start_points=torch.tensor(\n [[[0., 0.], [199., 0.], [199., 199.], [0., 199.]], [[0., 0.], [199., 0.], [199., 199.], [0., 199.]]],\n device=device,\n dtype=dtype\n ),\n end_points=torch.tensor(\n [\n [[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]],\n [[47.0386, 6.6593], [152.2701, 29.6790], [155.5298, 170.6142], [37.0547, 177.5298]]\n ],\n device=device,\n dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['start_points'], expected['start_points'])\n assert_allclose(res['end_points'], expected['end_points'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_perspective_generator(\n batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(\n start_points=torch.tensor([[[0., 0.], [199., 0.], [199., 199.], [0., 199.]]], device=device,\n dtype=dtype).repeat(2, 1, 1),\n end_points=torch.tensor(\n [[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]]],\n device=device,\n dtype=dtype\n ).repeat(2, 1, 1),\n )\n assert res.keys() == expected.keys()\n 
assert_allclose(res['start_points'], expected['start_points'])\n assert_allclose(res['end_points'], expected['end_points'])\n\n\nclass TestRandomAffineGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 4])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [300])\n @pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])\n @pytest.mark.parametrize('translate', [None, torch.tensor([0.1, 0.1])])\n @pytest.mark.parametrize('scale', [None, torch.tensor([0.7, 1.2])])\n @pytest.mark.parametrize('shear', [None, torch.tensor([[0, 20], [0, 20]])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, height, width, degrees, translate, scale, shear, same_on_batch, device, dtype\n ):\n random_affine_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n degrees=degrees.to(device=device, dtype=dtype),\n translate=translate.to(device=device, dtype=dtype) if translate is not None else None,\n scale=scale.to(device=device, dtype=dtype) if scale is not None else None,\n shear=shear.to(device=device, dtype=dtype) if shear is not None else None,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,degrees,translate,scale,shear', [\n (-100, 100, torch.tensor([10, 20]), None, None, None),\n (100, -100, torch.tensor([10, 20]), None, None, None),\n (100, 100, 0.5, None, None, None),\n (100, 100, torch.tensor([10, 20, 30]), None, None, None),\n (100, 100, torch.tensor([10, 20]), torch.tensor([0.1]), None, None),\n (10, 10, torch.tensor([1, 2]), torch.tensor([0.1, 0.2, 0.3]), None, None),\n (100, 100, torch.tensor([10, 20]), None, torch.tensor([1]), None),\n (100, 100, torch.tensor([10, 20]), None, torch.tensor([1, 2, 3]), None),\n (100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1])),\n (100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1, 2])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4, 5])),\n ]\n )\n def test_invalid_param_combinations(self, height, width, degrees, translate, scale, shear, device, dtype):\n with pytest.raises(Exception):\n random_affine_generator(\n batch_size=8,\n height=height,\n width=width,\n degrees=degrees.to(device=device, dtype=dtype),\n translate=translate.to(device=device, dtype=dtype) if translate is not None else None,\n scale=scale.to(device=device, dtype=dtype) if scale is not None else None,\n shear=shear.to(device=device, dtype=dtype) if shear is not None else None\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)\n scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)\n shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)\n res = random_affine_generator(\n batch_size=2,\n height=200,\n width=200,\n degrees=degrees,\n translate=translate,\n scale=scale,\n shear=shear,\n same_on_batch=False\n )\n expected = dict(\n translations=torch.tensor([[-4.3821, -9.7371], [4.0358, 11.7457]], device=device, dtype=dtype),\n center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),\n scale=torch.tensor([[0.8914, 0.8914], [1.1797, 1.1797]], device=device, dtype=dtype),\n angle=torch.tensor([18.8227, 19.1500], device=device, 
dtype=dtype),\n sx=torch.tensor([19.4077, 11.3319], device=device, dtype=dtype),\n sy=torch.tensor([19.3460, 15.9358], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)\n scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)\n shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)\n res = random_affine_generator(\n batch_size=2,\n height=200,\n width=200,\n degrees=degrees,\n translate=translate,\n scale=scale,\n shear=shear,\n same_on_batch=True\n )\n expected = dict(\n translations=torch.tensor([[-4.6854, 18.3722], [-4.6854, 18.3722]], device=device, dtype=dtype),\n center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),\n scale=torch.tensor([[1.1575, 1.1575], [1.1575, 1.1575]], device=device, dtype=dtype),\n angle=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype),\n sx=torch.tensor([13.9045, 13.9045], device=device, dtype=dtype),\n sy=torch.tensor([16.0090, 16.0090], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomRotationGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, degrees, same_on_batch, device, dtype):\n random_rotation_generator(\n batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize('degrees', [(torch.tensor(10)), (torch.tensor([10])), (torch.tensor([10, 20, 30]))])\n def test_invalid_param_combinations(self, degrees, device, dtype):\n batch_size = 8\n with pytest.raises(Exception):\n random_rotation_generator(batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype))\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_rotation_generator(\n batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(degrees=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype))\n assert res.keys() == expected.keys()\n assert_allclose(res['degrees'], expected['degrees'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_rotation_generator(\n batch_size=2, degrees=degrees.to(device=device, dtype=dtype), 
same_on_batch=True\n )\n expected = dict(degrees=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype))\n assert res.keys() == expected.keys()\n assert_allclose(res['degrees'], expected['degrees'])\n\n\nclass TestRandomCropGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 2])\n @pytest.mark.parametrize('input_size', [(200, 200)])\n @pytest.mark.parametrize('size', [(100, 100), torch.tensor([50, 50])])\n @pytest.mark.parametrize('resize_to', [None, (100, 100)])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, input_size, size, resize_to, same_on_batch, device, dtype):\n if isinstance(size, torch.Tensor):\n size = size.repeat(batch_size, 1).to(device=device, dtype=dtype)\n random_crop_generator(\n batch_size=batch_size,\n input_size=input_size,\n size=size,\n resize_to=resize_to,\n same_on_batch=same_on_batch,\n device=device,\n dtype=dtype\n )\n\n @pytest.mark.parametrize(\n 'input_size,size,resize_to', [\n ((-300, 300), (200, 200), (100, 100)),\n ((200, 200), torch.tensor([50, 50]), (100, 100)),\n ]\n )\n def test_invalid_param_combinations(self, input_size, size, resize_to, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n random_crop_generator(\n batch_size=batch_size,\n input_size=input_size,\n size=size.to(device=device, dtype=dtype) if isinstance(size, torch.Tensor) else size,\n resize_to=resize_to\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n res = random_crop_generator(\n batch_size=2,\n input_size=(100, 100),\n size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),\n resize_to=(200, 200)\n )\n expected = dict(\n src=torch.tensor(\n [[[36, 19], [95, 19], [95, 68], [36, 68]], [[19, 29], [98, 29], [98, 98], [19, 98]]],\n device=device,\n dtype=dtype\n ),\n dst=torch.tensor(\n [[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],\n device=device,\n dtype=dtype\n ),\n input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'], expected['src'])\n assert_allclose(res['dst'], expected['dst'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n res = random_crop_generator(\n batch_size=2,\n input_size=(100, 100),\n size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),\n resize_to=(200, 200),\n same_on_batch=True\n )\n expected = dict(\n src=torch.tensor(\n [[[36, 46], [95, 46], [95, 95], [36, 95]], [[36, 46], [115, 46], [115, 115], [36, 115]]],\n device=device,\n dtype=dtype\n ),\n dst=torch.tensor(\n [[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],\n device=device,\n dtype=dtype\n ),\n input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'], expected['src'])\n assert_allclose(res['dst'], expected['dst'])\n\n\nclass TestRandomCropSizeGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('size', [(200, 200)])\n @pytest.mark.parametrize('scale', [torch.tensor([.7, 1.3])])\n @pytest.mark.parametrize('ratio', [torch.tensor([.9, 1.1])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, size, scale, 
ratio, same_on_batch, device, dtype):\n random_crop_size_generator(\n batch_size=batch_size,\n size=size,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'size,scale,ratio', [\n ((100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),\n ((100, 100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7, 1.3, 1.5]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9])),\n ((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1, 1.3])),\n ]\n )\n def test_invalid_param_combinations(self, size, scale, ratio, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n random_crop_size_generator(\n batch_size=batch_size,\n size=size,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n res = random_crop_size_generator(\n batch_size=8,\n size=(100, 100),\n scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n size=torch.tensor(\n [[99, 94], [91, 95], [90, 96], [87, 86], [94, 98], [87, 81], [85, 93], [83, 90]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n res = random_crop_size_generator(\n batch_size=100,\n size=(100, 100),\n scale=torch.tensor([0.999, 1.], device=device, dtype=dtype),\n ratio=torch.tensor([1., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(size=torch.tensor([[100, 100]], device=device, dtype=dtype).repeat(100, 1))\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_crop_size_generator(\n batch_size=8,\n size=(100, 100),\n scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n size=torch.tensor(\n [[99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95]],\n device=device,\n dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n\nclass TestRandomRectangleGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [300])\n @pytest.mark.parametrize('scale', [torch.tensor([.7, 1.1])])\n @pytest.mark.parametrize('ratio', [torch.tensor([.7, 1.1])])\n @pytest.mark.parametrize('value', [0])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, height, width, scale, ratio, value, same_on_batch, device, dtype\n ):\n random_rectangles_params_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n value=value,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,scale,ratio,value', [\n (-100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),\n (100, -100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),\n (100, -100, 
torch.tensor([0.7]), torch.tensor([0.7, 1.3]), 0),\n (100, 100, torch.tensor([0.7, 1.3, 1.5]), torch.tensor([0.7, 1.3]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3, 1.5]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), -1),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 2),\n (100, 100, torch.tensor([.5, .7]), torch.tensor([.7, .9]), torch.tensor(0.5)),\n ]\n )\n def test_invalid_param_combinations(self, height, width, scale, ratio, value, device, dtype):\n batch_size = 8\n with pytest.raises(Exception):\n random_rectangles_params_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n value=value,\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n width, height = 100, 150\n scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n value = 0.5\n res = random_rectangles_params_generator(\n batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=False\n )\n expected = dict(\n widths=torch.tensor([100, 100], device=device, dtype=dtype),\n heights=torch.tensor([0, 0], device=device, dtype=dtype),\n xs=torch.tensor([0, 0], device=device, dtype=dtype),\n ys=torch.tensor([6, 8], device=device, dtype=dtype),\n values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['xs'], expected['xs'])\n assert_allclose(res['ys'], expected['ys'])\n assert_allclose(res['values'], expected['values'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n width, height = 100, 150\n scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n value = 0.5\n res = random_rectangles_params_generator(\n batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=True\n )\n expected = dict(\n widths=torch.tensor([100, 100], device=device, dtype=dtype),\n heights=torch.tensor([0, 0], device=device, dtype=dtype),\n xs=torch.tensor([0, 0], device=device, dtype=dtype),\n ys=torch.tensor([10, 10], device=device, dtype=dtype),\n values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['xs'], expected['xs'])\n assert_allclose(res['ys'], expected['ys'])\n assert_allclose(res['values'], expected['values'])\n\n\nclass TestCenterCropGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 2])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [200])\n @pytest.mark.parametrize('size', [(100, 100)])\n def test_valid_param_combinations(self, batch_size, height, width, size, device, dtype):\n center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)\n\n @pytest.mark.parametrize(\n 'height,width,size', [\n (200, -200, (100, 100)),\n (-200, 200, (100, 100)),\n (100, 100, (120, 120)),\n (150, 100, (120, 120)),\n (100, 150, (120, 120)),\n ]\n )\n def test_invalid_param_combinations(self, height, width, 
size, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n res = center_crop_generator(batch_size=2, height=200, width=200, size=(120, 150))\n expected = dict(\n src=torch.tensor(\n [[[25, 40], [174, 40], [174, 159], [25, 159]], [[25, 40], [174, 40], [174, 159], [25, 159]]],\n device=device,\n dtype=torch.long\n ),\n dst=torch.tensor(\n [[[0, 0], [149, 0], [149, 119], [0, 119]], [[0, 0], [149, 0], [149, 119], [0, 119]]],\n device=device,\n dtype=torch.long\n ),\n input_size=torch.tensor([[200, 200], [200, 200]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'].to(device=device), expected['src'])\n assert_allclose(res['dst'].to(device=device), expected['dst'])\n\n def test_same_on_batch(self, device, dtype):\n pass\n\n\nclass TestRandomMotionBlur(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('kernel_size', [3, (3, 5)])\n @pytest.mark.parametrize('angle', [torch.tensor([10, 30])])\n @pytest.mark.parametrize('direction', [torch.tensor([-1, -1]), torch.tensor([1, 1])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, kernel_size, angle, direction, same_on_batch, device, dtype):\n random_motion_blur_generator(\n batch_size=batch_size,\n kernel_size=kernel_size,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'kernel_size,angle,direction', [\n (4, torch.tensor([30, 100]), torch.tensor([-1, 1])),\n (1, torch.tensor([30, 100]), torch.tensor([-1, 1])),\n ((1, 2, 3), torch.tensor([30, 100]), torch.tensor([-1, 1])),\n (3, torch.tensor([30, 100]), torch.tensor([-2, 1])),\n (3, torch.tensor([30, 100]), torch.tensor([-1, 2])),\n ]\n )\n def test_invalid_param_combinations(self, kernel_size, angle, direction, device, dtype):\n with pytest.raises(Exception):\n random_motion_blur_generator(\n batch_size=8,\n kernel_size=kernel_size,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n angle = torch.tensor([30, 90])\n direction = torch.tensor([-1, 1])\n res = random_motion_blur_generator(\n batch_size=2,\n kernel_size=3,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),\n angle_factor=torch.tensor([82.9362, 84.9002], device=device, dtype=dtype),\n direction_factor=torch.tensor([-0.2343, 0.9186], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n angle = torch.tensor([30, 90])\n direction = torch.tensor([-1, 1])\n res = random_motion_blur_generator(\n batch_size=2,\n kernel_size=3,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=True\n )\n 
expected = dict(\n ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),\n angle_factor=torch.tensor([82.9362, 82.9362], device=device, dtype=dtype),\n direction_factor=torch.tensor([0.8300, 0.8300], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomSolarizeGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('thresholds', [torch.tensor([0, 1]), torch.tensor([0.4, 0.6])])\n @pytest.mark.parametrize('additions', [torch.tensor([-0.5, 0.5])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, thresholds, additions, same_on_batch, device, dtype):\n random_solarize_generator(\n batch_size=batch_size,\n thresholds=thresholds.to(device=device, dtype=dtype),\n additions=additions.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'thresholds,additions', [\n (torch.tensor([0, 2]), torch.tensor([-0.5, 0.5])),\n (torch.tensor([-1, 1]), torch.tensor([-0.5, 0.5])),\n ([0, 1], torch.tensor([-0.5, 0.5])),\n (torch.tensor([0, 1]), torch.tensor([-0.5, 1])),\n (torch.tensor([0, 1]), torch.tensor([-1, 0.5])),\n (torch.tensor([0, 1]), [-0.5, 0.5]),\n ]\n )\n def test_invalid_param_combinations(self, thresholds, additions, device, dtype):\n with pytest.raises(Exception):\n random_solarize_generator(\n batch_size=batch_size,\n thresholds=thresholds.to(device=device, dtype=dtype),\n additions=additions.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_solarize_generator(\n batch_size=batch_size,\n thresholds=torch.tensor([0, 1], device=device, dtype=dtype),\n additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n thresholds_factor=torch.tensor(\n [0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype\n ),\n additions_factor=torch.tensor(\n [0.4408, -0.3668, 0.4346, 0.0936, 0.3694, 0.0677, 0.2411, -0.0706], device=device, dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_solarize_generator(\n batch_size=batch_size,\n thresholds=torch.tensor([0, 1], device=device, dtype=dtype),\n additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n thresholds_factor=torch.tensor(\n [0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype\n ),\n additions_factor=torch.tensor(\n [0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150], device=device, dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomPosterizeGen(RandomGeneratorBaseTests):\n\n 
@pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('bits', [torch.tensor([0, 8])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, bits, same_on_batch, device, dtype):\n random_posterize_generator(\n batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'bits', [\n (torch.tensor([-1, 1])),\n (torch.tensor([0, 9])),\n (torch.tensor([3])),\n ([0, 8]),\n ]\n )\n def test_invalid_param_combinations(self, bits, device, dtype):\n with pytest.raises(Exception):\n random_posterize_generator(\n batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_posterize_generator(\n batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(bits_factor=torch.tensor([5, 2, 3, 6, 7, 7, 2, 7], device=device, dtype=torch.int32))\n assert res.keys() == expected.keys()\n assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_posterize_generator(\n batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(bits_factor=torch.tensor([5, 5, 5, 5, 5, 5, 5, 5], device=device, dtype=torch.int32))\n assert res.keys() == expected.keys()\n assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomSharpnessGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('sharpness', [torch.tensor([0., 1.])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, sharpness, same_on_batch, device, dtype):\n random_sharpness_generator(\n batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize('sharpness', [\n (torch.tensor([-1, 5])),\n (torch.tensor([3])),\n ([0, 1.]),\n ])\n def test_invalid_param_combinations(self, sharpness, device, dtype):\n with pytest.raises(Exception):\n random_sharpness_generator(\n batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_sharpness_generator(\n batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(\n sharpness_factor=torch.\n tensor([0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_sharpness_generator(\n batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(\n sharpness_factor=torch.\n tensor([0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass 
TestRandomMixUpGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('p', [0., 0.5, 1.])\n @pytest.mark.parametrize('lambda_val', [None, torch.tensor([0., 1.])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, p, lambda_val, same_on_batch, device, dtype):\n random_mixup_generator(\n batch_size=batch_size,\n p=p,\n lambda_val=lambda_val.to(device=device, dtype=dtype) if isinstance(lambda_val,\n (torch.Tensor)) else lambda_val,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'lambda_val', [\n (torch.tensor([-1, 1])),\n (torch.tensor([0, 2])),\n (torch.tensor([0, 0.5, 1])),\n ([0., 1.]),\n ]\n )\n def test_invalid_param_combinations(self, lambda_val, device, dtype):\n with pytest.raises(Exception):\n random_mixup_generator(batch_size=8, lambda_val=lambda_val.to(device=device, dtype=dtype))\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_mixup_generator(\n batch_size=batch_size,\n p=0.5,\n lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n mixup_pairs=torch.tensor([6, 1, 0, 7, 2, 5, 3, 4], device=device, dtype=torch.long),\n mixup_lambdas=torch.tensor(\n [0.0000, 0.0000, 0.5739, 0.0000, 0.6274, 0.0000, 0.4414, 0.0000], device=device, dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_mixup_generator(\n batch_size=batch_size,\n p=.9999,\n lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n mixup_pairs=torch.tensor([4, 6, 7, 5, 0, 1, 3, 2], device=device, dtype=torch.long),\n mixup_lambdas=torch.tensor(\n [0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804], device=device, dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomCutMixGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('p', [0, 0.5, 1.])\n @pytest.mark.parametrize('width,height', [(200, 200)])\n @pytest.mark.parametrize('num_mix', [1, 3])\n @pytest.mark.parametrize('beta', [None, torch.tensor(1e-15), torch.tensor(1.)])\n @pytest.mark.parametrize('cut_size', [None, torch.tensor([0., 1.]), torch.tensor([0.3, 0.6])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, p, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype\n ):\n random_cutmix_generator(\n batch_size=batch_size,\n p=p,\n width=width,\n height=height,\n num_mix=num_mix,\n beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,\n cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'width,height,num_mix,beta,cut_size', [\n (200, -200, 1, None, None),\n (-200, 200, 1, None, None),\n (200, 200, 0, None, None),\n (200, 200, 1.5, None, None),\n (200, 200, 1, torch.tensor([0., 1.]), None),\n (200, 
200, 1, None, torch.tensor([-1., 1.])),\n (200, 200, 1, None, torch.tensor([0., 2.])),\n ]\n )\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_invalid_param_combinations(self, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype):\n with pytest.raises(Exception):\n random_cutmix_generator(\n batch_size=8,\n p=0.5,\n width=width,\n height=height,\n num_mix=num_mix,\n beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,\n cut_size=beta.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_cutmix_generator(\n batch_size=batch_size,\n width=200,\n height=200,\n p=0.5,\n num_mix=1,\n beta=torch.tensor(1., device=device, dtype=dtype),\n cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n mix_pairs=torch.tensor([[0, 1]], device=device, dtype=torch.long),\n crop_src=torch.tensor(\n [[[[71, 108], [70, 108], [70, 107], [71, 107]], [[39, 1], [38, 1], [38, 0], [39, 0]]]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_cutmix_generator(\n batch_size=batch_size,\n width=200,\n height=200,\n p=0.5,\n num_mix=1,\n beta=torch.tensor(1., device=device, dtype=dtype),\n cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n mix_pairs=torch.tensor([[1, 0]], device=device, dtype=torch.long),\n crop_src=torch.tensor(\n [[[[114, 53], [113, 53], [113, 52], [114, 52]], [[114, 53], [113, 53], [113, 52], [114, 52]]]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)\n",
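A minimal usage sketch, assuming the kornia.augmentation.random_generator API exactly as exercised in the test file above (random_prob_generator and random_rotation_generator, with the same argument names and return types the tests rely on); values printed are illustrative only, not reference outputs.

# Sketch under the assumptions stated above: sample augmentation parameters
# the same way the tests call the generators.
import torch
from kornia.augmentation.random_generator import (
    random_prob_generator,
    random_rotation_generator,
)

torch.manual_seed(0)

# Per-sample apply/skip decisions for a batch of 4, applied with probability 0.5.
apply_mask = random_prob_generator(batch_size=4, p=0.5)

# Rotation angles drawn from the range [10, 20] degrees, shared across the batch
# because same_on_batch=True (mirrors test_same_on_batch above).
rot_params = random_rotation_generator(
    batch_size=4,
    degrees=torch.tensor([10.0, 20.0]),
    same_on_batch=True,
)

print(apply_mask)             # boolean-like tensor of length 4
print(rot_params['degrees'])  # four identical angles, since same_on_batch=True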
"import warnings\nfrom typing import Tuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport enum\n\nfrom kornia.constants import pi\n\n__all__ = [\n # functional api\n \"rad2deg\",\n \"deg2rad\",\n \"pol2cart\",\n \"cart2pol\",\n \"convert_points_from_homogeneous\",\n \"convert_points_to_homogeneous\",\n \"convert_affinematrix_to_homography\",\n \"convert_affinematrix_to_homography3d\",\n \"angle_axis_to_rotation_matrix\",\n \"angle_axis_to_quaternion\",\n \"rotation_matrix_to_angle_axis\",\n \"rotation_matrix_to_quaternion\",\n \"quaternion_to_angle_axis\",\n \"quaternion_to_rotation_matrix\",\n \"quaternion_log_to_exp\",\n \"quaternion_exp_to_log\",\n \"denormalize_pixel_coordinates\",\n \"normalize_pixel_coordinates\",\n \"normalize_quaternion\",\n \"denormalize_pixel_coordinates3d\",\n \"normalize_pixel_coordinates3d\",\n]\n\n\nclass QuaternionCoeffOrder(enum.Enum):\n XYZW = 'xyzw'\n WXYZ = 'wxyz'\n\n\ndef rad2deg(tensor: torch.Tensor) -> torch.Tensor:\n r\"\"\"Function that converts angles from radians to degrees.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)\n >>> output = rad2deg(input)\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(tensor)))\n\n return 180. * tensor / pi.to(tensor.device).type(tensor.dtype)\n\n\ndef deg2rad(tensor: torch.Tensor) -> torch.Tensor:\n r\"\"\"Function that converts angles from degrees to radians.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: tensor with same shape as input.\n\n Examples::\n\n >>> input = 360. * torch.rand(1, 3, 3)\n >>> output = deg2rad(input)\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(tensor)))\n\n return tensor * pi.to(tensor.device).type(tensor.dtype) / 180.\n\n\ndef pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Function that converts polar coordinates to cartesian coordinates.\n\n Args:\n rho (torch.Tensor): Tensor of arbitrary shape.\n phi (torch.Tensor): Tensor of same arbitrary shape.\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> rho = torch.rand(1, 3, 3)\n >>> phi = torch.rand(1, 3, 3)\n >>> x, y = pol2cart(rho, phi)\n \"\"\"\n if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}, {}\".format(type(rho), type(phi)))\n\n x = rho * torch.cos(phi)\n y = rho * torch.sin(phi)\n return x, y\n\n\ndef cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1.e-8) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Function that converts cartesian coordinates to polar coordinates.\n\n Args:\n rho (torch.Tensor): Tensor of arbitrary shape.\n phi (torch.Tensor): Tensor of same arbitrary shape.\n eps (float): To avoid division by zero. Default is 1e-8\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> x = torch.rand(1, 3, 3)\n >>> y = torch.rand(1, 3, 3)\n >>> rho, phi = cart2pol(x, y)\n \"\"\"\n if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)):\n raise TypeError(\"Input type is not a torch.Tensor. 
Got {}, {}\".format(type(x), type(y)))\n\n rho = torch.sqrt(x**2 + y**2 + eps)\n phi = torch.atan2(y, x)\n return rho, phi\n\n\ndef convert_points_from_homogeneous(points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that converts points from homogeneous to Euclidean space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_from_homogeneous(input) # BxNx2\n \"\"\"\n if not isinstance(points, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(points)))\n\n if len(points.shape) < 2:\n raise ValueError(\"Input must be at least a 2D tensor. Got {}\".format(points.shape))\n\n # we check for points at infinity\n z_vec: torch.Tensor = points[..., -1:]\n\n # set the results of division by zeror/near-zero to 1.0\n # follow the convention of opencv:\n # https://github.com/opencv/opencv/pull/14411/files\n mask: torch.Tensor = torch.abs(z_vec) > eps\n scale = torch.where(mask, 1. / (z_vec + eps), torch.ones_like(z_vec))\n\n return scale * points[..., :-1]\n\n\ndef convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:\n r\"\"\"Function that converts points from Euclidean to homogeneous space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_to_homogeneous(input) # BxNx4\n \"\"\"\n if not isinstance(points, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(points)))\n if len(points.shape) < 2:\n raise ValueError(\"Input must be at least a 2D tensor. Got {}\".format(points.shape))\n\n return torch.nn.functional.pad(points, [0, 1], \"constant\", 1.0)\n\n\ndef _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor:\n H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], \"constant\", value=0.)\n H[..., -1, -1] += 1.0\n return H\n\n\ndef convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:\n r\"\"\"Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].\n\n Examples::\n\n >>> input = torch.rand(2, 2, 3) # Bx2x3\n >>> output = convert_affinematrix_to_homography(input) # Bx3x3\n \"\"\"\n if not isinstance(A, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(A)))\n if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)):\n raise ValueError(\"Input matrix must be a Bx2x3 tensor. Got {}\".format(A.shape))\n return _convert_affinematrix_to_homography_impl(A)\n\n\ndef convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:\n r\"\"\"Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].\n\n Examples::\n\n >>> input = torch.rand(2, 3, 4) # Bx3x4\n >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4\n \"\"\"\n if not isinstance(A, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(A)))\n if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)):\n raise ValueError(\"Input matrix must be a Bx3x4 tensor. 
Got {}\".format(A.shape))\n return _convert_affinematrix_to_homography_impl(A)\n\n\ndef angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3d vector of axis-angle rotation to 3x3 rotation matrix\n\n Args:\n angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.\n\n Returns:\n torch.Tensor: tensor of 3x3 rotation matrices.\n\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 3, 3)`\n\n Example:\n >>> input = torch.rand(1, 3) # Nx3\n >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3\n \"\"\"\n if not isinstance(angle_axis, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(angle_axis)))\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(\"Input size must be a (*, 3) tensor. Got {}\".format(angle_axis.shape))\n\n def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):\n # We want to be careful to only evaluate the square root if the\n # norm of the angle_axis vector is greater than zero. Otherwise\n # we get a division by zero.\n k_one = 1.0\n theta = torch.sqrt(theta2)\n wxyz = angle_axis / (theta + eps)\n wx, wy, wz = torch.chunk(wxyz, 3, dim=1)\n cos_theta = torch.cos(theta)\n sin_theta = torch.sin(theta)\n\n r00 = cos_theta + wx * wx * (k_one - cos_theta)\n r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)\n r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)\n r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta\n r11 = cos_theta + wy * wy * (k_one - cos_theta)\n r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)\n r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)\n r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)\n r22 = cos_theta + wz * wz * (k_one - cos_theta)\n rotation_matrix = torch.cat([r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)\n return rotation_matrix.view(-1, 3, 3)\n\n def _compute_rotation_matrix_taylor(angle_axis):\n rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)\n k_one = torch.ones_like(rx)\n rotation_matrix = torch.cat([k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)\n return rotation_matrix.view(-1, 3, 3)\n\n # stolen from ceres/rotation.h\n\n _angle_axis = torch.unsqueeze(angle_axis, dim=1)\n theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))\n theta2 = torch.squeeze(theta2, dim=1)\n\n # compute rotation matrices\n rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)\n rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)\n\n # create mask to handle both cases\n eps = 1e-6\n mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)\n mask_pos = (mask).type_as(theta2)\n mask_neg = (mask == False).type_as(theta2) # noqa\n\n # create output pose matrix\n batch_size = angle_axis.shape[0]\n rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)\n rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)\n # fill output matrix with masked values\n rotation_matrix[..., :3, :3] = \\\n mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor\n return rotation_matrix # Nx3x3\n\n\ndef rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to Rodrigues vector.\n\n Args:\n rotation_matrix (torch.Tensor): rotation matrix.\n\n Returns:\n torch.Tensor: Rodrigues vector transformation.\n\n Shape:\n - Input: :math:`(N, 3, 3)`\n - Output: :math:`(N, 3)`\n\n Example:\n >>> input = torch.rand(2, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_angle_axis(input) # Nx3\n \"\"\"\n if not 
isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}\")\n quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix, order=QuaternionCoeffOrder.WXYZ)\n return quaternion_to_angle_axis(quaternion, order=QuaternionCoeffOrder.WXYZ)\n\n\ndef rotation_matrix_to_quaternion(\n rotation_matrix: torch.Tensor,\n eps: float = 1.e-8,\n order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to 4d quaternion vector.\n\n The quaternion vector has components in (w, x, y, z) or (x, y, z, w) format.\n\n .. note::\n The (x, y, z, w) order is going to be deprecated in favor of efficiency.\n\n Args:\n rotation_matrix (torch.Tensor): the rotation matrix to convert.\n eps (float): small value to avoid zero division. Default: 1e-8.\n order (QuaternionCoeffOrder): quaternion coefficient order. Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: the rotation in quaternion.\n\n Shape:\n - Input: :math:`(*, 3, 3)`\n - Output: :math:`(*, 4)`\n\n Example:\n >>> input = torch.rand(4, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_quaternion(input, eps=torch.finfo(input.dtype).eps,\n ... order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n if not isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor:\n eps: float = torch.finfo(numerator.dtype).tiny # type: ignore\n return numerator / torch.clamp(denominator, min=eps)\n\n rotation_matrix_vec: torch.Tensor = rotation_matrix.view(*rotation_matrix.shape[:-2], 9)\n\n m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(rotation_matrix_vec, chunks=9, dim=-1)\n\n trace: torch.Tensor = m00 + m11 + m22\n\n def trace_positive_cond():\n sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw.\n qw = 0.25 * sq\n qx = safe_zero_division(m21 - m12, sq)\n qy = safe_zero_division(m02 - m20, sq)\n qz = safe_zero_division(m10 - m01, sq)\n if order == QuaternionCoeffOrder.XYZW:\n return torch.cat((qx, qy, qz, qw), dim=-1)\n else:\n return torch.cat((qw, qx, qy, qz), dim=-1)\n\n def cond_1():\n sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx.\n qw = safe_zero_division(m21 - m12, sq)\n qx = 0.25 * sq\n qy = safe_zero_division(m01 + m10, sq)\n qz = safe_zero_division(m02 + m20, sq)\n if order == QuaternionCoeffOrder.XYZW:\n return torch.cat((qx, qy, qz, qw), dim=-1)\n else:\n return torch.cat((qw, qx, qy, qz), dim=-1)\n\n def cond_2():\n sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. 
# sq = 4 * qy.\n qw = safe_zero_division(m02 - m20, sq)\n qx = safe_zero_division(m01 + m10, sq)\n qy = 0.25 * sq\n qz = safe_zero_division(m12 + m21, sq)\n if order == QuaternionCoeffOrder.XYZW:\n return torch.cat((qx, qy, qz, qw), dim=-1)\n else:\n return torch.cat((qw, qx, qy, qz), dim=-1)\n\n def cond_3():\n sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz.\n qw = safe_zero_division(m10 - m01, sq)\n qx = safe_zero_division(m02 + m20, sq)\n qy = safe_zero_division(m12 + m21, sq)\n qz = 0.25 * sq\n if order == QuaternionCoeffOrder.XYZW:\n return torch.cat((qx, qy, qz, qw), dim=-1)\n else:\n return torch.cat((qw, qx, qy, qz), dim=-1)\n\n where_2 = torch.where(m11 > m22, cond_2(), cond_3())\n where_1 = torch.where((m00 > m11) & (m00 > m22), cond_1(), where_2)\n\n quaternion: torch.Tensor = torch.where(trace > 0., trace_positive_cond(), where_1)\n return quaternion\n\n\ndef normalize_quaternion(quaternion: torch.Tensor, eps: float = 1.e-12) -> torch.Tensor:\n r\"\"\"Normalizes a quaternion.\n\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n normalized. The tensor can be of shape :math:`(*, 4)`.\n eps (Optional[bool]): small value to avoid division by zero.\n Default: 1e-12.\n\n Return:\n torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor((1., 0., 1., 0.))\n >>> normalize_quaternion(quaternion)\n tensor([0.7071, 0.0000, 0.7071, 0.0000])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(quaternion)))\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(\"Input must be a tensor of shape (*, 4). Got {}\".format(quaternion.shape))\n return F.normalize(quaternion, p=2.0, dim=-1, eps=eps)\n\n\n# based on:\n# https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101\n# https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247\n\n\ndef quaternion_to_rotation_matrix(\n quaternion: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n r\"\"\"Converts a quaternion to a rotation matrix.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n order (QuaternionCoeffOrder): quaternion coefficient order. Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.\n\n Example:\n >>> quaternion = torch.tensor((0., 0., 0., 1.))\n >>> quaternion_to_rotation_matrix(quaternion, order=QuaternionCoeffOrder.WXYZ)\n tensor([[-1., 0., 0.],\n [ 0., -1., 0.],\n [ 0., 0., 1.]])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape (*, 4). Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. 
\"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # normalize the input quaternion\n quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)\n\n # unpack the normalized quaternion components\n if order == QuaternionCoeffOrder.XYZW:\n x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1)\n else:\n w, x, y, z = torch.chunk(quaternion_norm, chunks=4, dim=-1)\n\n # compute the actual conversion\n tx: torch.Tensor = 2.0 * x\n ty: torch.Tensor = 2.0 * y\n tz: torch.Tensor = 2.0 * z\n twx: torch.Tensor = tx * w\n twy: torch.Tensor = ty * w\n twz: torch.Tensor = tz * w\n txx: torch.Tensor = tx * x\n txy: torch.Tensor = ty * x\n txz: torch.Tensor = tz * x\n tyy: torch.Tensor = ty * y\n tyz: torch.Tensor = tz * y\n tzz: torch.Tensor = tz * z\n one: torch.Tensor = torch.tensor(1.)\n\n matrix: torch.Tensor = torch.stack(\n (\n one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one -\n (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy)\n ),\n dim=-1\n ).view(-1, 3, 3)\n\n if len(quaternion.shape) == 1:\n matrix = torch.squeeze(matrix, dim=0)\n return matrix\n\n\ndef quaternion_to_angle_axis(\n quaternion: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n \"\"\"Convert quaternion vector to angle axis of rotation.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n order (QuaternionCoeffOrder): quaternion coefficient order. Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 3)`\n\n Example:\n >>> quaternion = torch.rand(2, 4) # Nx4\n >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3\n \"\"\"\n if not torch.is_tensor(quaternion):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. 
\"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n # unpack input and compute conversion\n q1: torch.Tensor = torch.tensor([])\n q2: torch.Tensor = torch.tensor([])\n q3: torch.Tensor = torch.tensor([])\n cos_theta: torch.Tensor = torch.tensor([])\n\n if order == QuaternionCoeffOrder.XYZW:\n q1 = quaternion[..., 0]\n q2 = quaternion[..., 1]\n q3 = quaternion[..., 2]\n cos_theta = quaternion[..., 3]\n else:\n cos_theta = quaternion[..., 0]\n q1 = quaternion[..., 1]\n q2 = quaternion[..., 2]\n q3 = quaternion[..., 3]\n\n sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3\n\n sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)\n two_theta: torch.Tensor = 2.0 * torch.where(\n cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)\n )\n\n k_pos: torch.Tensor = two_theta / sin_theta\n k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)\n k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)\n\n angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]\n angle_axis[..., 0] += q1 * k\n angle_axis[..., 1] += q2 * k\n angle_axis[..., 2] += q3 * k\n return angle_axis\n\n\ndef quaternion_log_to_exp(\n quaternion: torch.Tensor,\n eps: float = 1.e-8,\n order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n r\"\"\"Applies exponential map to log quaternion.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 3)`.\n order (QuaternionCoeffOrder): quaternion coefficient order. Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor((0., 0., 0.))\n >>> quaternion_log_to_exp(quaternion, eps=torch.finfo(quaternion.dtype).eps,\n ... order=QuaternionCoeffOrder.WXYZ)\n tensor([1., 0., 0., 0.])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 3:\n raise ValueError(f\"Input must be a tensor of shape (*, 3). Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. 
\"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # compute quaternion norm\n norm_q: torch.Tensor = torch.norm(quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps)\n\n # compute scalar and vector\n quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q\n quaternion_scalar: torch.Tensor = torch.cos(norm_q)\n\n # compose quaternion and return\n quaternion_exp: torch.Tensor = torch.tensor([])\n if order == QuaternionCoeffOrder.XYZW:\n quaternion_exp = torch.cat((quaternion_vector, quaternion_scalar), dim=-1)\n else:\n quaternion_exp = torch.cat((quaternion_scalar, quaternion_vector), dim=-1)\n\n return quaternion_exp\n\n\ndef quaternion_exp_to_log(\n quaternion: torch.Tensor,\n eps: float = 1.e-8,\n order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n r\"\"\"Applies the log map to a quaternion.\n\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n eps (float): A small number for clamping.\n order (QuaternionCoeffOrder): quaternion coefficient order. Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.\n\n Example:\n >>> quaternion = torch.tensor((1., 0., 0., 0.))\n >>> quaternion_exp_to_log(quaternion, eps=torch.finfo(quaternion.dtype).eps,\n ... order=QuaternionCoeffOrder.WXYZ)\n tensor([0., 0., 0.])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape (*, 4). Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # unpack quaternion vector and scalar\n quaternion_vector: torch.Tensor = torch.tensor([])\n quaternion_scalar: torch.Tensor = torch.tensor([])\n\n if order == QuaternionCoeffOrder.XYZW:\n quaternion_vector = quaternion[..., 0:3]\n quaternion_scalar = quaternion[..., 3:4]\n else:\n quaternion_scalar = quaternion[..., 0:1]\n quaternion_vector = quaternion[..., 1:4]\n\n # compute quaternion norm\n norm_q: torch.Tensor = torch.norm(quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps)\n\n # apply log map\n quaternion_log: torch.Tensor = quaternion_vector * torch.acos(\n torch.clamp(quaternion_scalar, min=-1.0, max=1.0)\n ) / norm_q\n\n return quaternion_log\n\n\n# based on:\n# https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138\n\n\ndef angle_axis_to_quaternion(\n angle_axis: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.XYZW\n) -> torch.Tensor:\n r\"\"\"Convert an angle axis to a quaternion.\n\n The quaternion vector has components in (x, y, z, w) or (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n order (QuaternionCoeffOrder): quaternion coefficient order. 
Default: 'xyzw'.\n Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n torch.Tensor: tensor with quaternion.\n\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of dimensions\n - Output: :math:`(*, 4)`\n\n Example:\n >>> angle_axis = torch.rand(2, 3) # Nx3\n >>> quaternion = angle_axis_to_quaternion(angle_axis, order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n if not torch.is_tensor(angle_axis):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(angle_axis)}\")\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(f\"Input must be a tensor of shape Nx3 or 3. Got {angle_axis.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # unpack input and compute conversion\n a0: torch.Tensor = angle_axis[..., 0:1]\n a1: torch.Tensor = angle_axis[..., 1:2]\n a2: torch.Tensor = angle_axis[..., 2:3]\n theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2\n\n theta: torch.Tensor = torch.sqrt(theta_squared)\n half_theta: torch.Tensor = theta * 0.5\n\n mask: torch.Tensor = theta_squared > 0.0\n ones: torch.Tensor = torch.ones_like(half_theta)\n\n k_neg: torch.Tensor = 0.5 * ones\n k_pos: torch.Tensor = torch.sin(half_theta) / theta\n k: torch.Tensor = torch.where(mask, k_pos, k_neg)\n w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)\n\n quaternion: torch.Tensor = torch.zeros(\n size=(*angle_axis.shape[:-1], 4), dtype=angle_axis.dtype, device=angle_axis.device\n )\n if order == QuaternionCoeffOrder.XYZW:\n quaternion[..., 0:1] = a0 * k\n quaternion[..., 1:2] = a1 * k\n quaternion[..., 2:3] = a2 * k\n quaternion[..., 3:4] = w\n else:\n quaternion[..., 1:2] = a0 * k\n quaternion[..., 2:3] = a1 * k\n quaternion[..., 3:4] = a2 * k\n quaternion[..., 0:1] = w\n return quaternion\n\n\n# based on:\n# https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71\n\n\ndef normalize_pixel_coordinates(\n pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8\n) -> torch.Tensor:\n r\"\"\"Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n \"\"\"\n if pixel_coordinates.shape[-1] != 2:\n raise ValueError(\"Input pixel_coordinates must be of shape (*, 2). 
\" \"Got {}\".format(pixel_coordinates.shape))\n # compute normalization factor\n hw: torch.Tensor = torch.stack(\n [\n torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype),\n torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)\n ]\n )\n\n factor: torch.Tensor = torch.tensor(2., device=pixel_coordinates.device,\n dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)\n\n return factor * pixel_coordinates - 1\n\n\ndef denormalize_pixel_coordinates(\n pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8\n) -> torch.Tensor:\n r\"\"\"Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n \"\"\"\n if pixel_coordinates.shape[-1] != 2:\n raise ValueError(\"Input pixel_coordinates must be of shape (*, 2). \" \"Got {}\".format(pixel_coordinates.shape))\n # compute normalization factor\n hw: torch.Tensor = torch.stack([torch.tensor(width),\n torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n\n factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps)\n\n return torch.tensor(1.) / factor * (pixel_coordinates + 1)\n\n\ndef normalize_pixel_coordinates3d(\n pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8\n) -> torch.Tensor:\n r\"\"\"Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the z-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n \"\"\"\n if pixel_coordinates.shape[-1] != 3:\n raise ValueError(\"Input pixel_coordinates must be of shape (*, 3). \" \"Got {}\".format(pixel_coordinates.shape))\n # compute normalization factor\n dhw: torch.Tensor = torch.stack([torch.tensor(depth),\n torch.tensor(width),\n torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n\n factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)\n\n return factor * pixel_coordinates - 1\n\n\ndef denormalize_pixel_coordinates3d(\n pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8\n) -> torch.Tensor:\n r\"\"\"Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the x-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n \"\"\"\n if pixel_coordinates.shape[-1] != 3:\n raise ValueError(\"Input pixel_coordinates must be of shape (*, 3). 
\" \"Got {}\".format(pixel_coordinates.shape))\n # compute normalization factor\n dhw: torch.Tensor = torch.stack([torch.tensor(depth),\n torch.tensor(width),\n torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n\n factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)\n\n return torch.tensor(1.) / factor * (pixel_coordinates + 1)\n",
"import math\n\nimport pytest\nimport torch\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\nimport kornia\nfrom kornia.testing import BaseTester\n\n\nclass TestRgbToHls(BaseTester):\n\n def test_smoke(self, device, dtype):\n C, H, W = 3, 4, 5\n img = torch.rand(C, H, W, device=device, dtype=dtype)\n assert isinstance(kornia.color.rgb_to_hls(img), torch.Tensor)\n\n @pytest.mark.parametrize(\"shape\", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 1)])\n def test_cardinality(self, device, dtype, shape):\n img = torch.ones(shape, device=device, dtype=dtype)\n assert kornia.color.rgb_to_hls(img).shape == shape\n\n def test_exception(self, device, dtype):\n with pytest.raises(TypeError):\n assert kornia.color.rgb_to_hls([0.])\n\n with pytest.raises(ValueError):\n img = torch.ones(1, 1, device=device, dtype=dtype)\n assert kornia.color.rgb_to_hls(img)\n\n with pytest.raises(ValueError):\n img = torch.ones(2, 1, 1, device=device, dtype=dtype)\n assert kornia.color.rgb_to_hls(img)\n\n def test_unit(self, device, dtype):\n data = torch.tensor(\n [\n [\n [0.4237059, 0.1935902, 0.8585021, 0.3790484, 0.1389151],\n [0.5933651, 0.0474544, 0.2801555, 0.1691061, 0.9221829],\n [0.2351739, 0.5852075, 0.5789326, 0.8411915, 0.5960411],\n [0.0290176, 0.6459382, 0.8581501, 0.4755400, 0.7735767],\n [0.9497226, 0.0919441, 0.5462211, 0.7836787, 0.6403612]\n ],\n [\n [0.2280025, 0.1352853, 0.7999730, 0.6658246, 0.4910861],\n [0.3499791, 0.1250734, 0.6315800, 0.4785843, 0.8477826],\n [0.3646359, 0.2415122, 0.5301932, 0.0782518, 0.8710389],\n [0.6957581, 0.6162295, 0.6259052, 0.1753750, 0.6737530],\n [0.7678874, 0.9825978, 0.0234877, 0.2485284, 0.8159551]\n ],\n [\n [0.7330830, 0.9015747, 0.0229067, 0.4280063, 0.5400181],\n [0.0037299, 0.3259412, 0.3467951, 0.9575506, 0.1525899],\n [0.9660432, 0.5287710, 0.6654660, 0.3797526, 0.4981400],\n [0.7422802, 0.9926301, 0.5334370, 0.7852844, 0.4397180],\n [0.2281681, 0.2560037, 0.5134379, 0.5800887, 0.8685090]\n ]\n ],\n device=device,\n dtype=dtype\n )\n\n # OpenCV\n expected = torch.tensor(\n [\n [\n [4.59454770, 4.26846900, 0.97384680, 2.27317070, 3.26934400],\n [0.61494170, 3.89691880, 2.29297200, 3.77774720, 0.94595980],\n [4.00329600, 5.40794320, 4.56610100, 5.86935100, 1.81946310],\n [3.20989560, 4.27144400, 0.29820946, 4.70416550, 0.73408560],\n [0.78329855, 2.28729030, 5.30166340, 5.63437900, 3.38281500]\n ],\n [\n [0.48054275, 0.51843000, 0.44070444, 0.52243650, 0.33946657],\n [0.29854750, 0.18669781, 0.45586777, 0.56332830, 0.53738640],\n [0.60060860, 0.41335985, 0.59782960, 0.45972168, 0.68458940],\n [0.38564888, 0.80442977, 0.69579350, 0.48032972, 0.60664740],\n [0.58894540, 0.53727096, 0.28485440, 0.51610350, 0.75443510]\n ],\n [\n [0.52553130, 0.79561585, 0.94802250, 0.30024928, 0.59078425],\n [0.98750657, 0.74582230, 0.38544560, 0.90278864, 0.83178820],\n [0.91497860, 0.41573380, 0.16817844, 0.82978433, 0.59113250],\n [0.92475650, 0.96231550, 0.53370523, 0.63488615, 0.42437580],\n [0.87768690, 0.96239233, 0.91754496, 0.55295944, 0.46453667]\n ]\n ],\n device=device,\n dtype=dtype\n )\n\n assert_allclose(kornia.color.rgb_to_hls(data), expected)\n\n def test_nan_rgb_to_hls(self, device, dtype):\n data = torch.ones(2, 3, 5, 5, device=device, dtype=dtype)\n\n # OpenCV\n expected = torch.cat(\n [\n torch.zeros(2, 1, 5, 5, device=device, dtype=dtype),\n torch.ones(2, 1, 5, 5, device=device, dtype=dtype),\n torch.zeros(2, 1, 5, 5, device=device, dtype=dtype)\n ],\n dim=1\n )\n\n assert_allclose(kornia.color.rgb_to_hls(data), 
expected)\n\n @pytest.mark.grad\n def test_gradcheck(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.rand(B, C, H, W, device=device, dtype=torch.float64, requires_grad=True)\n assert gradcheck(kornia.color.rgb_to_hls, (img, ), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.ones(B, C, H, W, device=device, dtype=dtype)\n op = kornia.color.rgb_to_hls\n op_jit = torch.jit.script(op)\n assert_allclose(op(img), op_jit(img))\n\n @pytest.mark.nn\n def test_module(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.ones(B, C, H, W, device=device, dtype=dtype)\n ops = kornia.color.RgbToHls().to(device, dtype)\n fcn = kornia.color.rgb_to_hls\n assert_allclose(ops(img), fcn(img))\n\n\nclass TestHlsToRgb(BaseTester):\n\n def test_smoke(self, device, dtype):\n C, H, W = 3, 4, 5\n img = torch.rand(C, H, W, device=device, dtype=dtype)\n assert isinstance(kornia.color.hls_to_rgb(img), torch.Tensor)\n\n @pytest.mark.parametrize(\"shape\", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 1)])\n def test_cardinality(self, device, dtype, shape):\n img = torch.ones(shape, device=device, dtype=dtype)\n assert kornia.color.hls_to_rgb(img).shape == shape\n\n def test_exception(self, device, dtype):\n with pytest.raises(TypeError):\n assert kornia.color.hls_to_rgb([0.])\n\n with pytest.raises(ValueError):\n img = torch.ones(1, 1, device=device, dtype=dtype)\n assert kornia.color.hls_to_rgb(img)\n\n with pytest.raises(ValueError):\n img = torch.ones(2, 1, 1, device=device, dtype=dtype)\n assert kornia.color.hls_to_rgb(img)\n\n def test_unit(self, device, dtype):\n data = torch.tensor(\n [\n [\n [\n [0.5513626, 0.8487718, 0.1822479, 0.2851745, 0.2669488],\n [0.7596772, 0.4565057, 0.6181599, 0.3852497, 0.7746902],\n [0.5742747, 0.1957062, 0.7530835, 0.2104362, 0.9449323],\n [0.9918052, 0.2437515, 0.4718738, 0.8502576, 0.1675640],\n [0.9210159, 0.0538564, 0.5801026, 0.6110542, 0.3768399]\n ],\n [\n [0.4111853, 0.0183454, 0.7832276, 0.2975794, 0.1139528],\n [0.6207729, 0.1073406, 0.8335325, 0.5700451, 0.2594557],\n [0.7520493, 0.5097187, 0.4719872, 0.9477938, 0.1640292],\n [0.8973427, 0.6455371, 0.7567374, 0.3159562, 0.8135307],\n [0.0855004, 0.6645504, 0.9923756, 0.6209313, 0.2356791]\n ],\n [\n [0.4734681, 0.0422099, 0.7405791, 0.9671807, 0.1793800],\n [0.8221875, 0.7219887, 0.3627397, 0.4403201, 0.0024084],\n [0.0803350, 0.9432759, 0.0241543, 0.8292291, 0.7745832],\n [0.3707901, 0.0851424, 0.5805428, 0.1098685, 0.4238486],\n [0.1058410, 0.0816052, 0.5792874, 0.9578886, 0.6281684]\n ]\n ]\n ],\n device=device,\n dtype=dtype\n )\n\n data[:, 0] = 2 * math.pi * data[:, 0]\n\n # OpenCV\n expected = torch.tensor(\n [\n [\n [\n [0.21650219, 0.01911971, 0.91374826, 0.17609520, 0.10979544],\n [0.65698080, 0.02984191, 0.77314806, 0.38072730, 0.25964087],\n [0.73213010, 0.81102980, 0.47240910, 0.96834683, 0.29108350],\n [0.93540700, 0.64780010, 0.61551300, 0.35066980, 0.89171433],\n [0.09454980, 0.69192480, 0.98795897, 0.25782573, 0.08763295]\n ],\n [\n [0.48587522, 0.01757100, 0.94376480, 0.58539250, 0.13439366],\n [0.30897713, 0.18483935, 0.80829670, 0.75936294, 0.25883088],\n [0.75421450, 0.97218925, 0.46058673, 0.99108470, 0.03697497],\n [0.85927840, 0.67571700, 0.89796180, 0.28124255, 0.89256540],\n [0.07645091, 0.65486740, 0.99254686, 0.50014400, 0.38372523]\n ],\n [\n [0.60586834, 0.01897625, 0.62269044, 0.00976634, 0.09351197],\n [0.93256867, 0.14439031, 0.89391685, 0.49867177, 0.26008060],\n [0.77196840, 0.04724807, 
0.48338777, 0.90450300, 0.12093388],\n [0.86302150, 0.61535730, 0.85029656, 0.34361976, 0.73449594],\n [0.08502806, 0.63717590, 0.99679226, 0.98403690, 0.16492467]\n ]\n ]\n ],\n device=device,\n dtype=dtype\n )\n\n f = kornia.color.hls_to_rgb\n assert_allclose(f(data), expected)\n\n data[:, 0] += 2 * math.pi\n assert_allclose(f(data), expected)\n\n data[:, 0] -= 4 * math.pi\n assert_allclose(f(data), expected)\n\n @pytest.mark.grad\n def test_gradcheck(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.rand(B, C, H, W, device=device, dtype=torch.float64, requires_grad=True)\n assert gradcheck(kornia.color.hls_to_rgb, (img, ), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.ones(B, C, H, W, device=device, dtype=dtype)\n op = kornia.color.hls_to_rgb\n op_jit = torch.jit.script(op)\n assert_allclose(op(img), op_jit(img))\n\n @pytest.mark.nn\n def test_module(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.ones(B, C, H, W, device=device, dtype=dtype)\n ops = kornia.color.HlsToRgb().to(device, dtype)\n fcn = kornia.color.hls_to_rgb\n assert_allclose(ops(img), fcn(img))\n",
"import pytest\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\nimport kornia.testing as utils # test utils\nfrom kornia.feature.affine_shape import *\n\n\nclass TestPatchAffineShapeEstimator:\n\n def test_shape(self, device):\n inp = torch.rand(1, 1, 32, 32, device=device)\n ori = PatchAffineShapeEstimator(32).to(device)\n ang = ori(inp)\n assert ang.shape == torch.Size([1, 1, 3])\n\n def test_shape_batch(self, device):\n inp = torch.rand(2, 1, 32, 32, device=device)\n ori = PatchAffineShapeEstimator(32).to(device)\n ang = ori(inp)\n assert ang.shape == torch.Size([2, 1, 3])\n\n def test_print(self, device):\n sift = PatchAffineShapeEstimator(32)\n sift.__repr__()\n\n def test_toy(self, device):\n aff = PatchAffineShapeEstimator(19).to(device)\n inp = torch.zeros(1, 1, 19, 19, device=device)\n inp[:, :, 5:-5, 1:-1] = 1\n abc = aff(inp)\n expected = torch.tensor([[[0.4146, 0.0000, 1.0000]]], device=device)\n assert_allclose(abc, expected, atol=1e-4, rtol=1e-4)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 1, 13, 13\n ori = PatchAffineShapeEstimator(width).to(device)\n patches = torch.rand(batch_size, channels, height, width, device=device)\n patches = utils.tensor_to_gradcheck_var(patches) # to var\n assert gradcheck(ori, (patches, ), raise_exception=True, nondet_tol=1e-4)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n B, C, H, W = 2, 1, 13, 13\n patches = torch.ones(B, C, H, W, device=device, dtype=dtype)\n tfeat = PatchAffineShapeEstimator(W).to(patches.device, patches.dtype).eval()\n tfeat_jit = torch.jit.script(PatchAffineShapeEstimator(W).to(patches.device, patches.dtype).eval())\n assert_allclose(tfeat_jit(patches), tfeat(patches))\n\n\nclass TestLAFAffineShapeEstimator:\n\n def test_shape(self, device):\n inp = torch.rand(1, 1, 32, 32, device=device)\n laf = torch.rand(1, 1, 2, 3, device=device)\n ori = LAFAffineShapeEstimator().to(device)\n out = ori(laf, inp)\n assert out.shape == laf.shape\n\n def test_shape_batch(self, device):\n inp = torch.rand(2, 1, 32, 32, device=device)\n laf = torch.rand(2, 34, 2, 3, device=device)\n ori = LAFAffineShapeEstimator().to(device)\n out = ori(laf, inp)\n assert out.shape == laf.shape\n\n def test_print(self, device):\n sift = LAFAffineShapeEstimator()\n sift.__repr__()\n\n def test_toy(self, device):\n aff = LAFAffineShapeEstimator(32).to(device)\n inp = torch.zeros(1, 1, 32, 32, device=device)\n inp[:, :, 15:-15, 9:-9] = 1\n laf = torch.tensor([[[[20., 0., 16.], [0., 20., 16.]]]], device=device)\n new_laf = aff(laf, inp)\n expected = torch.tensor([[[[36.643, 0., 16.], [0., 10.916, 16.]]]], device=device)\n assert_allclose(new_laf, expected, atol=1e-4, rtol=1e-4)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 1, 40, 40\n patches = torch.rand(batch_size, channels, height, width, device=device)\n patches = utils.tensor_to_gradcheck_var(patches) # to var\n laf = torch.tensor([[[[5., 0., 26.], [0., 5., 26.]]]], device=device)\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(\n LAFAffineShapeEstimator(11).to(device), (laf, patches),\n raise_exception=True,\n rtol=1e-3,\n atol=1e-3,\n nondet_tol=1e-4\n )\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Failing because of extract patches\")\n def test_jit(self, device, dtype):\n B, C, H, W = 1, 1, 13, 13\n inp = torch.zeros(B, C, H, W, device=device)\n inp[:, :, 15:-15, 9:-9] = 1\n laf = torch.tensor([[[[20., 0., 16.], [0., 20., 16.]]]], device=device)\n tfeat = 
LAFAffineShapeEstimator(W).to(inp.device, inp.dtype).eval()\n tfeat_jit = torch.jit.script(LAFAffineShapeEstimator(W).to(inp.device, inp.dtype).eval())\n assert_allclose(tfeat_jit(laf, inp), tfeat(laf, inp))\n\n\nclass TestLAFAffNetShapeEstimator:\n\n def test_shape(self, device):\n inp = torch.rand(1, 1, 32, 32, device=device)\n laf = torch.rand(1, 1, 2, 3, device=device)\n ori = LAFAffNetShapeEstimator(False).to(device).eval()\n out = ori(laf, inp)\n assert out.shape == laf.shape\n\n def test_pretrained(self, device):\n inp = torch.rand(1, 1, 32, 32, device=device)\n laf = torch.rand(1, 1, 2, 3, device=device)\n ori = LAFAffNetShapeEstimator(True).to(device).eval()\n out = ori(laf, inp)\n assert out.shape == laf.shape\n\n def test_shape_batch(self, device):\n inp = torch.rand(2, 1, 32, 32, device=device)\n laf = torch.rand(2, 5, 2, 3, device=device)\n ori = LAFAffNetShapeEstimator().to(device).eval()\n out = ori(laf, inp)\n assert out.shape == laf.shape\n\n def test_print(self, device):\n sift = LAFAffNetShapeEstimator()\n sift.__repr__()\n\n def test_toy(self, device):\n aff = LAFAffNetShapeEstimator(True).to(device).eval()\n inp = torch.zeros(1, 1, 32, 32, device=device)\n inp[:, :, 15:-15, 9:-9] = 1\n laf = torch.tensor([[[[20., 0., 16.], [0., 20., 16.]]]], device=device)\n new_laf = aff(laf, inp)\n expected = torch.tensor([[[[40.8758, 0., 16.], [-0.3824, 9.7857, 16.]]]], device=device)\n assert_allclose(new_laf, expected, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.skip(\"jacobian not well computed\")\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 1, 35, 35\n patches = torch.rand(batch_size, channels, height, width, device=device)\n patches = utils.tensor_to_gradcheck_var(patches) # to var\n laf = torch.tensor([[[[8., 0., 16.], [0., 8., 16.]]]], device=device)\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(\n LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype), (laf, patches),\n raise_exception=True,\n rtol=1e-3,\n atol=1e-3,\n nondet_tol=1e-4\n )\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Laf type is not a torch.Tensor????\")\n def test_jit(self, device, dtype):\n B, C, H, W = 1, 1, 32, 32\n patches = torch.rand(B, C, H, W, device=device, dtype=dtype)\n laf = torch.tensor([[[[8., 0., 16.], [0., 8., 16.]]]], device=device)\n laf_estimator = LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype).eval()\n laf_estimator_jit = torch.jit.script(LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype).eval())\n assert_allclose(laf_estimator(laf, patches), laf_estimator_jit(laf, patches))\n"
] | [
[
"torch.manual_seed",
"torch.testing.assert_allclose",
"torch.tensor"
],
[
"torch.abs",
"torch.zeros",
"torch.sin",
"torch.cat",
"torch.jit.is_scripting",
"torch.where",
"torch.finfo",
"torch.norm",
"torch.sqrt",
"torch.eye",
"torch.tensor",
"torch.ones_like",
"torch.cos",
"torch.squeeze",
"torch.nn.functional.pad",
"torch.zeros_like",
"torch.is_tensor",
"torch.unsqueeze",
"torch.stack",
"torch.atan2",
"torch.nn.functional.normalize",
"torch.chunk",
"torch.clamp"
],
[
"torch.jit.script",
"torch.ones",
"torch.zeros",
"torch.tensor",
"torch.rand",
"torch.autograd.gradcheck"
],
[
"torch.autograd.gradcheck",
"torch.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vermaakarsh/Code-Vulnerability | [
"38791e2f2bc970bed4c4e8af397ac1f4ac4d7363"
] | [
"tpot_output _pipeline.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\n train_test_split(features, tpot_data['target'], random_state=42)\n\n# Average CV score on the training set was: 0.9996457287206185\nexported_pipeline = KNeighborsClassifier(n_neighbors=2, p=1, weights=\"distance\")\n# Fix random state in exported estimator\nif hasattr(exported_pipeline, 'random_state'):\n setattr(exported_pipeline, 'random_state', 42)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
DMIRLAB-Group/Dassl.pytorch | [
"79052448cc0b0622f14e9768dbd6e6c0598fe6d1"
] | [
"dassl/engine/dg/crossgrad.py"
] | [
"import torch\nfrom torch.nn import functional as F\n\nfrom dassl.optim import build_optimizer, build_lr_scheduler\nfrom dassl.utils import count_num_param\nfrom dassl.engine import TRAINER_REGISTRY, TrainerX\nfrom dassl.engine.trainer import SimpleNet\n\n\n@TRAINER_REGISTRY.register()\nclass CrossGrad(TrainerX):\n \"\"\"Cross-gradient training.\n\n https://arxiv.org/abs/1804.10745.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.eps_f = cfg.TRAINER.CG.EPS_F\n self.eps_d = cfg.TRAINER.CG.EPS_D\n self.alpha_f = cfg.TRAINER.CG.ALPHA_F\n self.alpha_d = cfg.TRAINER.CG.ALPHA_D\n\n def build_model(self):\n cfg = self.cfg\n\n print('Building F')\n self.F = SimpleNet(cfg, cfg.MODEL, self.num_classes)\n self.F.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.F)))\n self.optim_F = build_optimizer(self.F, cfg.OPTIM)\n self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)\n self.register_model('F', self.F, self.optim_F, self.sched_F)\n\n print('Building D')\n self.D = SimpleNet(cfg, cfg.MODEL, self.dm.num_source_domains)\n self.D.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.D)))\n self.optim_D = build_optimizer(self.D, cfg.OPTIM)\n self.sched_D = build_lr_scheduler(self.optim_D, cfg.OPTIM)\n self.register_model('D', self.D, self.optim_D, self.sched_D)\n\n def forward_backward(self, batch):\n input, label, domain = self.parse_batch_train(batch)\n\n input.requires_grad = True\n\n # Compute domain perturbation\n loss_d = F.cross_entropy(self.D(input), domain)\n loss_d.backward()\n grad_d = torch.clamp(input.grad.data, min=-0.1, max=0.1)\n input_d = input.data + self.eps_f * grad_d\n\n # Compute label perturbation\n input.grad.data.zero_()\n loss_f = F.cross_entropy(self.F(input), label)\n loss_f.backward()\n grad_f = torch.clamp(input.grad.data, min=-0.1, max=0.1)\n input_f = input.data + self.eps_d * grad_f\n\n input = input.detach()\n\n # Update label net\n loss_f1 = F.cross_entropy(self.F(input), label)\n loss_f2 = F.cross_entropy(self.F(input_d), label)\n loss_f = (1 - self.alpha_f) * loss_f1 + self.alpha_f * loss_f2\n self.model_backward_and_update(loss_f, 'F')\n\n # Update domain net\n loss_d1 = F.cross_entropy(self.D(input), domain)\n loss_d2 = F.cross_entropy(self.D(input_f), domain)\n loss_d = (1 - self.alpha_d) * loss_d1 + self.alpha_d * loss_d2\n self.model_backward_and_update(loss_d, 'D')\n\n output_dict = {\n 'loss_f': loss_f.item(),\n 'loss_d': loss_d.item(),\n 'lr': self.optim_F.param_groups[0]['lr']\n }\n\n if (self.batch_idx + 1) == self.num_batches:\n self.update_lr()\n\n return output_dict\n\n def model_inference(self, input):\n return self.F(input)\n"
] | [
[
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
0HenryH/ai2021s | [
"1cadc3f963f7f2fba99441607e62c8da88183327"
] | [
"AI-lec8-rnn/classify.py"
] | [
"import time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nimport torch.nn as nn\r\nimport itertools\r\nimport collections\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read in data\r\ndf = pd.read_csv(\"Chinese_Names_Corpus_Gender(120W).txt\", header=2)\r\ndf = df[df.sex != \"未知\"]\r\nnames = df[\"dict\"].values\r\n\r\n# Compute character frequency\r\nchars = [list(name) for name in names]\r\nchars_flatten = list(itertools.chain(*chars))\r\nfreq = collections.Counter(chars_flatten)\r\nfreq = pd.DataFrame(freq.items(), columns=[\"char\", \"freq\"])\r\nfreq = freq.sort_values(by=\"freq\", ascending=False)\r\n\r\n# Power law (?)\r\nchar_rank = np.arange(freq.shape[0])\r\nchar_freq = freq[\"freq\"].values\r\nplt.plot(char_rank, char_freq)\r\nplt.plot(np.log(1.0 + char_rank), np.log(char_freq))\r\n\r\n# Prepare data\r\ndict_size = 500\r\ndict = list(freq[\"char\"].values[:dict_size])\r\ndict_set = set(dict)\r\nfiltered = list(filter(lambda item: set(item[1]).issubset(dict_set), enumerate(names)))\r\nind = [idx for idx, name in filtered]\r\ndat = df.iloc[ind]\r\ndat[\"y\"] = np.where(dat[\"sex\"] == \"男\", 0, 1)\r\n\r\n# Split training set and test set\r\n# train = dat.sample(frac=0.8, random_state=123)\r\n# test = dat.drop(train.index)\r\ntrain = dat.sample(n=10000, random_state=123)\r\ntest = dat.sample(n=1000, random_state=321)\r\n\r\n# One-hot encoding\r\ndef char2index(char):\r\n return dict.index(char)\r\n\r\ndef name2index(name):\r\n return [char2index(char) for char in name]\r\n\r\ndef name2tensor(name):\r\n tensor = torch.zeros(len(name), 1, dict_size)\r\n for i, char in enumerate(name):\r\n tensor[i, 0, char2index(char)] = 1\r\n return tensor\r\n\r\nchar2index(\"李\")\r\nname2index(\"李兴\")\r\nname2tensor(\"李兴\")\r\n\r\n\r\n\r\n# Build model\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size):\r\n super(RNN, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\r\n self.h2o = nn.Linear(hidden_size, 1)\r\n\r\n def forward(self, input, hidden):\r\n combined = torch.cat((input, hidden), dim=1)\r\n hidden = torch.tanh(self.i2h(combined))\r\n output = torch.sigmoid(self.h2o(hidden))\r\n return output, hidden\r\n\r\n def init_hidden(self):\r\n return torch.zeros(1, self.hidden_size)\r\n\r\n# n_hidden = 128\r\n# rnn = RNN(dict_size, n_hidden)\r\n# input = name2tensor(\"李兴\")\r\n# hidden = rnn.init_hidden()\r\n# output, next_hidden = rnn(input[0], hidden)\r\n\r\n\r\n\r\nnp.random.seed(123)\r\ntorch.random.manual_seed(123)\r\n\r\nn = train.shape[0]\r\nn_hidden = 64\r\nnepoch = 5\r\nbs = 100\r\n\r\nrnn = RNN(dict_size, n_hidden)\r\nopt = torch.optim.Adam(rnn.parameters(), lr=0.001)\r\ntrain_ind = np.arange(n)\r\nlosses = []\r\n\r\nt1 = time.time()\r\nfor k in range(nepoch):\r\n np.random.shuffle(train_ind)\r\n # Update on mini-batches\r\n for j in range(0, n, bs):\r\n # Create mini-batch\r\n mb = train.iloc[train_ind[j:(j + bs)]]\r\n mb_size = mb.shape[0]\r\n loss = 0.0\r\n # Loop over each name in the mini-batch\r\n for i in range(mb_size):\r\n name = mb[\"dict\"].values[i]\r\n input = name2tensor(name)\r\n hidden = rnn.init_hidden()\r\n y = mb[\"y\"].values[i]\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n loss = loss - y * torch.log(output) - (1.0 - y) * torch.log(1.0 - output)\r\n\r\n loss = loss / mb_size\r\n opt.zero_grad()\r\n loss.backward()\r\n opt.step()\r\n\r\n losses.append(loss.item())\r\n if j // bs % 10 == 0:\r\n print(f\"epoch {k}, batch {j // bs}, 
loss = {loss.item()}\")\r\nt2 = time.time()\r\nprint(t2 - t1)\r\n\r\nplt.plot(losses)\r\n\r\n# Prediction on test set\r\nntest = test.shape[0]\r\ntrue_label = test[\"y\"].values\r\npred = np.zeros(ntest)\r\nrnn.eval()\r\nfor i in range(ntest):\r\n input = name2tensor(test[\"dict\"].values[i])\r\n hidden = rnn.init_hidden()\r\n with torch.no_grad():\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n pred[i] = output.item()\r\n if i % 100 == 0:\r\n print(f\"processed {i}\")\r\nloss = -np.mean(true_label * np.log(pred) + (1.0 - true_label) * np.log(1.0 - pred))\r\nprint(loss)\r\npred_label = (pred > 0.5).astype(int)\r\nprint(np.mean(pred_label == true_label))\r\n\r\n# Random cases\r\nnp.random.seed(123)\r\ntorch.random.manual_seed(123)\r\nind = np.random.choice(ntest, 10)\r\nypred = 1 * (pred[ind] > 0.5)\r\nprint(test.iloc[ind])\r\nprint(test[\"y\"].values[ind])\r\nprint(ypred)\r\n\r\n\r\n\r\nnames = [\"李\", \"李雪\", \"李雪峰\"]\r\nfor name in names:\r\n input = name2tensor(name)\r\n hidden = rnn.init_hidden()\r\n with torch.no_grad():\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n pred = output.item()\r\n print(f\"namae: {name}, P(female) = {pred}\")\r\n"
] | [
[
"numpy.log",
"pandas.read_csv",
"numpy.random.seed",
"numpy.random.choice",
"torch.cat",
"numpy.arange",
"torch.zeros",
"torch.random.manual_seed",
"numpy.random.shuffle",
"matplotlib.pyplot.plot",
"torch.nn.Linear",
"numpy.mean",
"torch.no_grad",
"torch.log",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Jerry2001Qu/pennylane-qiskit | [
"7ba24ac6ab695d83508cd0f5064f7dfb1670a79b"
] | [
"tests/test_integration.py"
] | [
"import sys\n\nimport numpy as np\nimport pennylane as qml\nimport pytest\nimport qiskit\n\nfrom pennylane_qiskit import AerDevice, BasicAerDevice\n\nfrom conftest import state_backends\n\npldevices = [(\"qiskit.aer\", qiskit.Aer), (\"qiskit.basicaer\", qiskit.BasicAer)]\n\n\nclass TestDeviceIntegration:\n \"\"\"Test the devices work correctly from the PennyLane frontend.\"\"\"\n\n @pytest.mark.parametrize(\"d\", pldevices)\n def test_load_device(self, d, backend):\n \"\"\"Test that the qiskit device loads correctly\"\"\"\n dev = qml.device(d[0], wires=2, backend=backend, shots=1024)\n assert dev.num_wires == 2\n assert dev.shots == 1024\n assert dev.short_name == d[0]\n assert dev.provider == d[1]\n\n def test_incorrect_backend(self):\n \"\"\"Test that exception is raised if name is incorrect\"\"\"\n with pytest.raises(ValueError, match=\"Backend 'none' does not exist\"):\n qml.device(\"qiskit.aer\", wires=2, backend=\"none\")\n\n def test_incorrect_backend_wires(self):\n \"\"\"Test that exception is raised if number of wires is too large\"\"\"\n with pytest.raises(ValueError, match=r\"Backend 'statevector\\_simulator' supports maximum\"):\n qml.device(\"qiskit.aer\", wires=100, backend=\"statevector_simulator\")\n\n def test_args(self):\n \"\"\"Test that the device requires correct arguments\"\"\"\n with pytest.raises(TypeError, match=\"missing 1 required positional argument\"):\n qml.device(\"qiskit.aer\")\n\n with pytest.raises(qml.DeviceError, match=\"specified number of shots needs to be at least 1\"):\n qml.device(\"qiskit.aer\", backend=\"qasm_simulator\", wires=1, shots=0)\n\n @pytest.mark.parametrize(\"d\", pldevices)\n @pytest.mark.parametrize(\"analytic\", [True, False])\n @pytest.mark.parametrize(\"shots\", [8192])\n def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):\n \"\"\"Test that devices provide correct result for a simple circuit\"\"\"\n if backend not in state_backends and analytic:\n pytest.skip(\"Hardware simulators do not support analytic mode\")\n\n dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)\n\n a = 0.543\n b = 0.123\n c = 0.987\n\n @qml.qnode(dev)\n def circuit(x, y, z):\n \"\"\"Reference QNode\"\"\"\n qml.BasisState(np.array([1]), wires=0)\n qml.Hadamard(wires=0)\n qml.Rot(x, y, z, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)\n\n @pytest.mark.parametrize(\"d\", pldevices)\n @pytest.mark.parametrize(\"analytic\", [False])\n @pytest.mark.parametrize(\"shots\", [8192])\n def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):\n \"\"\"Integration test for the Basisstate and Rot operations for when analytic\n is False\"\"\"\n dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)\n\n a = 0\n b = 0\n c = np.pi\n expected = 1\n\n @qml.qnode(dev)\n def circuit(x, y, z):\n \"\"\"Reference QNode\"\"\"\n qml.BasisState(np.array([0]), wires=0)\n qml.Rot(x, y, z, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(circuit(a, b, c), expected, **tol)\n\n def test_gradient_for_tensor_product(self):\n \"\"\"Test that the gradient of a circuit containing a tensor product is\n computed without any errors.\"\"\"\n n_qubits = 2\n depth = 2\n\n def ansatz(weights):\n weights = weights.reshape(depth, n_qubits)\n qml.RX(weights[0][0], wires=[0])\n qml.RZ(weights[0][1], wires=[0])\n qml.RX(weights[1][0], wires=[0])\n qml.RZ(weights[1][1], wires=[0])\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n dev_qsk = 
qml.device(\n \"qiskit.aer\",\n wires=n_qubits,\n shots=1000,\n backend=\"qasm_simulator\",\n )\n\n weights = np.random.random((depth, n_qubits)).flatten()\n\n # Want to get expectation value and gradient\n exp_sampled = qml.QNode(ansatz, dev_qsk, diff_method=\"parameter-shift\")\n grad_shift = qml.grad(exp_sampled, argnum=0)\n exp_sampled(weights)\n grad_shift(weights)\n\nclass TestKeywordArguments:\n \"\"\"Test keyword argument logic is correct\"\"\"\n\n @pytest.mark.parametrize(\"d\", pldevices)\n def test_compile_backend(self, d):\n \"\"\"Test that the compile backend argument is properly\n extracted\"\"\"\n dev = qml.device(d[0], wires=2, compile_backend=\"test value\")\n assert dev.compile_backend == \"test value\"\n\n def test_noise_model(self):\n \"\"\"Test that the noise model argument is properly\n extracted if the backend supports it\"\"\"\n dev = qml.device(\"qiskit.aer\", wires=2, noise_model=\"test value\")\n assert dev.noise_model == \"test value\"\n\n def test_invalid_noise_model(self):\n \"\"\"Test that the noise model argument causes an exception to be raised\n if the backend does not support it\"\"\"\n with pytest.raises(ValueError, match=\"does not support noisy simulations\"):\n dev = qml.device(\"qiskit.basicaer\", wires=2, noise_model=\"test value\")\n\n def test_overflow_kwargs(self):\n \"\"\"Test all overflow kwargs are extracted for the AerDevice\"\"\"\n dev = qml.device('qiskit.aer', wires=2, k1=\"v1\", k2=\"v2\")\n assert dev.run_args[\"k1\"] == \"v1\"\n assert dev.run_args[\"k2\"] == \"v2\"\n\n\nclass TestLoadIntegration:\n \"\"\"Integration tests for the PennyLane load function. This test ensures that the PennyLane-Qiskit\n specific load functions integrate properly with the PennyLane-Qiskit plugin.\"\"\"\n\n hadamard_qasm = 'OPENQASM 2.0;' \\\n 'include \"qelib1.inc\";' \\\n 'qreg q[1];' \\\n 'h q[0];'\n\n def test_load_qiskit_circuit(self):\n \"\"\"Test that the default load function works correctly.\"\"\"\n theta = qiskit.circuit.Parameter('θ')\n\n qc = qiskit.QuantumCircuit(2)\n qc.rx(theta, 0)\n\n my_template = qml.load(qc, format='qiskit')\n\n dev = qml.device('default.qubit', wires=2)\n\n angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])\n\n @qml.qnode(dev)\n def loaded_quantum_circuit(angle):\n my_template({theta: angle})\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit(angle):\n qml.RX(angle, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n for x in angles:\n assert np.allclose(loaded_quantum_circuit(x), quantum_circuit(x))\n\n def test_load_from_qasm_string(self):\n \"\"\"Test that quantum circuits can be loaded from a qasm string.\"\"\"\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def loaded_quantum_circuit():\n qml.from_qasm(TestLoadIntegration.hadamard_qasm)(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(loaded_quantum_circuit(), quantum_circuit())\n\n @pytest.mark.skipif(sys.version_info < (3, 6), reason=\"tmpdir fixture requires Python >=3.6\")\n def test_load_qasm_from_file(self, tmpdir):\n \"\"\"Test that quantum circuits can be loaded from a qasm file.\"\"\"\n apply_hadamard = tmpdir.join(\"hadamard.qasm\")\n\n with open(apply_hadamard, \"w\") as f:\n f.write(TestLoadIntegration.hadamard_qasm)\n\n hadamard = qml.from_qasm_file(apply_hadamard)\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def loaded_quantum_circuit():\n 
hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(loaded_quantum_circuit(), quantum_circuit())\n\n\nclass TestPLOperations:\n \"\"\"Integration tests for checking certain PennyLane specific operations.\"\"\"\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_rotation(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the QubitStateVector and Rot operations are decomposed using a\n Qiskit device with statevector backend\"\"\"\n\n dev = state_vector_device(1)\n\n if dev.backend_name == \"unitary_simulator\":\n pytest.skip(\"Test only runs for backends that are not the unitary simulator.\")\n\n state = init_state(1)\n\n a = 0.542\n b = 1.3432\n c = -0.654\n\n I = np.eye(2)\n Y = np.array([[0, -1j], [1j, 0]]) #: Pauli-Y matrix\n Z = np.array([[1, 0], [0, -1]]) #: Pauli-Z matrix\n\n def ry(theta):\n return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y\n\n def rz(theta):\n return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z\n\n @qml.qnode(dev)\n def qubitstatevector_and_rot():\n qml.QubitStateVector(state, wires=[0])\n qml.Rot(a, b, c, wires=[0])\n return qml.expval(qml.Identity(0))\n\n qubitstatevector_and_rot()\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(rz(c) @ ry(b) @ rz(a) @ state) ** 2, **tol)\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_basisstate(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the Basisstate is decomposed using a Qiskit device with\n statevector backend\"\"\"\n\n dev = state_vector_device(2)\n state = np.array([1, 0])\n\n @qml.qnode(dev)\n def basisstate():\n qml.BasisState(state, wires=[0, 1])\n return qml.expval(qml.Identity(0))\n\n basisstate()\n\n expected_state = np.zeros(2**dev.num_wires)\n expected_state[2] = 1\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_basisstate_init_all_zero_states(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the Basisstate that receives the all zero state is decomposed using\n a Qiskit device with statevector backend\"\"\"\n\n dev = state_vector_device(4)\n state = np.array([0, 0, 0, 0])\n\n @qml.qnode(dev)\n def basisstate():\n qml.BasisState(state, wires=[0, 1, 2, 3])\n return qml.expval(qml.Identity(0))\n\n basisstate()\n\n expected_state = np.zeros(2**dev.num_wires)\n expected_state[0] = 1\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)\n\n\nclass TestInverses:\n \"\"\"Integration tests checking that the inverse of the operations are applied.\"\"\"\n\n def test_inverse_of_operation(self):\n \"\"\"Test that the inverse of operations works as expected\n by comparing a simple circuit with default.qubit.\"\"\"\n dev = qml.device('default.qubit', wires=2)\n\n dev2 = qml.device('qiskit.aer', backend='statevector_simulator', shots=5, wires=2, analytic=True)\n\n angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])\n\n @qml.qnode(dev)\n def circuit_with_inverses(angle):\n qml.Hadamard(0).inv()\n qml.RX(angle, wires=0).inv()\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev2)\n def circuit_with_inverses_default_qubit(angle):\n qml.Hadamard(0).inv()\n qml.RX(angle, 
wires=0).inv()\n return qml.expval(qml.PauliZ(0))\n\n for x in angles:\n assert np.allclose(circuit_with_inverses(x), circuit_with_inverses_default_qubit(x))\n"
] | [
[
"numpy.random.random",
"numpy.abs",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
darkxaze/PINNs | [
"f344a907cf8b585e5f667465178c4442b907024d",
"f344a907cf8b585e5f667465178c4442b907024d"
] | [
"mycode/src/def_Net_u_B.py",
"mycode/run_NavierStokes.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 20 11:36:58 2020\r\n\r\n@author: nastavirs\r\n\"\"\"\r\nimport numpy as np\r\nimport tensorflow as tf\r\ndef net_u(self, x, t): \r\n u = self.neural_net(tf.concat([x,t],1), self.weights, self.biases)\r\n return u",
"\"\"\"\n@author: Maziar Raissi\n\"\"\"\n\nimport sys\n#sys.path.insert(0, '../../Utilities/')\nsys.path.append('F:/PINNs-master/PINN/src')\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom scipy.interpolate import griddata\nimport time\nfrom itertools import product, combinations\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom plotting import newfig, savefig\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.gridspec as gridspec\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n\nclass PhysicsInformedNN:\n from setup_PINN_ns import __init__ \n from initialize_PINN_ns import initialize_NN\n from xavier_init_ns import xavier_init\n from def_NN_ns import neural_net\n from def_Net_NS import net_NS \n from func_call_ns import callback\n from train_NN_ns import train\n from func_pred_ns import predict\n from axeq3d import axisEqual3D\n from plot_sol import plot_solution\n \nif __name__ == \"__main__\": \n \n N_train = 5000\n \n layers = [3, 20, 20, 20, 20, 20, 20, 20, 20, 2]\n \n # Load Data\n data = scipy.io.loadmat('F:/PINNs-master/PINN/Data/cylinder_nektar_wake.mat')\n \n U_star = data['U_star'] # N x 2 x T\n P_star = data['p_star'] # N x T\n t_star = data['t'] # T x 1\n X_star = data['X_star'] # N x 2\n \n N = X_star.shape[0]\n T = t_star.shape[0]\n \n # Rearrange Data \n XX = np.tile(X_star[:,0:1], (1,T)) # N x T\n YY = np.tile(X_star[:,1:2], (1,T)) # N x T\n TT = np.tile(t_star, (1,N)).T # N x T\n \n UU = U_star[:,0,:] # N x T\n VV = U_star[:,1,:] # N x T\n PP = P_star # N x T\n \n x = XX.flatten()[:,None] # NT x 1\n y = YY.flatten()[:,None] # NT x 1\n t = TT.flatten()[:,None] # NT x 1\n \n u = UU.flatten()[:,None] # NT x 1\n v = VV.flatten()[:,None] # NT x 1\n p = PP.flatten()[:,None] # NT x 1\n \n ######################################################################\n ######################## Noiseles Data ###############################\n ######################################################################\n # Training Data \n idx = np.random.choice(N*T, N_train, replace=False)\n x_train = x[idx,:]\n y_train = y[idx,:]\n t_train = t[idx,:]\n u_train = u[idx,:]\n v_train = v[idx,:]\n\n # Training\n model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers)\n model.train(200000)\n \n # Test Data\n snap = np.array([100])\n x_star = X_star[:,0:1]\n y_star = X_star[:,1:2]\n t_star = TT[:,snap]\n \n u_star = U_star[:,0,snap]\n v_star = U_star[:,1,snap]\n p_star = P_star[:,snap]\n \n # Prediction\n u_pred, v_pred, p_pred = model.predict(x_star, y_star, t_star)\n lambda_1_value = model.sess.run(model.lambda_1)\n lambda_2_value = model.sess.run(model.lambda_2)\n \n # Error\n error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2)\n error_v = np.linalg.norm(v_star-v_pred,2)/np.linalg.norm(v_star,2)\n error_p = np.linalg.norm(p_star-p_pred,2)/np.linalg.norm(p_star,2)\n\n error_lambda_1 = np.abs(lambda_1_value - 1.0)*100\n error_lambda_2 = np.abs(lambda_2_value - 0.01)/0.01 * 100\n \n print('Error u: %e' % (error_u)) \n print('Error v: %e' % (error_v)) \n print('Error p: %e' % (error_p)) \n print('Error l1: %.5f%%' % (error_lambda_1)) \n print('Error l2: %.5f%%' % (error_lambda_2)) \n \n # Plot Results\n plot_solution(X_star, u_pred, 1)\n plot_solution(X_star, v_pred, 2)\n plot_solution(X_star, p_pred, 3) \n plot_solution(X_star, p_star, 4)\n plot_solution(X_star, p_star - p_pred, 5)\n \n # Predict for plotting\n lb = X_star.min(0)\n 
ub = X_star.max(0)\n nn = 200\n x = np.linspace(lb[0], ub[0], nn)\n y = np.linspace(lb[1], ub[1], nn)\n X, Y = np.meshgrid(x,y)\n \n UU_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic')\n VV_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic')\n PP_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic')\n P_exact = griddata(X_star, p_star.flatten(), (X, Y), method='cubic')\n \n \n ######################################################################\n ########################### Noisy Data ###############################\n ######################################################################\n noise = 0.01 \n u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])\n v_train = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1]) \n\n # Training\n model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers)\n model.train(200000)\n \n lambda_1_value_noisy = model.sess.run(model.lambda_1)\n lambda_2_value_noisy = model.sess.run(model.lambda_2)\n \n error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)*100\n error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.01)/0.01 * 100\n \n print('Error l1: %.5f%%' % (error_lambda_1_noisy)) \n print('Error l2: %.5f%%' % (error_lambda_2_noisy)) \n\n \n \n ######################################################################\n ############################# Plotting ###############################\n ###################################################################### \n # Load Data\n data_vort = scipy.io.loadmat('../Data/cylinder_nektar_t0_vorticity.mat')\n \n x_vort = data_vort['x'] \n y_vort = data_vort['y'] \n w_vort = data_vort['w'] \n modes = np.asscalar(data_vort['modes'])\n nel = np.asscalar(data_vort['nel']) \n \n xx_vort = np.reshape(x_vort, (modes+1,modes+1,nel), order = 'F')\n yy_vort = np.reshape(y_vort, (modes+1,modes+1,nel), order = 'F')\n ww_vort = np.reshape(w_vort, (modes+1,modes+1,nel), order = 'F')\n \n box_lb = np.array([1.0, -2.0])\n box_ub = np.array([8.0, 2.0])\n \n fig, ax = newfig(1.0, 1.2)\n ax.axis('off')\n \n ####### Row 0: Vorticity ################## \n gs0 = gridspec.GridSpec(1, 2)\n gs0.update(top=1-0.06, bottom=1-2/4 + 0.12, left=0.0, right=1.0, wspace=0)\n ax = plt.subplot(gs0[:, :])\n \n for i in range(0, nel):\n h = ax.pcolormesh(xx_vort[:,:,i], yy_vort[:,:,i], ww_vort[:,:,i], cmap='seismic',shading='gouraud', vmin=-3, vmax=3) \n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n \n ax.plot([box_lb[0],box_lb[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1)\n ax.plot([box_ub[0],box_ub[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1)\n ax.plot([box_lb[0],box_ub[0]],[box_lb[1],box_lb[1]],'k',linewidth = 1)\n ax.plot([box_lb[0],box_ub[0]],[box_ub[1],box_ub[1]],'k',linewidth = 1)\n \n ax.set_aspect('equal', 'box')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n ax.set_title('Vorticity', fontsize = 10)\n \n \n ####### Row 1: Training data ##################\n ######## u(t,x,y) ################### \n gs1 = gridspec.GridSpec(1, 2)\n gs1.update(top=1-2/4, bottom=0.0, left=0.01, right=0.99, wspace=0)\n ax = plt.subplot(gs1[:, 0], projection='3d')\n ax.axis('off')\n\n r1 = [x_star.min(), x_star.max()]\n r2 = [data['t'].min(), data['t'].max()] \n r3 = [y_star.min(), y_star.max()]\n \n for s, e in combinations(np.array(list(product(r1,r2,r3))), 2):\n if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or 
np.sum(np.abs(s-e)) == r3[1]-r3[0]:\n ax.plot3D(*zip(s,e), color=\"k\", linewidth = 0.5) \n\n ax.scatter(x_train, t_train, y_train, s = 0.1)\n ax.contourf(X,UU_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8)\n \n ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$')\n ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$')\n ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$')\n ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$u(t,x,y)$') \n ax.set_xlim3d(r1)\n ax.set_ylim3d(r2)\n ax.set_zlim3d(r3)\n axisEqual3D(ax)\n \n ######## v(t,x,y) ################### \n ax = plt.subplot(gs1[:, 1], projection='3d')\n ax.axis('off')\n \n r1 = [x_star.min(), x_star.max()]\n r2 = [data['t'].min(), data['t'].max()] \n r3 = [y_star.min(), y_star.max()]\n \n for s, e in combinations(np.array(list(product(r1,r2,r3))), 2):\n if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]:\n ax.plot3D(*zip(s,e), color=\"k\", linewidth = 0.5) \n\n ax.scatter(x_train, t_train, y_train, s = 0.1)\n ax.contourf(X,VV_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8)\n \n ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$')\n ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$')\n ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$')\n ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$v(t,x,y)$') \n ax.set_xlim3d(r1)\n ax.set_ylim3d(r2)\n ax.set_zlim3d(r3)\n axisEqual3D(ax)\n \n # savefig('./figures/NavierStokes_data') \n\n \n fig, ax = newfig(1.015, 0.8)\n ax.axis('off')\n \n ######## Row 2: Pressure #######################\n ######## Predicted p(t,x,y) ########### \n gs2 = gridspec.GridSpec(1, 2)\n gs2.update(top=1, bottom=1-1/2, left=0.1, right=0.9, wspace=0.5)\n ax = plt.subplot(gs2[:, 0])\n h = ax.imshow(PP_star, interpolation='nearest', cmap='rainbow', \n extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], \n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n ax.set_aspect('equal', 'box')\n ax.set_title('Predicted pressure', fontsize = 10)\n \n ######## Exact p(t,x,y) ########### \n ax = plt.subplot(gs2[:, 1])\n h = ax.imshow(P_exact, interpolation='nearest', cmap='rainbow', \n extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], \n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n ax.set_aspect('equal', 'box')\n ax.set_title('Exact pressure', fontsize = 10)\n \n \n ######## Row 3: Table #######################\n gs3 = gridspec.GridSpec(1, 2)\n gs3.update(top=1-1/2, bottom=0.0, left=0.0, right=1.0, wspace=0)\n ax = plt.subplot(gs3[:, :])\n ax.axis('off')\n \n s = r'$\\begin{tabular}{|c|c|}';\n s = s + r' \\hline'\n s = s + r' Correct PDE & $\\begin{array}{c}'\n s = s + r' u_t + (u u_x + v u_y) = -p_x + 0.01 (u_{xx} + u_{yy})\\\\'\n s = s + r' v_t + (u v_x + v v_y) = -p_y + 0.01 (v_{xx} + v_{yy})'\n s = s + r' \\end{array}$ \\\\ '\n s = s + r' \\hline'\n s = s + r' Identified PDE (clean data) & $\\begin{array}{c}'\n s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value, lambda_2_value)\n s = s + r' \\\\'\n s = s + r' v_t + %.3f (u v_x + v 
v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value, lambda_2_value)\n s = s + r' \\end{array}$ \\\\ '\n s = s + r' \\hline'\n s = s + r' Identified PDE (1\\% noise) & $\\begin{array}{c}'\n s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy)\n s = s + r' \\\\'\n s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy)\n s = s + r' \\end{array}$ \\\\ '\n s = s + r' \\hline'\n s = s + r' \\end{tabular}$'\n \n ax.text(0.015,0.0,s)\n \n savefig('./figures/NavierStokes_prediction') \n\n"
] | [
[
"tensorflow.concat"
],
[
"numpy.asscalar",
"numpy.abs",
"numpy.random.seed",
"numpy.random.choice",
"numpy.linspace",
"numpy.meshgrid",
"numpy.reshape",
"numpy.tile",
"numpy.linalg.norm",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"matplotlib.gridspec.GridSpec",
"tensorflow.set_random_seed",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
jgasthaus/gluon-ts | [
"e14ad69058e58e1ce51c40551674318341781331"
] | [
"test/distribution/test_distribution_inference.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nTest that maximizing likelihood allows to correctly recover distribution parameters for all\ndistributions exposed to the user.\n\"\"\"\n# Standard library imports\nfrom typing import Iterable, List, Tuple\n\n# Third-party imports\nimport mxnet as mx\nimport numpy as np\nimport pytest\nfrom pydantic import PositiveFloat, PositiveInt\n\n# First-party imports\nfrom gluonts.model.common import NPArrayLike\nfrom gluonts.distribution.box_cox_tranform import (\n InverseBoxCoxTransform,\n InverseBoxCoxTransformOutput,\n)\nfrom gluonts.distribution import (\n DistributionOutput,\n StudentT,\n StudentTOutput,\n MultivariateGaussian,\n MultivariateGaussianOutput,\n LowrankMultivariateGaussian,\n LowrankMultivariateGaussianOutput,\n NegativeBinomial,\n NegativeBinomialOutput,\n Laplace,\n LaplaceOutput,\n Gaussian,\n GaussianOutput,\n PiecewiseLinear,\n PiecewiseLinearOutput,\n Binned,\n BinnedOutput,\n)\nfrom gluonts.distribution.transformed_distribution_output import (\n TransformedDistributionOutput,\n)\nfrom gluonts.distribution.transformed_distribution import (\n TransformedDistribution,\n)\n\n\nNUM_SAMPLES = 2000\nBATCH_SIZE = 32\nTOL = 0.3\nSTART_TOL_MULTIPLE = 1\n\nnp.random.seed(1)\nmx.random.seed(1)\n\n\ndef inv_softplus(y: NPArrayLike) -> np.ndarray:\n # y = log(1 + exp(x)) ==> x = log(exp(y) - 1)\n return np.log(np.exp(y) - 1)\n\n\ndef maximum_likelihood_estimate_sgd(\n distr_output: DistributionOutput,\n samples: mx.ndarray,\n init_biases: List[mx.ndarray.NDArray] = None,\n num_epochs: PositiveInt = PositiveInt(5),\n learning_rate: PositiveFloat = PositiveFloat(1e-2),\n hybridize: bool = True,\n) -> Iterable[float]:\n model_ctx = mx.cpu()\n\n arg_proj = distr_output.get_args_proj()\n arg_proj.initialize()\n\n if hybridize:\n arg_proj.hybridize()\n\n if init_biases is not None:\n for param, bias in zip(arg_proj.proj, init_biases):\n param.params[param.prefix + \"bias\"].initialize(\n mx.initializer.Constant(bias), force_reinit=True\n )\n\n trainer = mx.gluon.Trainer(\n arg_proj.collect_params(),\n \"sgd\",\n {\"learning_rate\": learning_rate, \"clip_gradient\": 10.0},\n )\n\n # The input data to our model is one-dimensional\n dummy_data = mx.nd.array(np.ones((len(samples), 1)))\n\n train_data = mx.gluon.data.DataLoader(\n mx.gluon.data.ArrayDataset(dummy_data, samples),\n batch_size=BATCH_SIZE,\n shuffle=True,\n )\n\n for e in range(num_epochs):\n cumulative_loss = 0\n num_batches = 0\n # inner loop\n for i, (data, sample_label) in enumerate(train_data):\n data = data.as_in_context(model_ctx)\n sample_label = sample_label.as_in_context(model_ctx)\n with mx.autograd.record():\n distr_args = arg_proj(data)\n distr = distr_output.distribution(distr_args)\n loss = distr.loss(sample_label)\n if not hybridize:\n assert loss.shape == distr.batch_shape\n loss.backward()\n trainer.step(BATCH_SIZE)\n num_batches += 1\n\n cumulative_loss += mx.nd.mean(loss).asscalar()\n print(\"Epoch %s, loss: %s\" % (e, 
cumulative_loss / num_batches))\n\n return [\n param[0].asnumpy() for param in arg_proj(mx.nd.array(np.ones((1, 1))))\n ]\n\n\[email protected](\"mu, sigma, nu\", [(2.3, 0.7, 6.0)])\[email protected](\"hybridize\", [True, False])\ndef test_studentT_likelihood(\n mu: float, sigma: float, nu: float, hybridize: bool\n) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n nus = mx.nd.zeros((NUM_SAMPLES,)) + nu\n\n distr = StudentT(mus, sigmas, nus)\n samples = distr.sample()\n\n # nu takes very long to learn, so we initialize it at the true value.\n # transform used is softplus(x) + 2\n init_bias = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n inv_softplus(nu - 2),\n ]\n\n mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(\n StudentTOutput(),\n samples,\n init_biases=init_bias,\n hybridize=hybridize,\n num_epochs=PositiveInt(10),\n learning_rate=PositiveFloat(1e-2),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}\"\n assert (\n np.abs(nu_hat - nu) < TOL * nu\n ), \"nu0 did not match: nu0 = %s, nu_hat = %s\" % (nu, nu_hat)\n\n\[email protected](\"mu, sigma\", [(1.0, 0.1)])\[email protected](\"hybridize\", [True, False])\ndef test_gaussian_likelihood(mu: float, sigma: float, hybridize: bool):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n\n distr = Gaussian(mus, sigmas)\n samples = distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n ]\n\n mu_hat, sigma_hat = maximum_likelihood_estimate_sgd(\n GaussianOutput(),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.001),\n num_epochs=PositiveInt(5),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"alpha did not match: sigma = {sigma}, sigma_hat = {sigma_hat}\"\n\n\[email protected](10)\ndef test_multivariate_gaussian() -> None:\n num_samples = 2000\n dim = 2\n\n mu = np.arange(0, dim) / float(dim)\n\n L_diag = np.ones((dim,))\n L_low = 0.1 * np.ones((dim, dim)) * np.tri(dim, k=-1)\n L = np.diag(L_diag) + L_low\n Sigma = L.dot(L.transpose())\n\n distr = MultivariateGaussian(mu=mx.nd.array(mu), L=mx.nd.array(L))\n\n samples = distr.sample(num_samples)\n\n mu_hat, L_hat = maximum_likelihood_estimate_sgd(\n MultivariateGaussianOutput(dim=dim),\n samples,\n init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case\n hybridize=False,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(10),\n )\n\n distr = MultivariateGaussian(\n mu=mx.nd.array([mu_hat]), L=mx.nd.array([L_hat])\n )\n\n Sigma_hat = distr.variance[0].asnumpy()\n\n assert np.allclose(\n mu_hat, mu, atol=0.1, rtol=0.1\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert np.allclose(\n Sigma_hat, Sigma, atol=0.1, rtol=0.1\n ), f\"Sigma did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}\"\n\n\[email protected](10)\ndef 
test_lowrank_multivariate_gaussian() -> None:\n num_samples = 2000\n dim = 2\n rank = 1\n\n mu = np.arange(0, dim) / float(dim)\n D = np.eye(dim) * (np.arange(dim) / dim + 0.5)\n W = np.sqrt(np.ones((dim, rank)) * 0.2)\n Sigma = D + W.dot(W.transpose())\n\n distr = LowrankMultivariateGaussian(\n mu=mx.nd.array([mu]),\n D=mx.nd.array([np.diag(D)]),\n W=mx.nd.array([W]),\n dim=dim,\n rank=rank,\n )\n\n assert np.allclose(\n distr.variance[0].asnumpy(), Sigma, atol=0.1, rtol=0.1\n ), f\"did not match: sigma = {Sigma}, sigma_hat = {distr.variance[0]}\"\n\n samples = distr.sample(num_samples).squeeze().asnumpy()\n\n mu_hat, D_hat, W_hat = maximum_likelihood_estimate_sgd(\n LowrankMultivariateGaussianOutput(dim=dim, rank=rank),\n samples,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(10),\n init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case\n hybridize=False,\n )\n\n distr = LowrankMultivariateGaussian(\n dim=dim,\n rank=rank,\n mu=mx.nd.array([mu_hat]),\n D=mx.nd.array([D_hat]),\n W=mx.nd.array([W_hat]),\n )\n\n Sigma_hat = distr.variance.asnumpy()\n\n assert np.allclose(\n mu_hat, mu, atol=0.2, rtol=0.1\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n assert np.allclose(\n Sigma_hat, Sigma, atol=0.1, rtol=0.1\n ), f\"alpha did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}\"\n\n\[email protected](\"mu\", [6.0])\[email protected](\"hybridize\", [True, False])\ndef test_deterministic_l2(mu: float, hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters.\n This tests uses the Gaussian distribution with fixed variance and sample mean.\n This essentially reduces to determistic L2.\n \"\"\"\n # generate samples\n mu = mu\n mus = mx.nd.zeros(NUM_SAMPLES) + mu\n\n deterministic_distr = Gaussian(mu=mus, sigma=0.1 * mx.nd.ones_like(mus))\n samples = deterministic_distr.sample()\n\n class GaussianFixedVarianceOutput(GaussianOutput):\n @classmethod\n def domain_map(cls, F, mu, sigma):\n sigma = 0.1 * F.ones_like(sigma)\n return mu.squeeze(axis=-1), sigma.squeeze(axis=-1)\n\n mu_hat, _ = maximum_likelihood_estimate_sgd(\n GaussianFixedVarianceOutput(),\n samples,\n init_biases=[3 * mu, 0.1],\n hybridize=hybridize,\n num_epochs=PositiveInt(1),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n\[email protected](\"mu\", [1.0])\[email protected](\"hybridize\", [True, False])\ndef test_deterministic_l1(mu: float, hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters.\n This tests uses the Laplace distribution with fixed variance and sample mean.\n This essentially reduces to determistic L1.\n \"\"\"\n # generate samples\n mu = mu\n mus = mx.nd.zeros(NUM_SAMPLES) + mu\n\n class LaplaceFixedVarianceOutput(LaplaceOutput):\n @classmethod\n def domain_map(cls, F, mu, b):\n b = 0.1 * F.ones_like(b)\n return mu.squeeze(axis=-1), b.squeeze(axis=-1)\n\n deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))\n samples = deterministic_distr.sample()\n\n mu_hat, _ = maximum_likelihood_estimate_sgd(\n LaplaceFixedVarianceOutput(),\n samples,\n init_biases=[3 * mu, 0.1],\n learning_rate=PositiveFloat(1e-3),\n hybridize=hybridize,\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n\[email protected](\"mu_alpha\", [(2.5, 0.7)])\[email protected](\"hybridize\", [True, False])\ndef test_neg_binomial(mu_alpha: 
Tuple[float, float], hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n # test instance\n mu, alpha = mu_alpha\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n alphas = mx.nd.zeros((NUM_SAMPLES,)) + alpha\n\n neg_bin_distr = NegativeBinomial(mu=mus, alpha=alphas)\n samples = neg_bin_distr.sample()\n\n init_biases = [\n inv_softplus(mu - START_TOL_MULTIPLE * TOL * mu),\n inv_softplus(alpha + START_TOL_MULTIPLE * TOL * alpha),\n ]\n\n mu_hat, alpha_hat = maximum_likelihood_estimate_sgd(\n NegativeBinomialOutput(),\n samples,\n hybridize=hybridize,\n init_biases=init_biases,\n num_epochs=PositiveInt(15),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(alpha_hat - alpha) < TOL * alpha\n ), f\"alpha did not match: alpha = {alpha}, alpha_hat = {alpha_hat}\"\n\n\[email protected](10)\[email protected](\"mu_b\", [(3.3, 0.7)])\[email protected](\"hybridize\", [True, False])\ndef test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n # test instance\n mu, b = mu_b\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n bs = mx.nd.zeros((NUM_SAMPLES,)) + b\n\n laplace_distr = Laplace(mu=mus, b=bs)\n samples = laplace_distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(b + START_TOL_MULTIPLE * TOL * b),\n ]\n\n mu_hat, b_hat = maximum_likelihood_estimate_sgd(\n LaplaceOutput(), samples, hybridize=hybridize, init_biases=init_biases\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(b_hat - b) < TOL * b\n ), f\"b did not match: b = {b}, b_hat = {b_hat}\"\n\n\[email protected](\n \"gamma, slopes, knot_spacings\",\n [(2.0, np.array([3, 1, 3, 4]), np.array([0.3, 0.2, 0.35, 0.15]))],\n)\[email protected](\"hybridize\", [True, False])\ndef test_piecewise_linear(\n gamma: float,\n slopes: np.ndarray,\n knot_spacings: np.ndarray,\n hybridize: bool,\n) -> None:\n \"\"\"\n Test to check that minimizing the CRPS recovers the quantile function\n \"\"\"\n num_samples = 500 # use a few samples for timeout failure\n\n gammas = mx.nd.zeros((num_samples,)) + gamma\n slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)\n knot_spacingss = mx.nd.zeros(\n (num_samples, len(knot_spacings))\n ) + mx.nd.array(knot_spacings)\n\n pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)\n\n samples = pwl_sqf.sample()\n\n # Parameter initialization\n gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma\n slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes\n knot_spacings_init = knot_spacings\n # We perturb knot spacings such that even after the perturbation they sum to 1.\n mid = len(slopes) // 2\n knot_spacings_init[:mid] = (\n knot_spacings[:mid] - START_TOL_MULTIPLE * TOL * knot_spacings[:mid]\n )\n knot_spacings_init[mid:] = (\n knot_spacings[mid:] + START_TOL_MULTIPLE * TOL * knot_spacings[mid:]\n )\n\n init_biases = [gamma_init, slopes_init, knot_spacings_init]\n\n # check if it returns original parameters of mapped\n gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(\n PiecewiseLinearOutput(len(slopes)),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(20),\n )\n\n # Since the problem is highly non-convex we may not be 
able to recover the exact parameters\n # Here we check if the estimated parameters yield similar function evaluations at different quantile levels.\n quantile_levels = np.arange(0.1, 1.0, 0.1)\n\n # create a LinearSplines instance with the estimated parameters to have access to .quantile\n pwl_sqf_hat = PiecewiseLinear(\n mx.nd.array(gamma_hat),\n mx.nd.array(slopes_hat).expand_dims(axis=0),\n mx.nd.array(knot_spacings_hat).expand_dims(axis=0),\n )\n\n # Compute quantiles with the estimated parameters\n quantiles_hat = np.squeeze(\n pwl_sqf_hat.quantile(\n mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1\n ).asnumpy()\n )\n\n # Compute quantiles with the original parameters\n # Since params is replicated across samples we take only the first entry\n quantiles = np.squeeze(\n pwl_sqf.quantile(\n mx.nd.array(quantile_levels)\n .expand_dims(axis=0)\n .repeat(axis=0, repeats=num_samples),\n axis=1,\n ).asnumpy()[0, :]\n )\n\n for ix, (quantile, quantile_hat) in enumerate(\n zip(quantiles, quantiles_hat)\n ):\n assert np.abs(quantile_hat - quantile) < TOL * quantile, (\n f\"quantile level {quantile_levels[ix]} didn't match:\"\n f\" \"\n f\"q = {quantile}, q_hat = {quantile_hat}\"\n )\n\n\[email protected](\"this test fails when run locally\")\[email protected](\"lam_1, lam_2\", [(0.1, 0.01)])\[email protected](\"mu, sigma\", [(-1.5, 0.5)])\[email protected](\"hybridize\", [True])\ndef test_box_cox_tranform(\n lam_1: float, lam_2: float, mu: float, sigma: float, hybridize: bool\n):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n lamdas_1 = mx.nd.zeros((NUM_SAMPLES,)) + lam_1\n lamdas_2 = mx.nd.zeros((NUM_SAMPLES,)) + lam_2\n transform = InverseBoxCoxTransform(lamdas_1, lamdas_2)\n\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n gausian_distr = Gaussian(mus, sigmas)\n\n # Here the base distribution is Guassian which is transformed to\n # non-Gaussian via the inverse Box-Cox transform.\n # Sampling from `trans_distr` gives non-Gaussian samples\n trans_distr = TransformedDistribution(gausian_distr, transform)\n\n # Given the non-Gaussian samples find the true parameters\n # of the Box-Cox transformation as well as the underlying Gaussian distribution.\n samples = trans_distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n lam_1 - START_TOL_MULTIPLE * TOL * lam_1,\n inv_softplus(lam_2 - START_TOL_MULTIPLE * TOL * lam_2),\n ]\n\n mu_hat, sigma_hat, lam_1_hat, lam_2_hat = maximum_likelihood_estimate_sgd(\n TransformedDistributionOutput(\n GaussianOutput(),\n InverseBoxCoxTransformOutput(lb_obs=lam_2, fix_lambda_2=True),\n ),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(18),\n )\n\n assert (\n np.abs(lam_1_hat - lam_1) < TOL * lam_1\n ), f\"lam_1 did not match: lam_1 = {lam_1}, lam_1_hat = {lam_1_hat}\"\n # assert (\n # np.abs(lam_2_hat - lam_2) < TOL * lam_2\n # ), f\"lam_2 did not match: lam_2 = {lam_2}, lam_2_hat = {lam_2_hat}\"\n\n assert np.abs(mu_hat - mu) < TOL * np.abs(\n mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}\"\n\n\[email protected](\"num_bins\", [6])\[email protected](\n \"bin_probabilites\", [np.array([0.3, 0.1, 0.05, 0.2, 0.1, 0.25])]\n)\[email protected](\"hybridize\", [True, 
False])\ndef test_binned_likelihood(\n num_bins: float, bin_probabilites: np.ndarray, hybridize: bool\n):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n bin_prob = mx.nd.array(bin_probabilites)\n bin_center = mx.nd.array(np.logspace(-1, 1, num_bins))\n\n # generate samples\n bin_probs = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_prob\n bin_centers = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_center\n\n distr = Binned(bin_probs, bin_centers)\n samples = distr.sample()\n\n # add some jitter to the uniform initialization and normalize\n bin_prob_init = mx.nd.random_uniform(1 - TOL, 1 + TOL, num_bins) * bin_prob\n bin_prob_init = bin_prob_init / bin_prob_init.sum()\n\n init_biases = [bin_prob_init]\n\n bin_prob_hat, = maximum_likelihood_estimate_sgd(\n BinnedOutput(list(bin_center.asnumpy())),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.05),\n num_epochs=PositiveInt(25),\n )\n\n assert all(\n mx.nd.abs(mx.nd.array(bin_prob_hat) - bin_prob) < TOL * bin_prob\n ), f\"bin_prob did not match: bin_prob = {bin_prob}, bin_prob_hat = {bin_prob_hat}\"\n"
] | [
[
"numpy.diag",
"numpy.abs",
"numpy.allclose",
"numpy.random.seed",
"numpy.logspace",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.tri",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nicoperetti/metadata-sadosky-santander | [
"a0d686ba8dfa6c3929727248fc52d802d74f4c45"
] | [
"pre-process.py"
] | [
"import click\nimport pandas as pd\n# Due textacy problems\ntry:\n from textacy.preprocess import preprocess_text\nexcept Exception:\n from textacy.preprocess import preprocess_text\n\n\ndef preprocess_f(text, fix_unicode=True, lowercase=True,\n no_urls=True, no_emails=True,\n no_phone_numbers=True,\n no_numbers=True, no_currency_symbols=True,\n no_punct=True, no_accents=True):\n \"\"\"Preprocess text.\"\"\"\n clean_text = preprocess_text(text, fix_unicode=fix_unicode,\n lowercase=lowercase,\n no_urls=no_urls, no_emails=no_emails,\n no_phone_numbers=no_phone_numbers,\n no_numbers=no_numbers,\n no_currency_symbols=no_currency_symbols,\n no_punct=no_punct,\n no_accents=no_accents)\n return clean_text\n\n\[email protected]()\[email protected]('--input_path', type=click.STRING, help='Path to input file')\[email protected]('--output_path', type=click.STRING, help='Path to input file')\[email protected]('--set_', type=click.Choice(['train', 'test']), help=\"set\")\ndef preprocess(input_path, output_path, set_):\n \"\"\"pre-process script\n\n :param input_path: path to input file\n :type input_path: str\n :param output_path: path to output file\n :type output_path: str\n :param set_: kind of data\n :type set_: str\n \"\"\"\n if set_ == \"train\":\n df = pd.read_csv(input_path, sep='|')\n else:\n df = pd.read_csv(input_path)\n\n df[\"clean_txt\"] = df[\"Pregunta\"].apply(lambda x: preprocess_f(x))\n\n df.to_csv(output_path, index=False)\n\n\nif __name__ == \"__main__\":\n preprocess()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
GRAVITYLab/edda | [
"2acd00373db1003922db9f5959644e7506de5726"
] | [
"pyEdda/test_uni_gaussian.py"
] | [
"#make print in python 2, 3 compatible\nfrom __future__ import print_function \nimport numpy as np\nimport pyedda as edda\n\n\n#Univariate Gaussian\nprint(\"//////////Univariate Gaussian///////\")\ndummy_data = np.random.rand(100)\ngaussian = edda.Gaussian(100, 20)\nprint(\"gaussian.getMean():\", gaussian.getMean())\nprint(\"gaussian.getVar():\", gaussian.getVar())\nprint(\"gaussian.getPdf(105):\", gaussian.getPdf(105))\nprint(\"gaussian.getSample():\", gaussian.getSample())\nprint(\"gaussian.getCdf(105):\", gaussian.getCdf(105))\nprint(\"gaussian.getCdfPrecise():\", gaussian.getCdfPrecise(105))\nprint(\"Output gaussian:\")\ngaussian.output()\nprint()\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1130310223/Static-Dynamic-Attention | [
"1da223b06ae41f14575960e247fb13506ed8a124",
"1da223b06ae41f14575960e247fb13506ed8a124"
] | [
"hybrid/opensubtitle/hyb/bid/type1/len2/hybrid_len2_t1_predict.py",
"hybrid/ubuntu/hyb/bid/type2/len6/hybrid_len6_t2_train.py"
] | [
" #-*- coding: utf-8 -*-\r\nimport sys\r\nimport os\r\nimport random\r\nimport re\r\nimport time\r\nimport torch \r\nfrom torch.autograd import Variable\r\nfrom torch import optim\r\nimport torch.nn as nn\r\n#sys.path.append('../')\r\nfrom hybrid_bid_t1_model import Seq2Seq\r\nfrom hybrid_data_utils import *\r\n\r\nsub = '-'*20\r\ndef init_command_line(argv):\r\n\tfrom argparse import ArgumentParser\r\n\tusage = \"seq2seq\"\r\n\tdescription = ArgumentParser(usage)\r\n\tdescription.add_argument(\"--w2v_path\", type=str, default=\"/users3/yfwang/data/w2v/opensubtitle/\")\r\n\tdescription.add_argument(\"--corpus_path\", type=str, default=\"/users3/yfwang/data/corpus/opensubtitle/\")\r\n\tdescription.add_argument(\"--w2v\", type=str, default=\"train_all_200e.w2v\")\r\n\tdescription.add_argument(\"--test_file\", type=str, default=\"test_sessions.txt\")\r\n\t\r\n\tdescription.add_argument(\"--max_context_size\", type=int, default=2)\r\n\tdescription.add_argument(\"--batch_size\", type=int, default=64)\r\n\tdescription.add_argument(\"--enc_hidden_size\", type=int, default=512)\r\n\tdescription.add_argument(\"--max_senten_len\", type=int, default=15)\r\n\r\n\tdescription.add_argument(\"--dropout\", type=float, default=0.5)\r\n\r\n\tdescription.add_argument(\"--teach_forcing\", type=int, default=1)\r\n\tdescription.add_argument(\"--print_every\", type=int, default=100, help=\"print every batches when training\")\r\n\tdescription.add_argument(\"--weights\", type=str, default=None)\r\n\treturn description.parse_args(argv)\r\n\r\nopts = init_command_line(sys.argv[1:])\r\nprint (\"Configure:\")\r\nprint (\" w2v:\",os.path.join(opts.w2v_path,opts.w2v))\r\nprint (\" test_file:\",os.path.join(opts.corpus_path,opts.test_file))\r\n\r\nprint (\" max_context_size:\",opts.max_context_size)\r\nprint (\" batch_size:\",opts.batch_size)\r\nprint (\" enc_hidden_size:\",opts.enc_hidden_size)\r\nprint (\" max_senten_len:\",opts.max_senten_len)\r\n\r\nprint (\" dropout:\",opts.dropout)\r\n\r\nprint (\" teach_forcing:\",opts.teach_forcing)\r\nprint (\" print_every:\",opts.print_every)\r\nprint (\" weights:\",opts.weights)\r\nprint (\"\")\r\n\r\ndef readingTestCorpus(test_file_path):\r\n\tprint (\"reading...\")\r\n\ttest_file = open(test_file_path,'r')\r\n\tlist_pairs = []\r\n\ttmp_pair = []\r\n\tfor line in test_file:\r\n\t\tline = line.strip('\\n')\r\n\t\tif line == sub:\r\n\t\t\tlist_pairs.append(tmp_pair)\r\n\t\t\ttmp_pair = []\r\n\t\telse:\r\n\t\t\ttmp_pair.append(line)\r\n\ttest_file.close()\r\n\r\n\ttest_contexts = []\r\n\ttest_replys = []\r\n\tmax_con_size = 0\r\n\tmin_con_size = 10000\r\n\tfor pair in list_pairs:\r\n\t\tif len(pair) >= 3:\r\n\t\t\ttest_contexts.append(pair[0:-1])\r\n\t\t\ttest_replys.append(pair[-1])\r\n\t\t\tmax_con_size = max(len(pair[0:-1]),max_con_size)\r\n\t\t\tmin_con_size = min(len(pair[0:-1]),min_con_size)\r\n\t\telse:\r\n\t\t\tpass\r\n\tprint (max_con_size)\r\n\tprint (min_con_size)\r\n\treturn test_contexts,test_replys\r\n\r\ndef preProcess(word2index,test_contexts,unk_char,ini_char,max_senten_len,max_context_size):\r\n\tprint (\"preprocessing...\")\r\n\tfilter_test_contexts = []\r\n\tfor context in test_contexts:\r\n\t\tfilter_context = [filteringSenten(word2index,senten,unk_char,ini_char) for senten in context]\r\n\t\tfilter_test_contexts.append(filter_context)\r\n\r\n\tpadded_test_pairs = []\r\n\tfor context in filter_test_contexts:\r\n\t\tpad_list = [0]*len(context)\r\n\t\tif len(context) <= max_context_size:\r\n\t\t\tpad_list = [1]*(max_context_size-len(context)) + 
pad_list\r\n\t\t\tcontext = ['<unk>']*(max_context_size-len(context)) + context\r\n\t\telse:\r\n\t\t\tpad_list = pad_list[-max_context_size:]\r\n\t\t\tcontext = context[-max_context_size:]\r\n\t\tpadded_context = [paddingSenten(senten,max_senten_len) for senten in context]\r\n\t\tpadded_test_pairs.append([padded_context,pad_list])\r\n\r\n\treturn padded_test_pairs\r\n\r\n\r\n# 读入一个句子的list,构建batch后进行预测\r\ndef predictSentences(index2word,unk_char,ini_char,ini_idx,model,test_pairs,\r\n\t\t\t\t\tprint_every,batch_size,max_senten_len,max_context_size):\r\n\tmodel.eval()\r\n\t#构造batch的list\r\n\tpairs_batches,num_batches = buildingPairsBatch(test_pairs,batch_size,shuffle=False)\r\n\tprint (\"\")\r\n\tprint (\"num of batch:\",num_batches)\r\n\t\r\n\tpredict_sentences = []\r\n\tidx_batch = 0\r\n\tfor contexts_tensor_batch, pad_matrix_batch in getTensorsContextPairsBatch(word2index,pairs_batches,max_context_size):\r\n\t\tpredict_batch = model.predict(contexts_tensor_batch,index2word,pad_matrix_batch,ini_idx,sep_char='\\t')\r\n\t\tpredict_sentences.extend(predict_batch)\r\n\t\tif (idx_batch+1)%print_every == 0:\r\n\t\t\tprint (\"{} batches finished\".format(idx_batch+1))\r\n\t\tidx_batch += 1\r\n\r\n\tpredict_sentences = predict_sentences[0:len(test_pairs)]\r\n\treturn predict_sentences\r\n\r\nif __name__ == '__main__':\r\n\tini_char = '</i>'\r\n\tunk_char = '<unk>'\r\n\tt0 = time.time()\r\n\tprint (\"loading word2vec...\")\r\n\tctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)\r\n\tprint(\" dict size:\",ctable.getDictSize())\r\n\tprint (\" emb size:\",ctable.getEmbSize())\r\n\tprint (time.time()-t0)\r\n\tprint (\"\")\r\n\r\n\tseq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,\r\n\t\t\t\t\topts.max_senten_len,opts.teach_forcing).cuda()\r\n\r\n\tif opts.weights != None:\r\n\t\tprint (\"load model parameters...\")\r\n\t\tseq2seq.load_state_dict(torch.load(opts.weights))\r\n\telse:\r\n\t\tprint (\"No model parameters!\")\r\n\t\texit()\r\n\r\n\ttest_contexts,test_replys = readingTestCorpus(os.path.join(opts.corpus_path,opts.test_file))\r\n\tprint (\"len(test_contexts):\",len(test_contexts))\r\n\tprint (\"len(test_replys):\",len(test_replys))\r\n\r\n\tword2index = ctable.getWord2Index()\r\n\ttest_pairs = preProcess(word2index,test_contexts,unk_char,ini_char,opts.max_senten_len,opts.max_context_size)\r\n\tprint (\"len(test_pairs):\",len(test_pairs))\r\n\t'''test_pair = test_pairs[100]\r\n\ttest_context = test_pair[0]\r\n\tpad_list = test_pair[1]\r\n\r\n\tfor senten in test_context:\r\n\t\tprint senten\r\n\tprint pad_list'''\r\n\t\r\n\tprint (\"start predicting...\")\r\n\tini_idx = word2index[ini_char]\r\n\tpredict_sentences = predictSentences(ctable.getIndex2Word(),unk_char,ini_char,ini_idx,seq2seq,test_pairs,\r\n\t\t\t\t\t\t\t\t\topts.print_every,opts.batch_size,opts.max_senten_len,opts.max_context_size)\r\n\r\n\tprint (\"writing...\")\r\n\tif not os.path.exists('./result/'):\r\n\t\tos.mkdir('./result/')\r\n\tpred_res_file = open(\"./result/open_pred_res_hyb_t1_len2\",'w')\r\n\tpred_ans_file = open(\"./result/open_pred_ans_hyb_t1_len2\",'w')\r\n\tfor idx,senten in enumerate(predict_sentences):\r\n\t\ttest_context = test_contexts[idx]\r\n\t\tfor test_post in test_context:\r\n\t\t\tpred_res_file.write(test_post+'\\n')\r\n\t\tpred_res_file.write(senten+'\\n')\r\n\t\tpred_res_file.write(sub+'\\n')\r\n\t\tsenten_l = [c for c in senten.split('\\t') if c != '</s>']\r\n\t\tpred_ans_file.write(' '.join(senten_l)+' 
__eou__'+'\\n')\r\n\r\n\tpred_res_file.close()\r\n\tpred_ans_file.close()\r\n\tprint (\"end\")\r\n\t\r\n",
"# -*- coding: utf-8 -*-\r\nimport sys\r\nimport os\r\nimport random\r\nimport re\r\nimport time\r\nimport torch \r\nfrom torch.autograd import Variable\r\nfrom torch import optim\r\nimport torch.nn as nn\r\n#sys.path.append('../')\r\nfrom hybrid_t2_model import Seq2Seq\r\nfrom hybrid_data_utils import *\r\nimport psutil\r\n\r\nproc = psutil.Process(os.getpid())\r\ndef init_command_line(argv):\r\n\tfrom argparse import ArgumentParser\r\n\tusage = \"seq2seq\"\r\n\tdescription = ArgumentParser(usage)\r\n\tdescription.add_argument(\"--w2v_path\", type=str, default=\"/users3/yfwang/data/w2v/ubuntu/\")\r\n\tdescription.add_argument(\"--corpus_path\", type=str, default=\"/users3/yfwang/data/corpus/ubuntu/\")\r\n\tdescription.add_argument(\"--w2v\", type=str, default=\"ubuntu_train_all_200e.w2v\")\r\n\tdescription.add_argument(\"--train_file\", type=str, default=\"ubuntu_train_sessions.txt\")\r\n\t\r\n\tdescription.add_argument(\"--max_context_size\", type=int, default=6)\r\n\tdescription.add_argument(\"--batch_size\", type=int, default=64)\r\n\tdescription.add_argument(\"--enc_hidden_size\", type=int, default=512)\r\n\tdescription.add_argument(\"--max_senten_len\", type=int, default=15)\r\n\r\n\tdescription.add_argument(\"--lr\", type=float, default=0.001)\r\n\tdescription.add_argument(\"--weight_decay\", type=float, default=1e-5)\r\n\tdescription.add_argument(\"--dropout\", type=float, default=0.5)\r\n\r\n\tdescription.add_argument(\"--epochs\", type=int, default=10)\r\n\tdescription.add_argument(\"--teach_forcing\", type=int, default=1)\r\n\tdescription.add_argument(\"--shuffle\", type=int, default=1)\r\n\tdescription.add_argument(\"--print_every\", type=int, default=200, help=\"print every batches when training\")\r\n\tdescription.add_argument(\"--save_model\", type=int, default=1)\r\n\tdescription.add_argument(\"--weights\", type=str, default=None)\r\n\treturn description.parse_args(argv)\r\n\r\nopts = init_command_line(sys.argv[1:])\r\nprint (\"Configure:\")\r\nprint (\" w2v:\",os.path.join(opts.w2v_path,opts.w2v))\r\nprint (\" train_file:\",os.path.join(opts.corpus_path,opts.train_file))\r\n\r\nprint (\" max_context_size:\",opts.max_context_size)\r\nprint (\" batch_size:\",opts.batch_size)\r\nprint (\" enc_hidden_size:\",opts.enc_hidden_size)\r\nprint (\" max_senten_len:\",opts.max_senten_len)\r\n\r\nprint (\" learning rate:\",opts.lr)\r\nprint (\" weight_decay:\",opts.weight_decay)\r\nprint (\" dropout:\",opts.dropout)\r\n\r\nprint (\" epochs:\",opts.epochs)\r\nprint (\" teach_forcing:\",opts.teach_forcing)\r\nprint (\" shuffle:\",opts.shuffle)\r\nprint (\" print_every:\",opts.print_every)\r\nprint (\" save_model:\",opts.save_model)\r\n\r\nprint (\" weights:\",opts.weights)\r\nprint (\"\")\r\n\r\n'''单个batch的训练函数'''\r\ndef train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx):\r\n\tloss = 0\r\n\tmodel_optimizer.zero_grad()\r\n\r\n\tlist_pred = model(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,ini_idx)\r\n\r\n\t# 预测的每个字的loss相加,构成整句的loss\r\n\tfor idx,reply_tensor in enumerate(reply_tensor_batch):\r\n\t\tloss_s = criterion(list_pred[idx],Variable(reply_tensor).cuda())\r\n\t\tloss += loss_s\r\n\r\n\tloss.backward()\r\n\tmodel_optimizer.step()\r\n\t\r\n\treturn loss.data[0]\r\n\r\n# 多轮训练函数\r\ndef train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,epochs,\r\n\t\t\t\tbatch_size,max_senten_len,max_context_size,print_every,save_model,shuffle):\r\n\tprint (\"start 
training...\")\r\n\tmodel.train()\r\n\tstate_loss = 10000.0\r\n\tfor ei in range(epochs):\r\n\t\tprint (\"Iteration {}: \".format(ei+1))\r\n\t\tepoch_loss = 0\r\n\t\tevery_loss = 0\r\n\t\tt0 = time.time()\r\n\t\tpairs_batches,num_batches = buildingPairsBatch(corpus_pairs,batch_size,shuffle=shuffle)\r\n\t\tprint (\"num_batches:\",num_batches)\r\n\r\n\t\tidx_batch = 0\r\n\t\tfor reply_tensor_batch, contexts_tensor_batch, pad_matrix_batch in getTensorsPairsBatch(word2index,pairs_batches,max_context_size):\r\n\t\t\tloss = train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx)\r\n\t\t\tepoch_loss += loss\r\n\t\t\tevery_loss += loss\r\n\t\t\tif (idx_batch+1)%print_every == 0:\r\n\t\t\t\tevery_avg_loss = every_loss/(max_senten_len*(idx_batch+1))\r\n\t\t\t\t#every_loss = 0\r\n\t\t\t\tt = round((time.time()-t0),2)\r\n\t\t\t\tprint (\"{} batches finished, avg_loss:{},{}\".format(idx_batch+1, every_avg_loss,str(t)))\r\n\t\t\tidx_batch += 1\r\n\t\t\t\r\n\t\tprint (\"memory percent: %.2f%%\" % (proc.memory_percent()))\r\n\t\tmem_info = proc.memory_info()\r\n\t\tres_mem_use = mem_info[0]\r\n\t\tprint (\"res_mem_use: {:.2f}MB\".format(float(res_mem_use)/1024/1024))\r\n\r\n\t\tepoch_avg_loss = epoch_loss/(max_senten_len*num_batches)\r\n\t\tprint (\"epoch_avg_loss:\",epoch_avg_loss)\r\n\t\tif save_model and epoch_avg_loss < state_loss:\r\n\t\t\tprint (\"save model...\")\r\n\t\t\ttorch.save(model.state_dict(), \"./seq2seq_parameters_IterEnd\")\r\n\t\t\tstate_loss = epoch_avg_loss\r\n\r\n\t\tprint (\"Iteration time:\",time.time()-t0)\r\n\t\tprint (\"=============================================\" )\r\n\t\tprint (\"\")\r\n\r\nif __name__ == '__main__':\r\n\tini_char = '</i>'\r\n\tunk_char = '<unk>'\r\n\tt0 = time.time()\r\n\tprint (\"loading word2vec...\")\r\n\tctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)\r\n\tprint(\" dict size:\",ctable.getDictSize())\r\n\tprint (\" emb size:\",ctable.getEmbSize())\r\n\tprint (\"\")\r\n\r\n\ttrain_file_name = os.path.join(opts.corpus_path,opts.train_file)\r\n\tctable,corpus_pairs = readingData(ctable,train_file_name,opts.max_senten_len,opts.max_context_size)\r\n\tprint (time.time()-t0)\r\n\tprint (\"\")\r\n\r\n\tseq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,\r\n\t\t\t\t\topts.max_senten_len,opts.teach_forcing).cuda()\r\n\t\r\n\t# 加载保存好的模型继续训练\r\n\tif opts.weights != None:\r\n\t\tprint (\"load weights...\")\r\n\t\tseq2seq.load_state_dict(torch.load(opts.weights))\r\n\telse:\r\n\t\tseq2seq.init_parameters(ctable.getEmbMatrix())\r\n\r\n\tmodel_optimizer = optim.Adam(seq2seq.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)\r\n\tcriterion = nn.NLLLoss()\r\n\t\r\n\tprint (\"memory percent: %.2f%%\" % (proc.memory_percent()))\r\n\tmem_info = proc.memory_info()\r\n\tres_mem_use = mem_info[0]\r\n\tprint (\"res_mem_use: {:.2f}MB\".format(float(res_mem_use)/1024/1024))\r\n\tprint (\"\")\r\n\r\n\tword2index = ctable.getWord2Index()\r\n\tini_idx = word2index[ini_char]\r\n\ttrain_model(word2index,ini_idx,corpus_pairs,seq2seq,model_optimizer,criterion,opts.epochs,opts.batch_size,\r\n\t\t\t\topts.max_senten_len,opts.max_context_size,opts.print_every,opts.save_model,opts.shuffle)\r\n\tprint (\"\")\r\n"
] | [
[
"torch.load"
],
[
"torch.autograd.Variable",
"torch.nn.NLLLoss",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aligapaul/automlbenchmark | [
"59e796fe6632637233a7104dfffe65f210f9eef5",
"97c1ef8f7529f9814c5228ae0ff91d4055223d50"
] | [
"frameworks/shared/callee.py",
"amlb/openml.py"
] | [
"import json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\n\ndef setup_logger():\n console = logging.StreamHandler(sys.stdout)\n handlers = [console]\n logging.basicConfig(handlers=handlers)\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n\nsetup_logger()\n\nlog = logging.getLogger(__name__)\n\n\nclass NS:\n\n @staticmethod\n def dict(ns, deep=True):\n dic = ns.__dict__\n if not deep:\n return dic\n for k, v in dic.items():\n if isinstance(v, NS):\n dic[k] = NS.dict(v)\n return dic\n\n @staticmethod\n def from_dict(dic, deep=True):\n ns = NS(dic)\n if not deep:\n return ns\n for k, v in ns.__dict__.items():\n if isinstance(v, dict):\n ns.__dict__[k] = NS.from_dict(v)\n return ns\n\n @staticmethod\n def walk(ns, fn, inplace=False):\n nns = ns if inplace else NS()\n for k, v in ns.__dict__.items():\n nk, nv = fn(k, v)\n if nk is not None:\n if v is nv and isinstance(v, NS):\n nv = NS.walk(nv, fn, inplace)\n nns.__dict__[nk] = nv\n return nns\n\n def __init__(self, *args, **kwargs):\n self.__dict__.update(dict(*args, **kwargs))\n\n def __str__(self):\n return str(self.__dict__)\n\n def __repr__(self):\n return repr(self.__dict__)\n\n\nclass Timer:\n\n @staticmethod\n def _zero():\n return 0\n\n def __init__(self, clock=time.time, enabled=True):\n self.start = 0\n self.stop = 0\n self._time = clock if enabled else Timer._zero\n\n def __enter__(self):\n self.start = self._time()\n return self\n\n def __exit__(self, *args):\n self.stop = self._time()\n\n @property\n def duration(self):\n if self.stop > 0:\n return self.stop - self.start\n return self._time() - self.start\n\n\ndef result(output_file=None,\n predictions=None, truth=None,\n probabilities=None, probabilities_labels=None,\n target_is_encoded=False,\n error_message=None,\n models_count=None,\n training_duration=None):\n return locals()\n\n\ndata_keys = re.compile(\"^(X|y|data)(_.+)?$\")\n\n\ndef call_run(run_fn):\n import numpy as np\n\n params = NS.from_dict(json.loads(sys.stdin.read()))\n\n def load_data(name, path):\n if isinstance(path, str) and data_keys.match(name):\n return name, np.load(path, allow_pickle=True)\n return name, path\n\n print(params.dataset)\n ds = NS.walk(params.dataset, load_data)\n\n config = params.config\n config.framework_params = NS.dict(config.framework_params)\n\n try:\n result = run_fn(ds, config)\n res = dict(result)\n for name in ['predictions', 'truth', 'probabilities']:\n arr = result[name]\n if arr is not None:\n res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))\n np.save(res[name], arr, allow_pickle=True)\n except Exception as e:\n log.exception(e)\n res = dict(\n error_message=str(e),\n models_count=0\n )\n\n print(config.result_token)\n print(json.dumps(res, separators=(',', ':')))\n",
"\"\"\"\n**openml** module implements the abstractions defined in **data** module\nto expose `OpenML<https://www.openml.org>`_ datasets.\n\"\"\"\nimport logging\nimport os\nimport re\n\nimport openml as oml\nimport arff\nimport numpy as np\n\nfrom .data import Dataset, DatasetType, Datasplit, Feature\nfrom .utils import lazy_property, obj_size, profile, to_mb\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Openml():\n\n def __init__(self, api_key, cache_dir=None):\n oml.config.apikey = api_key\n if cache_dir:\n oml.config.set_cache_directory(cache_dir)\n\n @profile(logger=log)\n def load(self, task_id=None, dataset_id=None, fold=0):\n if task_id is not None:\n if dataset_id is not None:\n log.warning(\"Ignoring dataset id {} as a task id {} was already provided.\".format(dataset_id, task_id))\n task = oml.tasks.get_task(task_id)\n dataset = task.get_dataset()\n _, nfolds, _ = task.get_split_dimensions()\n if fold >= nfolds:\n raise ValueError(\"OpenML task {} only accepts `fold` < {}.\".format(task_id, nfolds))\n elif dataset_id is not None:\n raise NotImplementedError(\"OpenML raw datasets are not supported yet, please use an OpenML task instead.\")\n dataset = oml.datasets.get_dataset(dataset_id)\n task = AutoTask(dataset)\n if fold > 0:\n raise ValueError(\"OpenML raw datasets {} only accepts `fold` = 0.\".format(task_id))\n else:\n raise ValueError(\"A task id or a dataset id are required when using OpenML.\")\n return OpenmlDataset(task, dataset, fold)\n\n\nclass AutoTask(oml.OpenMLTask):\n \"\"\"A minimal task implementation providing only the information necessary to get the logic of this current module working.\"\"\"\n\n def __init__(self, oml_dataset: oml.OpenMLDataset):\n self._dataset = oml_dataset\n self._nrows = oml_dataset.qualities['NumberOfInstances']\n self.target_name = oml_dataset.default_target_attribute\n\n\n def get_train_test_split_indices(self, fold=0):\n # TODO: make auto split 80% train, 20% test (make this configurable, also random vs sequential) and save it to disk\n pass\n\n\nclass OpenmlDataset(Dataset):\n\n def __init__(self, oml_task: oml.OpenMLTask, oml_dataset: oml.OpenMLDataset, fold=0):\n super().__init__()\n self._oml_task = oml_task\n self._oml_dataset = oml_dataset\n self.fold = fold\n self._train = None\n self._test = None\n self._attributes = None\n self._unique_values = {}\n\n @property\n def type(self):\n nclasses = self._oml_dataset.qualities.get('NumberOfClasses', 0)\n if nclasses > 2:\n return DatasetType.multiclass\n elif nclasses == 2:\n return DatasetType.binary\n else:\n return DatasetType.regression\n\n @property\n @profile(logger=log)\n def train(self):\n self._ensure_loaded()\n return self._train\n\n @property\n @profile(logger=log)\n def test(self):\n self._ensure_loaded()\n return self._test\n\n @lazy_property\n @profile(logger=log)\n def features(self):\n def get_values(f):\n \"\"\"\n workaround to retrieve nominal values from arff file as openml (version 0.7.0) doesn't support yet\n retrieval of nominal values from the features.xml file\n :param f: openml feature\n :return: an array with nominal values\n \"\"\"\n if f.data_type == 'nominal' and not f.nominal_values:\n f.nominal_values = next(values for name, values in self.attributes if name.lower() == f.name.lower())\n if not f.nominal_values:\n f.nominal_values = self._unique_values.get(f.name)\n return f.nominal_values\n\n has_missing_values = lambda f: f.number_missing_values > 0\n is_target = lambda f: f.name == self._oml_task.target_name\n return [Feature(f.index,\n f.name,\n 
f.data_type,\n values=get_values(f),\n has_missing_values=has_missing_values(f),\n is_target=is_target(f)\n ) for i, f in sorted(self._oml_dataset.features.items())]\n\n @lazy_property\n def target(self):\n return next(f for f in self.features if f.is_target)\n\n @property\n def attributes(self):\n if not self._attributes:\n log.debug(\"Loading attributes from dataset %s.\", self._oml_dataset.data_file)\n with open(self._oml_dataset.data_file) as f:\n ds = arff.load(f)\n self._attributes = ds['attributes']\n return self._attributes\n\n @profile(logger=log)\n def _ensure_loaded(self):\n if self._train is None and self._test is None:\n self._load_split()\n\n def _load_split(self):\n ds_path = self._oml_dataset.data_file\n train_path = _get_split_path_for_dataset(ds_path, 'train', self.fold)\n test_path = _get_split_path_for_dataset(ds_path, 'test', self.fold)\n\n if not os.path.exists(train_path) or not os.path.exists(test_path):\n self._prepare_split_data(train_path, test_path)\n\n self._train = OpenmlDatasplit(self, train_path)\n self._test = OpenmlDatasplit(self, test_path)\n\n def _prepare_split_data(self, train_path, test_path):\n train_ind, test_ind = self._oml_task.get_train_test_split_indices(self.fold)\n #X, y = self._oml_task.get_X_and_y() #numpy arrays\n ods = self._oml_dataset\n\n # X, y, attr_is_categorical, attr_names = ods.get_data(self.target,\n # return_categorical_indicator=True,\n # return_attribute_names=True)\n # ods.retrieve_class_labels(self.target)\n\n log.debug(\"Loading dataset %s.\", ods.data_file)\n with open(ods.data_file) as f:\n ds = arff.load(f)\n self._attributes = ds['attributes']\n self._extract_unique_values(ds)\n\n name_template = \"{name}_{{split}}_{fold}\".format(name=ods.name, fold=self.fold)\n _save_split_set(path=train_path,\n name=name_template.format(split='train'),\n full_dataset=ds,\n indexes=train_ind)\n _save_split_set(path=test_path,\n name=name_template.format(split='test'),\n full_dataset=ds,\n indexes=test_ind)\n\n def _extract_unique_values(self, arff_dataset):\n # TODO: support encoded string columns?\n pass\n\n\nclass OpenmlDatasplit(Datasplit):\n\n def __init__(self, dataset: Dataset, path: str):\n super().__init__(dataset, 'arff')\n self._path = path\n\n @property\n def path(self):\n return self._path\n\n @lazy_property\n @profile(logger=log)\n def data(self):\n # use codecs for unicode support: path = codecs.load(self._path, 'rb', 'utf-8')\n log.debug(\"Loading datasplit %s.\", self.path)\n with open(self.path) as file:\n ds = arff.load(file)\n return np.asarray(ds['data'], dtype=object)\n\n\ndef _get_split_path_for_dataset(ds_path, split='train', fold=0):\n ds_dir, ds_base = os.path.split(ds_path)\n split_base = re.sub(r'\\.(\\w+)$', r'_{split}_{fold}.\\1'.format(split=split, fold=fold), ds_base)\n split_path = os.path.join(ds_dir, split_base)\n return split_path\n\n\n@profile(logger=log)\ndef _save_split_set(path, name, full_dataset=None, indexes=None):\n # X_split = X[indexes, :]\n # y_split = y.reshape(-1, 1)[indexes, :]\n log.debug(\"Saving %s split dataset to %s.\", name, path)\n with open(path, 'w') as file:\n split_data = np.asarray(full_dataset['data'], dtype=object)[indexes, :]\n arff.dump({\n 'description': full_dataset['description'],\n 'relation': name,\n 'attributes': full_dataset['attributes'],\n 'data': split_data\n }, file)\n\n\n"
] | [
[
"numpy.load",
"numpy.save"
],
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rokdd/yahoo_fin | [
"da55c89582bc8e858131581da1bd380d19d68bf2"
] | [
"yahoo_fin/stock_info.py"
] | [
"import requests\r\nimport pandas as pd\r\nimport ftplib\r\nimport io\r\nimport re\r\nimport json\r\nimport datetime\r\n\r\ntry:\r\n from requests_html import HTMLSession\r\nexcept Exception:\r\n print(\"\"\"Warning - Certain functionality \r\n requires requests_html, which is not installed.\r\n \r\n Install using: \r\n pip install requests_html\r\n \r\n After installation, you may have to restart your Python session.\"\"\")\r\n\r\n \r\nbase_url = \"https://query1.finance.yahoo.com/v8/finance/chart/\"\r\n\r\ndef build_url(ticker, start_date = None, end_date = None, interval = \"1d\"):\r\n \r\n if end_date is None: \r\n end_seconds = int(pd.Timestamp(\"now\").timestamp())\r\n \r\n else:\r\n end_seconds = int(pd.Timestamp(end_date).timestamp())\r\n \r\n if start_date is None:\r\n start_seconds = 7223400 \r\n \r\n else:\r\n start_seconds = int(pd.Timestamp(start_date).timestamp())\r\n \r\n site = base_url + ticker\r\n \r\n params = {\"period1\": start_seconds, \"period2\": end_seconds,\r\n \"interval\": interval.lower(), \"events\": \"div,splits\"}\r\n \r\n \r\n return site, params\r\n\r\n\r\ndef force_float(elt):\r\n \r\n try:\r\n return float(elt)\r\n except:\r\n return elt\r\n \r\ndef _convert_to_numeric(s):\r\n\r\n if \"M\" in s:\r\n s = s.strip(\"M\")\r\n return force_float(s) * 1_000_000\r\n \r\n if \"B\" in s:\r\n s = s.strip(\"B\")\r\n return force_float(s) * 1_000_000_000\r\n \r\n return force_float(s)\r\n\r\n\r\ndef get_data(ticker, start_date = None, end_date = None, index_as_date = True,\r\n interval = \"1d\", headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical stock price data into a pandas data frame. Interval\r\n must be \"1d\", \"1wk\", \"1mo\", or \"1m\" for daily, weekly, monthly, or minute data.\r\n Intraday minute data is limited to 7 days.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n @param: interval = \"1d\"\r\n '''\r\n \r\n if interval not in (\"1d\", \"1wk\", \"1mo\", \"1m\"):\r\n raise AssertionError(\"interval must be of of '1d', '1wk', '1mo', or '1m'\")\r\n \r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, interval)\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not resp.ok:\r\n raise AssertionError(resp.json())\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # get open / high / low / close data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0][\"indicators\"][\"quote\"][0])\r\n\r\n # get the date info\r\n temp_time = data[\"chart\"][\"result\"][0][\"timestamp\"]\r\n\r\n if interval != \"1m\":\r\n \r\n # add in adjclose\r\n frame[\"adjclose\"] = data[\"chart\"][\"result\"][0][\"indicators\"][\"adjclose\"][0][\"adjclose\"] \r\n frame.index = pd.to_datetime(temp_time, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n frame = frame[[\"open\", \"high\", \"low\", \"close\", \"adjclose\", \"volume\"]]\r\n \r\n else:\r\n\r\n frame.index = pd.to_datetime(temp_time, unit = \"s\")\r\n frame = frame[[\"open\", \"high\", \"low\", \"close\", \"volume\"]]\r\n \r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n\r\n\r\n\r\ndef tickers_sp500(include_company_data = False):\r\n '''Downloads list of tickers 
currently listed in the S&P 500 '''\r\n # get list of all S&P 500 stocks\r\n sp500 = pd.read_html(\"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\")[0]\r\n sp500[\"Symbol\"] = sp500[\"Symbol\"].str.replace(\".\", \"-\", regex=True)\r\n\r\n if include_company_data:\r\n return sp500\r\n\r\n sp_tickers = sp500.Symbol.tolist()\r\n sp_tickers = sorted(sp_tickers)\r\n \r\n return sp_tickers\r\n\r\n\r\ndef tickers_nasdaq(include_company_data = False):\r\n \r\n '''Downloads list of tickers currently listed in the NASDAQ'''\r\n \r\n ftp = ftplib.FTP(\"ftp.nasdaqtrader.com\")\r\n ftp.login()\r\n ftp.cwd(\"SymbolDirectory\")\r\n \r\n r = io.BytesIO()\r\n ftp.retrbinary('RETR nasdaqlisted.txt', r.write)\r\n \r\n if include_company_data:\r\n r.seek(0)\r\n data = pd.read_csv(r, sep = \"|\")\r\n return data\r\n \r\n info = r.getvalue().decode()\r\n splits = info.split(\"|\")\r\n \r\n \r\n tickers = [x for x in splits if \"\\r\\n\" in x]\r\n tickers = [x.split(\"\\r\\n\")[1] for x in tickers if \"NASDAQ\" not in x != \"\\r\\n\"]\r\n tickers = [ticker for ticker in tickers if \"File\" not in ticker] \r\n \r\n ftp.close() \r\n\r\n return tickers\r\n \r\n \r\n\r\ndef tickers_other(include_company_data = False):\r\n '''Downloads list of tickers currently listed in the \"otherlisted.txt\"\r\n file on \"ftp.nasdaqtrader.com\" '''\r\n ftp = ftplib.FTP(\"ftp.nasdaqtrader.com\")\r\n ftp.login()\r\n ftp.cwd(\"SymbolDirectory\")\r\n \r\n r = io.BytesIO()\r\n ftp.retrbinary('RETR otherlisted.txt', r.write)\r\n \r\n if include_company_data:\r\n r.seek(0)\r\n data = pd.read_csv(r, sep = \"|\")\r\n return data\r\n \r\n info = r.getvalue().decode()\r\n splits = info.split(\"|\") \r\n \r\n tickers = [x for x in splits if \"\\r\\n\" in x]\r\n tickers = [x.split(\"\\r\\n\")[1] for x in tickers]\r\n tickers = [ticker for ticker in tickers if \"File\" not in ticker] \r\n \r\n ftp.close() \r\n\r\n return tickers\r\n \r\n \r\ndef tickers_dow(include_company_data = False):\r\n \r\n '''Downloads list of currently traded tickers on the Dow'''\r\n\r\n site = \"https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average\"\r\n \r\n table = pd.read_html(site, attrs = {\"id\":\"constituents\"})[0]\r\n \r\n if include_company_data:\r\n return table\r\n\r\n dow_tickers = sorted(table['Symbol'].tolist())\r\n \r\n return dow_tickers \r\n \r\n\r\ndef tickers_ibovespa(include_company_data = False):\r\n \r\n '''Downloads list of currently traded tickers on the Ibovespa, Brazil'''\r\n\r\n table = pd.read_html(\"https://pt.wikipedia.org/wiki/Lista_de_companhias_citadas_no_Ibovespa\")[0]\r\n table.columns = [\"Symbol\", \"Share\", \"Sector\", \"Type\", \"Site\"]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n ibovespa_tickers = sorted(table.Symbol.tolist())\r\n \r\n return ibovespa_tickers \r\n\r\n\r\n\r\ndef tickers_nifty50(include_company_data = False, headers = {'User-agent': 'Mozilla/5.0'}):\r\n\r\n '''Downloads list of currently traded tickers on the NIFTY 50, India'''\r\n\r\n site = \"https://finance.yahoo.com/quote/%5ENSEI/components?p=%5ENSEI\"\r\n table = pd.read_html(requests.get(site, headers=headers).text)[0]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n nifty50 = sorted(table['Symbol'].tolist())\r\n\r\n return nifty50\r\n\r\ndef tickers_niftybank():\r\n ''' Currently traded tickers on the NIFTY BANK, India '''\r\n \r\n niftybank = ['AXISBANK', 'KOTAKBANK', 'HDFCBANK', 'SBIN', 'BANKBARODA', 'INDUSINDBK', 'PNB', 'IDFCFIRSTB', 'ICICIBANK', 'RBLBANK', 'FEDERALBNK', 'BANDHANBNK']\r\n \r\n return 
niftybank\r\n\r\n\r\n\r\ndef tickers_ftse100(include_company_data = False):\r\n \r\n '''Downloads a list of the tickers traded on the FTSE 100 index'''\r\n \r\n table = pd.read_html(\"https://en.wikipedia.org/wiki/FTSE_100_Index\", attrs = {\"id\": \"constituents\"})[0]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n return sorted(table.EPIC.tolist())\r\n \r\n\r\ndef tickers_ftse250(include_company_data = False):\r\n \r\n \r\n '''Downloads a list of the tickers traded on the FTSE 250 index'''\r\n \r\n table = pd.read_html(\"https://en.wikipedia.org/wiki/FTSE_250_Index\", attrs = {\"id\": \"constituents\"})[0]\r\n \r\n table.columns = [\"Company\", \"Ticker\"]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n return sorted(table.Ticker.tolist())\r\n \r\n\r\n\r\n\r\ndef get_quote_table(ticker , dict_result = True, headers = {'User-agent': 'Mozilla/5.0'}): \r\n \r\n '''Scrapes data elements found on Yahoo Finance's quote page \r\n of input ticker\r\n \r\n @param: ticker\r\n @param: dict_result = True\r\n '''\r\n\r\n site = \"https://finance.yahoo.com/quote/\" + ticker + \"?p=\" + ticker\r\n \r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n data = tables[0].append(tables[1])\r\n\r\n data.columns = [\"attribute\" , \"value\"]\r\n \r\n quote_price = pd.DataFrame([\"Quote Price\", get_live_price(ticker)]).transpose()\r\n quote_price.columns = data.columns.copy()\r\n \r\n data = data.append(quote_price)\r\n \r\n data = data.sort_values(\"attribute\")\r\n \r\n data = data.drop_duplicates().reset_index(drop = True)\r\n \r\n data[\"value\"] = data.value.map(force_float)\r\n\r\n if dict_result:\r\n \r\n result = {key : val for key,val in zip(data.attribute , data.value)}\r\n return result\r\n \r\n return data \r\n \r\n \r\ndef get_stats(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes information from the statistics tab on Yahoo Finance \r\n for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n stats_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/key-statistics?p=\" + ticker\r\n \r\n\r\n tables = pd.read_html(requests.get(stats_site, headers=headers).text)\r\n \r\n tables = [table for table in tables[1:] if table.shape[1] == 2]\r\n \r\n table = tables[0]\r\n for elt in tables[1:]:\r\n table = table.append(elt)\r\n\r\n table.columns = [\"Attribute\" , \"Value\"]\r\n \r\n table = table.reset_index(drop = True)\r\n \r\n return table\r\n\r\n\r\ndef get_stats_valuation(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes Valuation Measures table from the statistics tab on Yahoo Finance \r\n for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n stats_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/key-statistics?p=\" + ticker\r\n \r\n \r\n tables = pd.read_html(requests.get(stats_site, headers=headers).text)\r\n \r\n tables = [table for table in tables if \"Trailing P/E\" in table.iloc[:,0].tolist()]\r\n \r\n \r\n table = tables[0].reset_index(drop = True)\r\n \r\n return table\r\n\r\n\r\n\r\n\r\n\r\ndef _parse_json(url, headers = {'User-agent': 'Mozilla/5.0'}):\r\n html = requests.get(url=url, headers = headers).text\r\n\r\n json_str = html.split('root.App.main =')[1].split(\r\n '(this)')[0].split(';\\n}')[0].strip()\r\n \r\n try:\r\n data = json.loads(json_str)[\r\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\r\n except:\r\n return '{}'\r\n else:\r\n # return data\r\n new_data = json.dumps(data).replace('{}', 'null')\r\n new_data = 
re.sub(r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\r\n\r\n json_info = json.loads(new_data)\r\n\r\n return json_info\r\n\r\n\r\ndef _parse_table(json_info):\r\n\r\n df = pd.DataFrame(json_info)\r\n \r\n if df.empty:\r\n return df\r\n \r\n del df[\"maxAge\"]\r\n\r\n df.set_index(\"endDate\", inplace=True)\r\n df.index = pd.to_datetime(df.index, unit=\"s\")\r\n \r\n df = df.transpose()\r\n df.index.name = \"Breakdown\"\r\n\r\n return df\r\n\r\n\r\ndef get_income_statement(ticker, yearly = True):\r\n \r\n '''Scrape income statement from Yahoo Finance for a given ticker\r\n \r\n @param: ticker\r\n '''\r\n \r\n income_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n\r\n json_info = _parse_json(income_site)\r\n \r\n if yearly:\r\n temp = json_info[\"incomeStatementHistory\"][\"incomeStatementHistory\"]\r\n else:\r\n temp = json_info[\"incomeStatementHistoryQuarterly\"][\"incomeStatementHistory\"]\r\n \r\n return _parse_table(temp) \r\n \r\n\r\ndef get_balance_sheet(ticker, yearly = True):\r\n \r\n '''Scrapes balance sheet from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n balance_sheet_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/balance-sheet?p=\" + ticker\r\n \r\n\r\n json_info = _parse_json(balance_sheet_site)\r\n \r\n try:\r\n if yearly:\r\n temp = json_info[\"balanceSheetHistory\"][\"balanceSheetStatements\"]\r\n else:\r\n temp = json_info[\"balanceSheetHistoryQuarterly\"][\"balanceSheetStatements\"]\r\n except:\r\n temp = []\r\n \r\n return _parse_table(temp) \r\n\r\n\r\ndef get_cash_flow(ticker, yearly = True):\r\n \r\n '''Scrapes the cash flow statement from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n \r\n cash_flow_site = \"https://finance.yahoo.com/quote/\" + \\\r\n ticker + \"/cash-flow?p=\" + ticker\r\n \r\n \r\n json_info = _parse_json(cash_flow_site)\r\n \r\n if yearly:\r\n temp = json_info[\"cashflowStatementHistory\"][\"cashflowStatements\"]\r\n else:\r\n temp = json_info[\"cashflowStatementHistoryQuarterly\"][\"cashflowStatements\"]\r\n \r\n return _parse_table(temp) \r\n\r\n\r\ndef get_financials(ticker, yearly = True, quarterly = True):\r\n\r\n '''Scrapes financials data from Yahoo Finance for an input ticker, including\r\n balance sheet, cash flow statement, and income statement. 
Returns dictionary\r\n of results.\r\n \r\n @param: ticker\r\n @param: yearly = True\r\n @param: quarterly = True\r\n '''\r\n\r\n if not yearly and not quarterly:\r\n raise AssertionError(\"yearly or quarterly must be True\")\r\n \r\n financials_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n \r\n json_info = _parse_json(financials_site)\r\n \r\n result = {}\r\n \r\n if yearly:\r\n\r\n temp = json_info[\"incomeStatementHistory\"][\"incomeStatementHistory\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_income_statement\"] = table\r\n \r\n temp = json_info[\"balanceSheetHistory\"][\"balanceSheetStatements\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_balance_sheet\"] = table\r\n \r\n temp = json_info[\"cashflowStatementHistory\"][\"cashflowStatements\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_cash_flow\"] = table\r\n\r\n if quarterly:\r\n temp = json_info[\"incomeStatementHistoryQuarterly\"][\"incomeStatementHistory\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_income_statement\"] = table\r\n \r\n temp = json_info[\"balanceSheetHistoryQuarterly\"][\"balanceSheetStatements\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_balance_sheet\"] = table\r\n \r\n temp = json_info[\"cashflowStatementHistoryQuarterly\"][\"cashflowStatements\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_cash_flow\"] = table\r\n\r\n \r\n return result\r\n\r\n\r\ndef get_holders(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes the Holders page from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n holders_site = \"https://finance.yahoo.com/quote/\" + \\\r\n ticker + \"/holders?p=\" + ticker\r\n \r\n \r\n tables = pd.read_html(requests.get(holders_site, headers=headers).text)\r\n \r\n \r\n table_names = [\"Major Holders\" , \"Direct Holders (Forms 3 and 4)\" ,\r\n \"Top Institutional Holders\" , \"Top Mutual Fund Holders\"]\r\n \r\n \r\n table_mapper = {key : val for key,val in zip(table_names , tables)}\r\n \r\n \r\n return table_mapper \r\n\r\ndef get_analysts_info(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes the Analysts page from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n \r\n analysts_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/analysts?p=\" + ticker\r\n \r\n tables = pd.read_html(requests.get(analysts_site, headers=headers).text)\r\n \r\n table_names = [table.columns[0] for table in tables]\r\n\r\n table_mapper = {key : val for key , val in zip(table_names , tables)}\r\n \r\n\r\n return table_mapper\r\n \r\n\r\ndef get_live_price(ticker):\r\n \r\n '''Gets the live price of input ticker\r\n \r\n @param: ticker\r\n ''' \r\n \r\n df = get_data(ticker, end_date = pd.Timestamp.today() + pd.DateOffset(10))\r\n \r\n \r\n return df.close[-1]\r\n \r\n \r\ndef _raw_get_daily_info(site):\r\n \r\n session = HTMLSession()\r\n \r\n resp = session.get(site)\r\n \r\n tables = pd.read_html(resp.html.raw_html) \r\n \r\n df = tables[0].copy()\r\n \r\n df.columns = tables[0].columns\r\n \r\n del df[\"52 Week Range\"]\r\n \r\n df[\"% Change\"] = df[\"% Change\"].map(lambda x: float(x.strip(\"%+\").replace(\",\", \"\")))\r\n \r\n\r\n fields_to_change = [x for x in df.columns.tolist() if \"Vol\" in x \\\r\n or x == \"Market Cap\"]\r\n \r\n for field in fields_to_change:\r\n \r\n if type(df[field][0]) == str:\r\n df[field] = df[field].map(_convert_to_numeric)\r\n \r\n session.close()\r\n \r\n return df\r\n 
\r\n\r\ndef get_day_most_active(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/most-active?offset=0&count={count}\")\r\n\r\n\r\ndef get_day_gainers(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/gainers?offset=0&count={count}\")\r\n\r\n\r\ndef get_day_losers(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/losers?offset=0&count={count}\")\r\n\r\n\r\ndef get_top_crypto():\r\n \r\n '''Gets the top 100 Cryptocurrencies by Market Cap''' \r\n\r\n session = HTMLSession()\r\n \r\n resp = session.get(\"https://finance.yahoo.com/cryptocurrencies?offset=0&count=100\")\r\n \r\n tables = pd.read_html(resp.html.raw_html) \r\n \r\n df = tables[0].copy()\r\n\r\n \r\n df[\"% Change\"] = df[\"% Change\"].map(lambda x: float(str(x).strip(\"%\").\\\r\n strip(\"+\").\\\r\n replace(\",\", \"\")))\r\n del df[\"52 Week Range\"]\r\n del df[\"1 Day Chart\"]\r\n \r\n fields_to_change = [x for x in df.columns.tolist() if \"Volume\" in x \\\r\n or x == \"Market Cap\" or x == \"Circulating Supply\"]\r\n \r\n for field in fields_to_change:\r\n \r\n if type(df[field][0]) == str:\r\n df[field] = df[field].map(lambda x: _convert_to_numeric(str(x)))\r\n \r\n \r\n session.close() \r\n \r\n return df\r\n \r\n \r\ndef get_dividends(ticker, start_date = None, end_date = None, index_as_date = True, \r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical dividend data into a pandas data frame.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n '''\r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, \"1d\")\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not resp.ok:\r\n return pd.DataFrame()\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # check if there is data available for dividends\r\n if \"events\" not in data[\"chart\"][\"result\"][0] or \"dividends\" not in data[\"chart\"][\"result\"][0]['events']:\r\n return pd.DataFrame()\r\n \r\n # get the dividend data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0]['events']['dividends'])\r\n \r\n frame = frame.transpose()\r\n \r\n frame.index = pd.to_datetime(frame.index, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n \r\n # sort in chronological order\r\n frame = frame.sort_index()\r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n # remove old date column\r\n frame = frame.drop(columns='date')\r\n \r\n frame = frame.rename({'amount': 'dividend'}, axis = 'columns')\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n\r\n\r\n\r\ndef get_splits(ticker, start_date = None, end_date = None, index_as_date = True,\r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical stock split data into a pandas data frame.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n '''\r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, \"1d\")\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not 
resp.ok:\r\n raise AssertionError(resp.json())\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # check if there is data available for events\r\n if \"events\" not in data[\"chart\"][\"result\"][0]:\r\n raise AssertionError(\"There is no data available on stock events, or none have occured\") \r\n\r\n # check if there is data available for splits\r\n if \"splits\" not in data[\"chart\"][\"result\"][0]['events']:\r\n raise AssertionError(\"There is no data available on stock splits, or none have occured\")\r\n \r\n # get the split data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0]['events']['splits'])\r\n \r\n frame = frame.transpose()\r\n \r\n frame.index = pd.to_datetime(frame.index, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n \r\n # sort in to chronological order\r\n frame = frame.sort_index()\r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n # remove unnecessary columns\r\n frame = frame.drop(columns=['date', 'denominator', 'numerator'])\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n \r\n \r\n\r\n\r\ndef get_earnings(ticker):\r\n \r\n '''Scrapes earnings data from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n result = {\r\n \"quarterly_results\": pd.DataFrame(),\r\n \"yearly_revenue_earnings\": pd.DataFrame(),\r\n \"quarterly_revenue_earnings\": pd.DataFrame()\r\n }\r\n\r\n financials_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n\r\n json_info = _parse_json(financials_site)\r\n\r\n if \"earnings\" not in json_info:\r\n return result\r\n\r\n temp = json_info[\"earnings\"]\r\n\r\n if temp == None:\r\n return result\r\n \r\n result[\"quarterly_results\"] = pd.DataFrame.from_dict(temp[\"earningsChart\"][\"quarterly\"])\r\n \r\n result[\"yearly_revenue_earnings\"] = pd.DataFrame.from_dict(temp[\"financialsChart\"][\"yearly\"])\r\n \r\n result[\"quarterly_revenue_earnings\"] = pd.DataFrame.from_dict(temp[\"financialsChart\"][\"quarterly\"])\r\n \r\n return result\r\n\r\n\r\n\r\n### Earnings functions\r\ndef _parse_earnings_json(url, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n resp = requests.get(url, headers = headers)\r\n \r\n content = resp.content.decode(encoding='utf-8', errors='strict')\r\n \r\n page_data = [row for row in content.split(\r\n '\\n') if row.startswith('root.App.main = ')][0][:-1]\r\n \r\n page_data = page_data.split('root.App.main = ', 1)[1]\r\n \r\n return json.loads(page_data)\r\n\r\ndef get_next_earnings_date(ticker):\r\n \r\n base_earnings_url = 'https://finance.yahoo.com/quote'\r\n new_url = base_earnings_url + \"/\" + ticker\r\n\r\n parsed_result = _parse_earnings_json(new_url)\r\n \r\n temp = parsed_result['context']['dispatcher']['stores']['QuoteSummaryStore']['calendarEvents']['earnings']['earningsDate'][0]['raw']\r\n\r\n return datetime.datetime.fromtimestamp(temp)\r\n\r\n\r\ndef get_earnings_history(ticker):\r\n \r\n '''Inputs: @ticker\r\n Returns the earnings calendar history of the input ticker with \r\n EPS actual vs. 
expected data.'''\r\n\r\n url = 'https://finance.yahoo.com/calendar/earnings?symbol=' + ticker\r\n \r\n result = _parse_earnings_json(url)\r\n \r\n return result[\"context\"][\"dispatcher\"][\"stores\"][\"ScreenerResultsStore\"][\"results\"][\"rows\"]\r\n\r\n\r\n\r\ndef get_earnings_for_date(date, offset = 0, count = 1):\r\n\r\n '''Inputs: @date\r\n Returns a dictionary of stock tickers with earnings expected on the\r\n input date. The dictionary contains the expected EPS values for each\r\n stock if available.'''\r\n \r\n base_earnings_url = 'https://finance.yahoo.com/calendar/earnings'\r\n \r\n if offset >= count:\r\n return []\r\n \r\n temp = pd.Timestamp(date)\r\n date = temp.strftime(\"%Y-%m-%d\")\r\n\r\n dated_url = '{0}?day={1}&offset={2}&size={3}'.format(\r\n base_earnings_url, date, offset, 100)\r\n \r\n result = _parse_earnings_json(dated_url)\r\n \r\n stores = result['context']['dispatcher']['stores']\r\n \r\n earnings_count = stores['ScreenerCriteriaStore']['meta']['total']\r\n\r\n new_offset = offset + 100\r\n \r\n more_earnings = get_earnings_for_date(date, new_offset, earnings_count)\r\n \r\n current_earnings = stores['ScreenerResultsStore']['results']['rows']\r\n\r\n total_earnings = current_earnings + more_earnings\r\n\r\n return total_earnings\r\n\r\n\r\ndef get_earnings_in_date_range(start_date, end_date):\r\n\r\n '''Inputs: @start_date\r\n @end_date\r\n \r\n Returns the stock tickers with expected EPS data for all dates in the\r\n input range (inclusive of the start_date and end_date.'''\r\n \r\n earnings_data = []\r\n\r\n days_diff = pd.Timestamp(end_date) - pd.Timestamp(start_date)\r\n days_diff = days_diff.days\r\n\r\n \r\n current_date = pd.Timestamp(start_date)\r\n \r\n dates = [current_date + datetime.timedelta(diff) for diff in range(days_diff + 1)]\r\n dates = [d.strftime(\"%Y-%m-%d\") for d in dates]\r\n \r\n i = 0\r\n while i < len(dates):\r\n try:\r\n earnings_data += get_earnings_for_date(dates[i])\r\n except Exception:\r\n pass\r\n \r\n i += 1\r\n \r\n return earnings_data\r\n\r\n\r\ndef get_currencies(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the currencies table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/currencies\"\r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_futures(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the futures table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/commodities\"\r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_undervalued_large_caps(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the undervalued large caps table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/screener/predefined/undervalued_large_caps?offset=0&count=100\"\r\n \r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_quote_data(ticker, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n \r\n '''Inputs: @ticker\r\n \r\n Returns a dictionary containing over 70 elements corresponding to the \r\n input ticker, including company name, book value, moving average data,\r\n pre-market / post-market price (when applicable), and more.'''\r\n \r\n site = 
\"https://query1.finance.yahoo.com/v7/finance/quote?symbols=\" + ticker\r\n \r\n resp = requests.get(site, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n)\r\n \r\n if not resp.ok:\r\n raise AssertionError(\"\"\"Invalid response from server. Check if ticker is\r\n valid.\"\"\")\r\n \r\n \r\n json_result = resp.json()\r\n info = json_result[\"quoteResponse\"][\"result\"]\r\n \r\n return info[0]\r\n \r\n\r\ndef get_market_status():\r\n \r\n '''Returns the current state of the market - PRE, POST, OPEN, or CLOSED'''\r\n \r\n quote_data = get_quote_data(\"^dji\")\r\n\r\n return quote_data[\"marketState\"]\r\n\r\ndef get_premarket_price(ticker):\r\n\r\n '''Inputs: @ticker\r\n \r\n Returns the current pre-market price of the input ticker\r\n (returns value if pre-market price is available.'''\r\n \r\n quote_data = get_quote_data(ticker)\r\n \r\n if \"preMarketPrice\" in quote_data:\r\n return quote_data[\"preMarketPrice\"]\r\n \r\n raise AssertionError(\"Premarket price not currently available.\")\r\n\r\ndef get_postmarket_price(ticker):\r\n\r\n '''Inputs: @ticker\r\n \r\n Returns the current post-market price of the input ticker\r\n (returns value if pre-market price is available.'''\r\n \r\n quote_data = get_quote_data(ticker)\r\n \r\n if \"postMarketPrice\" in quote_data:\r\n return quote_data[\"postMarketPrice\"]\r\n \r\n raise AssertionError(\"Postmarket price not currently available.\")\r\n \r\n\r\n# Company Information Functions\r\ndef get_company_info(ticker):\r\n '''Scrape the company information for a ticker\r\n\r\n @param: ticker\r\n '''\r\n site = f\"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}\"\r\n json_info = _parse_json(site)\r\n json_info = json_info[\"assetProfile\"]\r\n info_frame = pd.DataFrame.from_dict(json_info,\r\n orient=\"index\",\r\n columns=[\"Value\"])\r\n info_frame = info_frame.drop(\"companyOfficers\", axis=\"index\")\r\n info_frame.index.name = \"Breakdown\"\r\n return info_frame\r\n\r\n\r\ndef get_company_officers(ticker):\r\n '''Scrape the company information and return a table of the officers\r\n\r\n @param: ticker\r\n '''\r\n site = f\"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}\"\r\n json_info = _parse_json(site)\r\n json_info = json_info[\"assetProfile\"][\"companyOfficers\"]\r\n info_frame = pd.DataFrame.from_dict(json_info)\r\n info_frame = info_frame.set_index(\"name\")\r\n return info_frame\r\n"
] | [
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.DateOffset",
"pandas.DataFrame",
"pandas.read_html",
"pandas.DataFrame.from_dict",
"pandas.Timestamp",
"pandas.Timestamp.today"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
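
The `apis` column of the row above enumerates the pandas entry points the embedded yahoo_fin-style scraper relies on (`pandas.read_html`, `pandas.to_datetime`, `pandas.Timestamp`, `pandas.DateOffset`, `pandas.DataFrame.from_dict`, ...). Below is a minimal illustrative sketch — added as an editorial example, not part of the dataset row — of how two of those calls are used in that code: `read_html` against the Wikipedia constituents table that the embedded `tickers_dow` targets (assumes network access), and `to_datetime(..., unit="s")` as used by the embedded dividend/split helpers to turn Yahoo's Unix-second keys into daily dates.

```python
# Illustrative sketch only; mirrors patterns in the embedded code above.
import pandas as pd


def dow_tickers():
    # pd.read_html parses every <table> on the page; attrs narrows it to the
    # table with id="constituents", as tickers_dow() does in the row above.
    url = "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
    table = pd.read_html(url, attrs={"id": "constituents"})[0]
    return sorted(table["Symbol"].tolist())


def to_daily_index(unix_seconds):
    # get_dividends()/get_splits() above convert Unix-second keys with
    # pd.to_datetime(..., unit="s") and then floor to the day; same step here.
    stamps = pd.to_datetime(pd.Series(unix_seconds), unit="s")
    return stamps.dt.floor("d")


if __name__ == "__main__":
    print(dow_tickers()[:5])
    print(to_daily_index([1609459200, 1612137600]).tolist())
```

The same pairing (scrape a table with `read_html`, re-index by a floored `DatetimeIndex`) is the core pattern repeated throughout the scraper whose source is embedded in this row.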
michaelosthege/aesara | [
"55c88832ba71f87c9612d573ede74a4c042ef570",
"55c88832ba71f87c9612d573ede74a4c042ef570",
"55c88832ba71f87c9612d573ede74a4c042ef570",
"55c88832ba71f87c9612d573ede74a4c042ef570",
"511c778f8a595444e009bcad738d552413b16f2c"
] | [
"theano/sparse/sandbox/sp2.py",
"theano/sparse/opt.py",
"theano/gpuarray/elemwise.py",
"theano/tensor/random/basic.py",
"theano/tensor/extra_ops.py"
] | [
"import numpy as np\nimport scipy.sparse\n\nimport theano\nfrom theano import gof, tensor\nfrom theano.gof.op import Op\nfrom theano.sparse.basic import (\n Remove0,\n SparseType,\n _is_sparse,\n as_sparse_variable,\n remove0,\n)\n\n# Also for compatibility\nfrom theano.tensor import discrete_dtypes, float_dtypes\n\n\n# Probability Ops are currently back in sandbox, because they do not respect\n# Theano's Op contract, as their behaviour is not reproducible: calling\n# the perform() method twice with the same argument will yield different\n# results.\n# from theano.sparse.basic import (\n# Multinomial, multinomial, Poisson, poisson,\n# Binomial, csr_fbinomial, csc_fbinomial, csr_dbinomial, csc_dbinomial)\n\n\n# Alias to maintain compatibility\nEliminateZeros = Remove0\neliminate_zeros = remove0\n\n\n# Probability\nclass Poisson(Op):\n \"\"\"Return a sparse having random values from a Poisson density\n with mean from the input.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. This is a violation of\n Theano's contract for Ops\n\n :param x: Sparse matrix.\n\n :return: A sparse matrix of random integers of a Poisson density\n with mean of `x` element wise.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x):\n x = as_sparse_variable(x)\n return gof.Apply(self, [x], [x.type()])\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (out,) = outputs\n assert _is_sparse(x)\n assert x.format in [\"csr\", \"csc\"]\n out[0] = x.copy()\n out[0].data = np.asarray(np.random.poisson(out[0].data), dtype=x.dtype)\n out[0].eliminate_zeros()\n\n def grad(self, inputs, outputs_gradients):\n comment = \"No gradient exists for class Poisson in\\\n theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(\n op=self, x_pos=0, x=inputs[0], comment=comment\n )\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return ins_shapes\n\n\npoisson = Poisson()\n\n\nclass Binomial(Op):\n \"\"\"Return a sparse matrix having random values from a binomial\n density having number of experiment `n` and probability of succes\n `p`.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. 
This is a violation of\n Theano's contract for Ops\n\n :param n: Tensor scalar representing the number of experiment.\n :param p: Tensor scalar representing the probability of success.\n :param shape: Tensor vector for the output shape.\n\n :return: A sparse matrix of integers representing the number\n of success.\n \"\"\"\n\n __props__ = (\"format\", \"dtype\")\n\n def __init__(self, format, dtype):\n self.format = format\n self.dtype = dtype\n\n def make_node(self, n, p, shape):\n n = tensor.as_tensor_variable(n)\n p = tensor.as_tensor_variable(p)\n shape = tensor.as_tensor_variable(shape)\n\n assert n.dtype in discrete_dtypes\n assert p.dtype in float_dtypes\n assert shape.dtype in discrete_dtypes\n\n return gof.Apply(\n self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format)()]\n )\n\n def perform(self, node, inputs, outputs):\n (n, p, shape) = inputs\n (out,) = outputs\n binomial = np.random.binomial(n, p, size=shape)\n csx_matrix = getattr(scipy.sparse, self.format + \"_matrix\")\n out[0] = csx_matrix(binomial, dtype=self.dtype)\n\n def connection_pattern(self, node):\n return [[True], [True], [False]]\n\n def grad(self, inputs, gout):\n (n, p, shape) = inputs\n (gz,) = gout\n comment_n = \"No gradient exists for the number of samples in class\\\n Binomial of theano/sparse/sandbox/sp2.py\"\n comment_p = \"No gradient exists for the prob of success in class\\\n Binomial of theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(op=self, x_pos=0, x=n, comment=comment_n),\n theano.gradient.grad_undefined(op=self, x_pos=1, x=p, comment=comment_p),\n theano.gradient.disconnected_type(),\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return [(node.inputs[2][0], node.inputs[2][1])]\n\n\ncsr_fbinomial = Binomial(\"csr\", \"float32\")\ncsc_fbinomial = Binomial(\"csc\", \"float32\")\ncsr_dbinomial = Binomial(\"csr\", \"float64\")\ncsc_dbinomial = Binomial(\"csc\", \"float64\")\n\n\nclass Multinomial(Op):\n \"\"\"Return a sparse matrix having random values from a multinomial\n density having number of experiment `n` and probability of succes\n `p`.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. This is a violation of\n Theano's contract for Ops\n\n :param n: Tensor type vector or scalar representing the number of\n experiment for each row. If `n` is a scalar, it will be\n used for each row.\n :param p: Sparse matrix of probability where each row is a probability\n vector representing the probability of succes. N.B. 
Each row\n must sum to one.\n\n :return: A sparse matrix of random integers from a multinomial density\n for each row.\n\n :note: It will works only if `p` have csr format.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, n, p):\n n = tensor.as_tensor_variable(n)\n p = as_sparse_variable(p)\n assert p.format in [\"csr\", \"csc\"]\n\n return gof.Apply(self, [n, p], [p.type()])\n\n def perform(self, node, inputs, outputs):\n (n, p) = inputs\n (out,) = outputs\n assert _is_sparse(p)\n\n if p.format != \"csr\":\n raise NotImplementedError\n\n out[0] = p.copy()\n\n if n.ndim == 0:\n for i in range(p.shape[0]):\n k, l = p.indptr[i], p.indptr[i + 1]\n out[0].data[k:l] = np.random.multinomial(n, p.data[k:l])\n elif n.ndim == 1:\n if n.shape[0] != p.shape[0]:\n raise ValueError(\n \"The number of element of n must be \"\n \"the same as the number of row of p.\"\n )\n for i in range(p.shape[0]):\n k, l = p.indptr[i], p.indptr[i + 1]\n out[0].data[k:l] = np.random.multinomial(n[i], p.data[k:l])\n\n def grad(self, inputs, outputs_gradients):\n comment_n = \"No gradient exists for the number of samples in class\\\n Multinomial of theano/sparse/sandbox/sp2.py\"\n comment_p = \"No gradient exists for the prob of success in class\\\n Multinomial of theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(\n op=self, x_pos=0, x=inputs[0], comment=comment_n\n ),\n theano.gradient.grad_undefined(\n op=self, x_pos=1, x=inputs[1], comment=comment_p\n ),\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return [ins_shapes[1]]\n\n\nmultinomial = Multinomial()\n",
"import numpy as np\nimport scipy\n\nimport theano\nfrom theano import gof, scalar, tensor\nfrom theano.configdefaults import config\nfrom theano.gof.op import COp\nfrom theano.misc.safe_asarray import _asarray\nfrom theano.sparse import basic as sparse\nfrom theano.sparse.basic import (\n CSC,\n CSR,\n csm_data,\n csm_grad,\n csm_indices,\n csm_indptr,\n csm_properties,\n usmm,\n)\nfrom theano.tensor import blas\nfrom theano.tensor.opt import register_canonicalize, register_specialize\n\n\n_is_sparse_variable = sparse._is_sparse_variable\n_is_dense = sparse._is_dense\n\n# This is tested in tests/test_opt.py:test_local_csm_properties_csm\n\n\[email protected]_optimizer([csm_properties])\ndef local_csm_properties_csm(fgraph, node):\n \"\"\"\n If we find csm_properties(CSM(*args)), then we can replace that with the\n *args directly.\n\n \"\"\"\n if node.op == csm_properties:\n (csm,) = node.inputs\n if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):\n # csm.owner.inputs could be broadcastable. In that case, we have\n # to adjust the broadcasting flag here.\n ret_var = [\n theano.tensor.patternbroadcast(i, o.broadcastable)\n for i, o in zip(csm.owner.inputs, node.outputs)\n ]\n return ret_var\n\n return False\n\n\nregister_specialize(local_csm_properties_csm)\n\n\n# This is tested in tests/test_basic.py:test_remove0\[email protected]_optimizer([sparse.Remove0])\ndef local_inplace_remove0(fgraph, node):\n \"\"\"\n Optimization to insert inplace versions of Remove0.\n\n \"\"\"\n # If inplace is not enabled, enable it and replace that op with a\n # new op which has inplace enabled\n if isinstance(node.op, sparse.Remove0) and not node.op.inplace:\n new_op = node.op.__class__(inplace=True)\n new_node = new_op(*node.inputs)\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_inplace_remove0\",\n gof.TopoOptimizer(\n local_inplace_remove0, failure_callback=gof.TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n)\n\n\nclass AddSD_ccode(COp):\n \"\"\"\n Add a sparse and a dense matrix.\n\n Parameters\n ----------\n x\n A sparse matrix.\n y\n A dense matrix\n\n Returns\n -------\n matrix\n `x`+`y`\n\n Notes\n -----\n The grad implemented is structured on `x`.\n\n \"\"\"\n\n __props__ = (\"format\", \"inplace\")\n\n def __init__(self, format, inplace=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Should we do inplace addition or not ?\n self.inplace = inplace\n self.format = format\n if self.inplace:\n self.destroy_map = {0: [3]}\n\n def __str__(self):\n inp = \"\"\n if self.inplace:\n inp = \",inplace\"\n return f\"{self.__class__.__name__}{{{self.format}{inp}}}\"\n\n def make_node(self, x, y):\n x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)\n out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)\n if self.inplace:\n assert out_dtype == y.dtype\n\n indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)\n # We either use CSC or CSR depending on the format of input\n assert self.format == x.type.format\n # The magic number two here arises because L{scipy.sparse}\n # objects must be matrices (have dimension 2)\n assert y.type.ndim == 2\n out = tensor.TensorType(dtype=out_dtype, broadcastable=y.type.broadcastable)()\n return gof.Apply(self, [data, indices, indptr, y], [out])\n\n def c_code(self, node, name, inputs, outputs, sub):\n (_data, _indices, _indptr, y) = inputs\n (z,) = outputs\n inplace = int(self.inplace)\n format = {\"csc\": 0, \"csr\": 1}[self.format]\n out_typenum = 
node.outputs[0].type.dtype_specs()[2]\n code = \"\"\"\n Py_XDECREF(%(z)s);\n if (!%(inplace)s){\n if(PyArray_TYPE(%(y)s) != %(out_typenum)s){\n %(z)s = (PyArrayObject *) PyArray_FromArray(%(y)s, PyArray_DescrFromType(%(out_typenum)s), 0);\n }else{\n %(z)s = (PyArrayObject *) PyArray_NewCopy(%(y)s, NPY_CORDER);\n }\n }else{\n %(z)s = %(y)s;\n Py_XINCREF(%(z)s);\n }\n\n npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_indptr)s* __restrict__ indptr = (dtype_%(_indptr)s*)PyArray_DATA(%(_indptr)s);\n const dtype_%(_indices)s* __restrict__ indices = (dtype_%(_indices)s*)PyArray_DATA(%(_indices)s);\n const dtype_%(_data)s* __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n\n dtype_%(y)s* ydata = (dtype_%(y)s*)PyArray_DATA(%(y)s);\n dtype_%(z)s* zdata = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n npy_intp Yi = PyArray_STRIDES(%(y)s)[0]/PyArray_DESCR(%(y)s)->elsize;\n npy_intp Yj = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;\n\n npy_intp pos;\n if (%(format)s == 0){\n for (npy_intp col = 0; col < N; ++col){\n for (dtype_%(_indptr)s ind = indptr[col]; ind < indptr[col+1]; ++ind){\n npy_intp row = indices[ind];\n pos = row * Yi + col * Yj;\n zdata[pos] = ydata[pos] + data[ind];\n }\n }\n }else{\n for (npy_intp row = 0; row < N; ++row){\n for (dtype_%(_indptr)s ind = indptr[row]; ind < indptr[row+1]; ++ind){\n npy_intp col = indices[ind];\n pos = row * Yi + col * Yj;\n zdata[pos] = ydata[pos] + data[ind];\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n return code\n\n def infer_shape(self, fgraph, node, shapes):\n return [shapes[3]]\n\n def c_code_cache_version(self):\n return (2,)\n\n\[email protected]_optimizer([sparse.AddSD])\ndef local_inplace_addsd_ccode(fgraph, node):\n \"\"\"\n Optimization to insert inplace versions of AddSD.\n\n \"\"\"\n if isinstance(node.op, sparse.AddSD) and config.cxx:\n out_dtype = scalar.upcast(*node.inputs)\n if out_dtype != node.inputs[1].dtype:\n return\n new_node = AddSD_ccode(format=node.inputs[0].type.format, inplace=True)(\n *node.inputs\n )\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_inplace_addsd_ccode\",\n gof.TopoOptimizer(\n local_inplace_addsd_ccode, failure_callback=gof.TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n)\n\n\n@register_canonicalize(\"fast_compile\")\n@register_specialize\[email protected]_optimizer([sparse.DenseFromSparse])\ndef local_dense_from_sparse_sparse_from_dense(fgraph, node):\n if isinstance(node.op, sparse.DenseFromSparse):\n inp = node.inputs[0]\n if inp.owner and isinstance(inp.owner.op, sparse.SparseFromDense):\n return inp.owner.inputs\n\n\[email protected]_optimizer([sparse.AddSD])\ndef local_addsd_ccode(fgraph, node):\n \"\"\"\n Convert AddSD to faster AddSD_ccode.\n\n \"\"\"\n if isinstance(node.op, sparse.AddSD) and config.cxx:\n new_node = AddSD_ccode(format=node.inputs[0].type.format)(*node.inputs)\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_addsd_ccode\",\n gof.TopoOptimizer(local_addsd_ccode),\n # Must be after local_inplace_addsd_ccode at 60\n 61,\n \"fast_run\",\n)\n\n\nclass StructuredDotCSC(COp):\n \"\"\"\n Structured Dot CSC is like dot, except that only the gradient wrt non-zero\n elements of the sparse matrix `a` are calculated and propagated.\n\n The output is presumed to be a dense matrix, and is represented by a\n TensorType instance.\n\n Parameters\n ----------\n a\n A sparse matrix in csc format.\n b\n A sparse or dense matrix.\n\n Returns\n -------\n The dot product of `a` 
and `b`.\n\n Notes\n -----\n The grad implemented is structured.\n This op is used as an optimization for StructuredDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):\n dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)\n r = gof.Apply(\n self,\n [a_val, a_ind, a_ptr, a_nrows, b],\n [tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))],\n )\n return r\n\n def perform(self, node, inputs, outputs):\n (a_val, a_ind, a_ptr, a_nrows, b) = inputs\n (out,) = outputs\n a = scipy.sparse.csc_matrix(\n (a_val, a_ind, a_ptr), (a_nrows, b.shape[0]), copy=False\n )\n # out[0] = a.dot(b)\n out[0] = _asarray(a * b, dtype=node.outputs[0].type.dtype)\n assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense\n\n def c_code(self, node, name, inputs, outputs, sub):\n # C-implementation of the dot product of the sparse matrix A and matrix\n # B.\n # @param a_val: non-zero values of the sparse matrix\n # @param a_ind: column indices of the non-null values (.indices of a\n # scipy.csc_matrix)\n # @param a_ptr: a_ptr indicates col indices for col. i are in the range\n # a_ptr[i]:a_ptr[i+1]\n # @param n_rows: number of rows of sparse matrix\n # @param b: dense matrix to perform dot product with, as in dot(a, b)\n # @param z: return value\n # @param sub: TODO, not too sure, something to do with weave probably\n\n (a_val, a_ind, a_ptr, a_nrows, b) = inputs\n (z,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[4].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n typenum_z = node.outputs[0].type.dtype_specs()[2] # retrieve dtype number\n typenum_a_val = node.inputs[0].type.dtype_specs()[2] # retrieve dtype number\n typenum_b = node.inputs[4].type.dtype_specs()[2] # retrieve dtype number\n\n rval = \"\"\"\n\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, \"rank(nrows) != 0\"); %(fail)s;}\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_val)s) != %(typenum_a_val)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for a_val\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b)s) != %(typenum_b)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for b\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_nrows)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_nrows dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b)s)[0]+1)\n {PyErr_SetString(PyExc_NotImplementedError, \"a's number of columns doesn't match b's rows\"); %(fail)s;}\n\n if 
((!%(z)s)\n || (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0])\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])\n )\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0];\n dims[1] = PyArray_DIMS(%(b)s)[1];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(z)s)[0];\n npy_intp N = PyArray_DIMS(%(z)s)[1];\n npy_intp K = PyArray_DIMS(%(b)s)[0];\n if (N > 0x7fffffffL)\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big (overflows int32 index)\"); %(fail)s;}\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n //npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n\n //npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n //clear the output array\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\n\n //iterate over the sparse array, making the most of an entry wherever we find it.\n //\n // Normal matrix matrix multiply: A MxK, B KxN => Z = AB\n // for m\n // for n\n // for k\n // z[m, n] += a[m, k] * b[k, n]\n // Here instead: Z =\n // for k\n // for m (sparse)\n // for n\n // z[m, n] += a[m, k] * b[k, n]\n\n // loop over inner dimension\n for (npy_int32 k = 0; k < K; ++k)\n {\n // get pointer to k-th row of dense matrix\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\n\n // loop over sparse column indices through index pointer array\n // (amounts to looping over rows M of sparse matrix)\n\n for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1) * Sptr]; ++m_idx)\n {\n npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K\n const dtype_%(a_val)s Amk = Dval[m_idx * Sval]; // actual value at that location\n\n // pointer to m-th row of the output matrix Z\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\n\n //RESOLVE: a.shape[0] equals z.shape[0], why is this not an equality constraint?\n if (m >= PyArray_DIMS(%(z)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"illegal row index in a\"); %(fail)s;}\n\n // loop over final dimension (cols of dense matrix) and perform dot product\n if ((Szn == 1) && (Sbn == 1)) {\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n] += Amk * bk[n];\n }\n }\n else\n {\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n*Szn] += Amk * bk[n*Sbn];\n }\n }\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n def c_code_cache_version(self):\n return (3,)\n\n\nsd_csc = StructuredDotCSC()\n\n\nclass StructuredDotCSR(COp):\n \"\"\"\n Structured 
Dot CSR is like dot, except that only the\n gradient wrt non-zero elements of the sparse matrix\n `a` are calculated and propagated.\n\n The output is presumed to be a dense matrix, and is represented by a\n TensorType instance.\n\n Parameters\n ----------\n a\n A sparse matrix in csr format.\n b\n A sparse or dense matrix.\n\n Returns\n -------\n matrix\n The dot product of `a` and `b`.\n\n Notes\n -----\n The grad implemented is structured.\n This op is used as an optimization for StructuredDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, b):\n self.dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)\n r = gof.Apply(\n self,\n [a_val, a_ind, a_ptr, b],\n [tensor.tensor(self.dtype_out, (False, b.type.broadcastable[1]))],\n )\n return r\n\n def perform(self, node, inputs, outputs):\n (a_val, a_ind, a_ptr, b) = inputs\n (out,) = outputs\n a = scipy.sparse.csr_matrix(\n (a_val, a_ind, a_ptr), (len(a_ptr) - 1, b.shape[0]), copy=True\n ) # use view_map before setting this to False\n # out[0] = a.dot(b)\n out[0] = a * b\n # scipy 0.7 automatically converts to dense, but not .6 sometimes\n assert _is_dense(out[0])\n\n def c_code(self, node, name, inputs, outputs, sub):\n \"\"\"\n C-implementation of the dot product of the sparse matrix A and matrix B.\n\n Parameters\n ----------\n a_val\n Non-zero values of the sparse matrix.\n a_ind\n Column indices of the non-null values (.indices of a\n scipy.csc_matrix).\n a_ptr\n Indicates col indices for col. i are in the range\n a_ptr[i]:a_ptr[i+1].\n n_cols\n Number of columns of sparse matrix.\n b\n Dense matrix to perform dot product with, as in dot(a, b).\n z\n Return value.\n sub\n TODO, not too sure, something to do with weave probably.\n\n \"\"\"\n (a_val, a_ind, a_ptr, b) = inputs\n (z,) = outputs\n typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if ((!%(z)s)\n || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_ptr)s)[0]-1) //a's rows\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1]) //b's columns\n )\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = PyArray_DIMS(%(a_ptr)s)[0]-1;\n dims[1] = PyArray_DIMS(%(b)s)[1];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(z)s)[0];\n npy_intp N = 
PyArray_DIMS(%(z)s)[1];\n npy_intp K = PyArray_DIMS(%(b)s)[0];\n if (N > 0x7fffffffL)\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big (overflows int32 index)\"); %(fail)s;}\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n\n //npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n //clear the output array\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\n\n //iterate over the sparse array, making the most of an entry wherever we find it.\n // Normal matrix matrix multiply:\n // for m\n // for n\n // for k\n // z[m, n] += a[m, k] * b[k, n]\n // Here instead:\n // for m\n // for k (sparse)\n // for n\n // z[m, n] += a[m, k] * b[k, n]\n\n // loop over inner dimension\n for (npy_int64 m = 0; m < M; ++m)\n {\n // pointer to m-th row of the output matrix Z\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\n\n // loop over sparse rows indices through index pointer array\n // (amounts to looping over cols k of sparse matrix)\n for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)\n {\n npy_int32 k = Dind[k_idx * Sind]; // col index of non-null value for row m\n const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location\n\n // get pointer to k-th row of dense matrix\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\n\n // loop over final dimension (cols of dense matrix) and perform dot product\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n*Szn] += Amk * bk[n*Sbn];\n }\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def c_code_cache_version(self):\n return (2,)\n\n\nsd_csr = StructuredDotCSR()\n\n\n# register a specialization to replace StructuredDot -> StructuredDotCSx\n# This is tested in tests/test_basic.py:792\[email protected]_optimizer([sparse._structured_dot])\ndef local_structured_dot(fgraph, node):\n if node.op == sparse._structured_dot:\n a, b = node.inputs\n if a.type.format == \"csc\":\n a_val, a_ind, a_ptr, a_shape = csm_properties(a)\n a_nsparse = a_shape[0]\n return [sd_csc(a_val, a_ind, a_ptr, a_nsparse, b)]\n if a.type.format == \"csr\":\n a_val, a_ind, a_ptr, a_shape = csm_properties(a)\n return [sd_csr(a_val, a_ind, a_ptr, b)]\n return False\n\n\n# Commented out because\n# a) it is only slightly faster than scipy these days, and sometimes a little\n# slower, and\n# b) the resulting graphs make it very difficult for an op to do size checking\n# on the matrices involved. 
dimension mismatches are hard to detect sensibly.\n# register_specialize(local_structured_dot)\n\n\nclass UsmmCscDense(COp):\n \"\"\"\n Performs the expression is `alpha` * `x` `y` + `z`.\n\n Parameters\n ----------\n x\n Matrix variable.\n y\n Matrix variable.\n z\n Dense matrix.\n alpha\n A tensor scalar.\n\n Returns\n -------\n The dense matrix resulting from `alpha` * `x` `y` + `z`.\n\n Notes\n -----\n The grad is not implemented for this op.\n Optimized version os Usmm when `x` is in csc format and `y` is dense.\n \"\"\"\n\n __props__ = (\"inplace\",)\n\n def __init__(self, inplace):\n self.inplace = inplace\n if inplace:\n self.destroy_map = {0: [6]}\n\n def __str__(self):\n if self.inplace:\n return \"UsmmCscDense{inplace}\"\n else:\n return \"UsmmCscDense{no_inplace}\"\n\n def make_node(self, alpha, x_val, x_ind, x_ptr, x_nrows, y, z):\n alpha = tensor.as_tensor_variable(alpha)\n x_val = tensor.as_tensor_variable(x_val)\n x_ind = tensor.as_tensor_variable(x_ind)\n x_ptr = tensor.as_tensor_variable(x_ptr)\n x_nrows = tensor.as_tensor_variable(x_nrows)\n y = tensor.as_tensor_variable(y)\n z = tensor.as_tensor_variable(z)\n assert x_ind.dtype == \"int32\"\n assert x_ptr.dtype == \"int32\"\n assert x_nrows.dtype == \"int32\"\n assert alpha.ndim == 2 and alpha.type.broadcastable == (True, True)\n assert x_val.ndim == 1\n assert y.ndim == 2\n assert z.ndim == 2\n\n dtype_out = scalar.upcast(\n alpha.type.dtype, x_val.type.dtype, y.type.dtype, z.type.dtype\n )\n\n if dtype_out not in (\"float32\", \"float64\"):\n raise NotImplementedError(\"only float types are supported in \" \"operands\")\n\n if self.inplace:\n assert z.type.dtype == dtype_out\n\n # axpy work only with the same dtype, so we should upcast the input\n if dtype_out != alpha.type.dtype:\n alpha = tensor.cast(alpha, dtype_out)\n if dtype_out != x_val.type.dtype:\n x_val = tensor.cast(x_val, dtype_out)\n if dtype_out != y.type.dtype:\n y = tensor.cast(y, dtype_out)\n if dtype_out != z.type.dtype:\n z = tensor.cast(z, dtype_out)\n\n r = gof.Apply(\n self,\n [alpha, x_val, x_ind, x_ptr, x_nrows, y, z],\n [tensor.tensor(dtype_out, (False, y.type.broadcastable[1]))],\n )\n return r\n\n def c_support_code(self):\n return blas.blas_header_text()\n\n def c_libraries(self):\n return blas.ldflags()\n\n def c_compile_args(self):\n return blas.ldflags(libs=False, flags=True)\n\n def c_lib_dirs(self):\n return blas.ldflags(libs=False, libs_dir=True)\n\n def c_header_dirs(self):\n return blas.ldflags(libs=False, include_dir=True)\n\n def c_code(self, node, name, inputs, outputs, sub):\n alpha, x_val, x_ind, x_ptr, x_nrows, y, z = inputs\n zn = outputs[0]\n if node.inputs[1].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for \" \"x_val\")\n if node.inputs[5].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for y\")\n if node.inputs[6].type.dtype != node.outputs[0].type.dtype:\n raise NotImplementedError(\"z and output must have same type\")\n\n if node.inputs[1].type.dtype == \"float32\":\n conv_type = \"float\"\n axpy = \"saxpy_\"\n else:\n conv_type = \"double\"\n axpy = \"daxpy_\"\n # retrieve dtype numbers\n typenum_alpha = node.inputs[0].type.dtype_specs()[2]\n typenum_x_val = node.inputs[1].type.dtype_specs()[2]\n typenum_y = node.inputs[5].type.dtype_specs()[2]\n typenum_z = node.inputs[6].type.dtype_specs()[2]\n typenum_zn = node.outputs[0].type.dtype_specs()[2]\n\n inplace = int(self.inplace)\n\n rval = 
\"\"\"\n\n if (PyArray_NDIM(%(x_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, \"rank(nrows) != 0\"); %(fail)s;}\n if (PyArray_NDIM(%(y)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(y) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_val)s) != %(typenum_x_val)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for x_val\"); %(fail)s;}\n\n if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for y\"); %(fail)s;}\n\n if (PyArray_TYPE(%(z)s) != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for z\"); %(fail)s;}\n\n if (PyArray_TYPE(%(alpha)s) != %(typenum_alpha)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for alpha\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"x_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"x_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_nrows)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"x_nrows dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_val)s)[0] != PyArray_DIMS(%(x_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"x_val and x_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_ptr)s)[0] != PyArray_DIMS(%(y)s)[0]+1)\n {PyErr_SetString(PyExc_NotImplementedError, \"x's number of columns doesn't match y's rows\"); %(fail)s;}\n\n if (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0] || PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(y)s)[1])\n {PyErr_SetString(PyExc_NotImplementedError, \"The dimension of the allocated output doesn't match the correct output size.\"); %(fail)s;}\n\n if (PyArray_SIZE(%(alpha)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number of element in alpha must be 1\"); %(fail)s;}\n\n if (PyArray_NDIM(%(alpha)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of alpha must be 2\"); %(fail)s;}\n\n if (PyArray_NDIM(%(x_val)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of x_val must be 1\"); %(fail)s;}\n\n if (PyArray_NDIM(%(y)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of y must be 2\"); %(fail)s;}\n\n if (PyArray_NDIM(%(z)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of z must be 2\"); %(fail)s;}\n\n if (%(inplace)s)\n {\n if (%(typenum_zn)s != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"When inplace the output dtype must be the same as the input\"); %(fail)s;}\n\n Py_XDECREF(%(zn)s);\n %(zn)s = %(z)s;\n Py_INCREF(%(zn)s);\n }\n else if (!%(zn)s\n || (PyArray_DIMS(%(zn)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0])\n || (PyArray_DIMS(%(zn)s)[1] != PyArray_DIMS(%(y)s)[1])\n )\n {\n {Py_XDECREF(%(zn)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0];\n dims[1] = PyArray_DIMS(%(y)s)[1];\n %(zn)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_zn)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(zn)s)[0];\n 
npy_intp N = PyArray_DIMS(%(zn)s)[1];\n npy_intp K = PyArray_DIMS(%(y)s)[0];\n\n // pointers to access actual data in the arrays passed as params.\n const dtype_%(x_val)s* __restrict__ Dval = (dtype_%(x_val)s*)PyArray_DATA(%(x_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(x_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(x_ptr)s);\n const dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];\n\n npy_intp Sz = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(zn)s)[1] / PyArray_DESCR(%(zn)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(x_val)s)[0] / PyArray_DESCR(%(x_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(x_ind)s)[0] / PyArray_DESCR(%(x_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(x_ptr)s)[0] / PyArray_DESCR(%(x_ptr)s)->elsize;\n npy_intp Sy = PyArray_STRIDES(%(y)s)[1] / PyArray_DESCR(%(y)s)->elsize;\n\n // blas expects ints; convert here (rather than just making N etc ints) to avoid potential overflow in the negative-stride correction\n if ((N > 0x7fffffffL)||(Sy > 0x7fffffffL)||(Szn > 0x7fffffffL)||(Sy < -0x7fffffffL)||(Szn < -0x7fffffffL))\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big for BLAS (overflows int32 index)\"); %(fail)s;}\n int N32 = N;\n int Sy32 = Sy;\n int Szn32 = Szn;\n\n if (!(%(inplace)s))\n {\n if (PyArray_CopyInto(%(zn)s, %(z)s))\n {\n Py_XDECREF(%(zn)s);\n %(fail)s;\n }\n }\n\n for (npy_intp k = 0; k < K; ++k)\n {\n for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1)*Sptr]; ++m_idx)\n {\n const npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K\n\n const dtype_%(x_val)s Amk = alpha * Dval[m_idx * Sval]; // actual value at that location\n\n dtype_%(y)s* y_row = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * k);\n // axpy expects pointer to the beginning of memory arrays,\n // so when the stride is negative, we need to get the\n // last element\n if (Sy < 0)\n y_row += (K - 1) * Sy;\n\n dtype_%(zn)s* z_row = (dtype_%(zn)s*)(PyArray_BYTES(%(zn)s) + PyArray_STRIDES(%(zn)s)[0] * m);\n if (Szn < 0)\n z_row += (N - 1) * Szn;\n\n %(axpy)s(&N32, (%(conv_type)s*)&Amk, (%(conv_type)s*)y_row, &Sy32, (%(conv_type)s*)z_row, &Szn32);\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n def c_code_cache_version(self):\n return (3, blas.blas_header_version())\n\n\nusmm_csc_dense = UsmmCscDense(inplace=False)\nusmm_csc_dense_inplace = UsmmCscDense(inplace=True)\n\n\n# This is tested in tests/test_basic.py:UsmmTests\nlocal_usmm = gof.opt.PatternSub(\n (\n theano.tensor.sub,\n \"z\",\n (\n theano.tensor.mul,\n {\n \"pattern\": \"alpha\",\n \"constraint\": lambda expr: (\n np.all(expr.type.broadcastable) and config.blas__ldflags\n ),\n },\n (sparse._dot, \"x\", \"y\"),\n ),\n ),\n (usmm, (theano.tensor.neg, \"alpha\"), \"x\", \"y\", \"z\"),\n)\nregister_specialize(local_usmm, name=\"local_usmm\")\n\n\n# register a specialization to replace usmm_csc_dense -> usmm_csc_dense_inplace\n# This is tested in tests/test_basic.py:UsmmTests\[email protected]_optimizer([usmm_csc_dense])\ndef local_usmm_csc_dense_inplace(fgraph, node):\n if node.op == usmm_csc_dense:\n return [usmm_csc_dense_inplace(*node.inputs)]\n\n\nregister_specialize(local_usmm_csc_dense_inplace, \"cxx_only\", \"inplace\")\n\n\n# This is tested in tests/test_basic.py:UsmmTests\[email protected]_optimizer([usmm])\ndef local_usmm_csx(fgraph, node):\n \"\"\"\n usmm -> usmm_csc_dense\n\n \"\"\"\n if node.op == 
usmm:\n alpha, x, y, z = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n y_is_sparse_variable = _is_sparse_variable(y)\n\n if x_is_sparse_variable and not y_is_sparse_variable:\n if x.type.format == \"csc\":\n x_val, x_ind, x_ptr, x_shape = csm_properties(x)\n x_nsparse = x_shape[0]\n dtype_out = scalar.upcast(\n alpha.type.dtype, x.type.dtype, y.type.dtype, z.type.dtype\n )\n if dtype_out not in (\"float32\", \"float64\"):\n return False\n # Sparse cast is not implemented.\n if y.type.dtype != dtype_out:\n return False\n\n return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr, x_nsparse, y, z)]\n return False\n\n\nregister_specialize(local_usmm_csx, \"cxx_only\")\n\n\nclass CSMGradC(COp):\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim):\n return gof.Apply(\n self,\n [a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim],\n [b_val.type()],\n )\n\n def c_code(self, node, name, inputs, outputs, sub):\n # retrieve dtype number\n (a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim) = inputs\n (z,) = outputs\n typenum_z = node.outputs[0].type.dtype_specs()[2]\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b_val\")\n\n return \"\"\"\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_ptr) != 1\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"b_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"b_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(b_val)s)[0] != PyArray_DIMS(%(b_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"b_val and b_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b_ptr)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr and b_ptr have different lengths\"); %(fail)s;}\n\n if ((!%(z)s) || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_val)s)[0]))\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(a_val)s)[0];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(a_ptr)s)[0] - 1;\n npy_intp a_dim_0 = ((npy_int32 
*)PyArray_DATA(%(a_dim)s))[0];\n npy_intp a_dim_1 = ((npy_int32 *)PyArray_DATA(%(a_dim)s))[1];\n\n npy_intp sp_dim = (M == a_dim_0)?a_dim_1:a_dim_0;\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Sa_val = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sa_ind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sa_ptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n npy_intp Sb_val = PyArray_STRIDES(%(b_val)s)[0] / PyArray_DESCR(%(b_val)s)->elsize;\n npy_intp Sb_ind = PyArray_STRIDES(%(b_ind)s)[0] / PyArray_DESCR(%(b_ind)s)->elsize;\n npy_intp Sb_ptr = PyArray_STRIDES(%(b_ptr)s)[0] / PyArray_DESCR(%(b_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Da_val = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ Da_ind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Da_ptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n const dtype_%(b_val)s* __restrict__ Db_val = (dtype_%(b_val)s*)PyArray_DATA(%(b_val)s);\n const npy_int32 * __restrict__ Db_ind = (npy_int32*)PyArray_DATA(%(b_ind)s);\n const npy_int32 * __restrict__ Db_ptr = (npy_int32*)PyArray_DATA(%(b_ptr)s);\n\n npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n dtype_%(b_val)s b_row[sp_dim];\n\n //clear the output array\n for (npy_int64 i = 0; i < nnz; ++i)\n {\n Dz[i*Sz] = 0;\n }\n memset(b_row, 0, sp_dim*sizeof(dtype_%(b_val)s));\n\n // loop over inner dimension\n for (npy_int64 m = 0; m < M; ++m)\n {\n for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];\n j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {\n b_row[Db_ind[j_ptr * Sb_ind]] += Db_val[j_ptr*Sb_val];\n }\n\n for (npy_int32 j_ptr = Da_ptr[m * Sa_ptr];\n j_ptr < Da_ptr[(m + 1) * Sa_ptr]; j_ptr++) {\n Dz[j_ptr*Sz] = b_row[Da_ind[j_ptr * Sa_ind]];\n }\n\n for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];\n j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {\n b_row[Db_ind[j_ptr * Sb_ind]] = 0;\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n\ncsm_grad_c = CSMGradC()\n\n\n# register a specialization to replace csm_grad -> csm_grad_c\n# This is tested in tests/test_opt.py:test_local_csm_grad_c\[email protected]_optimizer([csm_grad(None)])\ndef local_csm_grad_c(fgraph, node):\n \"\"\"\n csm_grad(None) -> csm_grad_c\n\n \"\"\"\n if node.op == csm_grad(None):\n return [csm_grad_c(*node.inputs)]\n return False\n\n\n# DISABLED AS IT IS BROKEN FOR UNSORTED INDICES!\n# register_specialize(local_csm_grad_c, 'cxx_only')\n\n\nclass MulSDCSC(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The multiplication of the two matrices element-wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties of a sparse\n matrix in csc format.\n\n The dtype of `a_data`, i.e. 
the dtype of the sparse matrix, cannot be a\n complex type.\n\n This op is used as an optimization of mul_s_d.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 2\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n # def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):\n # return NotImplementedError()\n\n def c_code(self, node, name, inputs, outputs, sub):\n\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 2) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\");\n %(fail)s;}\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;}\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s ||\n (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||\n !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // extract i-th row of dense matrix\n const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * i);\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] * b_row[j];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_d_csc = MulSDCSC()\n\n\nclass MulSDCSR(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The 
multiplication of the two matrix element wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties\n of a sparse matrix in csr format.\n\n The dtype of `a_data`, i.e. the dtype of the sparse matrix,\n cannot be a complex type.\n\n This op is used as an optimization of mul_s_d.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 2\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n # def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):\n # return NotImplemented()\n\n def c_code(self, node, name, inputs, outputs, sub):\n\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 2) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\");\n %(fail)s;}\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;}\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s ||\n (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||\n !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // extract i-th row of dense matrix\n const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * j);\n\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] * b_row[i];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_d_csr = MulSDCSR()\n\n\n# register a specialization to replace MulSD -> MulSDCSX\[email protected]_optimizer([sparse.mul_s_d])\ndef 
local_mul_s_d(fgraph, node):\n if node.op == sparse.mul_s_d:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 2:\n return False\n if svar.type.format == \"csc\":\n CSx = sparse.CSC\n mul_s_d_csx = mul_s_d_csc\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n mul_s_d_csx = mul_s_d_csr\n else:\n raise NotImplementedError\n if x.dtype != y.dtype:\n # mul_s_d_csx don't support that case\n return\n\n c_data = mul_s_d_csx(\n sparse.csm_data(svar),\n sparse.csm_indices(svar),\n sparse.csm_indptr(svar),\n dvar,\n )\n\n return [\n CSx(\n c_data,\n sparse.csm_indices(svar),\n sparse.csm_indptr(svar),\n sparse.csm_shape(svar),\n )\n ]\n\n return False\n\n\nregister_specialize(local_mul_s_d, \"cxx_only\")\n\n\nclass MulSVCSR(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The multiplication of the two matrix element wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties\n of a sparse matrix in csr format.\n\n The dtype of `a_data`, i.e. the dtype of the sparse matrix,\n cannot be a complex type.\n\n This op is used as an optimization of MulSV.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 1\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;\n }\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s\n || PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]\n || !PyArray_ISCONTIGUOUS(%(_zout)s))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ 
indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;\n\n // loop over rows\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n zout[i_idx] = data[i_idx] * Db[i * Sb];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_v_csr = MulSVCSR()\n\n\n# register a specialization to replace MulSV -> MulSVCSR\[email protected]_optimizer([sparse.mul_s_v])\ndef local_mul_s_v(fgraph, node):\n if node.op == sparse.mul_s_v:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 1:\n return False\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n mul_s_v_csx = mul_s_v_csr\n else:\n return False\n\n s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)\n\n c_data = mul_s_v_csx(s_val, s_ind, s_ptr, dvar)\n\n return [CSx(c_data, s_ind, s_ptr, s_shape)]\n\n return False\n\n\nregister_specialize(local_mul_s_v, \"cxx_only\")\n\n\nclass StructuredAddSVCSR(COp):\n \"\"\"\n Structured addition of a sparse matrix and a dense vector.\n The elements of the vector are are only added to the corresponding\n non-zero elements. Therefore, this operation outputs another sparse\n matrix.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type vector.\n\n Returns\n -------\n A sparse matrix containing the addition of the vector to the data of the\n sparse matrix.\n\n Notes\n -----\n The a_* are the properties of a sparse matrix in csr format.\n\n This op is used as an optimization for StructuredAddSV.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n b = tensor.as_tensor_variable(b)\n a_data = tensor.as_tensor_variable(a_data)\n a_indices = tensor.as_tensor_variable(a_indices)\n a_indptr = tensor.as_tensor_variable(a_indptr)\n assert a_data.type.ndim == 1\n assert a_indices.type.ndim == 1\n assert a_indptr.type.ndim == 1\n assert b.type.ndim == 1\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 
1\");\n %(fail)s;\n }\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s\n || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0])\n || !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] + Db[i * Sb];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nstructured_add_s_v_csr = StructuredAddSVCSR()\n\n\n# register a specialization to replace\n# structured_add_s_v -> structured_add_s_v_csr\[email protected]_optimizer([sparse.structured_add_s_v])\ndef local_structured_add_s_v(fgraph, node):\n if node.op == sparse.structured_add_s_v:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n # y_is_sparse_variable = _is_sparse_variable(y)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 1:\n return False\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n structured_add_s_v_csx = structured_add_s_v_csr\n else:\n return False\n\n s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)\n\n c_data = structured_add_s_v_csx(s_val, s_ind, s_ptr, dvar)\n\n return [CSx(c_data, s_ind, s_ptr, s_shape)]\n\n return False\n\n\nregister_specialize(local_structured_add_s_v, \"cxx_only\")\n\n\nclass SamplingDotCSR(COp):\n \"\"\"\n Operand optimized for calculating the dot product dot(`x`, `y`.T) = `z`\n when you only want to calculate a subset of `z`.\n\n It is equivalent to `p` o (`x` . `y`.T) where o is the element-wise\n product, `x` and `y` operands of the dot product and `p` is a matrix\n that contains 1 when the corresponding element of `z` should be\n calculated and 0 when it shouldn't. 
Note that SamplingDot has a different\n interface than `dot` because SamplingDot requires `x` to be a `m`x`k`\n matrix while `y` is a `n`x`k` matrix instead of the usual `k`x`n` matrix.\n\n Parameters\n ----------\n x\n Tensor matrix.\n y\n Tensor matrix.\n p_data\n Sparse matrix data.\n p_ind\n Sparse matrix indices.\n p_ptr\n Sparse matric indptr.\n p_ncols\n Sparse matrix number of columns.\n\n Returns\n -------\n A dense matrix containing the dot product of `x` by `y`.T only\n where `p` is 1.\n\n Notes\n -----\n It will work if the pattern is not binary value, but if the\n pattern doesn't have a high sparsity proportion it will be slower\n then a more optimized dot followed by a normal elemwise\n multiplication.\n\n If we have the input of mixed dtype, we insert cast elemwise\n in the graph to be able to call blas function as they don't\n allow mixed dtype.\n\n This op is used as an optimization for SamplingDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x, y, p_data, p_ind, p_ptr, p_ncols):\n x = tensor.as_tensor_variable(x)\n y = tensor.as_tensor_variable(y)\n p_data = tensor.as_tensor_variable(p_data)\n p_ind = tensor.as_tensor_variable(p_ind)\n p_ptr = tensor.as_tensor_variable(p_ptr)\n p_ncols = tensor.as_tensor_variable(p_ncols)\n\n assert p_ncols.dtype == \"int32\"\n\n dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p_data.type.dtype)\n dot_out = scalar.upcast(x.type.dtype, y.type.dtype)\n\n # We call blas ?dot function that take only param of the same type\n x = tensor.cast(x, dot_out)\n y = tensor.cast(y, dot_out)\n\n return gof.Apply(\n self,\n [x, y, p_data, p_ind, p_ptr, p_ncols],\n [\n tensor.tensor(dtype=dtype_out, broadcastable=(False,)),\n tensor.tensor(dtype=p_ind.type.dtype, broadcastable=(False,)),\n tensor.tensor(dtype=p_ptr.type.dtype, broadcastable=(False,)),\n ],\n )\n\n def c_code_cache_version(self):\n return (4, blas.blas_header_version())\n\n def c_support_code(self):\n return blas.blas_header_text()\n\n def c_libraries(self):\n return blas.ldflags()\n\n def c_compile_args(self):\n return blas.ldflags(libs=False, flags=True)\n\n def c_lib_dirs(self):\n return blas.ldflags(libs=False, libs_dir=True)\n\n def c_header_dirs(self):\n return blas.ldflags(libs=False, include_dir=True)\n\n def c_code(self, node, name, inputs, outputs, sub):\n x, y, p_data, p_ind, p_ptr, p_ncols = inputs\n z_data, z_ind, z_ptr = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for x\")\n if node.inputs[1].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for y\")\n if node.inputs[2].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for pattern\")\n\n dot_out = scalar.upcast(node.inputs[0].type.dtype, node.inputs[1].type.dtype)\n\n if dot_out == \"float32\":\n conv_type = \"float\"\n cdot = \"sdot_\"\n else:\n conv_type = \"double\"\n cdot = \"ddot_\"\n\n # retrieve dtype number\n typenum_x = node.inputs[0].type.dtype_specs()[2]\n typenum_y = node.inputs[1].type.dtype_specs()[2]\n typenum_p = node.inputs[2].type.dtype_specs()[2]\n typenum_zd = tensor.TensorType(node.outputs[0].dtype, []).dtype_specs()[2]\n typenum_zi = tensor.TensorType(node.outputs[1].dtype, []).dtype_specs()[2]\n typenum_zp = tensor.TensorType(node.outputs[2].dtype, []).dtype_specs()[2]\n\n rval = \"\"\"\n if (PyArray_NDIM(%(x)s) != 2) {\nPyErr_SetString(PyExc_NotImplementedError, \"rank(x) != 
2\"); %(fail)s;}\n if (PyArray_NDIM(%(y)s) != 2) {\nPyErr_SetString(PyExc_NotImplementedError, \"rank(y) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x)s) != %(typenum_x)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for x\");\n %(fail)s;}\n\n if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for y\");\n %(fail)s;}\n\n if (PyArray_TYPE(%(p_data)s) != %(typenum_p)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for pattern\");\n %(fail)s;}\n\n if (PyArray_DIMS(%(x)s)[1] != PyArray_DIMS(%(y)s)[1]) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"x's number of columns doesn't match y's rows! Note: sampling_dot is different from dot because y is assumed to be transposed.\");\n %(fail)s;}\n\n if (PyArray_DIMS(%(y)s)[0] != ((npy_int32 *)PyArray_DATA(%(p_ncols)s))[0] ||\n PyArray_DIMS(%(x)s)[0] != (PyArray_DIMS(%(p_ptr)s)[0] - 1))\n {PyErr_SetString(PyExc_NotImplementedError,\n \"The dimension of the pattern and the output must match\"); %(fail)s;}\n\n // Allocate output\n if (!%(z_data)s\n || (PyArray_DIMS(%(z_data)s)[0] != PyArray_DIMS(%(p_data)s)[0])\n || (PyArray_TYPE(%(z_data)s) != %(typenum_zd)s)\n || !(PyArray_ISCONTIGUOUS(%(z_data)s)))\n {\n {Py_XDECREF(%(z_data)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_data)s)[0];\n %(z_data)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zd)s);\n }\n if (!%(z_ind)s\n || (PyArray_DIMS(%(z_ind)s)[0] != PyArray_DIMS(%(p_ind)s)[0])\n || (PyArray_TYPE(%(z_ind)s) != %(typenum_zi)s)\n || !(PyArray_ISCONTIGUOUS(%(z_ind)s)))\n {\n {Py_XDECREF(%(z_ind)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_ind)s)[0];\n %(z_ind)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zi)s);\n }\n if (!%(z_ptr)s\n || (PyArray_DIMS(%(z_ptr)s)[0] != PyArray_DIMS(%(p_ptr)s)[0])\n || (PyArray_TYPE(%(z_ptr)s) != %(typenum_zp)s)\n || !(PyArray_ISCONTIGUOUS(%(z_ptr)s)))\n {\n {Py_XDECREF(%(z_ptr)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_ptr)s)[0];\n %(z_ptr)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zp)s);\n }\n\n {\n // Product of MxK and NxK, output MxN\n npy_intp M = PyArray_DIMS(%(x)s)[0];\n npy_intp N = PyArray_DIMS(%(y)s)[0];\n npy_intp K = PyArray_DIMS(%(y)s)[1];\n\n // pointers to access actual data in the arrays passed as params.\n const dtype_%(x)s* __restrict__ Dx = (dtype_%(x)s*)PyArray_DATA(%(x)s);\n const dtype_%(y)s* __restrict__ Dy = (dtype_%(y)s*)PyArray_DATA(%(y)s);\n const dtype_%(p_data)s* __restrict__ Dpd = (dtype_%(p_data)s*)PyArray_DATA(%(p_data)s);\n const dtype_%(p_ind)s* __restrict__ Dpi = (dtype_%(p_ind)s*)PyArray_DATA(%(p_ind)s);\n const dtype_%(p_ptr)s* __restrict__ Dpp = (dtype_%(p_ptr)s*)PyArray_DATA(%(p_ptr)s);\n dtype_%(z_data)s* __restrict__ Dzd = (dtype_%(z_data)s*)PyArray_DATA(%(z_data)s);\n dtype_%(z_ind)s* __restrict__ Dzi = (dtype_%(z_ind)s*)PyArray_DATA(%(z_ind)s);\n dtype_%(z_ptr)s* __restrict__ Dzp = (dtype_%(z_ptr)s*)PyArray_DATA(%(z_ptr)s);\n\n const npy_intp Sdx = PyArray_STRIDES(%(x)s)[1]/PyArray_DESCR(%(x)s)->elsize;\n const npy_intp Sdy = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;\n const npy_intp Sdpd = PyArray_STRIDES(%(p_data)s)[0] / PyArray_DESCR(%(p_data)s)->elsize;\n const npy_intp Sdpi = PyArray_STRIDES(%(p_ind)s)[0] / PyArray_DESCR(%(p_ind)s)->elsize;\n const npy_intp Sdpp = PyArray_STRIDES(%(p_ptr)s)[0] / PyArray_DESCR(%(p_ptr)s)->elsize;\n const npy_intp Sdzd = PyArray_STRIDES(%(z_data)s)[0] / PyArray_DESCR(%(z_data)s)->elsize;\n const 
npy_intp Sdzi = PyArray_STRIDES(%(z_ind)s)[0] / PyArray_DESCR(%(z_ind)s)->elsize;\n const npy_intp Sdzp = PyArray_STRIDES(%(z_ptr)s)[0] / PyArray_DESCR(%(z_ptr)s)->elsize;\n\n memcpy(Dzi, Dpi, PyArray_DIMS(%(p_ind)s)[0]*sizeof(dtype_%(p_ind)s));\n memcpy(Dzp, Dpp, PyArray_DIMS(%(p_ptr)s)[0]*sizeof(dtype_%(p_ptr)s));\n\n // blas expects ints; convert here (rather than just making K etc ints) to avoid potential overflow in the negative-stride correction\n if ((K > 0x7fffffffL)||(Sdx > 0x7fffffffL)||(Sdy > 0x7fffffffL)||(Sdx < -0x7fffffffL)||(Sdy < -0x7fffffffL))\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big for BLAS (overflows int32 index)\"); %(fail)s;}\n int K32 = K;\n int Sdx32 = Sdx;\n int Sdy32 = Sdy;\n\n for (npy_intp m = 0; m < M; ++m) {\n for (npy_int32 n_idx = Dpp[m * Sdpp]; n_idx < Dpp[(m+1)*Sdpp]; ++n_idx) {\n const npy_int32 n = Dpi[n_idx * Sdpi]; // row index of non-null value for column K\n\n const dtype_%(x)s* x_row = (dtype_%(x)s*)(PyArray_BYTES(%(x)s) + PyArray_STRIDES(%(x)s)[0] * m);\n\n const dtype_%(y)s* y_col = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * n);\n // dot expects pointer to the beginning of memory arrays,\n // so when the stride is negative, we need to get the\n // last element\n if (Sdx < 0)\n x_row += (K - 1) * Sdx;\n if (Sdy < 0)\n y_col += (K - 1) * Sdy;\n\n Dzd[n_idx * Sdzd] = Dpd[n_idx * Sdpd] * %(cdot)s(&K32, (const %(conv_type)s*)x_row, &Sdx32, (const %(conv_type)s*)y_col, &Sdy32);\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n\nsampling_dot_csr = SamplingDotCSR()\n\n\n# register a specialization to replace SamplingDot -> SamplingDotCsr\[email protected]_optimizer([sparse.sampling_dot])\ndef local_sampling_dot_csr(fgraph, node):\n if not config.blas__ldflags:\n # The C implementation of SamplingDotCsr relies on BLAS routines\n return\n if node.op == sparse.sampling_dot:\n x, y, p = node.inputs\n if p.type.format == \"csr\":\n p_data, p_ind, p_ptr, p_shape = sparse.csm_properties(p)\n\n z_data, z_ind, z_ptr = sampling_dot_csr(\n x, y, p_data, p_ind, p_ptr, p_shape[1]\n )\n\n return [sparse.CSR(z_data, z_ind, z_ptr, p_shape)]\n return False\n\n\nregister_specialize(local_sampling_dot_csr, \"cxx_only\", name=\"local_sampling_dot_csr\")\n",
"import copy\nfrom io import StringIO\n\nimport numpy as np\n\nfrom theano import scalar\nfrom theano.gof.graph import Apply\nfrom theano.gof.op import Op\nfrom theano.gof.utils import MethodNotDefined\nfrom theano.link.c.interface import HideC\nfrom theano.scalar import Composite, Scalar\nfrom theano.scalar.basic import complex_types, upgrade_to_float_no_complex\nfrom theano.scalar.basic_scipy import Erfcinv, Erfinv\nfrom theano.tensor.elemwise import CAReduceDtype, DimShuffle, Elemwise\n\n\ntry:\n import pygpu\n from pygpu import gpuarray\n from pygpu.gpuarray import dtype_to_typecode\n from pygpu.reduction import ReductionKernel\n from pygpu.tools import ArrayArg\nexcept ImportError:\n pass\n\nfrom .basic_ops import GpuKernelBase, Kernel, as_gpuarray_variable, infer_context_name\nfrom .fp16_help import load_w, write_w\nfrom .type import GpuArrayType, gpu_context_type\n\n\ndef make_argument(v, name):\n return ArrayArg(np.dtype(v.type.dtype), name)\n\n\ndef as_C_string_const(s):\n return \"\\n\".join('\"%s\\\\n\"' % (l.replace('\"', '\\\\\"')) for l in s.split(\"\\n\"))\n\n\ndef get_scal(dt):\n if dt == \"float16\":\n dt = \"float32\"\n return scalar.get_scalar_type(dt)\n\n\ndef max_inputs_to_GpuElemwise(node_or_outputs):\n \"\"\"\n Compute the maximum number of inputs that fit in a kernel call.\n \"\"\"\n if isinstance(node_or_outputs, Apply):\n outputs = node_or_outputs.outputs\n else:\n outputs = node_or_outputs\n\n n_out = len(outputs)\n ndim = outputs[0].type.ndim\n\n ptr_size = 8\n # Even with call32, the interface does not change, and shapes,\n # strides, and offset are passed as 64-bits (8 bytes)\n int_size = 8\n\n # we take the limit from CUDA for now\n nb_bytes_total = 4096\n\n # Regardless of the number of arguments, we have:\n # - The total number of elements (int)\n # - The shape (int) on each dimension\n fixed_size = int_size + int_size * ndim\n\n # Each argument (input or output) has:\n # - 1 pointer (ptr)\n # - 1 offset (int)\n # - 1 stride (int) per dimension\n # Even if the tensor ends up being contiguous, code for the\n # non-contiguous case still needs to be generated.\n param_size = ptr_size + int_size + int_size * ndim\n\n # Remaining for inputs\n nb_bytes_for_inputs = nb_bytes_total - fixed_size - param_size * n_out\n\n # Maximum number of inputs\n max_nb_inputs = nb_bytes_for_inputs // param_size\n\n return max_nb_inputs\n\n\nclass GpuElemwise(HideC, Elemwise):\n \"\"\"\n Elemwise on the GPU.\n\n \"\"\"\n\n params_type = gpu_context_type\n nin = property(lambda self: self.scalar_op.nin)\n nout = property(lambda self: self.scalar_op.nout)\n _f16_ok = True\n\n def __str__(self):\n if self.name is not None:\n return self.name\n items = str(sorted(self.inplace_pattern.items()))\n return f\"GpuElemwise{{{self.scalar_op}}}{items}<gpuarray>\"\n\n def max_inputs(self, node_or_outputs):\n return max_inputs_to_GpuElemwise(node_or_outputs)\n\n def make_node(self, *inputs):\n ctx_name = infer_context_name(*inputs)\n inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]\n out_info = Elemwise.get_output_info(self, GpuDimShuffle, *inputs)\n inputs = out_info[2]\n outputs = [\n GpuArrayType(broadcastable=br, context_name=ctx_name, dtype=dtype)()\n for dtype, br in zip(out_info[0], out_info[1])\n ]\n if len(outputs) > 1:\n raise NotImplementedError()\n\n if len(inputs) > max_inputs_to_GpuElemwise(outputs):\n raise NotImplementedError(\n \"Can not make this GpuElemwise with that much inputs\"\n )\n\n # Try to generate the kernel to catch SupportCodeErrors\n scal_ins = 
[get_scal(i.dtype) for i in inputs]\n fake_node = self.scalar_op.make_node(*[i() for i in scal_ins])\n try:\n code = fake_node.op.c_support_code_apply(fake_node, \"test\")\n if code:\n raise SupportCodeError(code)\n except MethodNotDefined:\n pass\n try:\n support_code = fake_node.op.c_support_code()\n if \"struct\" in support_code:\n # The macro is fine, the C++ struct is not.\n raise SupportCodeError(\n \"struct aren't supported in GpuElemwise support_code\" + support_code\n )\n except MethodNotDefined:\n pass\n\n node = Apply(self, inputs, outputs)\n return node\n\n def get_params(self, node):\n return node.inputs[0].type.context\n\n def _get_vnames(self, node):\n inps = [f\"i{n}\" for n, _ in enumerate(node.inputs)]\n outs = [\n f\"o{n}\" if n not in self.inplace_pattern else inps[self.inplace_pattern[n]]\n for n, _ in enumerate(node.outputs)\n ]\n return inps, outs\n\n def _generate_op_string(self, node):\n inps, outs = self._get_vnames(node)\n scal_v_ins = [get_scal(i.dtype)() for i in node.inputs]\n\n # As float16 isn't a c type and most GPU don't compute on it,\n # We convert the computation to float32, and let libgpuarray\n # load in float16 and cast to float32 and do the reverse for\n # the output.\n scalar_op = self.scalar_op\n if isinstance(scalar_op, (scalar.Cast, Composite)):\n scalar_op = scalar_op.clone_float32()\n fake_node = scalar_op.make_node(*scal_v_ins)\n scal_v_out = fake_node.outputs\n assert len(scal_v_out) == len(node.outputs)\n\n try:\n kop = fake_node.op.c_code(\n fake_node, \"elem_scalar\", inps, outs, dict(fail=\"return;\")\n )\n except MethodNotDefined:\n raise AssertionError(\n \"No c code for this scalar. Can not make a GpuElemwise\"\n )\n # If the following assert fail, then we need to update the\n # code handler above.\n assert \"npy_float16\" not in kop\n\n support_code = \"\"\n try:\n # We accept only some c_support_code().\n # This filter is done in the make_node()\n support_code += fake_node.op.c_support_code()\n except MethodNotDefined:\n pass\n for npy, ga in [\n (\"npy_bool\", \"ga_bool\"),\n (\"npy_uint8\", \"ga_ubyte\"),\n (\"npy_uint16\", \"ga_ushort\"),\n (\"npy_uint32\", \"ga_uint\"),\n (\"npy_uint64\", \"ga_ulong\"),\n (\"npy_int8\", \"ga_byte\"),\n (\"npy_int16\", \"ga_short\"),\n (\"npy_int32\", \"ga_int\"),\n (\"npy_int64\", \"ga_long\"),\n (\"npy_float16\", \"ga_half\"),\n (\"npy_float32\", \"ga_float\"),\n (\"npy_float64\", \"ga_double\"),\n ]:\n kop = kop.replace(npy, ga)\n return support_code, kop\n\n def c_headers(self):\n return [\"<numpy_compat.h>\", \"<gpuarray/types.h>\", \"<gpuarray/elemwise.h>\"]\n\n def c_support_code_struct(self, node, name):\n return \"\\nGpuElemwise *ge;\\n\"\n\n def c_init_code_struct(self, node, name, sub):\n inps, outs = self._get_vnames(node)\n nargs = len(inps) + len(outs) - len(self.inplace_pattern)\n support_code, kop = self._generate_op_string(node)\n res = \"\"\"\n gpuelemwise_arg args[%(nargs)s] = {{0}};\n \"\"\" % dict(\n nargs=nargs\n )\n\n for n, (i, name) in enumerate(zip(node.inputs, inps)):\n res += \"\"\"\n args[%(n)s].name = %(name)s;\n args[%(n)s].typecode = %(typecode)s;\n args[%(n)s].flags = GE_READ;\n \"\"\" % dict(\n n=n, name='\"{}\"'.format(name), typecode=i.type.typecode\n )\n\n p = len(inps)\n for n, o in enumerate(node.outputs):\n if n in self.inplace_pattern:\n assert len(node.outputs) == 1\n res += \"\\nargs[%(n)s].flags |= GE_WRITE;\\n\" % dict(\n n=self.inplace_pattern[n]\n )\n else:\n res += \"\"\"\n args[%(n)s].name = %(name)s;\n args[%(n)s].typecode = %(typecode)s;\n 
args[%(n)s].flags = GE_WRITE;\n \"\"\" % dict(\n n=p, name='\"{}\"'.format(outs[n]), typecode=o.type.typecode\n )\n p += 1\n\n res += \"\"\"\n ge = GpuElemwise_new(%(ctx)s->ctx, %(support)s, %(kop)s, %(nargs)s, args, %(nd)s, GE_CONVERT_F16);\n if (ge == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"Could not initialize elemwise support\");\n %(fail)s\n }\n \"\"\" % dict(\n nargs=nargs,\n ctx=sub[\"params\"],\n fail=sub[\"fail\"],\n support=as_C_string_const(support_code),\n kop=as_C_string_const(kop),\n nd=node.inputs[0].ndim,\n )\n\n return res\n\n def c_cleanup_code_struct(self, node, name):\n return \"\"\"\n GpuElemwise_free(ge);\n \"\"\"\n\n def c_code(self, node, name, inputs, outputs, sub):\n nd = node.outputs[0].ndim\n fail = sub[\"fail\"]\n initial_dims = \",\".join(\"1\" for i in range(nd))\n opname = str(self.scalar_op)\n ctx = sub[\"params\"]\n nargs = len(node.inputs) + len(node.outputs) - len(self.inplace_pattern)\n\n # check that all inputs have valid dimensions\n emitted_inames = {}\n code = (\n \"\"\"\n // +1 is so that MSVC is happy when nd == 0\n size_t dims[%(nd)s+1] = {%(initial_dims)s};\n void *rargs[%(nargs)s] = {0};\n int err;\n \"\"\"\n % locals()\n )\n for idx, iname in enumerate(inputs):\n if iname in emitted_inames:\n assert emitted_inames[iname] is node.inputs[idx]\n continue\n\n broadcasts = map(int, node.inputs[idx].broadcastable)\n broadcasts = \", \".join(map(str, broadcasts))\n nd = node.inputs[idx].ndim\n code += (\n \"\"\"\n int broadcasts_%(iname)s[%(nd)s+1] = {%(broadcasts)s};\n \"\"\"\n % locals()\n )\n emitted_inames[iname] = node.inputs[idx]\n\n # check that all inputs have valid dimensions\n emitted_inames = {}\n for idx, iname in enumerate(inputs):\n code += f\"rargs[{idx}] = &{iname}->ga;\\n\"\n if iname in emitted_inames:\n continue\n code += (\n \"\"\"\n if (%(nd)s != PyGpuArray_NDIM(%(iname)s))\n {\n PyErr_Format(PyExc_TypeError,\n \"need %(nd)s dims, not %%u\",\n PyGpuArray_NDIM(%(iname)s));\n %(fail)s;\n }\n for (int i = 0; i< %(nd)s; ++i)\n {\n dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];\n if ((!(broadcasts_%(iname)s[i] &&\n PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&\n (dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuElemwise. Input dimension mis-match. 
Input\"\n \" %(idx)d (indices start at 0) has shape[%%d] == %%llu\"\n \", but the output's size on that axis is %%llu.\",\n i,\n (unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],\n (unsigned long long)dims[i]\n );\n %(fail)s;\n }\n }\n \"\"\"\n % locals()\n )\n emitted_inames[iname] = True\n # check that all outputs have valid dimensions\n p = len(node.inputs)\n for idx, oname in enumerate(outputs):\n typecode = dtype_to_typecode(node.outputs[idx].dtype)\n if idx not in self.inplace_pattern.keys():\n code += (\n \"\"\"\n for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {\n if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])\n {\n Py_DECREF(%(oname)s);\n %(oname)s = NULL;\n }\n }\n if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))\n {\n Py_XDECREF(%(oname)s);\n %(oname)s = NULL;\n }\n if (NULL == %(oname)s)\n {\n %(oname)s = pygpu_empty(%(nd)d, dims,\n %(typecode)s, GA_C_ORDER,\n %(ctx)s, Py_None);\n if (!%(oname)s) {\n %(fail)s\n }\n }\n rargs[%(p)s] = &%(oname)s->ga;\n \"\"\"\n % locals()\n )\n p += 1\n else:\n input_idx = self.inplace_pattern[idx]\n iname = inputs[input_idx]\n code += (\n \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_INCREF(%(oname)s);\n for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {\n if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])\n {\n PyErr_Format(PyExc_ValueError,\n \"GpuElemwise. Output dimension mis-match. Output\"\n \" %(idx)d (indices start at 0), working inplace\"\n \" on input %(input_idx)s, has shape[%%i] == %%llu\"\n \", but the output's size on that axis is %%llu.\",\n i,\n (unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],\n (unsigned long long)dims[i]\n );\n Py_DECREF(%(oname)s);\n %(oname)s = NULL;\n %(fail)s;\n }\n }\n \"\"\"\n % locals()\n )\n\n code += \"\"\"\n if (GpuElemwise_call(ge, rargs, GE_BROADCAST) != GA_NO_ERROR) {\n PyErr_SetString(PyExc_RuntimeError, \"Error in the elemwise call\");\n %(fail)s\n }\n \"\"\" % dict(\n fail=sub[\"fail\"]\n )\n\n return str(code)\n\n # To disable the superclass perform.\n perform = Op.perform\n\n # Since we don't have a perform ...\n def python_constant_folding(self, node):\n return False\n\n def c_code_cache_version(self):\n ver = self.scalar_op.c_code_cache_version()\n if ver:\n return (10, ver)\n else:\n return ver\n\n\nclass SupportCodeError(Exception):\n \"\"\"\n We do not support certain things (such as the C++ complex struct).\n\n \"\"\"\n\n\nclass GpuDimShuffle(DimShuffle):\n \"\"\"\n DimShuffle on the GPU.\n\n \"\"\"\n\n _f16_ok = True\n c_func_name = \"APPLY_SPECIFIC(gpu_dimshuffle)\"\n\n def make_node(self, input):\n ctx_name = infer_context_name(input)\n res = DimShuffle.make_node(self, input)\n otype = GpuArrayType(\n dtype=res.outputs[0].type.dtype,\n broadcastable=res.outputs[0].type.broadcastable,\n context_name=ctx_name,\n )\n input = as_gpuarray_variable(input, ctx_name)\n return Apply(self, [input], [otype()])\n\n def __str__(self):\n if self.inplace:\n s = \"InplaceGpuDimShuffle{%s}\"\n else:\n s = \"GpuDimShuffle{%s}\"\n return s % (\",\".join(str(x) for x in self.new_order))\n\n def perform(self, node, inp, out, params):\n (input,) = inp\n (storage,) = out\n\n res = input\n\n res = res.transpose(self.shuffle + self.drop)\n\n shape = list(res.shape[: len(self.shuffle)])\n for augm in self.augment:\n shape.insert(augm, 1)\n res = res.reshape(shape)\n\n if not self.inplace:\n res = res.copy()\n\n storage[0] = res\n\n\nclass GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype):\n \"\"\"\n GpuCAReduceCuda is a Reduction along some dimensions by a scalar 
op.\n\n Parameters\n ----------\n reduce_mask\n The dimensions along which to reduce. The `reduce_mask` is a tuple of\n booleans (actually integers 0 or 1) that specify for each input\n dimension, whether to reduce it (1) or not (0).\n pre_scalar_op\n If present, must be a scalar op with only 1 input. We will execute it\n on the input value before reduction.\n\n Examples\n --------\n When scalar_op is a theano.scalar.basic.Add instance:\n\n - reduce_mask == (1,) sums a vector to a scalar\n\n - reduce_mask == (1,0) computes the sum of each column in a matrix\n\n - reduce_mask == (0,1) computes the sum of each row in a matrix\n\n - reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.\n\n Notes\n -----\n Any reduce_mask of all zeros is a sort of 'copy', and may be removed during\n graph optimization.\n\n This Op is a work in progress.\n\n This op was recently upgraded from just GpuSum a general CAReduce. Not\n many code cases are supported for scalar_op being anything other than\n scalar.Add instances yet.\n\n Important note: if you implement new cases for this op, be sure to\n benchmark them and make sure that they actually result in a speedup.\n GPUs are not especially well-suited to reduction operations so it is\n quite possible that the GPU might be slower for some cases.\n\n \"\"\"\n\n __props__ = (\n \"axis\",\n \"reduce_mask\",\n \"dtype\",\n \"acc_dtype\",\n \"scalar_op\",\n \"pre_scalar_op\",\n )\n _f16_ok = True\n verbose = 0\n\n def __init__(\n self,\n scalar_op,\n axis=None,\n reduce_mask=None,\n dtype=None,\n acc_dtype=None,\n pre_scalar_op=None,\n ):\n if reduce_mask is not None:\n reduce_mask = tuple(reduce_mask)\n self.reduce_mask = reduce_mask\n\n # used to make sure that calls to scalar op\n # have unique name arguments\n self._n_scalar_op_calls = 0\n CAReduceDtype.__init__(\n self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype\n )\n self.pre_scalar_op = pre_scalar_op\n if pre_scalar_op:\n assert pre_scalar_op.nin == 1\n\n def __str__(self):\n pre = \"\"\n if self.pre_scalar_op:\n pre = f\"pre={self.pre_scalar_op},red=\"\n ax = \"\"\n if self.axis is not None:\n ax = f\"{{{', '.join(str(x) for x in self.axis)}}}\"\n return f\"GpuCAReduceCuda{{{pre}{str(self.scalar_op)}}}{ax}\"\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n # For unpickling of old ops.\n if not hasattr(self, \"pre_scalar_op\"):\n self.pre_scalar_op = None\n\n def make_node(self, x):\n x = as_gpuarray_variable(x, infer_context_name(x))\n if x.type.context.kind != b\"cuda\":\n raise TypeError(\"GpuCAReduceCuda doesn't work for non-cuda devices\")\n ret = super().make_node(x)\n self = copy.copy(self)\n self.axis = ret.op.axis\n if self.pre_scalar_op:\n # Currently we only tested pre_scalar_op that don't cause\n # upcast.\n assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype\n if self.reduce_mask is None:\n if self.axis is None:\n reduce_mask = [1] * x.type.ndim\n else:\n reduce_mask = [0] * x.type.ndim\n for a in self.axis:\n assert reduce_mask[a] == 0\n reduce_mask[a] = 1\n self.reduce_mask = tuple(reduce_mask)\n\n if x.type.ndim != len(self.reduce_mask):\n raise TypeError(f\"x must have rank {len(self.reduce_mask)}\")\n if (\n \"complex\" in x.dtype\n or \"complex\" in ret.outputs[0].dtype\n or \"complex\" in self._acc_dtype(x.dtype)\n ):\n raise NotImplementedError(\"We don't support complex in gpu reduction\")\n return Apply(\n self,\n [x],\n [\n GpuArrayType(\n ret.outputs[0].dtype,\n ret.outputs[0].type.broadcastable,\n context_name=x.type.context_name,\n )()\n 
],\n )\n\n def perform(self, node, inp, out, ctx):\n Op.perform(self, node, inp, out, ctx)\n\n def supports_c_code(self, inputs):\n \"\"\"\n Returns True if the current op and reduce pattern has functioning C code.\n\n \"\"\"\n # If we don't even have the right method, we certainly\n # don't support the C code\n # (This is the test that used to be implemented by\n # local_gpu_sum)\n pattern = \"\".join(str(i) for i in self.reduce_mask)\n if not hasattr(self, f\"c_code_reduce_{pattern}\"):\n return False\n\n # Now that this is a general reduction op, we might\n # have a method for a pattern, but that pattern\n # might not be implemented for the current scalar op.\n # To detect this more complicated situation, we\n # make fake arguments to c_code, try to run them,\n # and see if NotImplementedError gets raised.\n\n node = self.make_node(*inputs)\n\n name = \"fake_name\"\n\n inp = [f\"fake_input_name_{i}\" for i in range(len(inputs))]\n out = [f\"fake_output_name_{i}\" for i in range(len(node.outputs))]\n\n sub = {\"fail\": \"fake failure code\", \"params\": \"fake context\"}\n\n try:\n self.c_code(node, name, inp, out, sub)\n if not self.gpu_kernels(node, name):\n return False\n except NotImplementedError:\n return False\n return True\n\n def c_headers(self):\n return [\"<numpy_compat.h>\", \"<gpuarray/types.h>\"]\n\n def c_support_code(self):\n return \"\"\"\n template <typename T>\n static T ceil_intdiv(T a, T b)\n {\n return (a/b) + ((a % b) ? 1: 0);\n }\n \"\"\"\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (z,) = out\n\n nd_in = node.inputs[0].type.ndim\n nd_out = node.outputs[0].type.ndim\n # For complex, we need to use theano_complex* in the c code to\n # have it run. But libgpuarray don't understand it.\n in_dtype = node.inputs[0].type.dtype_specs()[1]\n out_dtype = node.outputs[0].type.dtype_specs()[1]\n gin_dtype = \"npy_\" + node.inputs[0].dtype\n gout_dtype = \"npy_\" + node.outputs[0].dtype\n assert nd_in - nd_out == sum(self.reduce_mask)\n\n sio = StringIO()\n fail = sub[\"fail\"]\n ctx = sub[\"params\"]\n\n # check input\n print(\n \"\"\"\n if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)\n {\n PyErr_Format(PyExc_TypeError,\n \"required nd=%(nd_in)s, got nd=%%u\", PyGpuArray_NDIM(%(x)s));\n %(fail)s;\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n # It might be nice to use a property of the op class to do this,\n # but tensor.elemwise.CAReduce has this exact same check so I guess\n # this is OK to do\n if self.scalar_op in [scalar.scalar_minimum, scalar.scalar_maximum]:\n conds = [\n f\"(PyGpuArray_DIMS({x})[{i}] == 0)\"\n for i in range(nd_in)\n if self.reduce_mask[i]\n ]\n assert len(conds) > 0\n cond = \"(\" + \" || \".join(conds) + \")\"\n print(\n \"\"\"\n if %(cond)s\n {\n PyErr_Format(PyExc_ValueError,\" tried to reduce a 0-length axis.\");\n %(fail)s;\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n #\n # alloc an output if we need one\n #\n\n # check the basics of out output\n print(\n f\"\"\"\n if ( !{z}\n || (PyGpuArray_NDIM({z}) != {nd_out})\n \"\"\",\n file=sio,\n )\n\n # ensure that the output has the right non-reduced dimensions\n j = 0\n for i in range(nd_in):\n if not self.reduce_mask[i]:\n print(\n \" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) \"\n % locals(),\n file=sio,\n )\n j += 1\n\n print(\n \"\"\"\n )\n {\n \"\"\"\n % locals(),\n file=sio,\n )\n if nd_out > 0:\n print(f\"size_t new_dims[{nd_out}]; \", file=sio)\n else:\n print(\"size_t *new_dims=NULL; \", file=sio)\n\n j = 0\n for i in range(nd_in):\n if not 
self.reduce_mask[i]:\n print(\n f\"new_dims[{j}] = PyGpuArray_DIMS({x})[{i}];\",\n file=sio,\n )\n j += 1\n out_typecode = dtype_to_typecode(gout_dtype[4:])\n print(\n \"\"\"\n Py_XDECREF(%(z)s);\n %(z)s = pygpu_empty(%(nd_out)s, new_dims,\n %(out_typecode)s, GA_C_ORDER,\n %(ctx)s, Py_None);\n if (NULL == %(z)s)\n {\n PyErr_Format(PyExc_RuntimeError, \"Failed to allocate output\");\n %(fail)s;\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n # \\begin bracket the reduction in a check that there is\n # actually work to do\n if getattr(self.scalar_op, \"identity\", None) == 0:\n zero_shp = f\"GpuArray_memset(&{z}->ga, 0)\"\n # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:\n else:\n scalar_op = self.scalar_op\n zero_shp = (\n \"\"\"\n PyErr_Format(PyExc_NotImplementedError,\n \"GpuCAReduceCuda not implemented when input shape is 0\"\n \" for this scalar_op: %(scalar_op)s\");\n %(fail)s;\n \"\"\"\n % locals()\n )\n print(\n \"\"\"\n if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){\n %(zero_shp)s;\n }\n else if (PyGpuArray_SIZE(%(z)s))\n {\n \"\"\"\n % locals(),\n file=sio,\n )\n\n #\n # Now perform the reduction\n #\n\n if all(i == 1 for i in self.reduce_mask):\n # check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code.\n # TODO: check if we are ccontiguous when we un-dimshuffle\n # TODO: if only some dims are ccontiguous, call version with less dims.\n print(\"if(%(x)s->ga.flags & GA_C_CONTIGUOUS){\" % locals(), file=sio)\n self.c_code_reduce_ccontig(sio, node, name, x, z, fail)\n print(\"}else{\", file=sio)\n getattr(self, f\"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}\")(\n sio, node, name, x, z, fail\n )\n print(\"}\", file=sio)\n else:\n getattr(self, f\"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}\")(\n sio, node, name, x, z, fail\n )\n\n # \\end bracket the reduction ...\n print(\n \"\"\"\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n return sio.getvalue()\n\n def _makecall(\n self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()\n ):\n \"\"\"\n Return a string for making a kernel call.\n\n The return value looks something like:\n\n .. 
code-block:: c\n\n ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);\n ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);\n ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);\n if (verbose)\n printf(\"running kernel_reduce_10_%(name)s\\\\n\");\n size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];\n void *kernel_params[] = {\n (void *)&PyGpuArray_DIMS(%(x)s)[0],\n (void *)&PyGpuArray_DIMS(%(x)s)[1],\n (void *)%(x)s->ga.data,\n (void *)&%(x)s->ga.offset,\n (void *)&stride_A0,\n (void *)&stride_A1,\n (void *)%(z)s->ga.data,\n (void *)&%(z)s->ga.offset,\n (void *)&stride_Z0};\n int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);\n %(err_check)s\n \"\"\"\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n acc_dtype = \"npy_\" + self._acc_dtype(node.inputs[0].dtype)\n sio = StringIO()\n if pattern is None:\n pattern = \"\".join(str(c) for c in self.reduce_mask)\n ndim = len(self.reduce_mask)\n nd_out = ndim - sum(self.reduce_mask)\n shapes_format = f\"shape=({','.join(['%llu'] * node.inputs[0].ndim)})\"\n shapes_data = \",\".join(\n [f\"(size_t) PyGpuArray_DIMS({x})[{i}]\" for i in range(node.inputs[0].ndim)]\n )\n k_var = f\"kernel_reduce_{pattern}_{name}\"\n params = []\n\n for i in range(ndim):\n params.append(f\"(void *)&PyGpuArray_DIMS({x})[{i}]\")\n for declaration, value in extra_dims:\n print(declaration % locals(), file=sio)\n params.append(value)\n params.append(f\"(void *){x}->ga.data\")\n params.append(f\"(void *)&{x}->ga.offset\")\n for i in range(ndim):\n print(\n \"\"\"\n ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);\n \"\"\"\n % locals(),\n file=sio,\n )\n params.append(\"(void *)&stride_A%(i)d\" % locals())\n for declaration, value in extra_strides:\n print(declaration % locals(), file=sio)\n params.append(value)\n\n params.append(f\"(void *){z}->ga.data\")\n params.append(f\"(void *)&{z}->ga.offset\")\n for i in range(nd_out):\n print(\n \"\"\"\n ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);\n \"\"\"\n % locals(),\n file=sio,\n )\n params.append(\"(void *)&stride_Z%(i)d\" % locals())\n kernel_params = \", \".join(params)\n err_check = (\n \"\"\"\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: %(k_var)s: %%s.\",\n GpuKernel_error(&%(k_var)s, err));\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n print(\n \"\"\"\n if (verbose)\n printf(\"running kernel_reduce_%(pattern)s_%(name)s\\\\n\");\n size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];\n void *kernel_params[] = { %(kernel_params)s };\n if (verbose>1)\n printf(\"n_threads[0]=%%lu, n_threads[1]=%%lu, \"\n \"n_threads[2]=%%lu, n_threads=%%lu, \"\n \"n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, \"\n \"n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\\\n\",\n n_threads[0],n_threads[1],\n n_threads[2],\n n_threads[0]*n_threads[1]*\n n_threads[2],\n n_blocks[0],n_blocks[1],n_blocks[2],\n n_blocks[0]*n_blocks[1]*n_blocks[2],\n n_shared, %(shapes_data)s);\n int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);\n %(err_check)s\n \"\"\"\n % locals(),\n file=sio,\n )\n\n return sio.getvalue()\n\n def _k_decl(self, node, nodename, pattern=None, ndim=None, reduce_mask=None):\n \"\"\"\n Return a string to declare a kernel function.\n\n The result will look something like this:\n\n .. 
code-block:: c\n\n KERNEL void kernel_reduce_110_%(nodename)s(\n const ga_size d0,\n const ga_size d1,\n const ga_size d2,\n const %(in_type)s *A,\n const ga_size offset_A,\n const ga_ssize sA0,\n const ga_ssize sA1,\n const ga_ssize sA2,\n %(out_type)s * Z,\n const ga_size offset_Z,\n const ga_ssize sZ0)\n\n Since the nodename is unique, we don't need to put the name\n of the scalar_op in here.\n\n \"\"\"\n in_dtype = node.inputs[0].dtype\n out_dtype = node.outputs[0].dtype\n in_type = gpuarray.dtype_to_ctype(in_dtype)\n out_type = gpuarray.dtype_to_ctype(out_dtype)\n if reduce_mask is None:\n reduce_mask = self.reduce_mask\n if ndim is None:\n ndim = len(reduce_mask)\n if pattern is None:\n pattern = \"\".join(str(i) for i in reduce_mask)\n kname = f\"kernel_reduce_{pattern}\"\n k_var = f\"kernel_reduce_{pattern}_{nodename}\"\n params = []\n sio = StringIO()\n\n print(\n f\"\"\"\n KERNEL void {kname}(\n \"\"\",\n file=sio,\n )\n for i in range(ndim):\n params.append(\"uintp\")\n print(\n f\"\"\"\n const ga_size d{i},\n \"\"\",\n file=sio,\n )\n params.append(gpuarray.GpuArray)\n params.append(\"uintp\")\n print(\n f\"\"\"\n const {in_type} *A, const ga_size offset_A,\n \"\"\",\n file=sio,\n )\n for i in range(ndim):\n params.append(\"intp\")\n print(\n f\"\"\"\n const ga_ssize sA{i},\n \"\"\",\n file=sio,\n )\n params.append(gpuarray.GpuArray)\n params.append(\"uintp\")\n print(\n f\"\"\"\n {out_type} * Z, const ga_size offset_Z\n \"\"\",\n file=sio,\n )\n for i in range(ndim - sum(reduce_mask)):\n params.append(\"intp\")\n print(\n f\"\"\"\n , const ga_ssize sZ{i}\n \"\"\",\n file=sio,\n )\n print(\")\", file=sio)\n return sio.getvalue(), kname, params, k_var\n\n def _k_init(self, node, nodename):\n in_dtype = node.inputs[0].dtype\n out_dtype = node.outputs[0].dtype\n acc_dtype = self._acc_dtype(node.inputs[0].dtype)\n # We need to use theano_complex* and not npy_complex*\n in_type = gpuarray.dtype_to_ctype(in_dtype)\n out_type = gpuarray.dtype_to_ctype(out_dtype)\n acc_type = gpuarray.dtype_to_ctype(acc_dtype)\n\n return (\n \"\"\"\n const int threadCount = blockDim.x * blockDim.y * blockDim.z;\n const int threadNum = threadIdx.z * blockDim.x * blockDim.y\n + threadIdx.y * blockDim.x + threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = 0;\n \"\"\"\n % locals()\n )\n\n def _assign_init(self, first_item, dtype):\n \"\"\"\n This return the initial value for myresult.\n If the scalar op have an identity value, return it.\n\n Otherwise, check that the scalar op is maximum or minimum\n and return first_item. 
It should be the first element of the reduction.\n As the maximum and minimum of the same value don't change, this work.\n\n \"\"\"\n if hasattr(self.scalar_op, \"identity\"):\n return str(self.scalar_op.identity)\n else:\n assert isinstance(\n self.scalar_op, (scalar.ScalarMaximum, scalar.ScalarMinimum)\n )\n if self.pre_scalar_op: # TODO: multiple dtypes\n # dtype = node.inputs[0].dtype\n\n dummy_var = scalar.Scalar(dtype=dtype)()\n\n dummy_node = self.pre_scalar_op.make_node(dummy_var)\n\n dummy_name = \"assign_init_pre_scalar_op\" + str(self._n_scalar_op_calls)\n self._n_scalar_op_calls += 1\n t = self.pre_scalar_op.c_code(\n dummy_node, dummy_name, (first_item,), (\"\",), {}\n )\n assert t.startswith(\" = \")\n first_item = t[3:]\n if first_item[-1] == \";\":\n first_item = first_item[:-1]\n\n return first_item\n\n def _assign_reduce(self, node, name, left, right, sub, pre):\n \"\"\"\n\n Parameters\n ----------\n node\n The node argument to this op's c_code.\n name\n The name argument to this op's c_code.\n left\n A C code string identifying an lvalue.\n right\n A C code string identifying an expression.\n sub\n The sub argument to this op's c_code.\n pre\n If True, we will add the pre_scalar_op.c_code.\n\n Returns\n -------\n str\n C code to reduce left and right, assigning the result to left.\n\n \"\"\"\n\n (x,) = node.inputs\n in_dtype = x.dtype\n out_dtype = node.outputs[0].dtype\n\n dummy_left = Scalar(dtype=out_dtype)()\n dummy_right = Scalar(dtype=in_dtype)()\n\n dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)\n\n dummy_name = name + \"_scalar_op\" + str(self._n_scalar_op_calls)\n self._n_scalar_op_calls += 1\n\n if pre and self.pre_scalar_op:\n assert left == \"myresult\"\n dummy_node = self.pre_scalar_op.make_node(dummy_left)\n dummy_name = name + \"_scalar_op\" + str(self._n_scalar_op_calls)\n self._n_scalar_op_calls += 1\n t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (right,), (\"\",), sub)\n assert t.startswith(\" = \")\n right = t[3:]\n if right[-1] == \";\":\n right = right[:-1]\n\n return self.scalar_op.c_code(\n dummy_node, dummy_name, (left, right), (left,), sub\n )\n\n def _k_reduce_buf(self, z_pos, node, name, sub):\n \"\"\"\n WRITEME\n\n Parameters\n ----------\n node, name, sub\n These should be passed through from the original call to c_code.\n\n \"\"\"\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n acc_dtype = \"npy_\" + self._acc_dtype(node.inputs[0].dtype)\n write_out = write_w(node.outputs[0].dtype)\n\n current_version = \"\"\"\n __syncthreads(); // some kernel do multiple reduction.\n buf[threadNum] = myresult;\n __syncthreads();\n\n // rest of function is handled by one warp\n if (threadNum < warpSize) {\n //round up all the partial sums into the first `warpSize` elements\n for (int i = threadNum + warpSize; i < threadCount; i += warpSize)\n {\n \"\"\"\n current_version += (\n self._assign_reduce(node, name, \"myresult\", \"buf[i]\", sub, False)\n + \"\"\"\n }\n buf[threadNum] = myresult;\n }\n __syncthreads();\n for (unsigned int _n = warpSize / 2; _n > 0; _n /= 2) {\n if (threadNum < _n && threadNum + _n < threadCount)\n \"\"\"\n )\n current_version += self._assign_reduce(\n node, name, \"buf[threadNum]\", \"buf[threadNum+_n]\", sub, False\n )\n\n current_version += \"\"\"\n __syncthreads();\n }\n if (threadNum == 0) {\n %(z_pos)s = %(write_out)s(buf[0]);\n }\n \"\"\"\n\n current_version = current_version % locals()\n\n return current_version\n\n # Threads must be organized as: 
threadNum%nb_reduce correspond to the same sum\n # nb_reduce<=warpSize\n def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):\n reduce_fct = self._assign_reduce(node, name, \"myresult\", \"buf[i]\", {}, False)\n write_out = write_w(node.outputs[0].dtype)\n\n return (\n \"\"\"\n __syncthreads(); // some kernel do multiple reduction.\n buf[threadNum] = myresult;\n __syncthreads();\n\n // rest of function is handled by one warp\n if (threadNum < %(nb_reduce)s)\n {\n //round up all the partial sums into the first `nb_reduce` elements\n for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)\n {\n %(reduce_fct)s;\n }\n %(z_pos)s = %(write_out)s(myresult);\n }\n \"\"\"\n % locals()\n )\n\n def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n if getattr(self.scalar_op, \"identity\", None) == 0:\n zero_shp = f\"GpuArray_memset(&{z}->ga, 0)\"\n # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:\n else:\n zero_shp = (\n \"\"\"\n PyErr_Format(PyExc_NotImplementedError,\n \"GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op\");\n %(fail)s;\n \"\"\"\n % locals()\n )\n\n acc_dtype = \"npy_\" + self._acc_dtype(node.inputs[0].dtype)\n k_var = f\"kernel_reduce_ccontig_{name}\"\n err_check = (\n \"\"\"\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: %(k_var)s: %%s.\",\n GpuKernel_error(&%(k_var)s, err));\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n\n print(\n \"\"\"\n {\n if(PyGpuArray_SIZE(%(x)s)==0){\n %(zero_shp)s;\n }else{\n int verbose = %(verbose)s;\n size_t numEls = PyGpuArray_SIZE(%(x)s);\n size_t n_threads = std::min(numEls, (size_t) 256);\n size_t n_blocks = 1;\n void *kernel_params[] = {(void *)&numEls,\n (void *)%(x)s->ga.data,\n (void *)&%(x)s->ga.offset,\n (void *)%(z)s->ga.data,\n (void *)&%(z)s->ga.offset};\n if (verbose) printf(\"running kernel_reduce_ccontig_%(name)s\"\n \" n_threads=%%llu, size=%%llu, ndim=%%u\\\\n\",\n n_threads, numEls,\n PyGpuArray_NDIM(%(x)s));\n size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;\n int err = GpuKernel_call(&%(k_var)s, 1, &n_blocks, &n_threads, n_shared, kernel_params);\n %(err_check)s\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_1(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};\n size_t n_blocks[3] = {1, 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_11(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};\n while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];\n n_threads[1] -= 1;\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])\n n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];\n\n size_t n_blocks[3] = {1, 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):\n \"\"\"\n\n Parameters\n ----------\n N\n The number of 1 in the pattern N=1 -> 01, N=2 -> 011 N=3 ->0111\n Work for N=1,2,3.\n\n \"\"\"\n\n assert N in [1, 2, 3]\n verbose = self.verbose\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = 
\"npy_\" + node.outputs[0].dtype\n makecall = self._makecall(node, name, x, z, fail)\n N_pattern = \"\".join([\"1\"] * N)\n param_dim = \",\".join([f\"PyGpuArray_DIMS({x})[{i}]\" for i in range(N + 1)])\n strides_dim = \",\".join(\n [f\"PyGpuArray_STRIDES({x})[{i}]/sizeof({in_dtype})\" for i in range(N + 1)]\n )\n\n threads_y = (\n \"\"\"\n //get as many y threads as we can fit\n while (n_threads[0] * (n_threads[1]+1) <= 256)\n {\n if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])\n n_threads[1] += 1;\n else\n break;\n }\"\"\"\n % locals()\n )\n\n threads_z = (\n \"\"\"\n //get as many z threads as we can fit\n while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)\n {\n if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])\n n_threads[2] += 1;\n else\n break;\n }\n //Maximum for Fermi GPU on that dimensions.\n n_threads[2] = std::min(n_threads[2], (size_t)64);\n \"\"\"\n % locals()\n )\n\n if len(self.reduce_mask) == 2:\n threads_y = \"\"\n threads_z = \"\"\n\n if len(self.reduce_mask) == 3:\n threads_z = \"\"\n\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};\n %(threads_y)s\n %(threads_z)s\n size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_01(self, sio, node, name, x, z, fail):\n self.c_code_reduce_01X(sio, node, name, x, z, fail, 1)\n\n def c_code_reduce_011(self, sio, node, name, x, z, fail):\n self.c_code_reduce_01X(sio, node, name, x, z, fail, 2)\n\n def c_code_reduce_0111(self, sio, node, name, x, z, fail):\n self.c_code_reduce_01X(sio, node, name, x, z, fail, 3)\n\n def c_code_reduce_10(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n acc_dtype = \"npy_\" + self._acc_dtype(node.inputs[0].dtype)\n k_var = f\"kernel_reduce_10_{name}\"\n err_check = (\n \"\"\"\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: %(k_var)s: %%s.\",\n GpuKernel_error(%(k_var)s, err));\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n if(PyGpuArray_STRIDES(%(x)s)[0]>\n PyGpuArray_STRIDES(%(x)s)[1]){\n // If there are a lot of summations to do, then we can use simple parallelization -\n // use each thread to do one sum.\n\n // we might as well launch blocks of 32 threads because that's the warp size.\n // we could schedule more threads if we were maxing out the gridsize below, but\n // the gridsize is way more than the physical hardware and I think 32 threads\n // on a huge grid is enough to fully use the hardware.\n size_t n_threads[3] = {32, 1, 1};\n\n // We kindof reshape the input implicitly to something 4D:\n // the shape A,B,C -> A, B, D, E\n // where C <= D*E < C+32\n // where E==32\n\n GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;\n size_t A = 1;\n size_t B = PyGpuArray_DIMS(%(x)s)[0];\n size_t C = PyGpuArray_DIMS(%(x)s)[1];\n size_t D = C/32;\n if (32*D < C) D+= 1;\n assert ((C <= 32*D) && (32*D < C+32));\n\n // The gridsize would ideally be (A, D). 
But we do the following logic to make\n // sure we don't ask for a grid that is too big.\n size_t n_blocks[3] = {A, D, 1};\n if (n_blocks[0] > 4096) n_blocks[0] = 4096;\n if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];\n ssize_t stride_A0 = 1;\n ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);\n ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);\n ssize_t stride_Z0 = 1;\n ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);\n void *kernel_params[] = {\n (void *)&A, (void *)&B, (void *)&C, (void *)&D,\n (void *)%(x)s->ga.data,\n (void *)&%(x)s->ga.offset,\n (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,\n (void *)%(z)s->ga.data,\n (void *)&%(z)s->ga.offset,\n (void *)&stride_Z0, (void *)&stride_Z1};\n int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);\n %(err_check)s\n }else{\n GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};\n size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};\n if (verbose) {\n fprintf(stderr,\n \"running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\\\n\",\n (unsigned long long)n_blocks[0],\n (unsigned long long)n_blocks[1]);\n }\n assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);\n size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];\n size_t dim_0 = 1;\n ssize_t stride_A0 = 1;\n ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);\n ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);\n ssize_t stride_Z0 = 1;\n ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);\n void *kernel_params[] = {\n (void *)&dim_0,\n (void *)&PyGpuArray_DIMS(%(x)s)[0],\n (void *)&PyGpuArray_DIMS(%(x)s)[1],\n (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,\n (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,\n (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,\n (void *)&stride_Z0, (void *)&stride_Z1};\n int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);\n %(err_check)s\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_010(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n makecall_inner = self._makecall(node, name, x, z, fail, pattern=\"010_inner\")\n pattern = \"\".join(str(i) for i in self.reduce_mask)\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n k_var = f\"kernel_reduce_010_AD_{name}\"\n err_check = (\n \"\"\"\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: %(k_var)s: %%s.\",\n GpuKernel_error(&%(k_var)s, err));\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n print(\n \"\"\"\n {\n //int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];\n\n //if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))\n if (1) // if the alternative is less buggy, consider not using this branch\n {\n // If there are a lot of summations to do, then we can use simple parallelization -\n // use each thread to do one sum.\n\n // we might as well launch blocks of 32 threads because that's the warp size.\n // we could schedule more threads if we were maxing out the gridsize below, but\n // the gridsize is way more than the physical hardware and I think 32 threads\n // on a huge grid is enough to fully use the hardware.\n size_t n_threads[3] = {32, 1, 1};\n\n // We kindof 
reshape the input implicitly to something 4D:\n // the shape A,B,C -> A, B, D, E\n // where C <= D*E < C+32\n // where E==32\n\n size_t A = PyGpuArray_DIMS(%(x)s)[0];\n size_t B = PyGpuArray_DIMS(%(x)s)[1];\n size_t C = PyGpuArray_DIMS(%(x)s)[2];\n size_t D = C/32;\n if (32*D < C) D+= 1;\n assert ((C <= 32*D) && (32*D < C+32));\n\n // The gridsize would ideally be (A, D). But we do the following logic to make\n // sure we don't ask for a grid that is too big.\n size_t n_blocks[3] = {A, D, 1};\n if (n_blocks[0] > 4096) n_blocks[0] = 4096;\n if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];\n ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);\n ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);\n ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);\n ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);\n ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);\n void *kernel_params[] = {\n (void *)&A, (void *)&B, (void *)&C, (void *)&D,\n (void *)%(x)s->ga.data,\n (void *)&%(x)s->ga.offset,\n (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,\n (void *)%(z)s->ga.data,\n (void *)&%(z)s->ga.offset,\n (void *)&stride_Z0, (void *)&stride_Z1};\n int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);\n %(err_check)s\n }\n else\n {\n int verbose = %(verbose)s;\n\n size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};\n while( (n_threads[0]*(n_threads[1]+1)<=256)\n && (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){\n n_threads[1]++;\n }\n\n size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};\n n_blocks[1] = std::min(\n ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],\n (size_t)n_threads[0]),\n (size_t)(4096 / n_blocks[0])\n );\n if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),\n PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),\n PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))\n ==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)\n && n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],\n (size_t)n_threads[0])){\n if(verbose>1)\n printf(\"n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\\\n\",\n PyGpuArray_DIMS(%(x)s)[0],4096,\n ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),\n (size_t)(4096 / n_blocks[0]));\n assert(n_threads[0]<=32);\n %(makecall_inner)s\n }else{\n n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],\n (size_t) 256);\n n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);\n n_blocks[1] = std::min(\n PyGpuArray_DIMS(%(x)s)[2],\n (size_t)(4096 / n_blocks[0])\n );\n %(makecall)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_0101(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};\n while (n_threads[0] * n_threads[1] <= 256)\n {\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;\n n_threads[1] += 1;\n }\n n_threads[1] -= 1;\n size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_100(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n acc_dtype = \"npy_\" + 
self._acc_dtype(node.inputs[0].dtype)\n k_var = f\"kernel_reduce_010_AD_{name}\"\n err_check = (\n \"\"\"\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: %(k_var)s: %%s.\",\n GpuKernel_error(&%(k_var)s, err));\n %(fail)s;\n }\n \"\"\"\n % locals()\n )\n # use threadIdx.x for i0\n # use blockIdx.x for i1\n # use blockIdx.y for i2\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};\n size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};\n while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&\n n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])\n {\n n_blocks[1] += 1;\n }\n %(makecall)s\n }\n else\n { // reuse 010_AD kernel, we transpose the 2 first dim\n // See the reduction for the real 010_AD kernel for\n // explanation. We do this to get coalesced read.\n size_t n_threads[3] = {32, 1, 1};\n\n size_t A = PyGpuArray_DIMS(%(x)s)[1];\n size_t B = PyGpuArray_DIMS(%(x)s)[0];\n size_t C = PyGpuArray_DIMS(%(x)s)[2];\n size_t D = C/32;\n if (32*D < C) D+= 1;\n assert ((C <= 32*D) && (32*D < C+32));\n\n // The gridsize would ideally be (A, D). But we do the following logic to make\n // sure we don't ask for a grid that is too big.\n size_t n_blocks[3] = {A, D, 1};\n if (n_blocks[0] > 4096) n_blocks[0] = 4096;\n if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];\n size_t n_shared = 0;\n ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);\n ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);\n ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);\n ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);\n ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);\n void *kernel_params[] = {\n (void *)&A, (void *)&B, (void *)&C, (void *)&D,\n (void *)%(x)s->ga.data,\n (void *)&%(x)s->ga.offset,\n (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,\n (void *)%(z)s->ga.data,\n (void *)&%(z)s->ga.offset,\n (void *)&stride_Z0, (void *)&stride_Z1};\n int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);\n %(err_check)s\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_110(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};\n while (n_threads[0]*n_threads[1] <= 256)\n {\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])\n break;\n n_threads[1] += 1;\n }\n n_threads[1] -= 1;\n\n size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_001(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};\n size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};\n while (n_blocks[0] * n_blocks[1] <= 4096)\n {\n if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])\n break;\n n_blocks[1] += 1;\n }\n n_blocks[1] -= 1;\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_101(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(\n node,\n name,\n x,\n z,\n 
fail,\n extra_dims=[(\"size_t one = 1;\", \"(void *) &one\")],\n extra_strides=[(\"ssize_t sone = 1;\", \"(void *) &sone\")],\n pattern=\"1011\",\n )\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n// size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],\n// (size_t) 256), 1, 1};\n size_t n_threads[3] = {1, 1, 1};\n\n while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])\n n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];\n\n while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)\n ++n_threads[2];\n if (n_threads[2] > 64)\n n_threads[2] = 64;\n if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])\n n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];\n\n size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_111(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};\n\n //get as many y threads as we can fit\n while (n_threads[0] * n_threads[1] <= 256)\n {\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])\n break;\n n_threads[1] += 1;\n }\n n_threads[1] -= 1;\n\n //get as many z threads as we can fit\n while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)\n {\n if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])\n break;\n n_threads[2] += 1;\n }\n n_threads[2] -= 1;\n //Maximum for Fermi GPU on that dimensions.\n n_threads[2] = std::min(n_threads[2], (size_t)64);\n\n size_t n_blocks[3] = {1, 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_0011(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n in_dtype = \"npy_\" + node.inputs[0].dtype\n out_dtype = \"npy_\" + node.outputs[0].dtype\n acc_dtype = \"npy_\" + self._acc_dtype(node.inputs[0].dtype)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n\n size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};\n\n while (n_blocks[0] * n_blocks[1] <= 4096 &&\n n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])\n {\n n_blocks[1] += 1;\n }\n\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};\n while (n_threads[0] * n_threads[1] <= 256\n && n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]\n && n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))\n {\n n_threads[1] += 1;\n }\n\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_1111(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};\n\n //get as many y threads as we can fit\n while (n_threads[0] * n_threads[1] <= 256)\n {\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])\n break;\n n_threads[1] += 1;\n }\n n_threads[1] -= 1;\n\n //get as many z threads as we can fit\n while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)\n {\n if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])\n break;\n n_threads[2] += 1;\n }\n n_threads[2] -= 1;\n\n //Maximum for Fermi GPU on that dimensions.\n n_threads[2] = std::min(n_threads[2], (size_t)64);\n\n size_t n_blocks[3] = {1, 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_reduce_1011(self, sio, node, name, x, z, fail):\n verbose = self.verbose\n makecall = 
self._makecall(node, name, x, z, fail)\n print(\n \"\"\"\n {\n int verbose = %(verbose)s;\n size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};\n\n while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];\n if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])\n n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];\n\n while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];\n if (n_threads[2] > 64)\n n_threads[2] = 64;\n if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])\n n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];\n\n size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};\n %(makecall)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n\n def c_code_cache_version_apply(self, node):\n version = [\n 24,\n self.verbose,\n ] # the version corresponding to the c code in this Op\n\n # now we insert versions for the ops on which we depend...\n scalar_node = Apply(\n self.scalar_op,\n [Scalar(dtype=input.type.dtype)() for input in node.inputs],\n [Scalar(dtype=output.type.dtype)() for output in node.outputs],\n )\n version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))\n for i in node.inputs + node.outputs:\n version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())\n version.extend(self.kernel_version(node))\n if all(version):\n return tuple(version)\n else:\n return ()\n\n def gpu_kernels(self, node, nodename):\n nd_in = len(self.reduce_mask)\n in_dtype = node.inputs[0].dtype\n out_dtype = node.outputs[0].dtype\n acc_dtype = self._acc_dtype(node.inputs[0].dtype)\n assign_dtype = in_dtype\n flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)\n in_type = gpuarray.dtype_to_ctype(in_dtype)\n out_type = gpuarray.dtype_to_ctype(out_dtype)\n acc_type = gpuarray.dtype_to_ctype(acc_dtype)\n load_in = load_w(in_dtype)\n write_out = write_w(out_dtype)\n kernels = []\n\n if all(i == 1 for i in self.reduce_mask):\n # this kernel is ok for up to a few thousand elements, but\n # it only runs on ONE multiprocessor\n reducebuf = self._k_reduce_buf(\"Z[0]\", node, nodename, sub={})\n reduce_fct = self._assign_reduce(\n node, nodename, \"myresult\", load_in + \"(A[i0])\", {}, True\n )\n reduce_init = self._assign_init(load_in + \"(A[0])\", assign_dtype)\n kname = \"kernel_reduce_ccontig\"\n k_var = \"kernel_reduce_ccontig_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0,\n const %(in_type)s *A, const ga_size offset_A,\n %(out_type)s *Z, const ga_size offset_Z)\n {\n const int threadCount = blockDim.x;\n const int threadNum = threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = %(reduce_init)s;\n\n for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)\n {\n %(reduce_fct)s\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\"uintp\", gpuarray.GpuArray, \"uintp\", gpuarray.GpuArray, \"uintp\"]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1,):\n # this kernel is ok for up to a few thousand elements, but\n # it only runs on ONE multiprocessor\n reducebuf = self._k_reduce_buf(\"Z[0]\", node, nodename, sub={})\n reduce_fct = self._assign_reduce(\n node, nodename, \"myresult\", load_in + \"(A[i0 * sA0])\", {}, True\n )\n reduce_init = self._assign_init(load_in + \"(A[0])\", assign_dtype)\n kname = \"kernel_reduce_1\"\n k_var = \"kernel_reduce_1_\" + 
nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0,\n %(out_type)s * Z, const ga_size offset_Z)\n {\n const int threadCount = blockDim.x;\n const int threadNum = threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = %(reduce_init)s;\n\n for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)\n {\n %(reduce_fct)s\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 1):\n # this kernel is ok for up to a few thousand elements, but\n # it only runs on ONE multiprocessor\n reducebuf = self._k_reduce_buf(\"Z[0]\", node, nodename, sub={})\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1])\",\n {},\n True,\n )\n reduce_init = self._assign_init(load_in + \"(A[0])\", assign_dtype)\n kname = \"kernel_reduce_11\"\n k_var = \"kernel_reduce_11_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0, const ga_size d1,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0, const ga_ssize sA1,\n %(out_type)s * Z, const ga_size offset_Z)\n {\n const int threadCount = blockDim.x * blockDim.y;\n const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = %(reduce_init)s;\n\n for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)\n {\n for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n # 01, 011, 0111\n if (\n 0 == self.reduce_mask[0]\n and all(self.reduce_mask[1:])\n and nd_in in [2, 3, 4]\n ):\n # this kernel uses one block for each row.\n # threads per block for each element per row.\n\n N_pattern = \"\".join([\"1\"] * (nd_in - 1))\n # TODO: is it faster to hardcode sA3, etc. 
in the later\n # code, rather than have the for_* variables declare them\n # and the later code use their names?\n if nd_in == 2:\n for_i1 = \"for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)\"\n first_i1 = \"threadIdx.x\"\n sA1 = \"sA1\"\n for_i2 = \"int i2=0, sA2=0;\"\n sA2 = \"0\"\n first_i2 = \"0\"\n for_i3 = \"int i3=0, sA3=0;\"\n sA3 = \"0\"\n first_i3 = \"0\"\n if nd_in == 3:\n for_i1 = \"for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)\"\n first_i1 = \"threadIdx.y\"\n sA1 = \"sA1\"\n for_i2 = \"for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)\"\n first_i2 = \"threadIdx.x\"\n sA2 = \"sA2\"\n for_i3 = \"int i3=0, sA3=0;\"\n first_i3 = 0\n sA3 = \"0\"\n if nd_in == 4:\n for_i1 = \"for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)\"\n first_i1 = \"threadIdx.z\"\n sA1 = \"sA1\"\n for_i2 = \"for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)\"\n first_i2 = \"threadIdx.y\"\n sA2 = \"sA2\"\n for_i3 = \"for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)\"\n first_i3 = \"threadIdx.x\"\n sA3 = \"sA3\"\n\n reducebuf = self._k_reduce_buf(\"Z[i0 * sZ0]\", node, nodename, sub={})\n param_dim = \",\".join([f\"const ga_size d{i}\" for i in range(nd_in)])\n param_strides = \",\".join([f\"const ga_ssize sA{i}\" for i in range(nd_in)])\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_init = self._assign_init(\n load_in\n + \"(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])\"\n % locals(),\n assign_dtype,\n )\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])\",\n {},\n True,\n )\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s{\n %(init)s\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){\n myresult = %(reduce_init)s;\n %(for_i1)s{\n %(for_i2)s{\n %(for_i3)s{\n %(reduce_fct)s;\n }\n }\n }\n %(reducebuf)s\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):\n # this kernel uses one block for each column,\n # threads per block for each element per column.\n\n # TODO: This kernel is pretty inefficient in terms of reading, because if A is\n # c_contiguous (typical case) then each warp is accessing non-contigous\n # memory (a segment of a column).\n reducebuf = self._k_reduce_buf(\n \"Z[i0 * sZ0 + i2*sZ1]\", node, nodename, sub={}\n )\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])\", assign_dtype\n )\n kname = \"kernel_reduce_010\"\n k_var = \"kernel_reduce_010_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0, const ga_size d1, const ga_size d2,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,\n %(out_type)s * Z, const ga_size offset_Z,\n const ga_ssize sZ0, const ga_ssize sZ1)\n {\n const int threadCount = blockDim.x;\n const int threadNum = threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)\n {\n for (int i2 = blockIdx.y; i2 < d2; i2 += 
gridDim.y)\n {\n %(acc_type)s myresult = %(reduce_init)s;\n for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n %(reducebuf)s\n }\n }\n\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(X[a * sX0 + b * sX1 + c * sX2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(X[a * sX0 + 0 * sX1 + c * sX2])\", assign_dtype\n )\n kname = \"kernel_reduce_010_AD\"\n k_var = \"kernel_reduce_010_AD_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size A, const ga_size B, const ga_size C, const ga_size D,\n const %(in_type)s *X, const ga_size offset_X,\n const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,\n %(out_type)s * Z, const ga_size offset_Z,\n const ga_ssize sZ0, const ga_ssize sZ1)\n {\n const int threadCount = blockDim.x;\n const int threadNum = threadIdx.x;\n X = (const %(in_type)s *)(((char *)X)+offset_X);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = 0;\n\n for (int a = blockIdx.x; a < A; a += gridDim.x)\n {\n for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)\n {\n int c = i2_D * 32 + threadIdx.x;\n if (c < C)\n {\n myresult = %(reduce_init)s;\n for (int b = 0; b < B; ++b)\n {\n %(reduce_fct)s;\n }\n Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);\n }\n }\n }\n\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (0, 1, 0):\n #\n # This kernel is optimized when the inner most dimensions\n # have the smallest stride.\n\n # this kernel uses one block for multiple column(up to 32TODO),\n # threads per block for each element per column.\n\n # thread.x = dim 2 contiguous\n # thread.y = dim 1\n # block.x = dim 0\n # block.y = dim 1 rest\n init = self._k_init(node, nodename)\n decl, kname, params, k_var = self._k_decl(\n node, nodename, pattern=\"010_inner\"\n )\n reducebuf = self._k_reduce_buf_multiple(\n \"Z[i0 * sZ0 + i2*sZ1]\", node, nodename, \"blockDim.x\"\n )\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i0 * sA0 + 0 * sA1 + i2 * sA2])\", assign_dtype\n )\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)\n {\n for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)\n {\n myresult = %(reduce_init)s;\n for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)\n {\n %(reduce_fct)s;\n }\n %(reducebuf)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 1, 0):\n # this kernel uses 
one block for each column,\n # threads per block for each element per column.\n\n # TODO: This kernel is pretty inefficient in terms of reading, because if A is\n # c_contiguous (typical case) then each warp is accessing non-contigous\n # memory (a segment of a column).\n reducebuf = self._k_reduce_buf(\n \"Z[blockIdx.x * sZ0]\", node, nodename, sub={}\n )\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[blockIdx.x * sA2])\", assign_dtype\n )\n kname = \"kernel_reduce_110\"\n k_var = \"kernel_reduce_110_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0, const ga_size d1, const ga_size d2,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,\n %(out_type)s * Z, const ga_size offset_Z,\n const ga_ssize sZ0)\n {\n const int threadCount = blockDim.x * blockDim.y;\n const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = %(reduce_init)s;\n\n for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)\n {\n for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 0, 0):\n reducebuf = self._k_reduce_buf(\n \"Z[i1 * sZ0 + i2 * sZ1]\", node, nodename, sub={}\n )\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i1 * sA1 + i2 * sA2])\", assign_dtype\n )\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)\n {\n for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)\n {\n myresult = %(reduce_init)s;\n for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)\n {\n %(reduce_fct)s\n }\n %(reducebuf)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 1, 1):\n reducebuf = self._k_reduce_buf(\"Z[0]\", node, nodename, sub={})\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(load_in + \"(A[0])\", assign_dtype)\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n myresult = %(reduce_init)s;\n for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)\n {\n for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)\n {\n for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n 
)\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (0, 0, 1):\n # this kernel uses one block for each row,\n # threads per block for each element per row.\n reducebuf = self._k_reduce_buf(\n \"Z[i0 * sZ0 + i1 * sZ1]\", node, nodename, sub={}\n )\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i0 * sA0 + i1 * sA1])\", assign_dtype\n )\n kname = \"kernel_reduce_001\"\n k_var = \"kernel_reduce_001_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n KERNEL void %(kname)s(\n const ga_size d0, const ga_size d1, const ga_size d2,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,\n %(out_type)s * Z, const ga_size offset_Z,\n const ga_ssize sZ0, const ga_ssize sZ1)\n {\n const int threadCount = blockDim.x;\n const int threadNum = threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)\n {\n for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)\n {\n %(acc_type)s myresult = %(reduce_init)s;\n for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n %(reducebuf)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (0, 0, 1, 1):\n # this kernel uses one block for each row,\n # threads per block for each element per row.\n reducebuf = self._k_reduce_buf(\n \"Z[i0 * sZ0 + i1 * sZ1]\", node, nodename, sub={}\n )\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i0 * sA0 + i1 * sA1])\", assign_dtype\n )\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)\n {\n for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)\n {\n %(acc_type)s myresult = %(reduce_init)s;\n for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)\n {\n for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n %(reducebuf)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (0, 1, 0, 1):\n # this kernel uses one block for each row,\n # threads per block for each element per row.\n reducebuf = self._k_reduce_buf(\n \"Z[i0 * sZ0 + i2 * sZ1]\", node, nodename, sub={}\n )\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[i0 * sA0 + i2 * sA2])\", assign_dtype\n )\n sio = 
StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n\n for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)\n {\n for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)\n {\n %(acc_type)s myresult = %(reduce_init)s;\n for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)\n {\n for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n %(reducebuf)s\n }\n }\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 1, 1, 1):\n reducebuf = self._k_reduce_buf(\"Z[0]\", node, nodename, sub={})\n decl, kname, params, k_var = self._k_decl(node, nodename)\n init = self._k_init(node, nodename)\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])\",\n {},\n True,\n )\n reduce_init = self._assign_init(load_in + \"(A[0])\", assign_dtype)\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n %(decl)s\n {\n %(init)s\n myresult = %(reduce_init)s;\n for (int i0 = 0; i0 < d0; i0++)\n for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)\n {\n for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)\n {\n for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):\n reducebuf = self._k_reduce_buf(\"Z[blockIdx.x*sZ0]\", node, nodename, sub={})\n reduce_fct = self._assign_reduce(\n node,\n nodename,\n \"myresult\",\n load_in + \"(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])\",\n {},\n True,\n )\n reduce_init = self._assign_init(\n load_in + \"(A[blockIdx.x * sA1])\", assign_dtype\n )\n kname = \"kernel_reduce_1011\"\n k_var = \"kernel_reduce_1011_\" + nodename\n sio = StringIO()\n print(\n \"\"\"#include \"cluda.h\"\n\n KERNEL void %(kname)s(\n const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,\n const %(in_type)s *A, const ga_size offset_A,\n const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,\n %(out_type)s * Z, const ga_size offset_Z,\n const ga_ssize sZ0)\n {\n const int threadCount = blockDim.x * blockDim.y * blockDim.z;\n const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;\n extern __shared__ %(acc_type)s buf[];\n A = (const %(in_type)s *)(((char *)A)+offset_A);\n Z = (%(out_type)s *)(((char *)Z)+offset_Z);\n %(acc_type)s myresult = %(reduce_init)s;\n\n for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)\n {\n for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)\n {\n for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)\n {\n %(reduce_fct)s;\n }\n }\n }\n %(reducebuf)s\n }\n \"\"\"\n % locals(),\n file=sio,\n )\n params = [\n \"uintp\",\n \"uintp\",\n \"uintp\",\n \"uintp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n \"intp\",\n \"intp\",\n \"intp\",\n gpuarray.GpuArray,\n \"uintp\",\n \"intp\",\n ]\n kernels.append(\n Kernel(\n code=sio.getvalue(),\n name=kname,\n params=params,\n flags=flags,\n objvar=k_var,\n )\n )\n return kernels\n\n\nclass GpuErfinv(Erfinv):\n \"\"\"\n Inverse error function for GPU.\n\n \"\"\"\n\n def c_headers(self):\n return [\"math_functions.h\", \"cublas_v2.h\"]\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = 
inp\n (z,) = out\n if node.inputs[0].type in complex_types:\n raise NotImplementedError(\"type not supported\", type)\n # NB: CUDA erfinv function (GPU op) returns NaN if x not in [-1;1],\n # while `scipy.special.erfinv` (CPU op) returns an infinite (-inf if x < -1, +inf if x > 1).\n # For consistency of CPU and GPU ops, we wrap the CUDA erfinv in the following conditions\n # to ensure that GPU op returns the same values as CPU op.\n return (\n \"%(z)s = (%(x)s <= -1) ? erfinv(-1.0): ((%(x)s >= 1) ? erfinv(1.0): erfinv(%(x)s));\"\n % locals()\n )\n\n\ngpu_erfinv = GpuErfinv(upgrade_to_float_no_complex, name=\"gpu_erfinv\")\n\n\nclass GpuErfcinv(Erfcinv):\n \"\"\"\n Inverse complementary error function for GPU.\n\n \"\"\"\n\n def c_headers(self):\n return [\"math_functions.h\", \"cublas_v2.h\"]\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (z,) = out\n if node.inputs[0].type in complex_types:\n raise NotImplementedError(\"type not supported\", type)\n # NB: CUDA erfcinv function (GPU op) returns NaN if x not in [0;2],\n # while `scipy.special.erfcinv` (CPU op) returns an infinite (+inf if x < 0, -inf if x > 2).\n # For consistency of CPU and GPU ops, we wrap the CUDA erfcinv in the following conditions\n # to ensure that GPU op returns the same values as CPU op.\n return (\n \"%(z)s = (%(x)s <= 0) ? erfcinv(0.0): ((%(x)s >= 2) ? erfcinv(2.0): erfcinv(%(x)s));\"\n % locals()\n )\n\n\ngpu_erfcinv = GpuErfcinv(upgrade_to_float_no_complex, name=\"gpu_erfcinv\")\n\n\n# Caching GpuCAReduceCuda\ndef gpu_ca_reduce_cuda(\n scalar_op,\n axis=None,\n reduce_mask=None,\n dtype=None,\n acc_dtype=None,\n pre_scalar_op=None,\n):\n key = (scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op)\n if key not in gpu_ca_reduce_cuda.cache:\n gpu_ca_reduce_cuda.cache[key] = GpuCAReduceCuda(\n scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op\n )\n return gpu_ca_reduce_cuda.cache[key]\n\n\ngpu_ca_reduce_cuda.cache = {}\n\n\nclass GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):\n \"\"\"\n CAReduce that reuse the python code from gpuarray.\n\n \"\"\"\n\n def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):\n if not hasattr(scalar_op, \"identity\"):\n raise ValueError(\"No identity on scalar op\")\n CAReduceDtype.__init__(\n self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype\n )\n\n def __str__(self):\n ax = \"\"\n if self.axis is not None:\n ax = f\"{{{', '.join(str(x) for x in self.axis)}}}\"\n return f\"GpuReduce{{{self.scalar_op}}}{ax}\"\n\n def make_node(self, input):\n ctx_name = infer_context_name(input)\n res = CAReduceDtype.make_node(self, input)\n input = as_gpuarray_variable(input, ctx_name)\n otype = GpuArrayType(\n dtype=res.outputs[0].dtype,\n broadcastable=res.outputs[0].broadcastable,\n context_name=ctx_name,\n )\n\n if res.op.axis is not None:\n redux = []\n for i in range(len(input.type.broadcastable)):\n redux.append(i in res.op.axis)\n # since redux is just another way to describe what is in axis\n # it doesn't need to be compared in __eq__ or __hash__\n res.op.redux = redux\n\n return Apply(res.op, [input], [otype()])\n\n def get_params(self, node):\n return node.outputs[0].type.context\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n # cache the kernel object\n self.get_kernel_cache(node)\n\n def get_kernel_cache(self, node):\n attr = \"@cache_reduction_k\"\n if self.axis is None:\n redux = [True] * node.inputs[0].ndim\n else:\n redux = self.redux\n if not hasattr(node, attr):\n acc_dtype = getattr(self, 
\"acc_dtype\", None)\n if acc_dtype is None:\n acc_dtype = node.outputs[0].type.dtype\n if any(redux):\n setattr(node, attr, self.generate_kernel(node, acc_dtype, redux))\n\n if any(redux):\n return getattr(node, attr)\n\n def gpu_kernels(self, node, name):\n if not any(getattr(self, \"redux\", [node.inputs[0].ndim != 0])):\n # Some OpenCL compilers do not accept no-arguments empty kernels\n src = '#include \"cluda.h\"\\nKERNEL void reduk(GLOBAL_MEM float *a) { a[0] = 0; }'\n params = [\"float32\"]\n else:\n k = self.get_kernel_cache(node)\n _, src, _, _ = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)\n nd = node.inputs[0].ndim\n params = [\"uint32\", gpuarray.GpuArray, \"uint32\"]\n params.extend(\"uint32\" for _ in range(nd))\n params.append(gpuarray.GpuArray)\n params.append(\"uint32\")\n params.extend(\"int32\" for _ in range(nd))\n acc_dtype = getattr(self, \"acc_dtype\", None)\n if acc_dtype is None:\n acc_dtype = node.outputs[0].type.dtype\n return [\n Kernel(\n code=src,\n name=\"reduk\",\n params=params,\n flags=Kernel.get_flags(\n node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype\n ),\n objvar=\"k_reduk_\" + name,\n )\n ]\n\n def c_code(self, node, name, inp, out, sub):\n if not any(getattr(self, \"redux\", [node.inputs[0].ndim != 0])):\n # We special case the no-reduction case since the gpu\n # kernel has trouble handling it.\n return \"\"\"\n Py_XDECREF(%(out)s);\n %(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);\n if (!%(out)s) {\n %(fail)s\n }\n\n \"\"\" % dict(\n out=out[0], inp=inp[0], fail=sub[\"fail\"]\n )\n k = self.get_kernel_cache(node)\n _, src, _, ls = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)\n if self.axis is None:\n redux = [True] * node.inputs[0].ndim\n else:\n redux = self.redux\n acc_dtype = getattr(self, \"acc_dtype\", None)\n if acc_dtype is None:\n acc_dtype = node.outputs[0].type.dtype\n input = inp[0]\n output = out[0]\n nd_out = node.outputs[0].ndim\n code = \"\"\"\n size_t gs = 1;\n size_t ls;\n unsigned int n = 1;\n unsigned int proxy_dim[%(nd_in)s];\n unsigned int proxy_off;\n int proxy_str[%(nd_in)s];\n void *args[%(n_args)s];\n PyGpuArrayObject *tmp;\n int err;\n\"\"\" % dict(\n n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim\n )\n\n if nd_out != 0:\n code += \"\"\"\n size_t out_dims[%(nd_out)s];\n int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;\n\"\"\" % dict(\n nd_out=nd_out, output=output\n )\n j = 0\n for i in range(node.inputs[0].ndim):\n if not self.redux[i]:\n code += \"\"\"\n out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];\n if (!need_out)\n need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];\n\"\"\" % dict(\n j=j, i=i, input=input, output=output\n )\n j += 1\n code += \"\"\"\n if (need_out) {\n %(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);\n if (!%(output)s) {\n %(fail)s\n }\n }\n \"\"\" % dict(\n output=output,\n nd_out=nd_out,\n fail=sub[\"fail\"],\n ctx=sub[\"params\"],\n out_type=dtype_to_typecode(node.outputs[0].type.dtype),\n )\n else:\n code += \"\"\"\n if (%(output)s == NULL || %(output)s->ga.nd != 0) {\n Py_XDECREF(%(output)s);\n %(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,\n %(ctx)s, Py_None);\n if (!%(output)s) {\n %(fail)s\n }\n }\n \"\"\" % dict(\n output=output,\n fail=sub[\"fail\"],\n ctx=sub[\"params\"],\n out_type=dtype_to_typecode(node.outputs[0].type.dtype),\n )\n\n if acc_dtype != node.outputs[0].type.dtype:\n code += \"\"\"\n tmp = pygpu_empty(%(output)s->ga.nd, 
%(output)s->ga.dimensions,\n %(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);\n if (!tmp) %(fail)s\n \"\"\" % dict(\n output=output,\n fail=sub[\"fail\"],\n ctx=sub[\"params\"],\n acc_type=dtype_to_typecode(acc_dtype),\n )\n else:\n code += f\"\"\"\n tmp = {output};\n Py_INCREF(tmp);\n \"\"\"\n\n # We need the proxies since we are passing a pointer to the\n # data into the call and therefore we need a real copy of the\n # data in the proper type.\n code += \"\"\"\n args[0] = &n;\n args[1] = tmp->ga.data;\n args[2] = &tmp->ga.offset;\n \"\"\" % dict(\n output=output\n )\n\n p = 3\n for i in range(node.inputs[0].ndim):\n code += \"\"\"\n proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];\n args[%(p)s] = &proxy_dim[%(i)s];\n n *= %(input)s->ga.dimensions[%(i)s];\n \"\"\" % dict(\n i=i, p=p, input=input\n )\n p += 1\n if not redux[i]:\n code += \"gs *= %(input)s->ga.dimensions[%(i)s];\" % dict(\n input=input, i=i\n )\n\n code += \"\"\"\n args[%(p)s] = %(input)s->ga.data;\n proxy_off = %(input)s->ga.offset;\n args[%(p)s+1] = &proxy_off;\n \"\"\" % dict(\n p=p, input=input\n )\n p += 2\n\n for i in range(node.inputs[0].ndim):\n code += \"\"\"\n proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];\n args[%(p)s] = &proxy_str[%(i)s];\n \"\"\" % dict(\n p=p, i=i, input=input\n )\n p += 1\n\n code += \"\"\"\n if (gs == 0) gs = 1;\n n /= gs;\n ls = %(ls)s;\n err = GpuKernel_call(&%(k_var)s, 1, &gs, &ls, 0, args);\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: GpuCAReduceCPY: %%s.\",\n GpuKernel_error(&%(k_var)s, err));\n %(fail)s\n }\n\n if (%(cast_out)d) {\n err = GpuArray_move(&%(output)s->ga, &tmp->ga);\n Py_XDECREF(tmp);\n if (err != GA_NO_ERROR) {\n PyErr_Format(PyExc_RuntimeError,\n \"gpuarray error: GpuCAReduceCPY [cast]: %%s.\",\n GpuArray_error(&tmp->ga, err));\n %(fail)s\n }\n } else {\n Py_XDECREF(%(output)s);\n %(output)s = tmp;\n }\n\n \"\"\" % dict(\n k_var=\"k_reduk_\" + name,\n ls=ls,\n fail=sub[\"fail\"],\n output=output,\n input=input,\n cast_out=bool(acc_dtype != node.outputs[0].type.dtype),\n )\n\n return code\n\n def c_code_cache_version_apply(self, node):\n return (4, self.kernel_version(node))\n\n def generate_kernel(self, node, odtype, redux):\n if isinstance(self.scalar_op, scalar.basic.Add):\n reduce_expr = \"a + b\"\n elif isinstance(self.scalar_op, scalar.basic.Mul):\n reduce_expr = \"a * b\"\n else:\n raise NotImplementedError()\n return ReductionKernel(\n node.inputs[0].type.context,\n odtype,\n self.scalar_op.identity,\n reduce_expr,\n redux,\n arguments=[make_argument(node.inputs[0], \"a\")],\n init_nd=node.inputs[0].ndim,\n )\n\n def perform(self, node, inp, out, ctx):\n (input,) = inp\n (output,) = out\n\n if self.axis is None:\n redux = [True] * input.ndim\n else:\n redux = self.redux\n\n if any(redux):\n output[0] = self.get_kernel_cache(node)(input).astype(\n copy=False, dtype=node.outputs[0].type.dtype\n )\n else:\n output[0] = pygpu.gpuarray.array(\n input, copy=True, dtype=node.outputs[0].type.dtype, context=ctx\n )\n\n\n# To allow reloading old pickled files\nGpuCAReduce = GpuCAReduceCPY\n",
"import numpy as np\nimport scipy.stats as stats\n\nimport theano\nfrom theano.tensor.basic import as_tensor_variable\nfrom theano.tensor.random.op import RandomVariable, default_shape_from_params\nfrom theano.tensor.random.utils import broadcast_params\n\n\ntry:\n from pypolyagamma import PyPolyaGamma\nexcept ImportError: # pragma: no cover\n\n def PyPolyaGamma(*args, **kwargs):\n raise RuntimeError(\"pypolygamma not installed!\")\n\n\nclass UniformRV(RandomVariable):\n name = \"uniform\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"U\", \"\\\\operatorname{U}\")\n\n def __call__(self, low=0.0, high=1.0, size=None, **kwargs):\n return super().__call__(low, high, size=size, **kwargs)\n\n\nuniform = UniformRV()\n\n\nclass BetaRV(RandomVariable):\n name = \"beta\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"Beta\", \"\\\\operatorname{Beta}\")\n\n\nbeta = BetaRV()\n\n\nclass NormalRV(RandomVariable):\n name = \"normal\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"N\", \"\\\\operatorname{N}\")\n\n def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)\n\n\nnormal = NormalRV()\n\n\nclass HalfNormalRV(RandomVariable):\n name = \"halfnormal\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"N**+\", \"\\\\operatorname{N^{+}}\")\n\n def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, loc, scale, size):\n return stats.halfnorm.rvs(loc, scale, random_state=rng, size=size)\n\n\nhalfnormal = HalfNormalRV()\n\n\nclass GammaRV(RandomVariable):\n name = \"halfnormal\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"Gamma\", \"\\\\operatorname{Gamma}\")\n\n def __call__(self, shape, rate, size=None, **kwargs):\n return super().__call__(shape, 1.0 / rate, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, shape, scale, size):\n return stats.gamma.rvs(shape, scale=scale, size=size, random_state=rng)\n\n\ngamma = GammaRV()\n\n\nclass ExponentialRV(RandomVariable):\n name = \"exponential\"\n ndim_supp = 0\n ndims_params = [0]\n dtype = \"floatX\"\n _print_name = (\"Exp\", \"\\\\operatorname{Exp}\")\n\n def __call__(self, scale=1.0, size=None, **kwargs):\n return super().__call__(scale, size=size, **kwargs)\n\n\nexponential = ExponentialRV()\n\n\ndef safe_multivariate_normal(mean, cov, size=None, rng=None):\n \"\"\"A shape consistent multivariate normal sampler.\n\n What we mean by \"shape consistent\": SciPy will return scalars when the\n arguments are vectors with dimension of size 1. 
We require that the output\n be at least 1D, so that it's consistent with the underlying random\n variable.\n\n \"\"\"\n res = np.atleast_1d(\n stats.multivariate_normal(mean=mean, cov=cov, allow_singular=True).rvs(\n size=size, random_state=rng\n )\n )\n\n if size is not None:\n res = res.reshape(list(size) + [-1])\n\n return res\n\n\nclass MvNormalRV(RandomVariable):\n name = \"multivariate_normal\"\n ndim_supp = 1\n ndims_params = [1, 2]\n dtype = \"floatX\"\n _print_name = (\"N\", \"\\\\operatorname{N}\")\n\n def __call__(self, mean=None, cov=None, size=None, **kwargs):\n\n dtype = theano.config.floatX if self.dtype == \"floatX\" else self.dtype\n\n if mean is None:\n mean = np.array([0.0], dtype=dtype)\n if cov is None:\n cov = np.array([[1.0]], dtype=dtype)\n return super().__call__(mean, cov, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, mean, cov, size):\n\n if mean.ndim > 1 or cov.ndim > 2:\n # Neither SciPy nor NumPy implement parameter broadcasting for\n # multivariate normals (or many other multivariate distributions),\n # so we have implement a quick and dirty one here\n mean, cov = broadcast_params([mean, cov], cls.ndims_params)\n size = tuple(size or ())\n\n if size:\n mean = np.broadcast_to(mean, size + mean.shape)\n cov = np.broadcast_to(cov, size + cov.shape)\n\n res = np.empty(mean.shape)\n for idx in np.ndindex(mean.shape[:-1]):\n m = mean[idx]\n c = cov[idx]\n res[idx] = safe_multivariate_normal(m, c, rng=rng)\n return res\n else:\n return safe_multivariate_normal(mean, cov, size=size, rng=rng)\n\n\nmultivariate_normal = MvNormalRV()\n\n\nclass DirichletRV(RandomVariable):\n name = \"dirichlet\"\n ndim_supp = 1\n ndims_params = [1]\n dtype = \"floatX\"\n _print_name = (\"Dir\", \"\\\\operatorname{Dir}\")\n\n @classmethod\n def rng_fn(cls, rng, alphas, size):\n if size is None:\n size = ()\n samples_shape = tuple(np.atleast_1d(size)) + alphas.shape\n samples = np.empty(samples_shape)\n alphas_bcast = np.broadcast_to(alphas, samples_shape)\n\n for index in np.ndindex(*samples_shape[:-1]):\n samples[index] = rng.dirichlet(alphas_bcast[index])\n\n return samples\n\n\ndirichlet = DirichletRV()\n\n\nclass PoissonRV(RandomVariable):\n name = \"poisson\"\n ndim_supp = 0\n ndims_params = [0]\n dtype = \"int64\"\n _print_name = (\"Pois\", \"\\\\operatorname{Pois}\")\n\n def __call__(self, lam=1.0, size=None, **kwargs):\n return super().__call__(lam, size=size, **kwargs)\n\n\npoisson = PoissonRV()\n\n\nclass CauchyRV(RandomVariable):\n name = \"cauchy\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"C\", \"\\\\operatorname{C}\")\n\n def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, loc, scale, size):\n return stats.cauchy.rvs(loc=loc, scale=scale, random_state=rng, size=size)\n\n\ncauchy = CauchyRV()\n\n\nclass HalfCauchyRV(RandomVariable):\n name = \"cauchy\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"C**+\", \"\\\\operatorname{C^{+}}\")\n\n def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)\n\n @classmethod\n def rng_fn(cls, rng, loc, scale, size):\n return stats.halfcauchy.rvs(loc=loc, scale=scale, random_state=rng, size=size)\n\n\nhalfcauchy = HalfCauchyRV()\n\n\nclass InvGammaRV(RandomVariable):\n name = \"invgamma\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"InvGamma\", 
\"\\\\operatorname{Gamma^{-1}}\")\n\n @classmethod\n def rng_fn(cls, rng, shape, rate, size=None):\n return stats.invgamma.rvs(shape, scale=rate, size=size, random_state=rng)\n\n\ninvgamma = InvGammaRV()\n\n\nclass TruncExponentialRV(RandomVariable):\n name = \"truncexpon\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"floatX\"\n _print_name = (\"TruncExp\", \"\\\\operatorname{TruncExp}\")\n\n @classmethod\n def rng_fn(cls, rng, b, loc, scale, size=None):\n return stats.truncexpon.rvs(\n b, loc=loc, scale=scale, size=size, random_state=rng\n )\n\n\ntruncexpon = TruncExponentialRV()\n\n\nclass BernoulliRV(RandomVariable):\n name = \"bernoulli\"\n ndim_supp = 0\n ndims_params = [0]\n dtype = \"int64\"\n _print_name = (\"Bern\", \"\\\\operatorname{Bern}\")\n\n @classmethod\n def rng_fn(cls, rng, p, size=None):\n return stats.bernoulli.rvs(p, size=size, random_state=rng)\n\n\nbernoulli = BernoulliRV()\n\n\nclass BinomialRV(RandomVariable):\n name = \"binomial\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"int64\"\n _print_name = (\"Binom\", \"\\\\operatorname{Binom}\")\n\n\nbinomial = BinomialRV()\n\n\nclass NegBinomialRV(RandomVariable):\n name = \"nbinom\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"int64\"\n _print_name = (\"NB\", \"\\\\operatorname{NB}\")\n\n @classmethod\n def rng_fn(cls, rng, n, p, size=None):\n return stats.nbinom.rvs(n, p, size=size, random_state=rng)\n\n\nnbinom = NegBinomialRV()\n\n\nclass BetaBinomialRV(RandomVariable):\n name = \"beta_binomial\"\n ndim_supp = 0\n ndims_params = [0, 0, 0]\n dtype = \"int64\"\n _print_name = (\"BetaBinom\", \"\\\\operatorname{BetaBinom}\")\n\n @classmethod\n def rng_fn(cls, rng, n, a, b, size=None):\n return stats.betabinom.rvs(n, a, b, size=size, random_state=rng)\n\n\nbetabinom = BetaBinomialRV()\n\n\nclass MultinomialRV(RandomVariable):\n \"\"\"A Multinomial random variable type.\n\n FYI: Support shape is determined by the first dimension in the *second*\n parameter (i.e. 
the probabilities vector).\n\n \"\"\"\n\n name = \"multinomial\"\n ndim_supp = 1\n ndims_params = [0, 1]\n dtype = \"int64\"\n _print_name = (\"MN\", \"\\\\operatorname{MN}\")\n\n def _shape_from_params(self, dist_params, rep_param_idx=1, param_shapes=None):\n return default_shape_from_params(\n self.ndim_supp, dist_params, rep_param_idx, param_shapes\n )\n\n\nmultinomial = MultinomialRV()\n\nvsearchsorted = np.vectorize(np.searchsorted, otypes=[np.int], signature=\"(n),()->()\")\n\n\nclass CategoricalRV(RandomVariable):\n name = \"categorical\"\n ndim_supp = 0\n ndims_params = [1]\n dtype = \"int64\"\n _print_name = (\"Cat\", \"\\\\operatorname{Cat}\")\n\n @classmethod\n def rng_fn(cls, rng, p, size):\n if size is None:\n size = ()\n\n size = tuple(np.atleast_1d(size))\n ind_shape = p.shape[:-1]\n\n if len(size) > 0 and size[-len(ind_shape) :] != ind_shape:\n raise ValueError(\"Parameters shape and size do not match.\")\n\n samples_shape = size[: -len(ind_shape)] + ind_shape\n unif_samples = rng.uniform(size=samples_shape)\n samples = vsearchsorted(p.cumsum(axis=-1), unif_samples)\n\n return samples\n\n\ncategorical = CategoricalRV()\n\n\nclass PolyaGammaRV(RandomVariable):\n \"\"\"Polya-Gamma random variable.\n\n XXX: This doesn't really use the given RNG, due to the narrowness of the\n sampler package's implementation.\n \"\"\"\n\n name = \"polya-gamma\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"floatX\"\n _print_name = (\"PG\", \"\\\\operatorname{PG}\")\n\n @classmethod\n def rng_fn(cls, rng, b, c, size):\n pg = PyPolyaGamma(rng.randint(2 ** 16))\n\n if not size and b.shape == c.shape == ():\n return pg.pgdraw(b, c)\n else:\n b, c = np.broadcast_arrays(b, c)\n size = tuple(size or ())\n\n if len(size) > 0:\n b = np.broadcast_to(b, size)\n c = np.broadcast_to(c, size)\n\n smpl_val = np.empty(b.shape, dtype=\"double\")\n\n pg.pgdrawv(\n np.asarray(b.flat).astype(\"double\", copy=True),\n np.asarray(c.flat).astype(\"double\", copy=True),\n np.asarray(smpl_val.flat),\n )\n return smpl_val\n\n\npolyagamma = PolyaGammaRV()\n\n\nclass RandIntRV(RandomVariable):\n name = \"randint\"\n ndim_supp = 0\n ndims_params = [0, 0]\n dtype = \"int64\"\n _print_name = (\"randint\", \"\\\\operatorname{randint}\")\n\n def __call__(self, low, high=None, size=None, **kwargs):\n if high is None:\n low, high = 0, low\n return super().__call__(low, high, size=size, **kwargs)\n\n\nrandint = RandIntRV()\n\n\nclass ChoiceRV(RandomVariable):\n name = \"choice\"\n ndim_supp = 0\n ndims_params = [1, 1, 0]\n dtype = None\n _print_name = (\"choice\", \"\\\\operatorname{choice}\")\n\n @classmethod\n def rng_fn(cls, rng, a, p, replace, size):\n return rng.choice(a, size, replace, p)\n\n def _shape_from_params(self, *args, **kwargs):\n raise NotImplementedError()\n\n def _infer_shape(self, size, dist_params, param_shapes=None):\n return size\n\n def __call__(self, a, size=None, replace=True, p=None, **kwargs):\n\n a = as_tensor_variable(a, ndim=1)\n\n if p is None:\n p = theano.tensor.type_other.NoneConst.clone()\n\n if isinstance(replace, bool):\n replace = theano.tensor.constant(np.array(replace))\n\n return super().__call__(a, p, replace, size=size, dtype=a.dtype, **kwargs)\n\n\nchoice = ChoiceRV()\n\n\nclass PermutationRV(RandomVariable):\n name = \"permutation\"\n ndim_supp = 1\n ndims_params = [1]\n dtype = None\n _print_name = (\"permutation\", \"\\\\operatorname{permutation}\")\n\n @classmethod\n def rng_fn(cls, rng, x, size):\n return rng.permutation(x if x.ndim > 0 else x.item())\n\n def 
_infer_shape(self, size, dist_params, param_shapes=None):\n\n param_shapes = param_shapes or [p.shape for p in dist_params]\n\n (x,) = dist_params\n (x_shape,) = param_shapes\n\n if x.ndim == 0:\n return (x,)\n else:\n return x_shape\n\n def __call__(self, x, **kwargs):\n x = as_tensor_variable(x)\n return super().__call__(x, dtype=x.dtype, **kwargs)\n\n\npermutation = PermutationRV()\n",
"from collections.abc import Collection\n\nimport numpy as np\n\nimport theano\nfrom theano.gof import EnumList, Generic, ParamsType\nfrom theano.gof.graph import Apply\nfrom theano.gof.op import COp, Op\nfrom theano.gradient import (\n DisconnectedType,\n _float_zeros_like,\n disconnected_type,\n grad_undefined,\n)\nfrom theano.misc.safe_asarray import _asarray\nfrom theano.scalar import int32 as int_t\nfrom theano.scalar import upcast\nfrom theano.tensor import basic, nlinalg\nfrom theano.utils import LOCAL_BITWIDTH, PYTHON_INT_BITWIDTH\n\n\nclass CpuContiguous(COp):\n \"\"\"\n Check to see if the input is c-contiguous,\n if it is, do nothing, else return a contiguous array.\n \"\"\"\n\n __props__ = ()\n view_map = {0: [0]}\n check_input = False\n\n def make_node(self, x):\n x_ = theano.tensor.as_tensor_variable(x)\n return theano.Apply(self, [x_], [x_.type()])\n\n def perform(self, node, inputs, output_storage):\n (x,) = inputs\n y = output_storage[0]\n # if the ouput is contiguous do nothing, else copy\n # the input\n if not x.flags[\"C_CONTIGUOUS\"]:\n x = x.copy()\n assert x.flags[\"C_CONTIGUOUS\"]\n y[0] = x\n\n def grad(self, inputs, dout):\n return [theano.tensor.as_tensor_variable(dout[0])]\n\n def c_code(self, node, name, inames, onames, sub):\n (x,) = inames\n (y,) = onames\n code = (\n \"\"\"\n if (!PyArray_CHKFLAGS(%(x)s, NPY_ARRAY_C_CONTIGUOUS)){\n // check to see if output is contiguous first\n if (%(y)s != NULL &&\n PyArray_CompareLists(PyArray_DIMS(%(y)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s)) &&\n PyArray_CHKFLAGS(%(y)s, NPY_ARRAY_C_CONTIGUOUS)){\n PyArray_CopyInto(%(y)s, %(x)s);\n }\n else{\n Py_XDECREF(%(y)s);\n %(y)s = PyArray_GETCONTIGUOUS(%(x)s);\n }\n }\n else{\n Py_XINCREF(%(x)s);\n Py_XDECREF(%(y)s);\n %(y)s = %(x)s;\n }\n \"\"\"\n % locals()\n )\n return code\n\n def c_code_cache_version(self):\n return (1,)\n\n\ncpu_contiguous = CpuContiguous()\n\n\nclass SearchsortedOp(COp):\n \"\"\"Wrapper of numpy.searchsorted.\n\n For full documentation, see :func:`searchsorted`.\n\n See Also\n --------\n searchsorted : numpy-like function to use the SearchsortedOp\n\n \"\"\"\n\n params_type = Generic()\n __props__ = (\"side\",)\n check_input = False\n\n def __init__(self, side=\"left\"):\n if side == \"left\" or side == \"right\":\n self.side = side\n else:\n raise ValueError(f\"'{side}' is an invalid value for keyword 'side'\")\n\n def get_params(self, node):\n return self.side\n\n def make_node(self, x, v, sorter=None):\n x = basic.as_tensor(x, ndim=1)\n v = basic.as_tensor(v)\n out_type = v.type.clone(dtype=\"int64\")\n if sorter is None:\n return theano.Apply(self, [x, v], [out_type()])\n else:\n sorter = basic.as_tensor(sorter, ndim=1)\n if PYTHON_INT_BITWIDTH == 32 and sorter.dtype == \"int64\":\n raise TypeError(\n \"numpy.searchsorted with Python 32bit do not support a\"\n \" sorter of int64.\"\n )\n if sorter.type not in basic.int_vector_types:\n raise TypeError(\"sorter must be an integer vector\", sorter.type)\n return theano.Apply(self, [x, v, sorter], [out_type()])\n\n def infer_shape(self, fgraph, node, shapes):\n return [shapes[1]]\n\n def perform(self, node, inputs, output_storage, params):\n x = inputs[0]\n v = inputs[1]\n if len(node.inputs) == 3:\n sorter = inputs[2]\n else:\n sorter = None\n z = output_storage[0]\n\n z[0] = np.searchsorted(x, v, side=params, sorter=sorter).astype(\n node.outputs[0].dtype\n )\n\n def c_support_code_struct(self, node, name):\n return f\"\"\"\n int right_{name};\n \"\"\"\n\n def c_init_code_struct(self, node, name, 
sub):\n side = sub[\"params\"]\n fail = sub[\"fail\"]\n return (\n \"\"\"\n PyObject* tmp_%(name)s = PyUnicode_FromString(\"right\");\n if (tmp_%(name)s == NULL)\n %(fail)s;\n right_%(name)s = PyUnicode_Compare(%(side)s, tmp_%(name)s);\n Py_DECREF(tmp_%(name)s);\n \"\"\"\n % locals()\n )\n\n def c_code(self, node, name, inames, onames, sub):\n sorter = None\n if len(node.inputs) == 3:\n x, v, sorter = inames\n else:\n x, v = inames\n if not sorter:\n sorter = \"NULL\"\n (z,) = onames\n fail = sub[\"fail\"]\n\n return (\n \"\"\"\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_SearchSorted(%(x)s, (PyObject*) %(v)s,\n right_%(name)s ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, (PyObject*) %(sorter)s);\n if (!%(z)s)\n %(fail)s;\n if (PyArray_TYPE(%(z)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(z)s, NPY_INT64);\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) tmp;\n }\n \"\"\"\n % locals()\n )\n\n def c_code_cache_version(self):\n return (2,)\n\n def grad(self, inputs, output_gradients):\n num_ins = len(inputs)\n if num_ins == 3:\n x, v, sorter = inputs\n else:\n x, v = inputs\n\n x_grad = _float_zeros_like(x)\n v_grad = _float_zeros_like(v)\n if num_ins == 3:\n return [x_grad, v_grad, disconnected_type()]\n else:\n return [x_grad, v_grad]\n\n\ndef searchsorted(x, v, side=\"left\", sorter=None):\n \"\"\"Find indices where elements should be inserted to maintain order.\n\n Wrapping of numpy.searchsorted. Find the indices into a sorted array\n `x` such that, if the corresponding elements in `v` were inserted\n before the indices, the order of `x` would be preserved.\n\n Parameters\n ----------\n x: 1-D tensor (array-like)\n Input array. If `sorter` is None, then it must be sorted in\n ascending order, otherwise `sorter` must be an array of indices\n which sorts it.\n v: tensor (array-like)\n Contains the values to be inserted into `x`.\n side: {'left', 'right'}, optional.\n If 'left' (default), the index of the first suitable\n location found is given. If 'right', return the last such index. If\n there is no suitable index, return either 0 or N (where N is the length\n of `x`).\n sorter: 1-D tensor of integers (array-like), optional\n Contains indices that sort array `x` into ascending order.\n They are typically the result of argsort.\n\n Returns\n -------\n indices : tensor of integers (int64)\n Array of insertion points with the same shape as `v`.\n\n See Also\n --------\n `numpy.searchsorted <https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.searchsorted.html>`_\n\n Notes\n -----\n * Binary search is used to find the required insertion points.\n * This Op is working **only on CPU** currently.\n\n Examples\n --------\n >>> from theano import tensor\n >>> x = tensor.dvector()\n >>> idx = x.searchsorted(3)\n >>> idx.eval({x: [1,2,3,4,5]})\n array(2)\n >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3).eval()\n array(2)\n >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3, side='right').eval()\n array(3)\n >>> tensor.extra_ops.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]).eval()\n array([0, 5, 1, 2])\n\n .. 
versionadded:: 0.9\n\n \"\"\"\n return SearchsortedOp(side=side)(x, v, sorter)\n\n\nclass CumOp(COp):\n # See function cumsum/cumprod for docstring\n\n __props__ = (\"axis\", \"mode\")\n check_input = False\n params_type = ParamsType(\n c_axis=int_t, mode=EnumList((\"MODE_ADD\", \"add\"), (\"MODE_MUL\", \"mul\"))\n )\n\n def __init__(self, axis=None, mode=\"add\"):\n if mode not in (\"add\", \"mul\"):\n raise ValueError(f'{type(self).__name__}: Unknown mode \"{mode}\"')\n self.axis = axis\n self.mode = mode\n\n c_axis = property(lambda self: np.MAXDIMS if self.axis is None else self.axis)\n\n def make_node(self, x):\n x = basic.as_tensor_variable(x)\n out_type = x.type()\n\n if self.axis is None:\n out_type = theano.tensor.vector(dtype=x.dtype) # Flatten\n elif self.axis >= x.ndim or self.axis < -x.ndim:\n raise ValueError(f\"axis(={self.axis}) out of bounds\")\n\n return theano.Apply(self, [x], [out_type])\n\n def perform(self, node, inputs, output_storage, params):\n x = inputs[0]\n z = output_storage[0]\n if self.mode == \"add\":\n z[0] = np.cumsum(x, axis=self.axis)\n else:\n z[0] = np.cumprod(x, axis=self.axis)\n\n def grad(self, inputs, output_gradients):\n (x,) = inputs\n (gi,) = output_gradients\n\n if self.axis is None:\n if self.mode == \"add\":\n return [cumsum(gi[::-1])[::-1].reshape(x.shape)]\n elif self.mode == \"mul\":\n fx = cumprod(x, axis=self.axis)\n return [cumsum((fx * gi)[::-1])[::-1].reshape(x.shape) / x]\n else:\n raise NotImplementedError(\n f'{type(self).__name__}: unknown gradient for mode \"{self.mode}\"'\n )\n\n reverse_slicing = [slice(None, None, None)] * gi.ndim\n reverse_slicing[self.axis] = slice(None, None, -1)\n reverse_slicing = tuple(reverse_slicing)\n # We need to reverse the gradients along ``self.axis``,\n # compute cumsum, then reverse again\n if self.mode == \"add\":\n return [cumsum(gi[reverse_slicing], self.axis)[reverse_slicing]]\n elif self.mode == \"mul\":\n fx = cumprod(x, axis=self.axis)\n return [cumsum((fx * gi)[reverse_slicing], self.axis)[reverse_slicing] / x]\n else:\n raise NotImplementedError(\n f'{type(self).__name__}: unknown gradient for mode \"{self.mode}\"'\n )\n\n def infer_shape(self, fgraph, node, shapes):\n if self.axis is None:\n return [(basic.prod(shapes[0]),)] # Flatten\n\n return shapes\n\n def c_code(self, node, name, inames, onames, sub):\n (x,) = inames\n (z,) = onames\n axis = self.axis\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n\n code = (\n \"\"\"\n int axis = %(params)s->c_axis;\n if (axis == 0 && PyArray_NDIM(%(x)s) == 1)\n axis = NPY_MAXDIMS;\n npy_intp shape[1] = { PyArray_SIZE(%(x)s) };\n if(axis == NPY_MAXDIMS && !(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))\n {\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));\n }\n\n else if(axis != NPY_MAXDIMS && !(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s))))\n {\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE(%(x)s));\n }\n\n if (!%(z)s)\n %(fail)s;\n {\n\n PyObject * t = NULL;\n if(%(params)s->mode == MODE_ADD)\n t = PyArray_CumSum(\n %(x)s, axis,\n PyArray_TYPE(%(x)s), %(z)s);\n else if(%(params)s->mode == MODE_MUL)\n t = PyArray_CumProd(\n %(x)s, axis,\n PyArray_TYPE(%(x)s), %(z)s);\n\n if (!t){\n %(fail)s;\n }\n // Because PyArray_CumSum/CumProd returns a newly created reference on t.\n Py_XDECREF(t);\n }\n \"\"\"\n % locals()\n )\n\n return code\n\n def 
c_code_cache_version(self):\n return (8,)\n\n def __str__(self):\n return f\"{self.__class__.__name__}{{{self.axis}, {self.mode}}}\"\n\n\ndef cumsum(x, axis=None):\n \"\"\"Return the cumulative sum of the elements along a given axis.\n\n Wraping of numpy.cumsum.\n\n Parameters\n ----------\n x\n Input tensor variable.\n axis\n The axis along which the cumulative sum is computed.\n The default (None) is to compute the cumsum over the flattened array.\n\n\n .. versionadded:: 0.7\n\n \"\"\"\n return CumOp(axis=axis, mode=\"add\")(x)\n\n\ndef cumprod(x, axis=None):\n \"\"\"Return the cumulative product of the elements along a given axis.\n\n Wraping of numpy.cumprod.\n\n Parameters\n ----------\n x\n Input tensor variable.\n\n axis\n The axis along which the cumulative product is computed.\n The default (None) is to compute the cumprod over the flattened array.\n\n\n .. versionadded:: 0.7\n\n \"\"\"\n return CumOp(axis=axis, mode=\"mul\")(x)\n\n\n# CumsumOp and CumprodOp are for compatibility with old version,\n# just in case unpickling a theano function with old Ops.\nclass CumsumOp(Op):\n __props__ = (\"axis\",)\n\n def __new__(typ, *args, **kwargs):\n obj = object.__new__(CumOp, *args, **kwargs)\n obj.mode = \"add\"\n return obj\n\n\nclass CumprodOp(Op):\n __props__ = (\"axis\",)\n\n def __new__(typ, *args, **kwargs):\n obj = object.__new__(CumOp, *args, **kwargs)\n obj.mode = \"mul\"\n return obj\n\n\nclass DiffOp(Op):\n # See function diff for docstring\n\n __props__ = (\"n\", \"axis\")\n\n def __init__(self, n=1, axis=-1):\n self.n = n\n self.axis = axis\n # numpy return a view in that case.\n # TODO, make an optimization that remove this op in this case.\n if n == 0:\n self.view_map = {0: [0]}\n\n def make_node(self, x):\n x = basic.as_tensor_variable(x)\n return theano.Apply(self, [x], [x.type()])\n\n def perform(self, node, inputs, output_storage):\n x = inputs[0]\n z = output_storage[0]\n z[0] = np.diff(x, n=self.n, axis=self.axis)\n\n def grad(self, inputs, outputs_gradients):\n inputs = inputs[0]\n\n if inputs.ndim != 1:\n raise NotImplementedError(\n \"Grad is not implemented for inputs with\"\n \"number of dimension other than 1.\"\n )\n\n z = outputs_gradients[0]\n\n def _grad_helper(z):\n pre = basic.concatenate([[0.0], z])\n app = basic.concatenate([z, [0.0]])\n return pre - app\n\n for k in range(self.n):\n z = _grad_helper(z)\n return [z]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n i0_shapes = ins_shapes[0]\n out_shape = list(i0_shapes)\n out_shape[self.axis] = out_shape[self.axis] - self.n\n return [out_shape]\n\n\ndef diff(x, n=1, axis=-1):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n The first order difference is given by out[i] = a[i + 1] - a[i]\n along the given axis, higher order differences are calculated by\n using diff recursively. Wraping of numpy.diff.\n\n Parameters\n ----------\n x\n Input tensor variable.\n\n n\n The number of times values are differenced, default is 1.\n\n axis\n The axis along which the difference is taken, default is the last axis.\n\n\n .. versionadded:: 0.6\n\n \"\"\"\n return DiffOp(n=n, axis=axis)(x)\n\n\ndef bincount(x, weights=None, minlength=None, assert_nonneg=False):\n \"\"\"Count number of occurrences of each value in array of ints.\n\n The number of bins (of size 1) is one larger than the largest\n value in x. If minlength is specified, there will be at least\n this number of bins in the output array (though it will be longer\n if necessary, depending on the contents of x). 
Each bin gives the\n number of occurrences of its index value in x. If weights is\n specified the input array is weighted by it, i.e. if a value n\n is found at position i, out[n] += weight[i] instead of out[n] += 1.\n\n Parameters\n ----------\n x : 1 dimension, nonnegative ints\n weights : array of the same shape as x with corresponding weights.\n Optional.\n minlength : A minimum number of bins for the output array.\n Optional.\n assert_nonneg : A flag that inserts an assert_op to check if\n every input x is nonnegative.\n Optional.\n\n\n .. versionadded:: 0.6\n\n \"\"\"\n if x.ndim != 1:\n raise TypeError(\"Inputs must be of dimension 1.\")\n\n if assert_nonneg:\n from theano.tensor.opt import Assert\n\n assert_op = Assert(\"Input to bincount has negative values!\")\n x = assert_op(x, theano.tensor.all(x >= 0))\n\n max_value = theano.tensor.cast(x.max() + 1, \"int64\")\n\n if minlength is not None:\n max_value = theano.tensor.maximum(max_value, minlength)\n\n # Note: we do not use inc_subtensor(out[x], ...) in the following lines,\n # since out[x] raises an exception if the indices (x) are int8.\n if weights is None:\n out = theano.tensor.zeros([max_value], dtype=x.dtype)\n out = theano.tensor.advanced_inc_subtensor1(out, 1, x)\n else:\n out = theano.tensor.zeros([max_value], dtype=weights.dtype)\n out = theano.tensor.advanced_inc_subtensor1(out, weights, x)\n return out\n\n\ndef squeeze(x, axis=None):\n \"\"\"\n Remove broadcastable dimensions from the shape of an array.\n\n It returns the input array, but with the\n broadcastable dimensions removed. This is\n always `x` itself or a view into `x`.\n\n .. versionadded:: 0.6\n\n Parameters\n ----------\n x\n Input data, tensor variable.\n\n axis : None or int or tuple of ints, optional\n\n Selects a subset of the single-dimensional entries in the\n shape. If an axis is selected with shape entry greater than\n one, an error is raised.\n\n Returns\n -------\n object\n `x` without its broadcastable dimensions.\n\n \"\"\"\n if axis is None:\n axis = range(x.ndim)\n elif not isinstance(axis, Collection):\n axis = (axis,)\n\n view = x.dimshuffle(\n [i for i in range(x.ndim) if not x.broadcastable[i] or i not in axis]\n )\n return view\n\n\ndef compress(condition, x, axis=None):\n \"\"\"\n Return selected slices of an array along given axis.\n\n It returns the input tensor, but with selected slices along a given axis\n retained. If no axis is provided, the tensor is flattened.\n Corresponds to numpy.compress\n\n .. 
versionadded:: 0.7\n\n Parameters\n ----------\n x\n Input data, tensor variable.\n condition\n 1 dimensional array of non-zero and zero values\n corresponding to indices of slices along a selected axis.\n\n Returns\n -------\n object\n `x` with selected slices.\n\n \"\"\"\n indices = theano.tensor.basic.flatnonzero(condition)\n return x.take(indices, axis=axis)\n\n\nclass RepeatOp(Op):\n # See the repeat function for docstring\n\n __props__ = (\"axis\",)\n\n def __init__(self, axis=None):\n self.axis = axis\n\n def make_node(self, x, repeats):\n x = basic.as_tensor_variable(x)\n repeats = basic.as_tensor_variable(repeats)\n\n if repeats.dtype not in basic.integer_dtypes:\n raise TypeError(\"repeats.dtype must be an integer.\")\n\n # Some dtypes are not supported by numpy's implementation of repeat.\n # Until another one is available, we should fail at graph construction\n # time, not wait for execution.\n ptr_bitwidth = LOCAL_BITWIDTH\n if ptr_bitwidth == 64:\n numpy_unsupported_dtypes = (\"uint64\",)\n if ptr_bitwidth == 32:\n numpy_unsupported_dtypes = (\"uint32\", \"int64\", \"uint64\")\n\n if repeats.dtype in numpy_unsupported_dtypes:\n raise TypeError(\n (\n \"dtypes %s are not supported by numpy.repeat \"\n \"for the 'repeats' parameter, \" % str(numpy_unsupported_dtypes)\n ),\n repeats.dtype,\n )\n\n if self.axis is None:\n broadcastable = [False]\n else:\n try:\n const_reps = basic.get_scalar_constant_value(repeats)\n except basic.NotScalarConstantError:\n const_reps = None\n if const_reps == 1:\n broadcastable = x.broadcastable\n else:\n broadcastable = list(x.broadcastable)\n broadcastable[self.axis] = False\n\n out_type = theano.tensor.TensorType(x.dtype, broadcastable)\n\n return theano.Apply(self, [x, repeats], [out_type()])\n\n def perform(self, node, inputs, output_storage):\n x = inputs[0]\n repeats = inputs[1]\n z = output_storage[0]\n z[0] = np.repeat(x, repeats=repeats, axis=self.axis)\n\n def connection_pattern(self, node):\n\n return [[True], [False]]\n\n def grad(self, inputs, gout):\n (x, repeats) = inputs\n (gz,) = gout\n if repeats.ndim == 0:\n if self.axis is None:\n axis = x.ndim\n else:\n if self.axis >= 0:\n axis = self.axis + 1\n else:\n axis = self.axis + x.ndim + 1\n\n shape = [x.shape[k] for k in range(x.ndim)]\n shape.insert(axis, repeats)\n\n return [gz.reshape(shape, x.ndim + 1).sum(axis=axis), DisconnectedType()()]\n elif repeats.ndim == 1:\n # For this implementation, we would need to specify the length\n # of repeats in order to split gz in the right way to sum\n # the good part.\n raise NotImplementedError()\n else:\n raise ValueError()\n\n def infer_shape(self, fgraph, node, ins_shapes):\n i0_shapes = ins_shapes[0]\n repeats = node.inputs[1]\n out_shape = list(i0_shapes)\n\n # uint64 shape are not supported.\n dtype = None\n if repeats.dtype in [\"uint8\", \"uint16\", \"uint32\"]:\n dtype = \"int64\"\n if self.axis is None:\n if repeats.ndim == 0:\n if len(i0_shapes) == 0:\n out_shape = [repeats]\n else:\n res = 1\n for d in i0_shapes:\n res = res * d\n out_shape = (res * repeats,)\n else:\n out_shape = [theano.tensor.sum(repeats, dtype=dtype)]\n else:\n if repeats.ndim == 0:\n out_shape[self.axis] = out_shape[self.axis] * repeats\n else:\n out_shape[self.axis] = theano.tensor.sum(repeats, dtype=dtype)\n return [out_shape]\n\n\ndef repeat(x, repeats, axis=None):\n \"\"\"Repeat elements of an array.\n\n It returns an array which has the same shape as `x`, except\n along the given axis. The axis is used to speficy along which\n axis to repeat values. 
By default, use the flattened input\n array, and return a flat output array.\n\n The number of repetitions for each element is `repeat`.\n `repeats` is broadcasted to fit the length of the given `axis`.\n\n Parameters\n ----------\n x\n Input data, tensor variable.\n repeats\n int, scalar or tensor variable\n axis : int, optional\n\n See Also\n --------\n tensor.tile\n\n .. versionadded:: 0.6\n\n \"\"\"\n repeats = basic.as_tensor_variable(repeats)\n\n if repeats.ndim > 1:\n raise ValueError(\"The dimension of repeats should not exceed 1.\")\n\n if repeats.ndim == 1 and not repeats.broadcastable[0]:\n return RepeatOp(axis=axis)(x, repeats)\n else:\n if repeats.ndim == 1:\n repeats = repeats[0]\n\n if x.dtype == \"uint64\":\n raise TypeError(\"theano.tensor.repeat don't support dtype uint64\")\n\n if axis is None:\n axis = 0\n x = x.flatten()\n else:\n if axis >= x.ndim:\n raise ValueError(\"Axis should not exceed x.ndim-1.\")\n if axis < 0:\n axis = x.ndim + axis\n\n shape = [x.shape[i] for i in range(x.ndim)]\n\n # shape_ is the shape of the intermediate tensor which has\n # an additional dimension comparing to x. We use alloc to\n # allocate space for this intermediate tensor to replicate x\n # along that additional dimension.\n shape_ = shape[:]\n shape_.insert(axis + 1, repeats)\n\n # shape is now the shape of output, where shape[axis] becomes\n # shape[axis]*repeats.\n shape[axis] = shape[axis] * repeats\n\n # dims_ is the dimension of that intermediate tensor.\n dims_ = list(np.arange(x.ndim))\n dims_.insert(axis + 1, \"x\")\n\n # After the original tensor is duplicated along the additional\n # dimension, we reshape it to the expected output shape, and\n # return the output z.\n z = basic.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)\n return z\n\n\nclass Bartlett(Op):\n # See function bartlett for docstring\n __props__ = ()\n\n def make_node(self, M):\n M = basic.as_tensor_variable(M)\n if M.ndim != 0:\n raise TypeError(f\"{self.__class__.__name__} only works on scalar input\")\n elif M.dtype not in theano.tensor.integer_dtypes:\n # dtype is a theano attribute here\n raise TypeError(f\"{self.__class__.__name__} only works on integer input\")\n return Apply(self, [M], [basic.dvector()])\n\n def perform(self, node, inputs, out_):\n M = inputs[0]\n (out,) = out_\n out[0] = np.bartlett(M)\n\n def infer_shape(self, fgraph, node, in_shapes):\n temp = node.inputs[0]\n M = basic.switch(basic.lt(temp, 0), basic.cast(0, temp.dtype), temp)\n return [[M]]\n\n def grad(self, inputs, output_grads):\n return [None for i in inputs]\n\n\nbartlett_ = Bartlett()\n\n\n# I create a function only to have the doc show well.\ndef bartlett(M):\n \"\"\"\n An instance of this class returns the Bartlett spectral window in the\n time-domain. The Bartlett window is very similar to a triangular window,\n except that the end points are at zero. It is often used in signal\n processing for tapering a signal, without generating too much ripple in\n the frequency domain.\n\n .. versionadded:: 0.6\n\n Parameters\n ----------\n M : integer scalar\n Number of points in the output window. 
If zero or less,\n an empty vector is returned.\n\n Returns\n -------\n vector of doubles\n The triangular window, with the maximum value normalized to one\n (the value one appears only if the number of samples is odd), with\n the first and last samples equal to zero.\n\n \"\"\"\n return bartlett_(M)\n\n\nclass FillDiagonal(Op):\n # See function fill_diagonal for docstring\n __props__ = ()\n\n def infer_shape(self, fgraph, node, in_shapes):\n return [in_shapes[0]]\n\n def make_node(self, a, val):\n a = basic.as_tensor_variable(a)\n val = basic.as_tensor_variable(val)\n if a.ndim < 2:\n raise TypeError(\n \"%s: first parameter must have at least\"\n \" two dimensions\" % self.__class__.__name__\n )\n elif val.ndim != 0:\n raise TypeError(\n f\"{self.__class__.__name__}: second parameter must be a scalar\"\n )\n val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))\n if val.dtype != a.dtype:\n raise TypeError(\n \"%s: type of second parameter must be the same as\"\n \" the first's\" % self.__class__.__name__\n )\n return Apply(self, [a, val], [a.type()])\n\n def perform(self, node, inputs, output_storage):\n a = inputs[0].copy()\n val = inputs[1]\n if a.ndim == 2:\n # numpy.fill_diagonal up to date(including 1.6.2) have a\n # bug for tall matrix.\n # For 2-d arrays, we accept rectangular ones.\n step = a.shape[1] + 1\n end = a.shape[1] * a.shape[1]\n # Write the value out into the diagonal.\n a.flat[:end:step] = val\n else:\n np.fill_diagonal(a, val)\n\n output_storage[0][0] = a\n\n def grad(self, inp, cost_grad):\n \"\"\"\n Notes\n -----\n The gradient is currently implemented for matrices only.\n\n \"\"\"\n a, val = inp\n grad = cost_grad[0]\n if a.dtype.startswith(\"complex\"):\n return [None, None]\n elif a.ndim > 2:\n raise NotImplementedError(\n \"%s: gradient is currently implemented\"\n \" for matrices only\" % self.__class__.__name__\n )\n wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions\n # diag is only valid for matrices\n wr_val = nlinalg.diag(grad).sum()\n return [wr_a, wr_val]\n\n\nfill_diagonal_ = FillDiagonal()\n\n\n# I create a function only to have the doc show well.\ndef fill_diagonal(a, val):\n \"\"\"\n Returns a copy of an array with all\n elements of the main diagonal set to a specified scalar value.\n\n .. versionadded:: 0.6\n\n Parameters\n ----------\n a\n Rectangular array of at least two dimensions.\n val\n Scalar value to fill the diagonal whose type must be\n compatible with that of array 'a' (i.e. 'val' cannot be viewed\n as an upcast of 'a').\n\n Returns\n -------\n array\n An array identical to 'a' except that its main diagonal\n is filled with scalar 'val'. (For an array 'a' with a.ndim >=\n 2, the main diagonal is the list of locations a[i, i, ..., i]\n (i.e. 
with indices all identical).)\n\n Support rectangular matrix and tensor with more than 2 dimensions\n if the later have all dimensions are equals.\n\n\n\n \"\"\"\n return fill_diagonal_(a, val)\n\n\nclass FillDiagonalOffset(Op):\n # See function fill_diagonal_offset for docstring\n __props__ = ()\n\n def infer_shape(self, fgraph, node, in_shapes):\n return [in_shapes[0]]\n\n def make_node(self, a, val, offset):\n a = basic.as_tensor_variable(a)\n val = basic.as_tensor_variable(val)\n offset = basic.as_tensor_variable(offset)\n if a.ndim != 2:\n raise TypeError(\n \"%s: first parameter must have exactly\"\n \" two dimensions\" % self.__class__.__name__\n )\n elif val.ndim != 0:\n raise TypeError(\n f\"{self.__class__.__name__}: second parameter must be a scalar\"\n )\n elif offset.ndim != 0:\n raise TypeError(\n f\"{self.__class__.__name__}: third parameter must be a scalar\"\n )\n val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))\n if val.dtype != a.dtype:\n raise TypeError(\n \"%s: type of second parameter must be the same\"\n \" as the first's\" % self.__class__.__name__\n )\n elif offset.dtype not in theano.tensor.integer_dtypes:\n raise TypeError(\n f\"{self.__class__.__name__}: type of third parameter must be as integer\"\n \" use theano.tensor.cast( input, 'int32/int64')\"\n )\n\n return Apply(self, [a, val, offset], [a.type()])\n\n def perform(self, node, inputs, output_storage):\n a = inputs[0].copy()\n val = inputs[1]\n offset = inputs[2]\n height, width = a.shape\n\n \"\"\"\n Notes\n -----\n The fill_diagonal only support rectangular matrix. The output\n of tall matrix is \"wrapped\", which is an option in numpy 1.9.0\n but was regarded as a bug in numpy 1.6.2. Here I implement the\n fill_diagonal_offset with unwrapped output, so fill_diagonal_offset\n supports tall matrix.(This make a little difference between the output\n of fill_diagonal and fill_diagonal_offset only in the case of tall\n matrix)\n\n \"\"\"\n if offset >= 0:\n start = offset\n num_of_step = min(min(width, height), width - offset)\n else:\n start = -offset * a.shape[1]\n num_of_step = min(min(width, height), height + offset)\n step = a.shape[1] + 1\n end = start + step * num_of_step\n # Write the value out into the diagonal.\n a.flat[start:end:step] = val\n\n output_storage[0][0] = a\n\n def grad(self, inp, cost_grad):\n \"\"\"\n Notes\n -----\n The gradient is currently implemented for matrices only.\n \"\"\"\n a, val, offset = inp\n grad = cost_grad[0]\n height, width = grad.shape\n\n if a.dtype.startswith(\"complex\"):\n return [None, None]\n\n # only valid for matrices\n wr_a = fill_diagonal_offset(grad, 0, offset)\n\n offset_abs = basic.abs_(offset)\n pos_offset_flag = basic.ge(offset, 0)\n neg_offset_flag = basic.lt(offset, 0)\n min_wh = basic.minimum(width, height)\n\n start = offset * pos_offset_flag + offset_abs * width * neg_offset_flag\n num_of_step = basic.minimum(\n min_wh, width * pos_offset_flag + height * neg_offset_flag - offset_abs\n )\n\n step = a.shape[1] + 1\n end = start + step * num_of_step\n\n # input of slice should be integer\n start = basic.cast(start, \"int32\")\n step = basic.cast(step, \"int32\")\n end = basic.cast(end, \"int32\")\n\n wr_val = grad.flatten()[start:end:step].sum()\n\n wr_offset = grad_undefined(\n self,\n 2,\n offset,\n \"offset is not defined for non-integer offset so\"\n \" fill_diagonal_offset(a,val,offset+eps) is undefined\",\n )\n\n return [wr_a, wr_val, wr_offset]\n\n\nfill_diagonal_offset_ = FillDiagonalOffset()\n\n\ndef fill_diagonal_offset(a, val, 
offset):\n \"\"\"\n Returns a copy of an array with all\n elements of the main diagonal set to a specified scalar value.\n\n Parameters\n ----------\n a\n Rectangular array of two dimensions.\n val\n Scalar value to fill the diagonal whose type must be\n compatible with that of array 'a' (i.e. 'val' cannot be viewed\n as an upcast of 'a').\n offset\n Scalar value Offset of the diagonal from the main\n diagonal. Can be positive or negative integer.\n\n Returns\n -------\n array\n An array identical to 'a' except that its offset diagonal\n is filled with scalar 'val'. The output is unwrapped.\n\n \"\"\"\n return fill_diagonal_offset_(a, val, offset)\n\n\ndef to_one_hot(y, nb_class, dtype=None):\n \"\"\"\n Return a matrix where each row correspond to the one hot\n encoding of each element in y.\n\n Parameters\n ----------\n y\n A vector of integer value between 0 and nb_class - 1.\n nb_class : int\n The number of class in y.\n dtype : data-type\n The dtype of the returned matrix. Default floatX.\n\n Returns\n -------\n object\n A matrix of shape (y.shape[0], nb_class), where each row ``i`` is\n the one hot encoding of the corresponding ``y[i]`` value.\n\n \"\"\"\n ret = theano.tensor.zeros((y.shape[0], nb_class), dtype=dtype)\n ret = theano.tensor.set_subtensor(ret[theano.tensor.arange(y.shape[0]), y], 1)\n return ret\n\n\nclass Unique(Op):\n \"\"\"\n Wraps numpy.unique. This op is not implemented on the GPU.\n\n Examples\n --------\n >>> import numpy as np\n >>> import theano\n\n >>> x = theano.tensor.vector()\n >>> f = theano.function([x], Unique(True, True, False)(x))\n >>> f([1, 2., 3, 4, 3, 2, 1.])\n [array([ 1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])]\n\n >>> y = theano.tensor.matrix()\n >>> g = theano.function([y], Unique(True, True, False)(y))\n >>> g([[1, 1, 1.0], (2, 3, 3.0)])\n [array([ 1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])]\n\n \"\"\"\n\n __props__ = (\"return_index\", \"return_inverse\", \"return_counts\", \"axis\")\n\n def __init__(\n self, return_index=False, return_inverse=False, return_counts=False, axis=None\n ):\n self.return_index = return_index\n self.return_inverse = return_inverse\n self.return_counts = return_counts\n self.axis = axis\n numpy_ver = [int(n) for n in np.__version__.split(\".\")[:2]]\n if self.axis is not None and bool(numpy_ver < [1, 13]):\n raise RuntimeError(\n \"Numpy version = \"\n + np.__version__\n + f\". 
Option 'axis={axis}' works starting from version 1.13.0.\"\n )\n\n def make_node(self, x):\n x = basic.as_tensor_variable(x)\n self_axis = self.axis\n if self_axis is None:\n broadcastable = [False]\n else:\n if self_axis < 0:\n self_axis += len(x.broadcastable)\n if self_axis < 0 or self_axis >= len(x.broadcastable):\n raise RuntimeError(\n \"Unique axis `{}` is outside of input ndim = \"\n \"{}.\".format(self.axis, len(x.broadcastable))\n )\n broadcastable = [\n b if axis != self_axis else False\n for axis, b in enumerate(x.broadcastable)\n ]\n outputs = [basic.TensorType(broadcastable=broadcastable, dtype=x.dtype)()]\n typ = basic.TensorType(broadcastable=[False], dtype=\"int64\")\n if self.return_index:\n outputs.append(typ())\n if self.return_inverse:\n outputs.append(typ())\n if self.return_counts:\n outputs.append(typ())\n return theano.Apply(self, [x], outputs)\n\n def perform(self, node, inputs, output_storage):\n x = inputs[0]\n z = output_storage\n param = {}\n if self.return_index:\n param[\"return_index\"] = True\n if self.return_inverse:\n param[\"return_inverse\"] = True\n if self.return_counts:\n param[\"return_counts\"] = True\n if self.axis is not None:\n param[\"axis\"] = self.axis\n outs = np.unique(x, **param)\n if (\n (not self.return_inverse)\n and (not self.return_index)\n and (not self.return_counts)\n ):\n z[0][0] = outs\n else:\n for i in range(len(outs)):\n z[i][0] = outs[i]\n\n def infer_shape(self, fgraph, node, i0_shapes):\n ret = fgraph.shape_feature.default_infer_shape(fgraph, node, i0_shapes)\n if self.axis is not None:\n self_axis = self.axis\n ndim = len(i0_shapes[0])\n if self_axis < 0:\n self_axis += ndim\n if self_axis < 0 or self_axis >= ndim:\n raise RuntimeError(\n f\"Unique axis `{self.axis}` is outside of input ndim = {ndim}.\"\n )\n ret[0] = tuple(\n [fgraph.shape_feature.shape_ir(i, node.outputs[0]) for i in range(ndim)]\n )\n if self.return_inverse:\n if self.axis is None:\n shape = (basic.prod(i0_shapes[0]),)\n else:\n shape = (i0_shapes[0][self_axis],)\n if self.return_index:\n ret[2] = shape\n return ret\n ret[1] = shape\n return ret\n return ret\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n # For backwards compatibility with pickled instances of Unique that\n # did not have the axis parameter specified\n if \"axis\" not in state:\n self.axis = None\n\n\nclass UnravelIndex(Op):\n __props__ = (\"order\",)\n\n def __init__(self, order=\"C\"):\n assert order in (\"C\", \"F\")\n self.order = order\n\n def make_node(self, indices, dims):\n indices = basic.as_tensor_variable(indices)\n dims = basic.as_tensor_variable(dims)\n\n if indices.dtype not in basic.int_dtypes:\n raise TypeError(\n f\"'{indices.dtype}' object cannot be interpreted as an index\"\n )\n if dims.dtype not in basic.int_dtypes:\n raise TypeError(f\"'{dims.dtype}' object cannot be interpreted as an index\")\n if dims.ndim != 1:\n raise TypeError(\"dims must be a 1D array\")\n\n return Apply(\n self,\n [indices, dims],\n [\n basic.TensorType(dtype=\"int64\", broadcastable=(False,) * indices.ndim)()\n for i in range(basic.get_vector_length(dims))\n ],\n )\n\n def infer_shape(self, fgraph, node, input_shapes):\n return [input_shapes[0]] * len(node.outputs)\n\n def perform(self, node, inp, out):\n indices, dims = inp\n res = np.unravel_index(indices, dims, order=self.order)\n assert len(res) == len(out)\n for i in range(len(out)):\n ret = _asarray(res[i], node.outputs[0].dtype)\n if ret.base is not None:\n # NumPy will return a view when it can.\n # But we don't 
want that.\n ret = ret.copy()\n out[i][0] = ret\n\n\ndef unravel_index(indices, dims, order=\"C\"):\n \"\"\"\n Converts a flat index or array of flat indices into a tuple\n of coordinate arrays.\n\n Parameters\n ----------\n indices : Theano or NumPy array\n An integer array whose elements are indices into the flattened\n version of an array of dimensions ``dims``.\n dims : tuple of ints\n The shape of the array to use for unraveling ``indices``.\n order : {'C', 'F'}, optional\n Determines whether the indices should be viewed as indexing in\n row-major (C-style) or column-major (Fortran-style) order.\n\n Returns\n -------\n unraveled_coords : tuple of ndarray\n Each array in the tuple has the same shape as the ``indices``\n array.\n\n See Also\n --------\n ravel_multi_index\n\n \"\"\"\n res = UnravelIndex(order=order)(indices, dims)\n if not isinstance(res, (list, tuple)):\n return (res,)\n else:\n return tuple(res)\n\n\nclass RavelMultiIndex(Op):\n __props__ = (\"mode\", \"order\")\n\n def __init__(self, mode=\"raise\", order=\"C\"):\n assert mode in (\"raise\", \"wrap\", \"clip\")\n assert order in (\"C\", \"F\")\n self.mode = mode\n self.order = order\n\n def make_node(self, *inp):\n multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]\n dims = basic.as_tensor_variable(inp[-1])\n\n for i in multi_index:\n if i.dtype not in basic.int_dtypes:\n raise TypeError(f\"'{i.dtype}' object cannot be interpreted as an index\")\n if dims.dtype not in basic.int_dtypes:\n raise TypeError(f\"'{dims.dtype}' object cannot be interpreted as an index\")\n if dims.ndim != 1:\n raise TypeError(\"dims must be a 1D array\")\n\n return Apply(\n self,\n multi_index + [dims],\n [\n basic.TensorType(\n dtype=\"int64\", broadcastable=(False,) * multi_index[0].ndim\n )()\n ],\n )\n\n def infer_shape(self, fgraph, node, input_shapes):\n return [input_shapes[0]]\n\n def perform(self, node, inp, out):\n multi_index, dims = inp[:-1], inp[-1]\n res = np.ravel_multi_index(multi_index, dims, mode=self.mode, order=self.order)\n out[0][0] = _asarray(res, node.outputs[0].dtype)\n\n\ndef ravel_multi_index(multi_index, dims, mode=\"raise\", order=\"C\"):\n \"\"\"\n Converts a tuple of index arrays into an array of flat\n indices, applying boundary modes to the multi-index.\n\n Parameters\n ----------\n multi_index : tuple of Theano or NumPy arrays\n A tuple of integer arrays, one array for each dimension.\n dims : tuple of ints\n The shape of array into which the indices from ``multi_index`` apply.\n mode : {'raise', 'wrap', 'clip'}, optional\n Specifies how out-of-bounds indices are handled. 
Can specify\n either one mode or a tuple of modes, one mode per index.\n * 'raise' -- raise an error (default)\n * 'wrap' -- wrap around\n * 'clip' -- clip to the range\n In 'clip' mode, a negative index which would normally\n wrap will clip to 0 instead.\n order : {'C', 'F'}, optional\n Determines whether the multi-index should be viewed as\n indexing in row-major (C-style) or column-major\n (Fortran-style) order.\n\n Returns\n -------\n raveled_indices : Theano array\n An array of indices into the flattened version of an array\n of dimensions ``dims``.\n\n See Also\n --------\n unravel_index\n\n \"\"\"\n if not isinstance(multi_index, (tuple, list)):\n raise TypeError(\"multi_index must be a tuple or a list.\")\n args = tuple(multi_index) + (dims,)\n return RavelMultiIndex(mode=mode, order=order)(*args)\n\n\ndef broadcast_shape(*arrays, **kwargs):\n \"\"\"Compute the shape resulting from broadcasting arrays.\n\n Parameters\n ----------\n *arrays: `TensorVariable`s\n The tensor variables, or their shapes (as tuples),\n for which the broadcast shape is computed.\n arrays_are_shapes: bool (Optional)\n Indicates whether or not the `arrays` contains shape tuples.\n If you use this approach, make sure that the broadcastable dimensions\n are (scalar) constants with the value `1` or `1` exactly.\n\n \"\"\"\n return broadcast_shape_iter(arrays, **kwargs)\n\n\ndef broadcast_shape_iter(arrays, **kwargs):\n \"\"\"Compute the shape resulting from broadcasting arrays.\n\n Parameters\n ----------\n arrays: Iterable[TensorVariable] or Iterable[Tuple[Variable]]\n An iterable of tensors, or a tuple of shapes (as tuples),\n for which the broadcast shape is computed.\n XXX: Do not call this with a generator/iterator; this function will not\n make copies!\n arrays_are_shapes: bool (Optional)\n Indicates whether or not the `arrays` contains shape tuples.\n If you use this approach, make sure that the broadcastable dimensions\n are (scalar) constants with the value `1` or `1` exactly.\n\n \"\"\"\n one = theano.scalar.ScalarConstant(theano.scalar.int64, 1)\n\n arrays_are_shapes = kwargs.pop(\"arrays_are_shapes\", False)\n if arrays_are_shapes:\n max_dims = max(len(a) for a in arrays)\n\n array_shapes = [\n (one,) * (max_dims - len(a))\n + tuple(one if getattr(sh, \"value\", sh) == 1 else sh for sh in a)\n for a in arrays\n ]\n else:\n max_dims = max(a.ndim for a in arrays)\n\n array_shapes = [\n (one,) * (max_dims - a.ndim)\n + tuple(one if bcast else sh for sh, bcast in zip(a.shape, a.broadcastable))\n for a in arrays\n ]\n\n result_dims = []\n\n for dim_shapes in zip(*array_shapes):\n non_bcast_shapes = [shape for shape in dim_shapes if shape != one]\n\n if len(non_bcast_shapes) > 0:\n # Either there's only one non-broadcastable dimensions--and that's\n # what determines the dimension size, or there are multiple\n # non-broadcastable dimensions that must be equal\n i_dim = non_bcast_shapes.pop()\n\n potentially_unequal_dims = [\n dim\n for dim in non_bcast_shapes\n # TODO FIXME: This is a largely deficient means of comparing graphs\n # (and especially shapes)\n if not theano.gof.graph.equal_computations([i_dim], [dim])\n ]\n\n if potentially_unequal_dims:\n from theano.tensor.opt import Assert\n\n # In this case, we can't tell whether or not the dimensions are\n # equal, so we'll need to assert their equality and move the error\n # handling to evaluation time.\n assert_dim = Assert(\"Could not broadcast dimensions\")\n eq_condition = basic.all(\n [\n basic.or_(basic.eq(dim, one), basic.eq(i_dim, dim))\n for 
dim in potentially_unequal_dims\n ]\n )\n eq_condition = basic.or_(basic.eq(i_dim, one), eq_condition)\n result_dims.append(assert_dim(i_dim, eq_condition))\n else:\n result_dims.append(i_dim)\n else:\n # Every array was broadcastable in this dimension\n result_dims.append(one)\n\n return tuple(result_dims)\n\n\nclass BroadcastTo(Op):\n\n view_map = {0: [0]}\n\n def __call__(self, a, shape, **kwargs):\n return super().__call__(a, *shape, **kwargs)\n\n def make_node(self, a, *shape):\n a = basic.as_tensor_variable(a)\n shape = basic.as_tensor_variable(shape, ndim=1)\n\n shape, bcast = basic.alloc_validate_shape(shape)\n\n out = type(a.type)(dtype=a.type.dtype, broadcastable=bcast)()\n\n return theano.Apply(self, [a] + shape, [out])\n\n def perform(self, node, inputs, output_storage):\n a, *shape = inputs\n z = output_storage[0]\n z[0] = np.broadcast_to(a, shape)\n\n def grad(self, inputs, outputs_gradients):\n a, *shape = inputs\n (dout,) = outputs_gradients\n\n # Determine the dimensions that were added by broadcasting\n new_dims = list(range(dout.ndim - a.ndim))\n\n d_wrt_a = broadcast_to(dout, shape).sum(axis=new_dims)\n\n # Determine the dimensions that were broadcast\n _, shape_bcast = basic.alloc_validate_shape(shape)\n bcast_sums = [\n i\n for i, (a_b, s_b) in enumerate(zip(a.broadcastable, shape_bcast[-a.ndim :]))\n if a_b and not s_b\n ]\n\n if bcast_sums:\n d_wrt_a = d_wrt_a.sum(axis=bcast_sums, keepdims=True)\n\n return [d_wrt_a] + [\n grad_undefined(self, i, shp) for i, shp in enumerate(shape, 1)\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return [node.inputs[1:]]\n\n\nbroadcast_to = BroadcastTo()\n"
] | [
[
"numpy.random.binomial",
"numpy.random.poisson",
"numpy.random.multinomial"
],
[
"numpy.all",
"scipy.sparse.csc_matrix"
],
[
"numpy.dtype"
],
[
"scipy.stats.halfnorm.rvs",
"numpy.broadcast_arrays",
"numpy.asarray",
"scipy.stats.halfcauchy.rvs",
"scipy.stats.invgamma.rvs",
"scipy.stats.betabinom.rvs",
"scipy.stats.cauchy.rvs",
"numpy.atleast_1d",
"scipy.stats.gamma.rvs",
"numpy.vectorize",
"scipy.stats.bernoulli.rvs",
"numpy.broadcast_to",
"scipy.stats.truncexpon.rvs",
"scipy.stats.nbinom.rvs",
"scipy.stats.multivariate_normal",
"numpy.ndindex",
"numpy.array",
"numpy.empty"
],
[
"numpy.unique",
"numpy.__version__.split",
"numpy.arange",
"numpy.bartlett",
"numpy.cumsum",
"numpy.cumprod",
"numpy.diff",
"numpy.broadcast_to",
"numpy.fill_diagonal",
"numpy.ravel_multi_index",
"numpy.searchsorted",
"numpy.repeat",
"numpy.unravel_index"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZurMaD/DeepGrabCut-PyTorch | [
"13d9e81e6e438ad3394fb3a78aca26c2cc63c825"
] | [
"dataloaders/combine_dbs.py"
] | [
"import torch.utils.data as data\n\n\nclass CombineDBs(data.Dataset):\n def __init__(self, dataloaders, excluded=None):\n self.dataloaders = dataloaders\n self.excluded = excluded\n self.im_ids = []\n\n # Combine object lists\n for dl in dataloaders:\n for elem in dl.im_ids:\n if elem not in self.im_ids:\n self.im_ids.append(elem)\n\n # Exclude\n if excluded:\n for dl in excluded:\n for elem in dl.im_ids:\n if elem in self.im_ids:\n self.im_ids.remove(elem)\n\n # Get object pointers\n self.obj_list = []\n self.im_list = []\n new_im_ids = []\n obj_counter = 0\n num_images = 0\n for ii, dl in enumerate(dataloaders):\n for jj, curr_im_id in enumerate(dl.im_ids):\n if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):\n flag = False\n new_im_ids.append(curr_im_id)\n for kk in range(len(dl.obj_dict[curr_im_id])):\n if dl.obj_dict[curr_im_id][kk] != -1:\n self.obj_list.append({'db_ii': ii, 'obj_ii': dl.obj_list.index([jj, kk])})\n flag = True\n obj_counter += 1\n self.im_list.append({'db_ii': ii, 'im_ii': jj})\n if flag:\n num_images += 1\n\n self.im_ids = new_im_ids\n print('Combined number of images: {:d}\\nCombined number of objects: {:d}'.format(num_images, len(self.obj_list)))\n\n def __getitem__(self, index):\n\n _db_ii = self.obj_list[index][\"db_ii\"]\n _obj_ii = self.obj_list[index]['obj_ii']\n sample = self.dataloaders[_db_ii].__getitem__(_obj_ii)\n\n if 'meta' in sample.keys():\n sample['meta']['db'] = str(self.dataloaders[_db_ii])\n\n return sample\n\n def __len__(self):\n return len(self.obj_list)\n\n def __str__(self):\n include_db = [str(db) for db in self.dataloaders]\n exclude_db = [str(db) for db in self.excluded]\n return 'Included datasets:'+str(include_db)+'\\n'+'Excluded datasets:'+str(exclude_db)\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from dataloaders import pascal\n from dataloaders import sbd\n import torch\n import numpy as np\n import dataset.custom_transforms as tr\n from torchvision import transforms\n\n composed_transforms_tr = transforms.Compose([\n tr.RandomHorizontalFlip(),\n tr.ScaleNRotate(rots=(-15, 15), scales=(.75, 1.25)),\n tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),\n tr.DistanceMap(v=0.15, elem='gt'),\n tr.ConcatInputs(elems=('image', 'distance_map')),\n tr.ToTensor()])\n\n composed_transforms_ts = transforms.Compose([\n tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),\n tr.DistanceMap(v=0.15, elem='gt'),\n tr.ConcatInputs(elems=('image', 'distance_map')),\n tr.ToTensor()])\n\n pascal_voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)\n sbd = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr, retname=True)\n pascal_voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr, retname=True)\n\n dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val])\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)\n\n for ii, sample in enumerate(dataloader):\n for jj in range(sample[\"image\"].size()[0]):\n dismap = sample['distance_map'][jj].numpy()\n gt = sample['gt'][jj].numpy()\n gt[gt > 0] = 255\n gt = np.array(gt[0]).astype(np.uint8)\n dismap = np.array(dismap[0]).astype(np.uint8)\n display = 0.9 * gt + 0.4 * dismap\n display = display.astype(np.uint8)\n plt.figure()\n plt.title('display')\n plt.imshow(display, cmap='gray')\n\n if ii == 1:\n break\n plt.show(block=True)"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"torch.utils.data.DataLoader",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scrambler-crypto/pyecsca | [
"491abfb548455669abd470382a48dcd07b2eda87",
"491abfb548455669abd470382a48dcd07b2eda87"
] | [
"test/sca/test_edit.py",
"test/sca/test_filter.py"
] | [
"from unittest import TestCase\n\nimport numpy as np\n\nfrom pyecsca.sca import Trace, trim, reverse, pad\n\n\nclass EditTests(TestCase):\n\n def setUp(self):\n self._trace = Trace(np.array([10, 20, 30, 40, 50], dtype=np.dtype(\"i1\")))\n\n def test_trim(self):\n result = trim(self._trace, 2)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples, np.array([30, 40, 50], dtype=np.dtype(\"i1\")))\n\n result = trim(self._trace, end=3)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples, np.array([10, 20, 30], dtype=np.dtype(\"i1\")))\n\n with self.assertRaises(ValueError):\n trim(self._trace, 5, 1)\n\n def test_reverse(self):\n result = reverse(self._trace)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([50, 40, 30, 20, 10], dtype=np.dtype(\"i1\")))\n\n def test_pad(self):\n result = pad(self._trace, 2)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([0, 0, 10, 20, 30, 40, 50, 0, 0], dtype=np.dtype(\"i1\")))\n\n result = pad(self._trace, (1, 3))\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([0, 10, 20, 30, 40, 50, 0, 0, 0], dtype=np.dtype(\"i1\")))\n",
"from unittest import TestCase\n\nimport numpy as np\nfrom pyecsca.sca import Trace, filter_lowpass, filter_highpass, filter_bandpass, filter_bandstop\nfrom .utils import Plottable\n\n\nclass FilterTests(Plottable):\n\n def setUp(self):\n self._trace = Trace(\n np.array([5, 12, 15, 13, 15, 11, 7, 2, -4, -8, -10, -8, -13, -9, -11, -8, -5],\n dtype=np.dtype(\"i1\")), None)\n\n def test_lowpass(self):\n result = filter_lowpass(self._trace, 100, 20)\n self.assertIsNotNone(result)\n self.assertEqual(len(self._trace.samples), len(result.samples))\n self.plot(self._trace, result)\n\n def test_highpass(self):\n result = filter_highpass(self._trace, 128, 20)\n self.assertIsNotNone(result)\n self.assertEqual(len(self._trace.samples), len(result.samples))\n self.plot(self._trace, result)\n\n def test_bandpass(self):\n result = filter_bandpass(self._trace, 128, 20, 60)\n self.assertIsNotNone(result)\n self.assertEqual(len(self._trace.samples), len(result.samples))\n self.plot(self._trace, result)\n\n def test_bandstop(self):\n result = filter_bandstop(self._trace, 128, 20, 60)\n self.assertIsNotNone(result)\n self.assertEqual(len(self._trace.samples), len(result.samples))\n self.plot(self._trace, result)\n"
] | [
[
"numpy.dtype"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | [
"66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2",
"66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2",
"66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2",
"66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2",
"66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2"
] | [
"rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_legendre.py",
"rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_chebyshev.py",
"rootfs/usr/lib/python3/dist-packages/numpy/core/tests/test_dtype.py",
"rootfs/usr/lib/python3/dist-packages/numpy/lib/_iotools.py",
"rootfs/usr/lib/python3/dist-packages/numpy/polynomial/chebyshev.py"
] | [
"\"\"\"Tests for legendre module.\n\n\"\"\"\n\n\nimport numpy as np\nimport numpy.polynomial.legendre as leg\nimport numpy.polynomial.polynomial as poly\nfrom numpy.testing import *\n\nP0 = np.array([ 1])\nP1 = np.array([ 0, 1])\nP2 = np.array([-1, 0, 3])/2\nP3 = np.array([ 0, -3, 0, 5])/2\nP4 = np.array([ 3, 0, -30, 0, 35])/8\nP5 = np.array([ 0, 15, 0, -70, 0, 63])/8\nP6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16\nP7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16\nP8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128\nP9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128\n\nPlist = [P0, P1, P2, P3, P4, P5, P6, P7, P8, P9]\n\ndef trim(x) :\n return leg.legtrim(x, tol=1e-6)\n\n\nclass TestConstants(TestCase) :\n\n def test_legdomain(self) :\n assert_equal(leg.legdomain, [-1, 1])\n\n def test_legzero(self) :\n assert_equal(leg.legzero, [0])\n\n def test_legone(self) :\n assert_equal(leg.legone, [1])\n\n def test_legx(self) :\n assert_equal(leg.legx, [0, 1])\n\n\nclass TestArithmetic(TestCase) :\n x = np.linspace(-1, 1, 100)\n y0 = poly.polyval(x, P0)\n y1 = poly.polyval(x, P1)\n y2 = poly.polyval(x, P2)\n y3 = poly.polyval(x, P3)\n y4 = poly.polyval(x, P4)\n y5 = poly.polyval(x, P5)\n y6 = poly.polyval(x, P6)\n y7 = poly.polyval(x, P7)\n y8 = poly.polyval(x, P8)\n y9 = poly.polyval(x, P9)\n y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9]\n\n def test_legval(self) :\n def f(x) :\n return x*(x**2 - 1)\n\n #check empty input\n assert_equal(leg.legval([], [1]).size, 0)\n\n #check normal input)\n for i in range(10) :\n msg = \"At i=%d\" % i\n ser = np.zeros\n tgt = self.y[i]\n res = leg.legval(self.x, [0]*i + [1])\n assert_almost_equal(res, tgt, err_msg=msg)\n\n #check that shape is preserved\n for i in range(3) :\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(leg.legval(x, [1]).shape, dims)\n assert_equal(leg.legval(x, [1,0]).shape, dims)\n assert_equal(leg.legval(x, [1,0,0]).shape, dims)\n\n def test_legadd(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = leg.legadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_legsub(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = leg.legsub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_legmulx(self):\n assert_equal(leg.legmulx([0]), [0])\n assert_equal(leg.legmulx([1]), [0,1])\n for i in range(1, 5):\n tmp = 2*i + 1\n ser = [0]*i + [1]\n tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]\n assert_equal(leg.legmulx(ser), tgt)\n\n def test_legmul(self) :\n # check values of result\n for i in range(5) :\n pol1 = [0]*i + [1]\n val1 = leg.legval(self.x, pol1)\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n pol2 = [0]*j + [1]\n val2 = leg.legval(self.x, pol2)\n pol3 = leg.legmul(pol1, pol2)\n val3 = leg.legval(self.x, pol3)\n assert_(len(pol3) == i + j + 1, msg)\n assert_almost_equal(val3, val1*val2, err_msg=msg)\n\n def test_legdiv(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n ci = [0]*i + [1]\n cj = [0]*j + [1]\n tgt = leg.legadd(ci, cj)\n quo, rem = leg.legdiv(tgt, ci)\n res = leg.legadd(leg.legmul(quo, ci), rem)\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n\nclass TestCalculus(TestCase) :\n\n def test_legint(self) :\n # check exceptions\n assert_raises(ValueError, leg.legint, [0], .5)\n 
assert_raises(ValueError, leg.legint, [0], -1)\n assert_raises(ValueError, leg.legint, [0], 1, [0,0])\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = leg.legint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i])\n res = leg.leg2poly(legint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(leg.legval(-1, legint), i)\n\n # check single integration with integration constant and scaling\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i], scl=2)\n res = leg.leg2poly(legint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1)\n res = leg.legint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k])\n res = leg.legint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)\n res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k], scl=2)\n res = leg.legint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_legder(self) :\n # check exceptions\n assert_raises(ValueError, leg.legder, [0], .5)\n assert_raises(ValueError, leg.legder, [0], -1)\n\n # check that zeroth deriviative does nothing\n for i in range(5) :\n tgt = [1] + [0]*i\n res = leg.legder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = leg.legder(leg.legint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n\nclass TestMisc(TestCase) :\n\n def test_legfromroots(self) :\n res = leg.legfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1,5) :\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n pol = leg.legfromroots(roots)\n res = leg.legval(roots, pol)\n tgt = 0\n assert_(len(pol) == i + 1)\n assert_almost_equal(leg.leg2poly(pol)[-1], 1)\n assert_almost_equal(res, tgt)\n\n def test_legroots(self) :\n assert_almost_equal(leg.legroots([1]), [])\n assert_almost_equal(leg.legroots([1, 2]), [-.5])\n for i in range(2,5) :\n tgt = np.linspace(-1, 1, i)\n res = 
leg.legroots(leg.legfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_legvander(self) :\n # check for 1d x\n x = np.arange(3)\n v = leg.legvander(x, 3)\n assert_(v.shape == (3,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], leg.legval(x, coef))\n\n # check for 2d x\n x = np.array([[1,2],[3,4],[5,6]])\n v = leg.legvander(x, 3)\n assert_(v.shape == (3,2,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], leg.legval(x, coef))\n\n def test_legfit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n\n # Test exceptions\n assert_raises(ValueError, leg.legfit, [1], [1], -1)\n assert_raises(TypeError, leg.legfit, [[1]], [1], 0)\n assert_raises(TypeError, leg.legfit, [], [1], 0)\n assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)\n assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)\n assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)\n assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1])\n\n # Test fit\n x = np.linspace(0,2)\n y = f(x)\n #\n coef3 = leg.legfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(leg.legval(x, coef3), y)\n #\n coef4 = leg.legfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(leg.legval(x, coef4), y)\n #\n coef2d = leg.legfit(x, np.array([y,y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3,coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n y[0::2] = 0\n wcoef3 = leg.legfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)\n\n def test_legtrim(self) :\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, leg.legtrim, coef, -1)\n\n # Test results\n assert_equal(leg.legtrim(coef), coef[:-1])\n assert_equal(leg.legtrim(coef, 1), coef[:-3])\n assert_equal(leg.legtrim(coef, 2), [0])\n\n def test_legline(self) :\n assert_equal(leg.legline(3,4), [3, 4])\n\n def test_leg2poly(self) :\n for i in range(10) :\n assert_almost_equal(leg.leg2poly([0]*i + [1]), Plist[i])\n\n def test_poly2leg(self) :\n for i in range(10) :\n assert_almost_equal(leg.poly2leg(Plist[i]), [0]*i + [1])\n\n\ndef assert_poly_almost_equal(p1, p2):\n assert_almost_equal(p1.coef, p2.coef)\n assert_equal(p1.domain, p2.domain)\n\n\nclass TestLegendreClass(TestCase) :\n\n p1 = leg.Legendre([1,2,3])\n p2 = leg.Legendre([1,2,3], [0,1])\n p3 = leg.Legendre([1,2])\n p4 = leg.Legendre([2,2,3])\n p5 = leg.Legendre([3,2,3])\n\n def test_equal(self) :\n assert_(self.p1 == self.p1)\n assert_(self.p2 == self.p2)\n assert_(not self.p1 == self.p2)\n assert_(not self.p1 == self.p3)\n assert_(not self.p1 == [1,2,3])\n\n def test_not_equal(self) :\n assert_(not self.p1 != self.p1)\n assert_(not self.p2 != self.p2)\n assert_(self.p1 != self.p2)\n assert_(self.p1 != self.p3)\n assert_(self.p1 != [1,2,3])\n\n def test_add(self) :\n tgt = leg.Legendre([2,4,6])\n assert_(self.p1 + self.p1 == tgt)\n assert_(self.p1 + [1,2,3] == tgt)\n assert_([1,2,3] + self.p1 == tgt)\n\n def test_sub(self) :\n tgt = leg.Legendre([1])\n assert_(self.p4 - self.p1 == tgt)\n assert_(self.p4 - [1,2,3] == tgt)\n assert_([2,2,3] - self.p1 == tgt)\n\n def test_mul(self) :\n tgt = leg.Legendre([4.13333333, 8.8, 11.23809524, 7.2, 4.62857143])\n assert_poly_almost_equal(self.p1 * self.p1, tgt)\n assert_poly_almost_equal(self.p1 * [1,2,3], tgt)\n assert_poly_almost_equal([1,2,3] * self.p1, tgt)\n\n def 
test_floordiv(self) :\n tgt = leg.Legendre([1])\n assert_(self.p4 // self.p1 == tgt)\n assert_(self.p4 // [1,2,3] == tgt)\n assert_([2,2,3] // self.p1 == tgt)\n\n def test_mod(self) :\n tgt = leg.Legendre([1])\n assert_((self.p4 % self.p1) == tgt)\n assert_((self.p4 % [1,2,3]) == tgt)\n assert_(([2,2,3] % self.p1) == tgt)\n\n def test_divmod(self) :\n tquo = leg.Legendre([1])\n trem = leg.Legendre([2])\n quo, rem = divmod(self.p5, self.p1)\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod(self.p5, [1,2,3])\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod([3,2,3], self.p1)\n assert_(quo == tquo and rem == trem)\n\n def test_pow(self) :\n tgt = leg.Legendre([1])\n for i in range(5) :\n res = self.p1**i\n assert_(res == tgt)\n tgt = tgt*self.p1\n\n def test_call(self) :\n # domain = [-1, 1]\n x = np.linspace(-1, 1)\n tgt = 3*(1.5*x**2 - .5) + 2*x + 1\n assert_almost_equal(self.p1(x), tgt)\n\n # domain = [0, 1]\n x = np.linspace(0, 1)\n xx = 2*x - 1\n assert_almost_equal(self.p2(x), self.p1(xx))\n\n def test_degree(self) :\n assert_equal(self.p1.degree(), 2)\n\n def test_cutdeg(self) :\n assert_raises(ValueError, self.p1.cutdeg, .5)\n assert_raises(ValueError, self.p1.cutdeg, -1)\n assert_equal(len(self.p1.cutdeg(3)), 3)\n assert_equal(len(self.p1.cutdeg(2)), 3)\n assert_equal(len(self.p1.cutdeg(1)), 2)\n assert_equal(len(self.p1.cutdeg(0)), 1)\n\n def test_convert(self) :\n x = np.linspace(-1,1)\n p = self.p1.convert(domain=[0,1])\n assert_almost_equal(p(x), self.p1(x))\n\n def test_mapparms(self) :\n parms = self.p2.mapparms()\n assert_almost_equal(parms, [-1, 2])\n\n def test_trim(self) :\n coef = [1, 1e-6, 1e-12, 0]\n p = leg.Legendre(coef)\n assert_equal(p.trim().coef, coef[:3])\n assert_equal(p.trim(1e-10).coef, coef[:2])\n assert_equal(p.trim(1e-5).coef, coef[:1])\n\n def test_truncate(self) :\n assert_raises(ValueError, self.p1.truncate, .5)\n assert_raises(ValueError, self.p1.truncate, 0)\n assert_equal(len(self.p1.truncate(4)), 3)\n assert_equal(len(self.p1.truncate(3)), 3)\n assert_equal(len(self.p1.truncate(2)), 2)\n assert_equal(len(self.p1.truncate(1)), 1)\n\n def test_copy(self) :\n p = self.p1.copy()\n assert_(self.p1 == p)\n\n def test_integ(self) :\n p = self.p2.integ()\n assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 0, scl=.5))\n p = self.p2.integ(lbnd=0)\n assert_almost_equal(p(0), 0)\n p = self.p2.integ(1, 1)\n assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 1, scl=.5))\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.coef, leg.legint([1,2,3], 2, [1,2], scl=.5))\n\n def test_deriv(self) :\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)\n assert_almost_equal(p.deriv(2).coef, self.p2.coef)\n\n def test_roots(self) :\n p = leg.Legendre(leg.poly2leg([0, -1, 0, 1]), [0, 1])\n res = p.roots()\n tgt = [0, .5, 1]\n assert_almost_equal(res, tgt)\n\n def test_linspace(self):\n xdes = np.linspace(0, 1, 20)\n ydes = self.p2(xdes)\n xres, yres = self.p2.linspace(20)\n assert_almost_equal(xres, xdes)\n assert_almost_equal(yres, ydes)\n\n def test_fromroots(self) :\n roots = [0, .5, 1]\n p = leg.Legendre.fromroots(roots, domain=[0, 1])\n res = p.coef\n tgt = leg.poly2leg([0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n def test_fit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n x = np.linspace(0,3)\n y = f(x)\n\n # test default value of domain\n p = leg.Legendre.fit(x, y, 3)\n assert_almost_equal(p.domain, [0,3])\n\n # test that fit works in given domains\n p = leg.Legendre.fit(x, y, 3, None)\n 
assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [0,3])\n p = leg.Legendre.fit(x, y, 3, [])\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [-1, 1])\n # test that fit accepts weights.\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n yw[0::2] = 0\n p = leg.Legendre.fit(x, yw, 3, w=w)\n assert_almost_equal(p(x), y)\n\n def test_identity(self) :\n x = np.linspace(0,3)\n p = leg.Legendre.identity()\n assert_almost_equal(p(x), x)\n p = leg.Legendre.identity([1,3])\n assert_almost_equal(p(x), x)\n#\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\"Tests for chebyshev module.\n\n\"\"\"\n\n\nimport numpy as np\nimport numpy.polynomial.chebyshev as ch\nfrom numpy.testing import *\n\ndef trim(x) :\n return ch.chebtrim(x, tol=1e-6)\n\nT0 = [ 1]\nT1 = [ 0, 1]\nT2 = [-1, 0, 2]\nT3 = [ 0, -3, 0, 4]\nT4 = [ 1, 0, -8, 0, 8]\nT5 = [ 0, 5, 0, -20, 0, 16]\nT6 = [-1, 0, 18, 0, -48, 0, 32]\nT7 = [ 0, -7, 0, 56, 0, -112, 0, 64]\nT8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]\nT9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]\n\nTlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]\n\n\nclass TestPrivate(TestCase) :\n\n def test__cseries_to_zseries(self) :\n for i in range(5) :\n inp = np.array([2] + [1]*i, np.double)\n tgt = np.array([.5]*i + [2] + [.5]*i, np.double)\n res = ch._cseries_to_zseries(inp)\n assert_equal(res, tgt)\n\n def test__zseries_to_cseries(self) :\n for i in range(5) :\n inp = np.array([.5]*i + [2] + [.5]*i, np.double)\n tgt = np.array([2] + [1]*i, np.double)\n res = ch._zseries_to_cseries(inp)\n assert_equal(res, tgt)\n\n\nclass TestConstants(TestCase) :\n\n def test_chebdomain(self) :\n assert_equal(ch.chebdomain, [-1, 1])\n\n def test_chebzero(self) :\n assert_equal(ch.chebzero, [0])\n\n def test_chebone(self) :\n assert_equal(ch.chebone, [1])\n\n def test_chebx(self) :\n assert_equal(ch.chebx, [0, 1])\n\n\nclass TestArithmetic(TestCase) :\n\n def test_chebadd(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = ch.chebadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebsub(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = ch.chebsub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebmulx(self):\n assert_equal(ch.chebmulx([0]), [0])\n assert_equal(ch.chebmulx([1]), [0,1])\n for i in range(1, 5):\n ser = [0]*i + [1]\n tgt = [0]*(i - 1) + [.5, 0, .5]\n assert_equal(ch.chebmulx(ser), tgt)\n\n def test_chebmul(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(i + j + 1)\n tgt[i + j] += .5\n tgt[abs(i - j)] += .5\n res = ch.chebmul([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebdiv(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n ci = [0]*i + [1]\n cj = [0]*j + [1]\n tgt = ch.chebadd(ci, cj)\n quo, rem = ch.chebdiv(tgt, ci)\n res = ch.chebadd(ch.chebmul(quo, ci), rem)\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebval(self) :\n def f(x) :\n return x*(x**2 - 1)\n\n #check empty input\n assert_equal(ch.chebval([], [1]).size, 0)\n\n #check normal input)\n for i in range(5) :\n tgt = 1\n res = ch.chebval(1, [0]*i + [1])\n assert_almost_equal(res, tgt)\n tgt = (-1)**i\n res = ch.chebval(-1, [0]*i + [1])\n assert_almost_equal(res, tgt)\n zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = 0\n res = ch.chebval(zeros, [0]*i + [1])\n assert_almost_equal(res, tgt)\n x = np.linspace(-1,1)\n tgt = f(x)\n res = ch.chebval(x, [0, -.25, 0, .25])\n assert_almost_equal(res, tgt)\n\n #check that shape is preserved\n for i in range(3) :\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(ch.chebval(x, [1]).shape, dims)\n assert_equal(ch.chebval(x, [1,0]).shape, dims)\n assert_equal(ch.chebval(x, [1,0,0]).shape, dims)\n\n\nclass TestCalculus(TestCase) :\n\n def test_chebint(self) :\n # check exceptions\n 
assert_raises(ValueError, ch.chebint, [0], .5)\n assert_raises(ValueError, ch.chebint, [0], -1)\n assert_raises(ValueError, ch.chebint, [0], 1, [0,0])\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = ch.chebint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i])\n res = ch.cheb2poly(chebint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(ch.chebval(-1, chebint), i)\n\n # check single integration with integration constant and scaling\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i], scl=2)\n res = ch.cheb2poly(chebint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1)\n res = ch.chebint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k])\n res = ch.chebint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1)\n res = ch.chebint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k], scl=2)\n res = ch.chebint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_chebder(self) :\n # check exceptions\n assert_raises(ValueError, ch.chebder, [0], .5)\n assert_raises(ValueError, ch.chebder, [0], -1)\n\n # check that zeroth deriviative does nothing\n for i in range(5) :\n tgt = [1] + [0]*i\n res = ch.chebder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = ch.chebder(ch.chebint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = ch.chebder(ch.chebint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n\nclass TestMisc(TestCase) :\n\n def test_chebfromroots(self) :\n res = ch.chebfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1,5) :\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = [0]*i + [1]\n res = ch.chebfromroots(roots)*2**(i-1)\n assert_almost_equal(trim(res),trim(tgt))\n\n def test_chebroots(self) :\n assert_almost_equal(ch.chebroots([1]), [])\n assert_almost_equal(ch.chebroots([1, 2]), [-.5])\n for i in range(2,5) :\n tgt = np.linspace(-1, 1, i)\n res = ch.chebroots(ch.chebfromroots(tgt))\n 
assert_almost_equal(trim(res), trim(tgt))\n\n def test_chebvander(self) :\n # check for 1d x\n x = np.arange(3)\n v = ch.chebvander(x, 3)\n assert_(v.shape == (3,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], ch.chebval(x, coef))\n\n # check for 2d x\n x = np.array([[1,2],[3,4],[5,6]])\n v = ch.chebvander(x, 3)\n assert_(v.shape == (3,2,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], ch.chebval(x, coef))\n\n def test_chebfit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n\n # Test exceptions\n assert_raises(ValueError, ch.chebfit, [1], [1], -1)\n assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)\n assert_raises(TypeError, ch.chebfit, [], [1], 0)\n assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)\n assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)\n assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)\n assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1])\n\n # Test fit\n x = np.linspace(0,2)\n y = f(x)\n #\n coef3 = ch.chebfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(ch.chebval(x, coef3), y)\n #\n coef4 = ch.chebfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(ch.chebval(x, coef4), y)\n #\n coef2d = ch.chebfit(x, np.array([y,y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3,coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n y[0::2] = 0\n wcoef3 = ch.chebfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)\n\n def test_chebtrim(self) :\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, ch.chebtrim, coef, -1)\n\n # Test results\n assert_equal(ch.chebtrim(coef), coef[:-1])\n assert_equal(ch.chebtrim(coef, 1), coef[:-3])\n assert_equal(ch.chebtrim(coef, 2), [0])\n\n def test_chebline(self) :\n assert_equal(ch.chebline(3,4), [3, 4])\n\n def test_cheb2poly(self) :\n for i in range(10) :\n assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i])\n\n def test_poly2cheb(self) :\n for i in range(10) :\n assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1])\n\n def test_chebpts1(self):\n #test exceptions\n assert_raises(ValueError, ch.chebpts1, 1.5)\n assert_raises(ValueError, ch.chebpts1, 0)\n\n #test points\n tgt = [0]\n assert_almost_equal(ch.chebpts1(1), tgt)\n tgt = [-0.70710678118654746, 0.70710678118654746]\n assert_almost_equal(ch.chebpts1(2), tgt)\n tgt = [-0.86602540378443871, 0, 0.86602540378443871]\n assert_almost_equal(ch.chebpts1(3), tgt)\n tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]\n assert_almost_equal(ch.chebpts1(4), tgt)\n\n\n def test_chebpts2(self):\n #test exceptions\n assert_raises(ValueError, ch.chebpts2, 1.5)\n assert_raises(ValueError, ch.chebpts2, 1)\n\n #test points\n tgt = [-1, 1]\n assert_almost_equal(ch.chebpts2(2), tgt)\n tgt = [-1, 0, 1]\n assert_almost_equal(ch.chebpts2(3), tgt)\n tgt = [-1, -0.5, .5, 1]\n assert_almost_equal(ch.chebpts2(4), tgt)\n tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]\n assert_almost_equal(ch.chebpts2(5), tgt)\n\n\n\n\nclass TestChebyshevClass(TestCase) :\n\n p1 = ch.Chebyshev([1,2,3])\n p2 = ch.Chebyshev([1,2,3], [0,1])\n p3 = ch.Chebyshev([1,2])\n p4 = ch.Chebyshev([2,2,3])\n p5 = ch.Chebyshev([3,2,3])\n\n def test_equal(self) :\n assert_(self.p1 == self.p1)\n assert_(self.p2 == self.p2)\n assert_(not self.p1 == self.p2)\n assert_(not self.p1 == 
self.p3)\n assert_(not self.p1 == [1,2,3])\n\n def test_not_equal(self) :\n assert_(not self.p1 != self.p1)\n assert_(not self.p2 != self.p2)\n assert_(self.p1 != self.p2)\n assert_(self.p1 != self.p3)\n assert_(self.p1 != [1,2,3])\n\n def test_add(self) :\n tgt = ch.Chebyshev([2,4,6])\n assert_(self.p1 + self.p1 == tgt)\n assert_(self.p1 + [1,2,3] == tgt)\n assert_([1,2,3] + self.p1 == tgt)\n\n def test_sub(self) :\n tgt = ch.Chebyshev([1])\n assert_(self.p4 - self.p1 == tgt)\n assert_(self.p4 - [1,2,3] == tgt)\n assert_([2,2,3] - self.p1 == tgt)\n\n def test_mul(self) :\n tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5])\n assert_(self.p1 * self.p1 == tgt)\n assert_(self.p1 * [1,2,3] == tgt)\n assert_([1,2,3] * self.p1 == tgt)\n\n def test_floordiv(self) :\n tgt = ch.Chebyshev([1])\n assert_(self.p4 // self.p1 == tgt)\n assert_(self.p4 // [1,2,3] == tgt)\n assert_([2,2,3] // self.p1 == tgt)\n\n def test_mod(self) :\n tgt = ch.Chebyshev([1])\n assert_((self.p4 % self.p1) == tgt)\n assert_((self.p4 % [1,2,3]) == tgt)\n assert_(([2,2,3] % self.p1) == tgt)\n\n def test_divmod(self) :\n tquo = ch.Chebyshev([1])\n trem = ch.Chebyshev([2])\n quo, rem = divmod(self.p5, self.p1)\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod(self.p5, [1,2,3])\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod([3,2,3], self.p1)\n assert_(quo == tquo and rem == trem)\n\n def test_pow(self) :\n tgt = ch.Chebyshev([1])\n for i in range(5) :\n res = self.p1**i\n assert_(res == tgt)\n tgt *= self.p1\n\n def test_call(self) :\n # domain = [-1, 1]\n x = np.linspace(-1, 1)\n tgt = 3*(2*x**2 - 1) + 2*x + 1\n assert_almost_equal(self.p1(x), tgt)\n\n # domain = [0, 1]\n x = np.linspace(0, 1)\n xx = 2*x - 1\n assert_almost_equal(self.p2(x), self.p1(xx))\n\n def test_degree(self) :\n assert_equal(self.p1.degree(), 2)\n\n def test_cutdeg(self) :\n assert_raises(ValueError, self.p1.cutdeg, .5)\n assert_raises(ValueError, self.p1.cutdeg, -1)\n assert_equal(len(self.p1.cutdeg(3)), 3)\n assert_equal(len(self.p1.cutdeg(2)), 3)\n assert_equal(len(self.p1.cutdeg(1)), 2)\n assert_equal(len(self.p1.cutdeg(0)), 1)\n\n def test_convert(self) :\n x = np.linspace(-1,1)\n p = self.p1.convert(domain=[0,1])\n assert_almost_equal(p(x), self.p1(x))\n\n def test_mapparms(self) :\n parms = self.p2.mapparms()\n assert_almost_equal(parms, [-1, 2])\n\n def test_trim(self) :\n coef = [1, 1e-6, 1e-12, 0]\n p = ch.Chebyshev(coef)\n assert_equal(p.trim().coef, coef[:3])\n assert_equal(p.trim(1e-10).coef, coef[:2])\n assert_equal(p.trim(1e-5).coef, coef[:1])\n\n def test_truncate(self) :\n assert_raises(ValueError, self.p1.truncate, .5)\n assert_raises(ValueError, self.p1.truncate, 0)\n assert_equal(len(self.p1.truncate(4)), 3)\n assert_equal(len(self.p1.truncate(3)), 3)\n assert_equal(len(self.p1.truncate(2)), 2)\n assert_equal(len(self.p1.truncate(1)), 1)\n\n def test_copy(self) :\n p = self.p1.copy()\n assert_(self.p1 == p)\n\n def test_integ(self) :\n p = self.p2.integ()\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5))\n p = self.p2.integ(lbnd=0)\n assert_almost_equal(p(0), 0)\n p = self.p2.integ(1, 1)\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5))\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5))\n\n def test_deriv(self) :\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)\n assert_almost_equal(p.deriv(2).coef, self.p2.coef)\n\n def test_roots(self) :\n p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1])\n 
res = p.roots()\n tgt = [0, .5, 1]\n assert_almost_equal(res, tgt)\n\n def test_linspace(self):\n xdes = np.linspace(0, 1, 20)\n ydes = self.p2(xdes)\n xres, yres = self.p2.linspace(20)\n assert_almost_equal(xres, xdes)\n assert_almost_equal(yres, ydes)\n\n def test_fromroots(self) :\n roots = [0, .5, 1]\n p = ch.Chebyshev.fromroots(roots, domain=[0, 1])\n res = p.coef\n tgt = ch.poly2cheb([0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n def test_fit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n x = np.linspace(0,3)\n y = f(x)\n\n # test default value of domain\n p = ch.Chebyshev.fit(x, y, 3)\n assert_almost_equal(p.domain, [0,3])\n\n # test that fit works in given domains\n p = ch.Chebyshev.fit(x, y, 3, None)\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [0,3])\n p = ch.Chebyshev.fit(x, y, 3, [])\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [-1, 1])\n # test that fit accepts weights.\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n yw[0::2] = 0\n p = ch.Chebyshev.fit(x, yw, 3, w=w)\n assert_almost_equal(p(x), y)\n\n def test_identity(self) :\n x = np.linspace(0,3)\n p = ch.Chebyshev.identity()\n assert_almost_equal(p(x), x)\n p = ch.Chebyshev.identity([1,3])\n assert_almost_equal(p(x), x)\n#\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"import numpy as np\nfrom numpy.testing import *\n\ndef assert_dtype_equal(a, b):\n assert_equal(a, b)\n assert_equal(hash(a), hash(b),\n \"two equivalent types do not hash to the same value !\")\n\ndef assert_dtype_not_equal(a, b):\n assert_(a != b)\n assert_(hash(a) != hash(b),\n \"two different types hash to the same value !\")\n\nclass TestBuiltin(TestCase):\n def test_run(self):\n \"\"\"Only test hash runs at all.\"\"\"\n for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,\n np.str]:\n dt = np.dtype(t)\n hash(dt)\n\n def test_dtype(self):\n # Make sure equivalent byte order char hash the same (e.g. < and = on\n # little endian)\n for t in [np.int, np.float]:\n dt = np.dtype(t)\n dt2 = dt.newbyteorder(\"<\")\n dt3 = dt.newbyteorder(\">\")\n if dt == dt2:\n self.assertTrue(dt.byteorder != dt2.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt2)\n else:\n self.assertTrue(dt.byteorder != dt3.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt3)\n\n def test_equivalent_dtype_hashing(self):\n # Make sure equivalent dtypes with different type num hash equal\n uintp = np.dtype(np.uintp)\n if uintp.itemsize == 4:\n left = uintp\n right = np.dtype(np.uint32)\n else:\n left = uintp\n right = np.dtype(np.ulonglong)\n self.assertTrue(left == right)\n self.assertTrue(hash(left) == hash(right))\n\nclass TestRecord(TestCase):\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent record dtypes hash the same.\"\"\"\n a = np.dtype([('yo', np.int)])\n b = np.dtype([('yo', np.int)])\n assert_dtype_equal(a, b)\n\n def test_different_names(self):\n # In theory, they may hash the same (collision) ?\n a = np.dtype([('yo', np.int)])\n b = np.dtype([('ye', np.int)])\n assert_dtype_not_equal(a, b)\n\n def test_different_titles(self):\n # In theory, they may hash the same (collision) ?\n a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],\n 'titles': ['RRed pixel', 'Blue pixel']})\n assert_dtype_not_equal(a, b)\n\n def test_not_lists(self):\n \"\"\"Test if an appropriate exception is raised when passing bad values to\n the dtype constructor.\n \"\"\"\n self.assertRaises(TypeError, np.dtype,\n dict(names=set(['A', 'B']), formats=['f8', 'i4']))\n self.assertRaises(TypeError, np.dtype,\n dict(names=['A', 'B'], formats=set(['f8', 'i4'])))\n\nclass TestSubarray(TestCase):\n def test_single_subarray(self):\n a = np.dtype((np.int, (2)))\n b = np.dtype((np.int, (2,)))\n assert_dtype_equal(a, b)\n\n assert_equal(type(a.subdtype[1]), tuple)\n assert_equal(type(b.subdtype[1]), tuple)\n\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent subarray dtypes hash the same.\"\"\"\n a = np.dtype((np.int, (2, 3)))\n b = np.dtype((np.int, (2, 3)))\n assert_dtype_equal(a, b)\n\n def test_nonequivalent_record(self):\n \"\"\"Test whether different subarray dtypes hash differently.\"\"\"\n a = np.dtype((np.int, (2, 3)))\n b = np.dtype((np.int, (3, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((np.int, (2, 3)))\n b = np.dtype((np.int, (2, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((np.int, (1, 2, 3)))\n b = np.dtype((np.int, (1, 2)))\n assert_dtype_not_equal(a, b)\n\n def test_shape_equal(self):\n \"\"\"Test some data types that are equal\"\"\"\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',tuple())))\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',1)))\n assert_dtype_equal(np.dtype((np.int,2)), np.dtype((np.int,(2,))))\n 
assert_dtype_equal(np.dtype(('<f4',(3,2))), np.dtype(('<f4',(3,2))))\n d = ([('a','f4',(1,2)),('b','f8',(3,1))],(3,2))\n assert_dtype_equal(np.dtype(d), np.dtype(d))\n\n def test_shape_simple(self):\n \"\"\"Test some simple cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8',(1,))))\n assert_dtype_not_equal(np.dtype(('f8',(1,))), np.dtype(('f8',(1,1))))\n assert_dtype_not_equal(np.dtype(('f4',(3,2))), np.dtype(('f4',(2,3))))\n\n def test_shape_monster(self):\n \"\"\"Test some more complicated cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(\n np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))),\n np.dtype(([('a','f4',(1,2)), ('b','f8',(1,3))],(2,2))))\n assert_dtype_not_equal(\n np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))),\n np.dtype(([('a','f4',(2,1)), ('b','i8',(1,3))],(2,2))))\n assert_dtype_not_equal(\n np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))),\n np.dtype(([('e','f8',(1,3)), ('d','f4',(2,1))],(2,2))))\n assert_dtype_not_equal(\n np.dtype(([('a',[('a','i4',6)],(2,1)), ('b','f8',(1,3))],(2,2))),\n np.dtype(([('a',[('a','u4',6)],(2,1)), ('b','f8',(1,3))],(2,2))))\n\nclass TestMonsterType(TestCase):\n \"\"\"Test deeply nested subtypes.\"\"\"\n def test1(self):\n simple1 = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n a = np.dtype([('yo', np.int), ('ye', simple1),\n ('yi', np.dtype((np.int, (3, 2))))])\n b = np.dtype([('yo', np.int), ('ye', simple1),\n ('yi', np.dtype((np.int, (3, 2))))])\n assert_dtype_equal(a, b)\n\n c = np.dtype([('yo', np.int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n d = np.dtype([('yo', np.int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n assert_dtype_equal(c, d)\n\nclass TestMetadata(TestCase):\n def test_no_metadata(self):\n d = np.dtype(int)\n self.assertEqual(d.metadata, None)\n\n def test_metadata_takes_dict(self):\n d = np.dtype(int, metadata={'datum': 1})\n self.assertEqual(d.metadata, {'datum': 1})\n\n def test_metadata_rejects_nondict(self):\n self.assertRaises(TypeError, np.dtype, int, metadata='datum')\n self.assertRaises(TypeError, np.dtype, int, metadata=1)\n self.assertRaises(TypeError, np.dtype, int, metadata=None)\n\n def test_nested_metadata(self):\n d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])\n self.assertEqual(d['a'].metadata, {'datum': 1})\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\"A collection of functions designed to help I/O with ascii files.\"\"\"\n__docformat__ = \"restructuredtext en\"\n\nimport sys\nimport numpy as np\nimport numpy.core.numeric as nx\nfrom builtins import bool, int, int, float, complex, object, str, str\n\nfrom numpy.compat import asbytes, bytes, asbytes_nested\n\nif sys.version_info[0] >= 3:\n def _bytes_to_complex(s):\n return complex(s.decode('ascii'))\n def _bytes_to_name(s):\n return s.decode('ascii')\nelse:\n _bytes_to_complex = complex\n _bytes_to_name = str\n\ndef _is_string_like(obj):\n \"\"\"\n Check whether obj behaves like a string.\n \"\"\"\n try:\n obj + ''\n except (TypeError, ValueError):\n return False\n return True\n\ndef _is_bytes_like(obj):\n \"\"\"\n Check whether obj behaves like a bytes object.\n \"\"\"\n try:\n obj + asbytes('')\n except (TypeError, ValueError):\n return False\n return True\n\n\ndef _to_filehandle(fname, flag='r', return_opened=False):\n \"\"\"\n Returns the filehandle corresponding to a string or a file.\n If the string ends in '.gz', the file is automatically unzipped.\n \n Parameters\n ----------\n fname : string, filehandle\n Name of the file whose filehandle must be returned.\n flag : string, optional\n Flag indicating the status of the file ('r' for read, 'w' for write).\n return_opened : boolean, optional\n Whether to return the opening status of the file.\n \"\"\"\n if _is_string_like(fname):\n if fname.endswith('.gz'):\n import gzip\n fhd = gzip.open(fname, flag)\n elif fname.endswith('.bz2'):\n import bz2\n fhd = bz2.BZ2File(fname)\n else:\n fhd = file(fname, flag)\n opened = True\n elif hasattr(fname, 'seek'):\n fhd = fname\n opened = False\n else:\n raise ValueError('fname must be a string or file handle')\n if return_opened:\n return fhd, opened\n return fhd\n\n\ndef has_nested_fields(ndtype):\n \"\"\"\n Returns whether one or several fields of a dtype are nested.\n\n Parameters\n ----------\n ndtype : dtype\n Data-type of a structured array.\n\n Raises\n ------\n AttributeError : If `ndtype` does not have a `names` attribute.\n\n Examples\n --------\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])\n >>> np.lib._iotools.has_nested_fields(dt)\n False\n\n \"\"\"\n for name in ndtype.names or ():\n if ndtype[name].names:\n return True\n return False\n\n\ndef flatten_dtype(ndtype, flatten_base=False):\n \"\"\"\n Unpack a structured data-type by collapsing nested fields and/or fields\n with a shape.\n\n Note that the field names are lost.\n\n Parameters\n ----------\n ndtype : dtype\n The datatype to collapse\n flatten_base : {False, True}, optional\n Whether to transform a field with a shape into several fields or not.\n\n Examples\n --------\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),\n ... 
('block', int, (2, 3))])\n >>> np.lib._iotools.flatten_dtype(dt)\n [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]\n >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)\n [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),\n dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),\n dtype('int32')]\n\n \"\"\"\n names = ndtype.names\n if names is None:\n if flatten_base:\n return [ndtype.base] * int(np.prod(ndtype.shape))\n return [ndtype.base]\n else:\n types = []\n for field in names:\n info = ndtype.fields[field]\n flat_dt = flatten_dtype(info[0], flatten_base)\n types.extend(flat_dt)\n return types\n\n\n\n\n\n\nclass LineSplitter:\n \"\"\"\n Object to split a string at a given delimiter or at given places.\n\n Parameters\n ----------\n delimiter : str, int, or sequence of ints, optional\n If a string, character used to delimit consecutive fields.\n If an integer or a sequence of integers, width(s) of each field.\n comment : str, optional\n Character used to mark the beginning of a comment. Default is '#'.\n autostrip : bool, optional\n Whether to strip each individual field. Default is True.\n\n \"\"\"\n\n def autostrip(self, method):\n \"\"\"\n Wrapper to strip each member of the output of `method`.\n\n Parameters\n ----------\n method : function\n Function that takes a single argument and returns a sequence of\n strings.\n\n Returns\n -------\n wrapped : function\n The result of wrapping `method`. `wrapped` takes a single input\n argument and returns a list of strings that are stripped of\n white-space.\n\n \"\"\"\n return lambda input: [_.strip() for _ in method(input)]\n #\n def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):\n self.comments = comments\n # Delimiter is a character\n if isinstance(delimiter, str):\n delimiter = delimiter.encode('ascii')\n if (delimiter is None) or _is_bytes_like(delimiter):\n delimiter = delimiter or None\n _handyman = self._delimited_splitter\n # Delimiter is a list of field widths\n elif hasattr(delimiter, '__iter__'):\n _handyman = self._variablewidth_splitter\n idx = np.cumsum([0] + list(delimiter))\n delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]\n # Delimiter is a single integer\n elif int(delimiter):\n (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))\n else:\n (_handyman, delimiter) = (self._delimited_splitter, None)\n self.delimiter = delimiter\n if autostrip:\n self._handyman = self.autostrip(_handyman)\n else:\n self._handyman = _handyman\n #\n def _delimited_splitter(self, line):\n line = line.split(self.comments)[0].strip(asbytes(\" \\r\\n\"))\n if not line:\n return []\n return line.split(self.delimiter)\n #\n def _fixedwidth_splitter(self, line):\n line = line.split(self.comments)[0].strip(asbytes(\"\\r\\n\"))\n if not line:\n return []\n fixed = self.delimiter\n slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]\n return [line[s] for s in slices]\n #\n def _variablewidth_splitter(self, line):\n line = line.split(self.comments)[0]\n if not line:\n return []\n slices = self.delimiter\n return [line[s] for s in slices]\n #\n def __call__(self, line):\n return self._handyman(line)\n\n\n\nclass NameValidator:\n \"\"\"\n Object to validate a list of strings to use as field names.\n\n The strings are stripped of any non alphanumeric character, and spaces\n are replaced by '_'. During instantiation, the user can define a list of\n names to exclude, as well as a list of invalid characters. 
Names in the\n exclusion list are appended a '_' character.\n\n Once an instance has been created, it can be called with a list of names,\n and a list of valid names will be created.\n The `__call__` method accepts an optional keyword \"default\" that sets\n the default name in case of ambiguity. By default this is 'f', so\n that names will default to `f0`, `f1`, etc.\n\n Parameters\n ----------\n excludelist : sequence, optional\n A list of names to exclude. This list is appended to the default list\n ['return', 'file', 'print']. Excluded names are appended an underscore:\n for example, `file` becomes `file_` if supplied.\n deletechars : str, optional\n A string combining invalid characters that must be deleted from the\n names.\n casesensitive : {True, False, 'upper', 'lower'}, optional\n * If True, field names are case-sensitive.\n * If False or 'upper', field names are converted to upper case.\n * If 'lower', field names are converted to lower case.\n\n The default value is True.\n replace_space: '_', optional\n Character(s) used in replacement of white spaces.\n\n Notes\n -----\n Calling an instance of `NameValidator` is the same as calling its method\n `validate`.\n\n Examples\n --------\n >>> validator = np.lib._iotools.NameValidator()\n >>> validator(['file', 'field2', 'with space', 'CaSe'])\n ['file_', 'field2', 'with_space', 'CaSe']\n\n >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],\n deletechars='q',\n case_sensitive='False')\n >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])\n ['excl_', 'field2', 'no_', 'with_space', 'case']\n\n \"\"\"\n #\n defaultexcludelist = ['return', 'file', 'print']\n defaultdeletechars = set(\"\"\"~!@#$%^&*()-=+~\\|]}[{';: /?.>,<\"\"\")\n #\n def __init__(self, excludelist=None, deletechars=None,\n case_sensitive=None, replace_space='_'):\n # Process the exclusion list ..\n if excludelist is None:\n excludelist = []\n excludelist.extend(self.defaultexcludelist)\n self.excludelist = excludelist\n # Process the list of characters to delete\n if deletechars is None:\n delete = self.defaultdeletechars\n else:\n delete = set(deletechars)\n delete.add('\"')\n self.deletechars = delete\n # Process the case option .....\n if (case_sensitive is None) or (case_sensitive is True):\n self.case_converter = lambda x: x\n elif (case_sensitive is False) or ('u' in case_sensitive):\n self.case_converter = lambda x: x.upper()\n elif 'l' in case_sensitive:\n self.case_converter = lambda x: x.lower()\n else:\n self.case_converter = lambda x: x\n #\n self.replace_space = replace_space\n\n def validate(self, names, defaultfmt=\"f%i\", nbfields=None):\n \"\"\"\n Validate a list of strings to use as field names for a structured array.\n\n Parameters\n ----------\n names : sequence of str\n Strings to be validated.\n defaultfmt : str, optional\n Default format string, used if validating a given string reduces its\n length to zero.\n nboutput : integer, optional\n Final number of validated names, used to expand or shrink the initial\n list of names.\n\n Returns\n -------\n validatednames : list of str\n The list of validated field names.\n\n Notes\n -----\n A `NameValidator` instance can be called directly, which is the same as\n calling `validate`. 
For examples, see `NameValidator`.\n\n \"\"\"\n # Initial checks ..............\n if (names is None):\n if (nbfields is None):\n return None\n names = []\n if isinstance(names, str):\n names = [names, ]\n if nbfields is not None:\n nbnames = len(names)\n if (nbnames < nbfields):\n names = list(names) + [''] * (nbfields - nbnames)\n elif (nbnames > nbfields):\n names = names[:nbfields]\n # Set some shortcuts ...........\n deletechars = self.deletechars\n excludelist = self.excludelist\n case_converter = self.case_converter\n replace_space = self.replace_space\n # Initializes some variables ...\n validatednames = []\n seen = dict()\n nbempty = 0\n #\n for item in names:\n item = case_converter(item).strip()\n if replace_space:\n item = item.replace(' ', replace_space)\n item = ''.join([c for c in item if c not in deletechars])\n if item == '':\n item = defaultfmt % nbempty\n while item in names:\n nbempty += 1\n item = defaultfmt % nbempty\n nbempty += 1\n elif item in excludelist:\n item += '_'\n cnt = seen.get(item, 0)\n if cnt > 0:\n validatednames.append(item + '_%d' % cnt)\n else:\n validatednames.append(item)\n seen[item] = cnt + 1\n return tuple(validatednames)\n #\n def __call__(self, names, defaultfmt=\"f%i\", nbfields=None):\n return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)\n\n\n\ndef str2bool(value):\n \"\"\"\n Tries to transform a string supposed to represent a boolean to a boolean.\n\n Parameters\n ----------\n value : str\n The string that is transformed to a boolean.\n\n Returns\n -------\n boolval : bool\n The boolean representation of `value`.\n\n Raises\n ------\n ValueError\n If the string is not 'True' or 'False' (case independent)\n\n Examples\n --------\n >>> np.lib._iotools.str2bool('TRUE')\n True\n >>> np.lib._iotools.str2bool('false')\n False\n\n \"\"\"\n value = value.upper()\n if value == asbytes('TRUE'):\n return True\n elif value == asbytes('FALSE'):\n return False\n else:\n raise ValueError(\"Invalid boolean\")\n\n\nclass ConverterError(Exception):\n \"\"\"\n Exception raised when an error occurs in a converter for string values.\n\n \"\"\"\n pass\n\nclass ConverterLockError(ConverterError):\n \"\"\"\n Exception raised when an attempt is made to upgrade a locked converter.\n\n \"\"\"\n pass\n\nclass ConversionWarning(UserWarning):\n \"\"\"\n Warning issued when a string converter has a problem.\n\n Notes\n -----\n In `genfromtxt` a `ConversionWarning` is issued if raising exceptions\n is explicitly suppressed with the \"invalid_raise\" keyword.\n\n \"\"\"\n pass\n\n\n\nclass StringConverter:\n \"\"\"\n Factory class for function transforming a string into another object (int,\n float).\n\n After initialization, an instance can be called to transform a string\n into another object. If the string is recognized as representing a missing\n value, a default value is returned.\n\n Attributes\n ----------\n func : function\n Function used for the conversion.\n default : any\n Default value to return when the input corresponds to a missing value.\n type : type\n Type of the output.\n _status : int\n Integer representing the order of the conversion.\n _mapper : sequence of tuples\n Sequence of tuples (dtype, function, default value) to evaluate in\n order.\n _locked : bool\n Holds `locked` parameter.\n\n Parameters\n ----------\n dtype_or_func : {None, dtype, function}, optional\n If a `dtype`, specifies the input data type, used to define a basic\n function and a default value for missing data. 
For example, when\n `dtype` is float, the `func` attribute is set to `float` and the\n default value to `np.nan`.\n If a function, this function is used to convert a string to another\n object. In this case, it is recommended to give an associated default\n value as input.\n default : any, optional\n Value to return by default, that is, when the string to be converted\n is flagged as missing. If not given, `StringConverter` tries to supply\n a reasonable default value.\n missing_values : sequence of str, optional\n Sequence of strings indicating a missing value.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent automatic\n upgrade or not. Default is False.\n\n \"\"\"\n #\n _mapper = [(nx.bool_, str2bool, False),\n (nx.integer, int, -1),\n (nx.floating, float, nx.nan),\n (complex, _bytes_to_complex, nx.nan + 0j),\n (nx.string_, bytes, asbytes('???'))]\n (_defaulttype, _defaultfunc, _defaultfill) = list(zip(*_mapper))\n #\n @classmethod\n def _getsubdtype(cls, val):\n \"\"\"Returns the type of the dtype of the input variable.\"\"\"\n return np.array(val).dtype.type\n #\n @classmethod\n def upgrade_mapper(cls, func, default=None):\n \"\"\"\n Upgrade the mapper of a StringConverter by adding a new function and its\n corresponding default.\n \n The input function (or sequence of functions) and its associated default \n value (if any) is inserted in penultimate position of the mapper.\n The corresponding type is estimated from the dtype of the default value.\n \n Parameters\n ----------\n func : var\n Function, or sequence of functions\n\n Examples\n --------\n >>> import dateutil.parser\n >>> import datetime\n >>> dateparser = datetustil.parser.parse\n >>> defaultdate = datetime.date(2000, 1, 1)\n >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)\n \"\"\"\n # Func is a single functions\n if hasattr(func, '__call__'):\n cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))\n return\n elif hasattr(func, '__iter__'):\n if isinstance(func[0], (tuple, list)):\n for _ in func:\n cls._mapper.insert(-1, _)\n return\n if default is None:\n default = [None] * len(func)\n else:\n default = list(default)\n default.append([None] * (len(func) - len(default)))\n for (fct, dft) in zip(func, default):\n cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))\n #\n def __init__(self, dtype_or_func=None, default=None, missing_values=None,\n locked=False):\n # Convert unicode (for Py3)\n if isinstance(missing_values, str):\n missing_values = asbytes(missing_values)\n elif isinstance(missing_values, (list, tuple)):\n missing_values = asbytes_nested(missing_values)\n # Defines a lock for upgrade\n self._locked = bool(locked)\n # No input dtype: minimal initialization\n if dtype_or_func is None:\n self.func = str2bool\n self._status = 0\n self.default = default or False\n ttype = np.bool\n else:\n # Is the input a np.dtype ?\n try:\n self.func = None\n ttype = np.dtype(dtype_or_func).type\n except TypeError:\n # dtype_or_func must be a function, then\n if not hasattr(dtype_or_func, '__call__'):\n errmsg = \"The input argument `dtype` is neither a function\"\\\n \" or a dtype (got '%s' instead)\"\n raise TypeError(errmsg % type(dtype_or_func))\n # Set the function\n self.func = dtype_or_func\n # If we don't have a default, try to guess it or set it to None\n if default is None:\n try:\n default = self.func(asbytes('0'))\n except ValueError:\n default = None\n ttype = self._getsubdtype(default)\n # Set the status according to the dtype\n _status = -1\n 
for (i, (deftype, func, default_def)) in enumerate(self._mapper):\n if np.issubdtype(ttype, deftype):\n _status = i\n if default is None:\n self.default = default_def\n else:\n self.default = default\n break\n if _status == -1:\n # We never found a match in the _mapper...\n _status = 0\n self.default = default\n self._status = _status\n # If the input was a dtype, set the function to the last we saw\n if self.func is None:\n self.func = func\n # If the status is 1 (int), change the function to\n # something more robust.\n if self.func == self._mapper[1][1]:\n if issubclass(ttype, np.uint64):\n self.func = np.uint64\n elif issubclass(ttype, np.int64):\n self.func = np.int64\n else:\n self.func = lambda x : int(float(x))\n # Store the list of strings corresponding to missing values.\n if missing_values is None:\n self.missing_values = set([asbytes('')])\n else:\n if isinstance(missing_values, bytes):\n missing_values = missing_values.split(asbytes(\",\"))\n self.missing_values = set(list(missing_values) + [asbytes('')])\n #\n self._callingfunction = self._strict_call\n self.type = ttype\n self._checked = False\n self._initial_default = default\n #\n def _loose_call(self, value):\n try:\n return self.func(value)\n except ValueError:\n return self.default\n #\n def _strict_call(self, value):\n try:\n return self.func(value)\n except ValueError:\n if value.strip() in self.missing_values:\n if not self._status:\n self._checked = False\n return self.default\n raise ValueError(\"Cannot convert string '%s'\" % value)\n #\n def __call__(self, value):\n return self._callingfunction(value)\n #\n def upgrade(self, value):\n \"\"\"\n Try to find the best converter for a given string, and return the result.\n\n The supplied string `value` is converted by testing different\n converters in order. 
First the `func` method of the `StringConverter`\n instance is tried, if this fails other available converters are tried.\n The order in which these other converters are tried is determined by the\n `_status` attribute of the instance.\n\n Parameters\n ----------\n value : str\n The string to convert.\n\n Returns\n -------\n out : any\n The result of converting `value` with the appropriate converter.\n\n \"\"\"\n self._checked = True\n try:\n self._strict_call(value)\n except ValueError:\n # Raise an exception if we locked the converter...\n if self._locked:\n errmsg = \"Converter is locked and cannot be upgraded\"\n raise ConverterLockError(errmsg)\n _statusmax = len(self._mapper)\n # Complains if we try to upgrade by the maximum\n _status = self._status\n if _status == _statusmax:\n errmsg = \"Could not find a valid conversion function\"\n raise ConverterError(errmsg)\n elif _status < _statusmax - 1:\n _status += 1\n (self.type, self.func, default) = self._mapper[_status]\n self._status = _status\n if self._initial_default is not None:\n self.default = self._initial_default\n else:\n self.default = default\n self.upgrade(value)\n\n def iterupgrade(self, value):\n self._checked = True\n if not hasattr(value, '__iter__'):\n value = (value,)\n _strict_call = self._strict_call\n try:\n list(map(_strict_call, value))\n except ValueError:\n # Raise an exception if we locked the converter...\n if self._locked:\n errmsg = \"Converter is locked and cannot be upgraded\"\n raise ConverterLockError(errmsg)\n _statusmax = len(self._mapper)\n # Complains if we try to upgrade by the maximum\n _status = self._status\n if _status == _statusmax:\n raise ConverterError(\"Could not find a valid conversion function\")\n elif _status < _statusmax - 1:\n _status += 1\n (self.type, self.func, default) = self._mapper[_status]\n if self._initial_default is not None:\n self.default = self._initial_default\n else:\n self.default = default\n self._status = _status\n self.iterupgrade(value)\n\n def update(self, func, default=None, testing_value=None,\n missing_values=asbytes(''), locked=False):\n \"\"\"\n Set StringConverter attributes directly.\n\n Parameters\n ----------\n func : function\n Conversion function.\n default : any, optional\n Value to return by default, that is, when the string to be converted\n is flagged as missing. If not given, `StringConverter` tries to supply\n a reasonable default value.\n testing_value : str, optional\n A string representing a standard input value of the converter.\n This string is used to help defining a reasonable default value.\n missing_values : sequence of str, optional\n Sequence of strings indicating a missing value.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent automatic\n upgrade or not. 
Default is False.\n\n Notes\n -----\n `update` takes the same parameters as the constructor of `StringConverter`,\n except that `func` does not accept a `dtype` whereas `dtype_or_func` in\n the constructor does.\n\n \"\"\"\n self.func = func\n self._locked = locked\n # Don't reset the default to None if we can avoid it\n if default is not None:\n self.default = default\n self.type = self._getsubdtype(default)\n else:\n try:\n tester = func(testing_value or asbytes('1'))\n except (TypeError, ValueError):\n tester = None\n self.type = self._getsubdtype(tester)\n # Add the missing values to the existing set\n if missing_values is not None:\n if _is_bytes_like(missing_values):\n self.missing_values.add(missing_values)\n elif hasattr(missing_values, '__iter__'):\n for val in missing_values:\n self.missing_values.add(val)\n else:\n self.missing_values = []\n\n\n\ndef easy_dtype(ndtype, names=None, defaultfmt=\"f%i\", **validationargs):\n \"\"\"\n Convenience function to create a `np.dtype` object.\n\n The function processes the input `dtype` and matches it with the given\n names.\n\n Parameters\n ----------\n ndtype : var\n Definition of the dtype. Can be any string or dictionary\n recognized by the `np.dtype` function, or a sequence of types.\n names : str or sequence, optional\n Sequence of strings to use as field names for a structured dtype.\n For convenience, `names` can be a string of a comma-separated list of\n names.\n defaultfmt : str, optional\n Format string used to define missing names, such as ``\"f%i\"``\n (default) or ``\"fields_%02i\"``.\n validationargs : optional\n A series of optional arguments used to initialize a `NameValidator`.\n\n Examples\n --------\n >>> np.lib._iotools.easy_dtype(float)\n dtype('float64')\n >>> np.lib._iotools.easy_dtype(\"i4, f8\")\n dtype([('f0', '<i4'), ('f1', '<f8')])\n >>> np.lib._iotools.easy_dtype(\"i4, f8\", defaultfmt=\"field_%03i\")\n dtype([('field_000', '<i4'), ('field_001', '<f8')])\n\n >>> np.lib._iotools.easy_dtype((int, float, float), names=\"a,b,c\")\n dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])\n >>> np.lib._iotools.easy_dtype(float, names=\"a,b,c\")\n dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])\n\n \"\"\"\n try:\n ndtype = np.dtype(ndtype)\n except TypeError:\n validate = NameValidator(**validationargs)\n nbfields = len(ndtype)\n if names is None:\n names = [''] * len(ndtype)\n elif isinstance(names, str):\n names = names.split(\",\")\n names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)\n ndtype = np.dtype(dict(formats=ndtype, names=names))\n else:\n nbtypes = len(ndtype)\n # Explicit names\n if names is not None:\n validate = NameValidator(**validationargs)\n if isinstance(names, str):\n names = names.split(\",\")\n # Simple dtype: repeat to match the nb of names\n if nbtypes == 0:\n formats = tuple([ndtype.type] * len(names))\n names = validate(names, defaultfmt=defaultfmt)\n ndtype = np.dtype(list(zip(names, formats)))\n # Structured dtype: just validate the names as needed\n else:\n ndtype.names = validate(names, nbfields=nbtypes,\n defaultfmt=defaultfmt)\n # No implicit names\n elif (nbtypes > 0):\n validate = NameValidator(**validationargs)\n # Default initial names : should we change the format ?\n if (ndtype.names == tuple(\"f%i\" % i for i in range(nbtypes))) and \\\n (defaultfmt != \"f%i\"):\n ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)\n # Explicit initial names : just validate\n else:\n ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)\n return ndtype\n\n",
"\"\"\"\nObjects for dealing with Chebyshev series.\n\nThis module provides a number of objects (mostly functions) useful for\ndealing with Chebyshev series, including a `Chebyshev` class that\nencapsulates the usual arithmetic operations. (General information\non how this module represents and works with such polynomials is in the\ndocstring for its \"parent\" sub-package, `numpy.polynomial`).\n\nConstants\n---------\n- `chebdomain` -- Chebyshev series default domain, [-1,1].\n- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates\n identically to 0.\n- `chebone` -- (Coefficients of the) Chebyshev series that evaluates\n identically to 1.\n- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,\n ``f(x) = x``.\n\nArithmetic\n----------\n- `chebadd` -- add two Chebyshev series.\n- `chebsub` -- subtract one Chebyshev series from another.\n- `chebmul` -- multiply two Chebyshev series.\n- `chebdiv` -- divide one Chebyshev series by another.\n- `chebpow` -- raise a Chebyshev series to an positive integer power\n- `chebval` -- evaluate a Chebyshev series at given points.\n\nCalculus\n--------\n- `chebder` -- differentiate a Chebyshev series.\n- `chebint` -- integrate a Chebyshev series.\n\nMisc Functions\n--------------\n- `chebfromroots` -- create a Chebyshev series with specified roots.\n- `chebroots` -- find the roots of a Chebyshev series.\n- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.\n- `chebfit` -- least-squares fit returning a Chebyshev series.\n- `chebpts1` -- Chebyshev points of the first kind.\n- `chebpts2` -- Chebyshev points of the second kind.\n- `chebtrim` -- trim leading coefficients from a Chebyshev series.\n- `chebline` -- Chebyshev series representing given straight line.\n- `cheb2poly` -- convert a Chebyshev series to a polynomial.\n- `poly2cheb` -- convert a polynomial to a Chebyshev series.\n\nClasses\n-------\n- `Chebyshev` -- A Chebyshev series class.\n\nSee also\n--------\n`numpy.polynomial`\n\nNotes\n-----\nThe implementations of multiplication, division, integration, and\ndifferentiation use the algebraic identities [1]_:\n\n.. math ::\n T_n(x) = \\\\frac{z^n + z^{-n}}{2} \\\\\\\\\n z\\\\frac{dx}{dz} = \\\\frac{z - z^{-1}}{2}.\n\nwhere\n\n.. math :: x = \\\\frac{z + z^{-1}}{2}.\n\nThese identities allow a Chebyshev series to be expressed as a finite,\nsymmetric Laurent series. In this module, this sort of Laurent series\nis referred to as a \"z-series.\"\n\nReferences\n----------\n.. [1] A. T. Benjamin, et al., \"Combinatorial Trigonometry with Chebyshev\n Polynomials,\" *Journal of Statistical Planning and Inference 14*, 2008\n (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)\n\n\"\"\"\n\n\n__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',\n 'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow',\n 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb',\n 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots',\n 'chebpts1', 'chebpts2', 'Chebyshev']\n\nimport numpy as np\nimport numpy.linalg as la\nfrom . import polyutils as pu\nimport warnings\nfrom .polytemplate import polytemplate\n\nchebtrim = pu.trimcoef\n\n#\n# A collection of functions for manipulating z-series. These are private\n# functions and do minimal error checking.\n#\n\ndef _cseries_to_zseries(cs) :\n \"\"\"Covert Chebyshev series to z-series.\n\n Covert a Chebyshev series to the equivalent z-series. The result is\n never an empty array. 
The dtype of the return is the same as that of\n the input. No checks are run on the arguments as this routine is for\n internal use.\n\n Parameters\n ----------\n cs : 1-d ndarray\n Chebyshev coefficients, ordered from low to high\n\n Returns\n -------\n zs : 1-d ndarray\n Odd length symmetric z-series, ordered from low to high.\n\n \"\"\"\n n = cs.size\n zs = np.zeros(2*n-1, dtype=cs.dtype)\n zs[n-1:] = cs/2\n return zs + zs[::-1]\n\ndef _zseries_to_cseries(zs) :\n \"\"\"Covert z-series to a Chebyshev series.\n\n Covert a z series to the equivalent Chebyshev series. The result is\n never an empty array. The dtype of the return is the same as that of\n the input. No checks are run on the arguments as this routine is for\n internal use.\n\n Parameters\n ----------\n zs : 1-d ndarray\n Odd length symmetric z-series, ordered from low to high.\n\n Returns\n -------\n cs : 1-d ndarray\n Chebyshev coefficients, ordered from low to high.\n\n \"\"\"\n n = (zs.size + 1)//2\n cs = zs[n-1:].copy()\n cs[1:n] *= 2\n return cs\n\ndef _zseries_mul(z1, z2) :\n \"\"\"Multiply two z-series.\n\n Multiply two z-series to produce a z-series.\n\n Parameters\n ----------\n z1, z2 : 1-d ndarray\n The arrays must be 1-d but this is not checked.\n\n Returns\n -------\n product : 1-d ndarray\n The product z-series.\n\n Notes\n -----\n This is simply convolution. If symmetic/anti-symmetric z-series are\n denoted by S/A then the following rules apply:\n\n S*S, A*A -> S\n S*A, A*S -> A\n\n \"\"\"\n return np.convolve(z1, z2)\n\ndef _zseries_div(z1, z2) :\n \"\"\"Divide the first z-series by the second.\n\n Divide `z1` by `z2` and return the quotient and remainder as z-series.\n Warning: this implementation only applies when both z1 and z2 have the\n same symmetry, which is sufficient for present purposes.\n\n Parameters\n ----------\n z1, z2 : 1-d ndarray\n The arrays must be 1-d and have the same symmetry, but this is not\n checked.\n\n Returns\n -------\n\n (quotient, remainder) : 1-d ndarrays\n Quotient and remainder as z-series.\n\n Notes\n -----\n This is not the same as polynomial division on account of the desired form\n of the remainder. If symmetic/anti-symmetric z-series are denoted by S/A\n then the following rules apply:\n\n S/S -> S,S\n A/A -> S,A\n\n The restriction to types of the same symmetry could be fixed but seems like\n uneeded generality. There is no natural form for the remainder in the case\n where there is no symmetry.\n\n \"\"\"\n z1 = z1.copy()\n z2 = z2.copy()\n len1 = len(z1)\n len2 = len(z2)\n if len2 == 1 :\n z1 /= z2\n return z1, z1[:1]*0\n elif len1 < len2 :\n return z1[:1]*0, z1\n else :\n dlen = len1 - len2\n scl = z2[0]\n z2 /= scl\n quo = np.empty(dlen + 1, dtype=z1.dtype)\n i = 0\n j = dlen\n while i < j :\n r = z1[i]\n quo[i] = z1[i]\n quo[dlen - i] = r\n tmp = r*z2\n z1[i:i+len2] -= tmp\n z1[j:j+len2] -= tmp\n i += 1\n j -= 1\n r = z1[i]\n quo[i] = r\n tmp = r*z2\n z1[i:i+len2] -= tmp\n quo /= scl\n rem = z1[i+1:i-1+len2].copy()\n return quo, rem\n\ndef _zseries_der(zs) :\n \"\"\"Differentiate a z-series.\n\n The derivative is with respect to x, not z. This is achieved using the\n chain rule and the value of dx/dz given in the module notes.\n\n Parameters\n ----------\n zs : z-series\n The z-series to differentiate.\n\n Returns\n -------\n derivative : z-series\n The derivative\n\n Notes\n -----\n The zseries for x (ns) has been multiplied by two in order to avoid\n using floats that are incompatible with Decimal and likely other\n specialized scalar types. 
This scaling has been compensated by\n multiplying the value of zs by two also so that the two cancels in the\n division.\n\n \"\"\"\n n = len(zs)//2\n ns = np.array([-1, 0, 1], dtype=zs.dtype)\n zs *= np.arange(-n, n+1)*2\n d, r = _zseries_div(zs, ns)\n return d\n\ndef _zseries_int(zs) :\n \"\"\"Integrate a z-series.\n\n The integral is with respect to x, not z. This is achieved by a change\n of variable using dx/dz given in the module notes.\n\n Parameters\n ----------\n zs : z-series\n The z-series to integrate\n\n Returns\n -------\n integral : z-series\n The indefinite integral\n\n Notes\n -----\n The zseries for x (ns) has been multiplied by two in order to avoid\n using floats that are incompatible with Decimal and likely other\n specialized scalar types. This scaling has been compensated by\n dividing the resulting zs by two.\n\n \"\"\"\n n = 1 + len(zs)//2\n ns = np.array([-1, 0, 1], dtype=zs.dtype)\n zs = _zseries_mul(zs, ns)\n div = np.arange(-n, n+1)*2\n zs[:n] /= div[:n]\n zs[n+1:] /= div[n+1:]\n zs[n] = 0\n return zs\n\n#\n# Chebyshev series functions\n#\n\n\ndef poly2cheb(pol) :\n \"\"\"\n Convert a polynomial to a Chebyshev series.\n\n Convert an array representing the coefficients of a polynomial (relative\n to the \"standard\" basis) ordered from lowest degree to highest, to an\n array of the coefficients of the equivalent Chebyshev series, ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n pol : array_like\n 1-d array containing the polynomial coefficients\n\n Returns\n -------\n cs : ndarray\n 1-d array containing the coefficients of the equivalent Chebyshev\n series.\n\n See Also\n --------\n cheb2poly\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy import polynomial as P\n >>> p = P.Polynomial(range(4))\n >>> p\n Polynomial([ 0., 1., 2., 3.], [-1., 1.])\n >>> c = p.convert(kind=P.Chebyshev)\n >>> c\n Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])\n >>> P.poly2cheb(range(4))\n array([ 1. , 3.25, 1. 
, 0.75])\n\n \"\"\"\n [pol] = pu.as_series([pol])\n deg = len(pol) - 1\n res = 0\n for i in range(deg, -1, -1) :\n res = chebadd(chebmulx(res), pol[i])\n return res\n\n\ndef cheb2poly(cs) :\n \"\"\"\n Convert a Chebyshev series to a polynomial.\n\n Convert an array representing the coefficients of a Chebyshev series,\n ordered from lowest degree to highest, to an array of the coefficients\n of the equivalent polynomial (relative to the \"standard\" basis) ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n cs : array_like\n 1-d array containing the Chebyshev series coefficients, ordered\n from lowest order term to highest.\n\n Returns\n -------\n pol : ndarray\n 1-d array containing the coefficients of the equivalent polynomial\n (relative to the \"standard\" basis) ordered from lowest order term\n to highest.\n\n See Also\n --------\n poly2cheb\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy import polynomial as P\n >>> c = P.Chebyshev(range(4))\n >>> c\n Chebyshev([ 0., 1., 2., 3.], [-1., 1.])\n >>> p = c.convert(kind=P.Polynomial)\n >>> p\n Polynomial([ -2., -8., 4., 12.], [-1., 1.])\n >>> P.cheb2poly(range(4))\n array([ -2., -8., 4., 12.])\n\n \"\"\"\n from .polynomial import polyadd, polysub, polymulx\n\n [cs] = pu.as_series([cs])\n n = len(cs)\n if n < 3:\n return cs\n else:\n c0 = cs[-2]\n c1 = cs[-1]\n # i is the current degree of c1\n for i in range(n - 1, 1, -1) :\n tmp = c0\n c0 = polysub(cs[i - 2], c1)\n c1 = polyadd(tmp, polymulx(c1)*2)\n return polyadd(c0, polymulx(c1))\n\n\n#\n# These are constant arrays are of integer type so as to be compatible\n# with the widest range of other types, such as Decimal.\n#\n\n# Chebyshev default domain.\nchebdomain = np.array([-1,1])\n\n# Chebyshev coefficients representing zero.\nchebzero = np.array([0])\n\n# Chebyshev coefficients representing one.\nchebone = np.array([1])\n\n# Chebyshev coefficients representing the identity x.\nchebx = np.array([0,1])\n\ndef chebline(off, scl) :\n \"\"\"\n Chebyshev series whose graph is a straight line.\n\n\n\n Parameters\n ----------\n off, scl : scalars\n The specified line is given by ``off + scl*x``.\n\n Returns\n -------\n y : ndarray\n This module's representation of the Chebyshev series for\n ``off + scl*x``.\n\n See Also\n --------\n polyline\n\n Examples\n --------\n >>> import numpy.polynomial.chebyshev as C\n >>> C.chebline(3,2)\n array([3, 2])\n >>> C.chebval(-3, C.chebline(3,2)) # should be -3\n -3.0\n\n \"\"\"\n if scl != 0 :\n return np.array([off,scl])\n else :\n return np.array([off])\n\ndef chebfromroots(roots) :\n \"\"\"\n Generate a Chebyshev series with the given roots.\n\n Return the array of coefficients for the C-series whose roots (a.k.a.\n \"zeros\") are given by *roots*. The returned array of coefficients is\n ordered from lowest order \"term\" to highest, and zeros of multiplicity\n greater than one must be included in *roots* a number of times equal\n to their multiplicity (e.g., if `2` is a root of multiplicity three,\n then [2,2,2] must be in *roots*).\n\n Parameters\n ----------\n roots : array_like\n Sequence containing the roots.\n\n Returns\n -------\n out : ndarray\n 1-d array of the C-series' coefficients, ordered from low to\n high. 
If all roots are real, ``out.dtype`` is a float type;\n otherwise, ``out.dtype`` is a complex type, even if all the\n coefficients in the result are real (see Examples below).\n\n See Also\n --------\n polyfromroots\n\n Notes\n -----\n What is returned are the :math:`c_i` such that:\n\n .. math::\n\n \\\\sum_{i=0}^{n} c_i*T_i(x) = \\\\prod_{i=0}^{n} (x - roots[i])\n\n where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev\n (basis) polynomial over the domain `[-1,1]`. Note that, unlike\n `polyfromroots`, due to the nature of the C-series basis set, the\n above identity *does not* imply :math:`c_n = 1` identically (see\n Examples).\n\n Examples\n --------\n >>> import numpy.polynomial.chebyshev as C\n >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis\n array([ 0. , -0.25, 0. , 0.25])\n >>> j = complex(0,1)\n >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis\n array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])\n\n \"\"\"\n if len(roots) == 0 :\n return np.ones(1)\n else :\n [roots] = pu.as_series([roots], trim=False)\n roots.sort()\n p = [chebline(-r, 1) for r in roots]\n n = len(p)\n while n > 1:\n m, r = divmod(n, 2)\n tmp = [chebmul(p[i], p[i+m]) for i in range(m)]\n if r:\n tmp[0] = chebmul(tmp[0], p[-1])\n p = tmp\n n = m\n return p[0]\n\n\ndef chebadd(c1, c2):\n \"\"\"\n Add one Chebyshev series to another.\n\n Returns the sum of two Chebyshev series `c1` + `c2`. The arguments\n are sequences of coefficients ordered from lowest order term to\n highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-d arrays of Chebyshev series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the Chebyshev series of their sum.\n\n See Also\n --------\n chebsub, chebmul, chebdiv, chebpow\n\n Notes\n -----\n Unlike multiplication, division, etc., the sum of two Chebyshev series\n is a Chebyshev series (without having to \"reproject\" the result onto\n the basis set) so addition, just like that of \"standard\" polynomials,\n is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> c1 = (1,2,3)\n >>> c2 = (3,2,1)\n >>> C.chebadd(c1,c2)\n array([ 4., 4., 4.])\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if len(c1) > len(c2) :\n c1[:c2.size] += c2\n ret = c1\n else :\n c2[:c1.size] += c1\n ret = c2\n return pu.trimseq(ret)\n\n\ndef chebsub(c1, c2):\n \"\"\"\n Subtract one Chebyshev series from another.\n\n Returns the difference of two Chebyshev series `c1` - `c2`. 
The\n sequences of coefficients are from lowest order term to highest, i.e.,\n [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-d arrays of Chebyshev series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Chebyshev series coefficients representing their difference.\n\n See Also\n --------\n chebadd, chebmul, chebdiv, chebpow\n\n Notes\n -----\n Unlike multiplication, division, etc., the difference of two Chebyshev\n series is a Chebyshev series (without having to \"reproject\" the result\n onto the basis set) so subtraction, just like that of \"standard\"\n polynomials, is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> c1 = (1,2,3)\n >>> c2 = (3,2,1)\n >>> C.chebsub(c1,c2)\n array([-2., 0., 2.])\n >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)\n array([ 2., 0., -2.])\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if len(c1) > len(c2) :\n c1[:c2.size] -= c2\n ret = c1\n else :\n c2 = -c2\n c2[:c1.size] += c1\n ret = c2\n return pu.trimseq(ret)\n\n\ndef chebmulx(cs):\n \"\"\"Multiply a Chebyshev series by x.\n\n Multiply the polynomial `cs` by x, where x is the independent\n variable.\n\n\n Parameters\n ----------\n cs : array_like\n 1-d array of Chebyshev series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the result of the multiplication.\n\n Notes\n -----\n .. versionadded:: 1.5.0\n\n \"\"\"\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n # The zero series needs special treatment\n if len(cs) == 1 and cs[0] == 0:\n return cs\n\n prd = np.empty(len(cs) + 1, dtype=cs.dtype)\n prd[0] = cs[0]*0\n prd[1] = cs[0]\n if len(cs) > 1:\n tmp = cs[1:]/2\n prd[2:] = tmp\n prd[0:-2] += tmp\n return prd\n\n\ndef chebmul(c1, c2):\n \"\"\"\n Multiply one Chebyshev series by another.\n\n Returns the product of two Chebyshev series `c1` * `c2`. The arguments\n are sequences of coefficients, from lowest order \"term\" to highest,\n e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-d arrays of Chebyshev series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Chebyshev series coefficients representing their product.\n\n See Also\n --------\n chebadd, chebsub, chebdiv, chebpow\n\n Notes\n -----\n In general, the (polynomial) product of two C-series results in terms\n that are not in the Chebyshev polynomial basis set. Thus, to express\n the product as a C-series, it is typically necessary to \"re-project\"\n the product onto said basis set, which typically produces\n \"un-intuitive\" (but correct) results; see Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> c1 = (1,2,3)\n >>> c2 = (3,2,1)\n >>> C.chebmul(c1,c2) # multiplication requires \"reprojection\"\n array([ 6.5, 12. , 12. , 4. , 1.5])\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n z1 = _cseries_to_zseries(c1)\n z2 = _cseries_to_zseries(c2)\n prd = _zseries_mul(z1, z2)\n ret = _zseries_to_cseries(prd)\n return pu.trimseq(ret)\n\n\ndef chebdiv(c1, c2):\n \"\"\"\n Divide one Chebyshev series by another.\n\n Returns the quotient-with-remainder of two Chebyshev series\n `c1` / `c2`. 
The arguments are sequences of coefficients from lowest\n order \"term\" to highest, e.g., [1,2,3] represents the series\n ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-d arrays of Chebyshev series coefficients ordered from low to\n high.\n\n Returns\n -------\n [quo, rem] : ndarrays\n Of Chebyshev series coefficients representing the quotient and\n remainder.\n\n See Also\n --------\n chebadd, chebsub, chebmul, chebpow\n\n Notes\n -----\n In general, the (polynomial) division of one C-series by another\n results in quotient and remainder terms that are not in the Chebyshev\n polynomial basis set. Thus, to express these results as C-series, it\n is typically necessary to \"re-project\" the results onto said basis\n set, which typically produces \"un-intuitive\" (but correct) results;\n see Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> c1 = (1,2,3)\n >>> c2 = (3,2,1)\n >>> C.chebdiv(c1,c2) # quotient \"intuitive,\" remainder not\n (array([ 3.]), array([-8., -4.]))\n >>> c2 = (0,1,2,3)\n >>> C.chebdiv(c2,c1) # neither \"intuitive\"\n (array([ 0., 2.]), array([-2., -4.]))\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if c2[-1] == 0 :\n raise ZeroDivisionError()\n\n lc1 = len(c1)\n lc2 = len(c2)\n if lc1 < lc2 :\n return c1[:1]*0, c1\n elif lc2 == 1 :\n return c1/c2[-1], c1[:1]*0\n else :\n z1 = _cseries_to_zseries(c1)\n z2 = _cseries_to_zseries(c2)\n quo, rem = _zseries_div(z1, z2)\n quo = pu.trimseq(_zseries_to_cseries(quo))\n rem = pu.trimseq(_zseries_to_cseries(rem))\n return quo, rem\n\ndef chebpow(cs, pow, maxpower=16) :\n \"\"\"Raise a Chebyshev series to a power.\n\n Returns the Chebyshev series `cs` raised to the power `pow`. The\n arguement `cs` is a sequence of coefficients ordered from low to high.\n i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``\n\n Parameters\n ----------\n cs : array_like\n 1d array of chebyshev series coefficients ordered from low to\n high.\n pow : integer\n Power to which the series will be raised\n maxpower : integer, optional\n Maximum power allowed. This is mainly to limit growth of the series\n to umanageable size. Default is 16\n\n Returns\n -------\n coef : ndarray\n Chebyshev series of power.\n\n See Also\n --------\n chebadd, chebsub, chebmul, chebdiv\n\n Examples\n --------\n\n \"\"\"\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n power = int(pow)\n if power != pow or power < 0 :\n raise ValueError(\"Power must be a non-negative integer.\")\n elif maxpower is not None and power > maxpower :\n raise ValueError(\"Power is too large\")\n elif power == 0 :\n return np.array([1], dtype=cs.dtype)\n elif power == 1 :\n return cs\n else :\n # This can be made more efficient by using powers of two\n # in the usual way.\n zs = _cseries_to_zseries(cs)\n prd = zs\n for i in range(2, power + 1) :\n prd = np.convolve(prd, zs)\n return _zseries_to_cseries(prd)\n\ndef chebder(cs, m=1, scl=1) :\n \"\"\"\n Differentiate a Chebyshev series.\n\n Returns the series `cs` differentiated `m` times. At each iteration the\n result is multiplied by `scl` (the scaling factor is for use in a linear\n change of variable). 
The argument `cs` is the sequence of coefficients\n from lowest order \"term\" to highest, e.g., [1,2,3] represents the series\n ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n cs: array_like\n 1-d array of Chebyshev series coefficients ordered from low to high.\n m : int, optional\n Number of derivatives taken, must be non-negative. (Default: 1)\n scl : scalar, optional\n Each differentiation is multiplied by `scl`. The end result is\n multiplication by ``scl**m``. This is for use in a linear change of\n variable. (Default: 1)\n\n Returns\n -------\n der : ndarray\n Chebyshev series of the derivative.\n\n See Also\n --------\n chebint\n\n Notes\n -----\n In general, the result of differentiating a C-series needs to be\n \"re-projected\" onto the C-series basis set. Thus, typically, the\n result of this function is \"un-intuitive,\" albeit correct; see Examples\n section below.\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> cs = (1,2,3,4)\n >>> C.chebder(cs)\n array([ 14., 12., 24.])\n >>> C.chebder(cs,3)\n array([ 96.])\n >>> C.chebder(cs,scl=-1)\n array([-14., -12., -24.])\n >>> C.chebder(cs,2,-1)\n array([ 12., 96.])\n\n \"\"\"\n cnt = int(m)\n\n if cnt != m:\n raise ValueError(\"The order of derivation must be integer\")\n if cnt < 0 :\n raise ValueError(\"The order of derivation must be non-negative\")\n\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n if cnt == 0:\n return cs\n elif cnt >= len(cs):\n return cs[:1]*0\n else :\n zs = _cseries_to_zseries(cs)\n for i in range(cnt):\n zs = _zseries_der(zs)*scl\n return _zseries_to_cseries(zs)\n\n\ndef chebint(cs, m=1, k=[], lbnd=0, scl=1):\n \"\"\"\n Integrate a Chebyshev series.\n\n Returns, as a C-series, the input C-series `cs`, integrated `m` times\n from `lbnd` to `x`. At each iteration the resulting series is\n **multiplied** by `scl` and an integration constant, `k`, is added.\n The scaling factor is for use in a linear change of variable. (\"Buyer\n beware\": note that, depending on what one is doing, one may want `scl`\n to be the reciprocal of what one might expect; for more information,\n see the Notes section below.) The argument `cs` is a sequence of\n coefficients, from lowest order C-series \"term\" to highest, e.g.,\n [1,2,3] represents the series :math:`T_0(x) + 2T_1(x) + 3T_2(x)`.\n\n Parameters\n ----------\n cs : array_like\n 1-d array of C-series coefficients, ordered from low to high.\n m : int, optional\n Order of integration, must be positive. (Default: 1)\n k : {[], list, scalar}, optional\n Integration constant(s). The value of the first integral at zero\n is the first value in the list, the value of the second integral\n at zero is the second value, etc. If ``k == []`` (the default),\n all constants are set to zero. If ``m == 1``, a single scalar can\n be given instead of a list.\n lbnd : scalar, optional\n The lower bound of the integral. (Default: 0)\n scl : scalar, optional\n Following each integration the result is *multiplied* by `scl`\n before the integration constant is added. (Default: 1)\n\n Returns\n -------\n S : ndarray\n C-series coefficients of the integral.\n\n Raises\n ------\n ValueError\n If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or\n ``np.isscalar(scl) == False``.\n\n See Also\n --------\n chebder\n\n Notes\n -----\n Note that the result of each integration is *multiplied* by `scl`.\n Why is this important to note? Say one is making a linear change of\n variable :math:`u = ax + b` in an integral relative to `x`. 
Then\n :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`\n - perhaps not what one would have first thought.\n\n Also note that, in general, the result of integrating a C-series needs\n to be \"re-projected\" onto the C-series basis set. Thus, typically,\n the result of this function is \"un-intuitive,\" albeit correct; see\n Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial import chebyshev as C\n >>> cs = (1,2,3)\n >>> C.chebint(cs)\n array([ 0.5, -0.5, 0.5, 0.5])\n >>> C.chebint(cs,3)\n array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,\n 0.00625 ])\n >>> C.chebint(cs, k=3)\n array([ 3.5, -0.5, 0.5, 0.5])\n >>> C.chebint(cs,lbnd=-2)\n array([ 8.5, -0.5, 0.5, 0.5])\n >>> C.chebint(cs,scl=-2)\n array([-1., 1., -1., -1.])\n\n \"\"\"\n cnt = int(m)\n if not np.iterable(k):\n k = [k]\n\n if cnt != m:\n raise ValueError(\"The order of integration must be integer\")\n if cnt < 0 :\n raise ValueError(\"The order of integration must be non-negative\")\n if len(k) > cnt :\n raise ValueError(\"Too many integration constants\")\n\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n if cnt == 0:\n return cs\n\n k = list(k) + [0]*(cnt - len(k))\n for i in range(cnt) :\n n = len(cs)\n cs *= scl\n if n == 1 and cs[0] == 0:\n cs[0] += k[i]\n else:\n zs = _cseries_to_zseries(cs)\n zs = _zseries_int(zs)\n cs = _zseries_to_cseries(zs)\n cs[0] += k[i] - chebval(lbnd, cs)\n return cs\n\ndef chebval(x, cs):\n \"\"\"Evaluate a Chebyshev series.\n\n If `cs` is of length `n`, this function returns :\n\n ``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)``\n\n If x is a sequence or array then p(x) will have the same shape as x.\n If r is a ring_like object that supports multiplication and addition\n by the values in `cs`, then an object of the same type is returned.\n\n Parameters\n ----------\n x : array_like, ring_like\n Array of numbers or objects that support multiplication and\n addition with themselves and with the elements of `cs`.\n cs : array_like\n 1-d array of Chebyshev coefficients ordered from low to high.\n\n Returns\n -------\n values : ndarray, ring_like\n If the return is an ndarray then it has the same shape as `x`.\n\n See Also\n --------\n chebfit\n\n Examples\n --------\n\n Notes\n -----\n The evaluation uses Clenshaw recursion, aka synthetic division.\n\n Examples\n --------\n\n \"\"\"\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n if isinstance(x, tuple) or isinstance(x, list) :\n x = np.asarray(x)\n\n if len(cs) == 1 :\n c0 = cs[0]\n c1 = 0\n elif len(cs) == 2 :\n c0 = cs[0]\n c1 = cs[1]\n else :\n x2 = 2*x\n c0 = cs[-2]\n c1 = cs[-1]\n for i in range(3, len(cs) + 1) :\n tmp = c0\n c0 = cs[-i] - c1\n c1 = tmp + c1*x2\n return c0 + c1*x\n\ndef chebvander(x, deg) :\n \"\"\"Vandermonde matrix of given degree.\n\n Returns the Vandermonde matrix of degree `deg` and sample points `x`.\n This isn't a true Vandermonde matrix because `x` can be an arbitrary\n ndarray and the Chebyshev polynomials aren't powers. If ``V`` is the\n returned matrix and `x` is a 2d array, then the elements of ``V`` are\n ``V[i,j,k] = T_k(x[i,j])``, where ``T_k`` is the Chebyshev polynomial\n of degree ``k``.\n\n Parameters\n ----------\n x : array_like\n Array of points. The values are converted to double or complex\n doubles. If x is scalar it is converted to a 1D array.\n deg : integer\n Degree of the resulting matrix.\n\n Returns\n -------\n vander : Vandermonde matrix.\n The shape of the returned matrix is ``x.shape + (deg+1,)``. 
The last\n index is the degree.\n\n \"\"\"\n ideg = int(deg)\n if ideg != deg:\n raise ValueError(\"deg must be integer\")\n if ideg < 0:\n raise ValueError(\"deg must be non-negative\")\n\n x = np.array(x, copy=0, ndmin=1) + 0.0\n v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)\n # Use forward recursion to generate the entries.\n v[0] = x*0 + 1\n if ideg > 0 :\n x2 = 2*x\n v[1] = x\n for i in range(2, ideg + 1) :\n v[i] = v[i-1]*x2 - v[i-2]\n return np.rollaxis(v, 0, v.ndim)\n\n\ndef chebfit(x, y, deg, rcond=None, full=False, w=None):\n \"\"\"\n Least squares fit of Chebyshev series to data.\n\n Return the coefficients of a Legendre series of degree `deg` that is the\n least squares fit to the data values `y` given at points `x`. If `y` is\n 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple\n fits are done, one for each column of `y`, and the resulting\n coefficients are stored in the corresponding columns of a 2-D return.\n The fitted polynomial(s) are in the form\n\n .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),\n\n where `n` is `deg`.\n\n Parameters\n ----------\n x : array_like, shape (M,)\n x-coordinates of the M sample points ``(x[i], y[i])``.\n y : array_like, shape (M,) or (M, K)\n y-coordinates of the sample points. Several data sets of sample\n points sharing the same x-coordinates can be fitted at once by\n passing in a 2D-array that contains one dataset per column.\n deg : int\n Degree of the fitting series\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller than\n this relative to the largest singular value will be ignored. The\n default value is len(x)*eps, where eps is the relative precision of\n the float type, about 2e-16 in most cases.\n full : bool, optional\n Switch determining nature of return value. When it is False (the\n default) just the coefficients are returned, when True diagnostic\n information from the singular value decomposition is also returned.\n w : array_like, shape (`M`,), optional\n Weights. If not None, the contribution of each point\n ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the\n weights are chosen so that the errors of the products ``w[i]*y[i]``\n all have the same variance. The default value is None.\n\n .. versionadded:: 1.5.0\n\n Returns\n -------\n coef : ndarray, shape (M,) or (M, K)\n Chebyshev coefficients ordered from low to high. If `y` was 2-D,\n the coefficients for the data in column k of `y` are in column\n `k`.\n\n [residuals, rank, singular_values, rcond] : present when `full` = True\n Residuals of the least-squares fit, the effective rank of the\n scaled Vandermonde matrix and its singular values, and the\n specified value of `rcond`. For more details, see `linalg.lstsq`.\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is\n deficient. The warning is only raised if `full` = False. The\n warnings can be turned off by\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', RankWarning)\n\n See Also\n --------\n polyfit, legfit, lagfit, hermfit, hermefit\n chebval : Evaluates a Chebyshev series.\n chebvander : Vandermonde matrix of Chebyshev series.\n chebweight : Chebyshev weight function.\n linalg.lstsq : Computes a least-squares fit from the matrix.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution is the coefficients of the Chebyshev series `p` that\n minimizes the sum of the weighted squared errors\n\n .. 
math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\n\n where :math:`w_j` are the weights. This problem is solved by setting up\n as the (typically) overdetermined matrix equation\n\n .. math:: V(x) * c = w * y,\n\n where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the\n coefficients to be solved for, `w` are the weights, and `y` are the\n observed values. This equation is then solved using the singular value\n decomposition of `V`.\n\n If some of the singular values of `V` are so small that they are\n neglected, then a `RankWarning` will be issued. This means that the\n coeficient values may be poorly determined. Using a lower order fit\n will usually get rid of the warning. The `rcond` parameter can also be\n set to a value smaller than its default, but the resulting fit may be\n spurious and have large contributions from roundoff error.\n\n Fits using Chebyshev series are usually better conditioned than fits\n using power series, but much can depend on the distribution of the\n sample points and the smoothness of the data. If the quality of the fit\n is inadequate splines may be a good alternative.\n\n References\n ----------\n .. [1] Wikipedia, \"Curve fitting\",\n http://en.wikipedia.org/wiki/Curve_fitting\n\n Examples\n --------\n\n \"\"\"\n order = int(deg) + 1\n x = np.asarray(x) + 0.0\n y = np.asarray(y) + 0.0\n\n # check arguments.\n if deg < 0 :\n raise ValueError(\"expected deg >= 0\")\n if x.ndim != 1:\n raise TypeError(\"expected 1D vector for x\")\n if x.size == 0:\n raise TypeError(\"expected non-empty vector for x\")\n if y.ndim < 1 or y.ndim > 2 :\n raise TypeError(\"expected 1D or 2D array for y\")\n if len(x) != len(y):\n raise TypeError(\"expected x and y to have same length\")\n\n # set up the least squares matrices\n lhs = chebvander(x, deg)\n rhs = y\n if w is not None:\n w = np.asarray(w) + 0.0\n if w.ndim != 1:\n raise TypeError(\"expected 1D vector for w\")\n if len(x) != len(w):\n raise TypeError(\"expected x and w to have same length\")\n # apply weights\n if rhs.ndim == 2:\n lhs *= w[:, np.newaxis]\n rhs *= w[:, np.newaxis]\n else:\n lhs *= w[:, np.newaxis]\n rhs *= w\n\n # set rcond\n if rcond is None :\n rcond = len(x)*np.finfo(x.dtype).eps\n\n # scale the design matrix and solve the least squares equation\n scl = np.sqrt((lhs*lhs).sum(0))\n c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)\n c = (c.T/scl).T\n\n # warn on rank reduction\n if rank != order and not full:\n msg = \"The fit may be poorly conditioned\"\n warnings.warn(msg, pu.RankWarning)\n\n if full :\n return c, [resids, rank, s, rcond]\n else :\n return c\n\n\ndef chebcompanion(cs):\n \"\"\"Return the scaled companion matrix of cs.\n\n The basis polynomials are scaled so that the companion matrix is\n symmetric when `cs` represents a single Chebyshev polynomial. This\n provides better eigenvalue estimates than the unscaled case and in the\n single polynomial case the eigenvalues are guaranteed to be real if\n np.eigvalsh is used to obtain them.\n\n Parameters\n ----------\n cs : array_like\n 1-d array of Legendre series coefficients ordered from low to high\n degree.\n\n Returns\n -------\n mat : ndarray\n Scaled companion matrix of dimensions (deg, deg).\n\n \"\"\"\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n if len(cs) < 2:\n raise ValueError('Series must have maximum degree of at least 1.')\n if len(cs) == 2:\n return np.array(-cs[0]/cs[1])\n\n n = len(cs) - 1\n mat = np.zeros((n, n), dtype=cs.dtype)\n scl = np.array([1.] 
+ [np.sqrt(.5)]*(n-1))\n top = mat.reshape(-1)[1::n+1]\n bot = mat.reshape(-1)[n::n+1]\n top[0] = np.sqrt(.5)\n top[1:] = 1/2\n bot[...] = top\n mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1])*.5\n return mat\n\n\ndef chebroots(cs):\n \"\"\"\n Compute the roots of a Chebyshev series.\n\n Return the roots (a.k.a \"zeros\") of the C-series represented by `cs`,\n which is the sequence of the C-series' coefficients from lowest order\n \"term\" to highest, e.g., [1,2,3] represents the C-series\n ``T_0 + 2*T_1 + 3*T_2``.\n\n Parameters\n ----------\n cs : array_like\n 1-d array of C-series coefficients ordered from low to high.\n\n Returns\n -------\n out : ndarray\n Array of the roots. If all the roots are real, then so is the\n dtype of ``out``; otherwise, ``out``'s dtype is complex.\n\n See Also\n --------\n polyroots\n\n Notes\n -----\n Algorithm(s) used:\n\n Remember: because the C-series basis set is different from the\n \"standard\" basis set, the results of this function *may* not be what\n one is expecting.\n\n Examples\n --------\n >>> import numpy.polynomial.chebyshev as cheb\n >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots\n array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])\n\n \"\"\"\n # cs is a trimmed copy\n [cs] = pu.as_series([cs])\n if len(cs) < 2:\n return np.array([], dtype=cs.dtype)\n if len(cs) == 2:\n return np.array([-cs[0]/cs[1]])\n\n m = chebcompanion(cs)\n r = la.eigvals(m)\n r.sort()\n return r\n\n\ndef chebpts1(npts):\n \"\"\"Chebyshev points of the first kind.\n\n Chebyshev points of the first kind are the set ``{cos(x_k)}``,\n where ``x_k = pi*(k + .5)/npts`` for k in ``range(npts}``.\n\n Parameters\n ----------\n npts : int\n Number of sample points desired.\n\n Returns\n -------\n pts : ndarray\n The Chebyshev points of the second kind.\n\n Notes\n -----\n .. versionadded:: 1.5.0\n\n \"\"\"\n _npts = int(npts)\n if _npts != npts:\n raise ValueError(\"npts must be integer\")\n if _npts < 1:\n raise ValueError(\"npts must be >= 1\")\n\n x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)\n return np.cos(x)\n\n\ndef chebpts2(npts):\n \"\"\"Chebyshev points of the second kind.\n\n Chebyshev points of the second kind are the set ``{cos(x_k)}``,\n where ``x_k = pi*/(npts - 1)`` for k in ``range(npts}``.\n\n Parameters\n ----------\n npts : int\n Number of sample points desired.\n\n Returns\n -------\n pts : ndarray\n The Chebyshev points of the second kind.\n\n Notes\n -----\n .. versionadded:: 1.5.0\n\n \"\"\"\n _npts = int(npts)\n if _npts != npts:\n raise ValueError(\"npts must be integer\")\n if _npts < 2:\n raise ValueError(\"npts must be >= 2\")\n\n x = np.linspace(-np.pi, 0, _npts)\n return np.cos(x)\n\n\n#\n# Chebyshev series class\n#\n\nexec(polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]'))\n"
] | [
[
"numpy.polynomial.legendre.Legendre.fromroots",
"numpy.polynomial.legendre.Legendre.fit",
"numpy.linspace",
"numpy.polynomial.legendre.leg2poly",
"numpy.polynomial.legendre.legdiv",
"numpy.zeros_like",
"numpy.polynomial.legendre.legfromroots",
"numpy.polynomial.legendre.Legendre.identity",
"numpy.arange",
"numpy.polynomial.legendre.legtrim",
"numpy.polynomial.legendre.legmul",
"numpy.polynomial.legendre.Legendre",
"numpy.polynomial.legendre.legint",
"numpy.polynomial.legendre.legroots",
"numpy.zeros",
"numpy.polynomial.legendre.legadd",
"numpy.polynomial.legendre.poly2leg",
"numpy.polynomial.polynomial.polyval",
"numpy.polynomial.legendre.legval",
"numpy.polynomial.legendre.legline",
"numpy.polynomial.legendre.legder",
"numpy.array",
"numpy.polynomial.legendre.legmulx",
"numpy.polynomial.legendre.legvander",
"numpy.polynomial.legendre.legsub",
"numpy.polynomial.legendre.legfit"
],
[
"numpy.linspace",
"numpy.polynomial.chebyshev.Chebyshev.fromroots",
"numpy.polynomial.chebyshev.chebder",
"numpy.polynomial.chebyshev.chebline",
"numpy.polynomial.chebyshev.chebtrim",
"numpy.polynomial.chebyshev.chebdiv",
"numpy.polynomial.chebyshev.chebfromroots",
"numpy.zeros_like",
"numpy.polynomial.chebyshev._cseries_to_zseries",
"numpy.polynomial.chebyshev.chebmulx",
"numpy.polynomial.chebyshev.Chebyshev",
"numpy.polynomial.chebyshev.chebpts1",
"numpy.polynomial.chebyshev.chebpts2",
"numpy.arange",
"numpy.polynomial.chebyshev.chebsub",
"numpy.polynomial.chebyshev.Chebyshev.identity",
"numpy.polynomial.chebyshev.chebfit",
"numpy.polynomial.chebyshev.chebvander",
"numpy.zeros",
"numpy.polynomial.chebyshev.chebmul",
"numpy.polynomial.chebyshev.chebval",
"numpy.polynomial.chebyshev.chebint",
"numpy.polynomial.chebyshev.chebadd",
"numpy.polynomial.chebyshev.chebroots",
"numpy.array",
"numpy.polynomial.chebyshev._zseries_to_cseries",
"numpy.polynomial.chebyshev.poly2cheb",
"numpy.polynomial.chebyshev.cheb2poly",
"numpy.polynomial.chebyshev.Chebyshev.fit"
],
[
"numpy.dtype"
],
[
"numpy.issubdtype",
"numpy.dtype",
"numpy.compat.asbytes",
"numpy.prod",
"numpy.compat.asbytes_nested",
"numpy.array"
],
[
"numpy.convolve",
"numpy.rollaxis",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.ones",
"numpy.finfo",
"numpy.linalg.lstsq",
"numpy.iterable",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.15",
"1.14",
"1.17"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AceCoooool/segmentation | [
"2f4d5ac193cab580eb8ba789e79db6dadcfecfd0"
] | [
"model/seg_models/pspnet.py"
] | [
"\"\"\"Pyramid Scene Parsing Network\"\"\"\nimport os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom model.seg_models.segbase import SegBaseModel\nfrom model.module.basic import _FCNHead\n\n__all__ = ['PSPNet', 'get_psp',\n 'get_psp_resnet101_voc',\n 'get_psp_resnet101_citys']\n\n\n# head\ndef _PSP1x1Conv(in_channels, out_channels):\n return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))\n\n\nclass _PyramidPooling(nn.Module):\n def __init__(self, in_channels):\n super(_PyramidPooling, self).__init__()\n out_channels = in_channels // 4\n self.conv1 = _PSP1x1Conv(in_channels, out_channels)\n self.conv2 = _PSP1x1Conv(in_channels, out_channels)\n self.conv3 = _PSP1x1Conv(in_channels, out_channels)\n self.conv4 = _PSP1x1Conv(in_channels, out_channels)\n\n @staticmethod\n def pool(x, size):\n return F.adaptive_avg_pool2d(x, output_size=size)\n\n @staticmethod\n def upsample(x, h, w):\n return F.interpolate(x, (h, w), mode='bilinear', align_corners=True)\n\n def forward(self, x):\n _, _, h, w = x.shape\n feat1 = self.upsample(self.conv1(self.pool(x, 1)), h, w)\n feat2 = self.upsample(self.conv2(self.pool(x, 2)), h, w)\n feat3 = self.upsample(self.conv3(self.pool(x, 3)), h, w)\n feat4 = self.upsample(self.conv4(self.pool(x, 4)), h, w)\n return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)\n\n\nclass _PSPHead(nn.Module):\n def __init__(self, nclass, **kwargs):\n super(_PSPHead, self).__init__(**kwargs)\n self.psp = _PyramidPooling(2048)\n self.block = list()\n self.block.append(nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False))\n self.block.append(nn.BatchNorm2d(512))\n self.block.append(nn.ReLU(inplace=True))\n self.block.append(nn.Dropout(0.1))\n self.block.append(nn.Conv2d(512, nclass, kernel_size=1))\n self.block = nn.Sequential(*self.block)\n\n def forward(self, x):\n x = self.psp(x)\n return self.block(x)\n\n\nclass PSPNet(SegBaseModel):\n def __init__(self, nclass, backbone='resnet50', aux=True, dilated=True, jpu=False,\n pretrained_base=True, base_size=520, crop_size=480, **kwargs):\n super(PSPNet, self).__init__(nclass, aux, backbone, base_size=base_size, dilated=dilated, jpu=jpu,\n crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)\n self.head = _PSPHead(nclass, **kwargs)\n if self.aux:\n self.auxlayer = _FCNHead(1024, nclass, **kwargs)\n\n self.__setattr__('others', ['head', 'auxlayer'] if self.aux else ['head'])\n\n def forward(self, x):\n c3, c4 = self.base_forward(x)\n outputs = []\n x = self.head(c4)\n x = F.interpolate(x, self._up_kwargs, mode='bilinear', align_corners=True)\n outputs.append(x)\n\n if self.aux:\n auxout = self.auxlayer(c3)\n auxout = F.interpolate(auxout, self._up_kwargs, mode='bilinear', align_corners=True)\n outputs.append(auxout)\n return tuple(outputs)\n\n\ndef get_psp(dataset='pascal_voc', backbone='resnet101', pretrained=False, pretrained_base=True,\n jpu=False, root=os.path.expanduser('~/.torch/models'), **kwargs):\n acronyms = {\n 'pascal_voc': 'voc',\n 'citys': 'citys',\n }\n from data import datasets\n # infer number of classes\n model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone,\n pretrained_base=pretrained_base, jpu=jpu, **kwargs)\n if pretrained:\n from model.model_store import get_model_file\n name = 'psp_%s_%s' % (backbone, acronyms[dataset])\n name = name + '_jpu' if jpu else name\n model.load_state_dict(torch.load(get_model_file(name, root=root)))\n return model\n\n\ndef 
get_psp_resnet101_voc(**kwargs):\n return get_psp('pascal_voc', 'resnet101', **kwargs)\n\n\ndef get_psp_resnet101_citys(**kwargs):\n return get_psp('citys', 'resnet101', **kwargs)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
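The APIs listed for this row combine into one fixed pyramid-pooling pattern: pool the feature map to a small grid, reduce channels with a 1x1 convolution, upsample back to the input resolution, and concatenate. The sketch below shows that pattern in isolation; the channel counts, pool size, and tensor shape are illustrative assumptions, not values taken from the repository.

    import torch
    from torch import nn
    import torch.nn.functional as F

    # 1x1 conv + BN + ReLU branch, as in the _PSP1x1Conv helper quoted above
    conv = nn.Sequential(nn.Conv2d(8, 2, kernel_size=1, bias=False),
                         nn.BatchNorm2d(2), nn.ReLU(inplace=True))

    x = torch.randn(1, 8, 16, 16)                      # NCHW feature map (toy shape)
    pooled = F.adaptive_avg_pool2d(x, output_size=2)   # pool to a 2x2 grid
    feat = F.interpolate(conv(pooled), size=(16, 16),
                         mode='bilinear', align_corners=True)  # upsample back
    out = torch.cat([x, feat], dim=1)                  # concatenate along channels
    print(out.shape)                                   # torch.Size([1, 10, 16, 16])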
joanvaquer/SDV | [
"83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28"
] | [
"tests/test_modeler.py"
] | [
"from unittest import TestCase\nfrom unittest.mock import Mock, call\n\nimport pandas as pd\n\nfrom sdv.metadata import Metadata\nfrom sdv.modeler import Modeler\nfrom sdv.models.base import SDVModel\nfrom sdv.models.copulas import GaussianCopula\n\n\nclass TestModeler(TestCase):\n\n def test___init__default(self):\n \"\"\"Test create new Modeler instance with default values\"\"\"\n # Run\n modeler = Modeler('test')\n\n # Asserts\n assert modeler.models == dict()\n assert modeler.metadata == 'test'\n assert modeler.model == GaussianCopula\n assert modeler.model_kwargs == dict()\n\n def test___init__with_arguments(self):\n # Run\n model = Mock()\n modeler = Modeler({'some': 'metadata'}, model=model, model_kwargs={'some': 'kwargs'})\n\n # Asserts\n assert modeler.models == dict()\n assert modeler.metadata == {'some': 'metadata'}\n assert modeler.model == model\n assert modeler.model_kwargs == {'some': 'kwargs'}\n\n def test__get_extensions(self):\n \"\"\"Test get list of extensions from childs\"\"\"\n # Setup\n model = Mock(spec=SDVModel)\n model.return_value = model\n model.get_parameters.side_effect = [\n {'model': 'data 1'},\n {'model': 'data 2'},\n {'model': 'data 3'}\n ]\n\n modeler = Mock(spec=Modeler)\n modeler.model = model\n modeler.model_kwargs = dict()\n modeler.metadata = Mock(spec=Metadata)\n\n # Run\n child_table = pd.DataFrame({'foo': ['aaa', 'bbb', 'ccc']})\n result = Modeler._get_extension(modeler, 'some_name', child_table, 'foo')\n\n # Asserts\n expected = pd.DataFrame({\n '__some_name__model': ['data 1', 'data 2', 'data 3'],\n '__some_name__child_rows': [1, 1, 1]\n }, index=['aaa', 'bbb', 'ccc'])\n pd.testing.assert_frame_equal(result, expected)\n assert model.get_parameters.call_count == 3\n\n def test_cpa_with_tables_no_primary_key(self):\n \"\"\"Test CPA with tables and no primary key.\"\"\"\n # Setup\n modeler = Mock(spec=Modeler)\n modeler.metadata = Mock(spec=Metadata)\n modeler.model = Mock(spec=SDVModel)\n modeler.model_kwargs = dict()\n modeler.models = dict()\n modeler.table_sizes = {'data': 5}\n modeler.metadata.transform.return_value = pd.DataFrame({'data': [1, 2, 3]})\n modeler.metadata.get_primary_key.return_value = None\n\n # Run\n tables = {'test': pd.DataFrame({'data': ['a', 'b', 'c']})}\n result = Modeler.cpa(modeler, 'test', tables)\n\n # Asserts\n expected = pd.DataFrame({'data': [1, 2, 3]})\n expected_transform_call = pd.DataFrame({'data': ['a', 'b', 'c']})\n\n assert modeler.metadata.load_table.call_count == 0\n assert modeler.metadata.transform.call_args[0][0] == 'test'\n pd.testing.assert_frame_equal(\n modeler.metadata.transform.call_args[0][1],\n expected_transform_call\n )\n pd.testing.assert_frame_equal(result, expected)\n\n def test_model_database(self):\n \"\"\"Test model using RCPA\"\"\"\n # Setup\n def rcpa_side_effect(table_name, tables):\n tables[table_name] = table_name\n\n metadata_table_names = ['foo', 'bar', 'tar']\n metadata_parents = [None, 'bar_parent', None]\n\n modeler = Mock()\n modeler.metadata.get_tables.return_value = metadata_table_names\n modeler.metadata.get_parents.side_effect = metadata_parents\n modeler.rcpa.side_effect = rcpa_side_effect\n modeler.models = dict()\n\n # Run\n Modeler.model_database(modeler)\n\n # Asserts\n expected_metadata_parents_call_count = 3\n expected_metadata_parents_call = [call('foo'), call('bar'), call('tar')]\n assert modeler.metadata.get_parents.call_count == expected_metadata_parents_call_count\n assert modeler.metadata.get_parents.call_args_list == expected_metadata_parents_call\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
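The two pandas calls listed for this row are the core of the assertions in the quoted tests: build the expected frame, index included, and compare it element-wise. A minimal, self-contained version of that pattern, with made-up column and index values:

    import pandas as pd

    result = pd.DataFrame({'__t__child_rows': [1, 1, 1]}, index=['aaa', 'bbb', 'ccc'])
    expected = pd.DataFrame({'__t__child_rows': [1, 1, 1]}, index=['aaa', 'bbb', 'ccc'])

    # Raises an AssertionError describing the first mismatch; passes silently here.
    pd.testing.assert_frame_equal(result, expected)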
greerviau/HackUMass | [
"25ef2ea9fecbe4bbfa91f0a9f32bd9f2703a176a"
] | [
"cache.py"
] | [
"import numpy as np\nimport math\n\nclass Cache():\n def __init__(self, max_size=10):\n self.cache = []\n self.size = 0\n self.max_size=max_size\n \n def add(self, element):\n self.cache.append(element)\n self.size+=1\n if self.size > self.max_size:\n del self.cache[0]\n self.size = self.max_size\n \n def mean(self):\n return np.mean(np.array(self.cache), axis=0)\n\n def empty(self):\n return self.size == 0\n\n def get_size(self):\n return self.size\n\n def get_last(self):\n return self.cache[self.size-1]\n\n def print_cache(self):\n for e in self.cache:\n print(e)\n\n \nif __name__ == '__main__':\n print('===Test Cache===')\n cache = Cache(max_size=5)\n cache.add([5,4])\n print(cache.get_size())\n print(cache.print_cache())\n\n cache.add([8,1])\n cache.add([3,2])\n cache.add([4,5])\n cache.add([6,2])\n print(cache.get_size())\n print(cache.print_cache())\n\n cache.add([1,4])\n print(cache.get_size())\n print(cache.print_cache())\n print(cache.mean())\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
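The single numpy call recorded for this row backs the Cache.mean method: a list of equal-length entries is stacked into an array and averaged column-wise. A one-line illustration with toy values:

    import numpy as np

    cache = [[5, 4], [8, 1], [3, 2]]
    print(np.mean(np.array(cache), axis=0))  # [5.33333333 2.33333333]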
rohinkumar/CorrelCalc | [
"d7887448af8d3dc3170c00c0aae6ee2561b8a3d5"
] | [
"correlcalc/antpcf.py"
] | [
"__author__ = 'Rohin Kumar Y'\n\n\n# Calculate anisotropic 2pCF\nfrom tpcf import *\nimport scipy as sp\n# antpcf(dat,datR,bins,parmetric,permetric) returns numpy 2d array DD, RR, DR correl\n# poserr(xi,DD) returns (1.0+xi)/np.sqrt(DD)\n\n\ndef atpcf(datfile, binspar, binsper, **kwargs):\n \"\"\"Main function to calculate anisotropic 2pCF. Takes multiple arguments such as randfile, maskfile, calculation estimator etc. for different geometry, cosmology models\n Usage of the package is given in jupyter notebook \"Using correlcalc example-anisotropic.nb\" and in `main.py`\n\n All the methods in correlcalc can be imported using the following command\n\n `from correlcalc import *`\n\n We first need to define bins (in $c/H_0$ units) to calculate 2pCF. For e.g. to calculate correlation between 0-180Mpc in steps of 6Mpc, we say\n\n `bins=np.arange(0.002,0.06,0.002)`\n\n To calculate anisotropic 2pCF using input data file (both ascii and fits files are supported), use `atpcf` method as follows\n\n `correl3d, poserr=atpcf('/path/to/datfile.dat',binspar, binsper, randfile='/path/to/randomfile.dat', vtype='sigpi', weights=True)`\n\n\n If random file is not available or not provided, we can generate random catalog by providing the mangle mask file in `.ply` format along with specifying the size of the catalog in multiples of size of data catalog (default 2x size). To do this\n\n `correl3d, poserr=atpcf('/path/to/datfile.dat', binspar, binsper, maskfile='/path/to/maskfile.ply', vtype='smu', weights='eq', randfact=3)`\n\n This returns `correl3d` and `poserr` `numpy` arrays corresponding to anisotropic Two-point correlation and Poisson error\n\n ### Keyword Arguments\n The following keyword arguments can be included as needed\n\n #### Data file (Mandatory)\n\n Data file of galaxy/quasar redshift survey must be passed as the first argument to both `tpcf` and `atpcf` methods.\n\n **Supported filetypes**: ascii text files with columns, csv files or fits files are all supported. Most files provided by SDSS Value added catalogs should be directly usable.\n\n **To contain**: Any type of file provided must at least have columns named **Z** (redshift), **RA** (Right Ascension), **DEC** (Declination). These column names can be in any case.\n\n If one intends to use `weights=True` option (must to obtain accurate results) the data file must also contain radial weights with column title **radial_weight** or **WEIGHT_SYSTOT**\n\n #### binspar (Mandatory)\n\n A numpy array with ascending values in $c/H_0$ units (for distances) or $\\delta z$ as per choice of `'vtype'` must be provided as the second argument to `atpcf` method.\n\n #### binsper (Mandatory)\n\n A numpy array with ascending values in $c/H_0$ units (for distances), $z\\delta \\theta$ or $\\mu = \\cos \\alpha$ must be provided as the third argument to `atpcf` method.\n\n\n #### `randfile=` Path to random file (semi-Optional)\n\n If not provided, `maskfile=` argument must be given `.ply` file.\n\n **Supported filetypes**: ascii text files with columns, csv files or fits files are all supported. Most files provided by SDSS Value added catalogs should be directly usable.\n\n **To contain**: Any type of file provided must at least have columns named **Z** (redshift), **RA** (Right Ascension), **DEC** (Declination). 
These column names can be in any case.\n\n If one intends to use `weights=True` option the data file must also contain radial weights with column title **radial_weight** or **WEIGHT_SYSTOT**\n\n **Beta Testing:** Beta support for other column titles for weights is added.\n\n Also added is calculation of weights from n(z) during random catalog generation.\n\n #### `mask=` Path to mangle polygon file (semi-Optional)\n\n If not provided, `randfile=` argument must be provided.\n\n **Supported filetypes**: `.ply` file containing Mangle polygons describing survey geometry in the standard format. Most files provided by SDSS Value added catalogs should be directly usable.\n\n #### `randfact=` (Optional)\n\n Size of the random catalog in integer multiples of size of data catalog if random catalog file is not provided. Default value is `2`\n\n #### `weights=` (Optional)\n\n It is highly recommended to use weights argument by providing `weights=True` or `weights='eq'` to obtain accurate two-point correlation calculations. This picks up radial weights in the prescribed format (with column title **radial_weight** or **WEIGHT_SYSTOT** ) from the data and random files provided.\n\n `weights=`eq'` sets equal weights and hence adds *+1* - This implementation is parallelized and is faster than `weights=False` implementation on most machines\n\n If `weights=False`, by default *+1* will be added for each galaxy/random pair found within the bin instead of adding total weight. For more details on weights and references, see http://www.sdss3.org/dr9/tutorials/lss_galaxy.php\n\n #### Metrics in parallel and perpendicular directions\n\n Calculates anisotropic 2pCF for the following cases.\n\n #### `vtype=`\n\n Valuation method\n\n **Available options**:\n\n `'smu'` (default)- Calculates 2pCF in s - mu\n\n `'sigpi'` - Calculates 2pCF using parallel and perpendicular distances\n\n `'ap'` calculates 2pCF for small $\\Delta \\theta$ and $z \\Delta\\theta$ . But results can be converted to any cosmology model of choice (ref: https://arxiv.org/pdf/1312.0003.pdf)\n\n **Customization**\n\n Formulae for calculation of distances in parallel and perpendicular directions is taken from https://arxiv.org/pdf/1312.0003.pdf. Using the formulae in this paper, $\\Delta z$ and $z \\Delta \\theta$ are computed in the `metrics.pyx` file for the above mentioned. `Cython` is chosen for implementation to obtain faster results in building `BallTree`s calculating `cdist` and to reduce `query` time.\n\n One can customize metric definitions as per one's need by editing the `metrics.pyx` file. After changing this compile it using `python metricsetup.py build_ext --inplace`\n\n **To add:**\n\n Direct calculation of distances in LOS and perpendicular to the LOS to be added to support standard model Cosmology and other popular models. 
For now, one needs to manually convert the angular bins to physical distances to get the approximate results\n\n\n #### `cosmology='lcdm'` (Optional)\n\n Used to calculate co-moving distances from redshifts.\n\n **Available options**:\n\n `'lcdm'` (default)- for Lambda CDM model\n\n `'lc'` - for $R_h=ct$ and linear coasting models\n\n **To add**: `wcdm` and other popular cosmology models soon\n\n #### `geometry='flat'` (Optional)\n\n Used to calculate co-moving distances between a pair of objects\n\n **Available options**:\n\n `'flat'` (default)- for Lambda CDM model\n\n `'open'`\n\n `'close'`\n\n\n #### `estimator=` (Optional)\n\n **Available options**:\n\n `'dp'` - Davis - Peebles estimator (default - fastest)\n\n `'ls'`- Landy - Szalay estimator\n\n `'ph'` - Peebles- Hauser estimator\n\n `'hew'` - Hewitt estimator\n\n `'h'` - Hamilton estimator\n\n For more details on estimator formulae see https://arxiv.org/pdf/1211.6211.pdf\n\n \"\"\"\n # Default function arguments\n global binsparv\n global binsperv\n global maxrad\n global dat\n global datR\n global Nd\n global Nr\n DD = DR = RD = RR = np.zeros((len(binspar)-1, len(binsper)-1))\n weightsflag = True\n useones = True\n cosmology = 'lcdm'\n sflag = True\n geometry='flat'\n filtermetric = flatdistsq\n permetric = musqlcdmf\n parmetric = flatdistsq\n vtype = 'smu'\n randcatfact = 2\n estimator = 'dp'\n binsparv = binspar**2\n binsperv = binsper**2\n randfile = None\n maskfile = None\n\n # Options for correl calculation estimators and cosmology models\n mlist = ['dp', 'ls', 'ph', 'hew', 'h']\n clist = ['lcdm', 'lc'] # to add wcdm\n glist = ['flat', 'open', 'close']\n parper = ['ap', 'sigpi', 'smu']\n\n if kwargs is not None:\n for key, value in kwargs.items():\n # print (key, value)\n if key.lower() == 'randfile':\n randfile = value\n\n elif key.lower() == 'randfact':\n randcatfact = value\n\n elif key.lower() == 'geometry':\n if value.lower() in glist:\n geometry = value.lower()\n # geometry = 'flat'\n # filtermetric = flatdistsq\n # elif value.lower() == 'open':\n # geometry = 'open'\n # filtermetric = opendistsq\n # elif value.lower() == 'close':\n # geometry = 'close'\n # filtermetric = closedistsq\n\n elif key.lower() == 'cosmology':\n if value.lower() in clist:\n cosmology = value.lower()\n else:\n print(\"Incorrect Cosmology provided! Using 'lcdm' as default\")\n\n elif key.lower() == 'vtype':\n if value.lower() in parper:\n vtype = value.lower()\n\n elif key.lower() == 'estimator':\n if value.lower() in mlist:\n estimator = value.lower()\n else:\n print(\"Incorrect estimator provided! 
Using 'dp' as default\")\n\n elif key.lower() == 'mask':\n maskfile = value\n elif key.lower() == 'weights':\n if value is True:\n weightsflag = True\n useones = False\n elif isinstance(value, str):\n if value.lower() == 'eq':\n weightsflag = True\n useones = True\n else:\n weightsflag = False\n else:\n print (\"key argument `%s` not valid\" % key)\n else:\n print (\"Refer documentation to enter valid keyword arguments\")\n\n if vtype == 'ap':\n parmetric = APdz\n binsparv = binspar\n binsperv = binsper\n sflag = False\n filtermetric = APzdth\n permetric = APzdth\n maxrad = max(np.sqrt(binsparv**2 + binsperv**2))\n\n elif vtype == 'smu':\n # binsparv = binspar**2\n # binsperv = binsper**2\n maxrad = max(binsparv)\n if geometry == 'open':\n parmetric = opendistsq\n filtermetric = opendistsq\n if cosmology == 'lc':\n permetric = musqlco\n else:\n permetric = musqlcdmo\n\n elif geometry == 'close':\n parmetric = closedistsq\n filtermetric = closedistsq\n if cosmology == 'lc':\n permetric = musqlcc\n else:\n permetric = musqlcdmc\n else:\n parmetric = flatdistsq\n filtermetric = flatdistsq\n if cosmology == 'lc':\n permetric = musqlcf\n else:\n permetric = musqlcdmf\n\n elif vtype == 'sigpi':\n # binsparv = binspar**2\n # binsperv = binsper**2\n maxrad = max(binsparv+binsperv)\n if geometry == 'open':\n filtermetric = opendistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlco\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmo\n\n elif geometry == 'close':\n filtermetric = closedistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlcc\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmc\n else:\n filtermetric = flatdistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlcf\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmf\n\n else:\n print (\"No valid valuation method provided. Using 'smu' as default\")\n\n\n print(\"Calculating Anisotropic Correlation function with the following parameters\")\n print(\"data file=\")\n print(datfile)\n print(\"random file=\")\n print(randfile)\n print(\"Random catalog size factor(if random file is None)=\")\n print(randcatfact)\n print(\"mask/window file=\")\n print(maskfile)\n print (\"Cosmology=\")\n print(cosmology)\n print (\"Geometry=\")\n print (geometry)\n print(\"Weights=\")\n print(weightsflag)\n print (\"Using ones as weights?=\")\n print (useones)\n print(\"perpendicular metric=\")\n print(permetric)\n print(\"parallel metric=\")\n print(parmetric)\n print(\"Correl estimator=\")\n print(estimator)\n print(\"Valuation type=\")\n print(vtype)\n print (\"binsparv=\")\n print (binsparv)\n print (\"binsperv=\")\n print (binsperv)\n print(\"---------------------------------------------------------------------------\")\n\n if sflag is False:\n # Prepare dat from data file\n dat, weights = datprepz(datfile, 'data', cosmology)\n Nd = len(dat)\n # Prepare datR from random file or generate a random catalog\n if randfile is None:\n randcatsize = randcatfact*Nd\n if maskfile is None:\n print (\"Mask file compulsory. 
Please provide mask='maskfilepath.ply'\")\n else:\n datR, rweights = randcatprepz(datfile, randcatsize, maskfile, cosmology)\n else:\n datR, rweights = datprepz(randfile, 'random', cosmology)\n\n else:\n # Prepare dat from data file\n dat, weights = datprep(datfile, 'data', cosmology)\n\n Nd = len(dat)\n # Prepare datR from random file or generate a random catalog\n if randfile is None:\n randcatsize = randcatfact*Nd\n if maskfile is None:\n print (\"Mask file compulsory. Please provide mask='maskfilepath.ply'\")\n else:\n datR, rweights = randcatprep(datfile, randcatsize, maskfile, cosmology)\n else:\n datR, rweights = datprep(randfile, 'random', cosmology)\n\n Nr = len(datR)\n fact = (1.0*Nr)/Nd\n global adbt\n global arbt\n\n print (\"Creating BallTree for data points using ...\")\n print (filtermetric)\n adbt = BallTree(dat, metric='pyfunc', func=filtermetric)\n\n print (\"Creating BallTree for random points using ...\")\n print (filtermetric)\n arbt = BallTree(datR, metric='pyfunc', func=filtermetric)\n\n rng = np.array([[min(binsparv), max(binsparv)], [min(binsperv), max(binsperv)]])\n print (\"Calculating anisotropic 2pCF...\")\n\n # Reference: arXiv: 1211.6211\n if estimator == 'dp':\n if weightsflag is False: # or len(weights) != Nd\n # print (weightsflag)\n # print(len(weights))\n # print(len(datR))\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n DR = aDRcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n RD = aRDcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n # if len(rweights)!=len(datR):\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # DR = aRDwcalc(dat, datR, binsq, parmetric, permetric, rng, weights)\n print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n DR = amulti_crosscp(dat, datR, binsparv, binsperv, parmetric, permetric, rng, weights, Nr, pcpus)\n print (\"Calculating anisotropic RD with weights (parallelized)...\\n RD=\")\n RD = amulti_crosscpr(dat, datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nd, pcpus)\n # else:\n # DD=aDDwcalc(dat,binsq,parmetric,permetric,rng,weights)\n # DR=aDRwcalc(dat,datR,binsq,parmetric,permetric,rng,weights,rweights)\n\n print (\"Using Davis-Peebles estimator\")\n correl = fact*(DD*2.0/(DR+RD))-1.0\n\n elif estimator == 'ph':\n if weightsflag is False: # or len(weights) != Nd or len(rweights) != len(datR):\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # if len(rweights) != Nr:\n # RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n # else:\n print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n RR = amulti_autocpr(datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nr, pcpus)\n print (\"Using Peebles-Hauser estimator\")\n correl = fact**2*(DD/RR)-1.0\n else:\n if weightsflag is False: # 
or len(weights) != Nd or len(rweights) != len(datR):\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n DR = aDRcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n RD = aRDcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n # RR = aRRwcalc(datR, binsq, parmetric, permetric, rng, rweights)\n # RR = amulti_autocpr(datR, binsq, parmetric, permetric, rng, rweights, Nr, pcpus)\n # DR = aRDwcalc(dat, datR, binsq, parmetric, permetric, rng, weights)\n print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n DR = amulti_crosscp(dat, datR, binsparv, binsperv, parmetric, permetric, rng, weights, Nr, pcpus)\n print (\"Calculating anisotropic RD with weights (parallelized)...\\n RD=\")\n RD = amulti_crosscpr(dat, datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nd, pcpus)\n # if len(rweights) != Nr:\n # RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n # else:\n print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n RR = amulti_autocpr(datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nr, pcpus)\n if estimator == 'ls':\n print (\"Using Landy-Szalay estimator\")\n correl = fact**2*(DD/RR)-fact*(DR+RD)/RR+1.0\n # correl = fact**2*(DD/RR)-2.0*fact*(DR/RR)+1.0\n elif estimator == 'hew':\n print (\"Using Hewett estimator\")\n correl = fact**2*(DD/RR)-fact*0.5*(DR+RD)/RR\n # correl = fact**2*(DD/RR)-fact*(DR/RR)\n elif estimator == 'h':\n print (\"Using Hamilton estimator\")\n correl = (4.0*DD*RR)/(DR+RD)**2 - 1.0\n # correl = (DD*RR)/DR**2 - 1.0\n correlerr = poserr(correl, DD)\n print(\"Anisotropic Two-point correlation=\")\n np.savetxt(\"aDD_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", DD)\n np.savetxt(\"aDR_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", DR)\n np.savetxt(\"aRD_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", RD)\n np.savetxt(\"aRR_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", RR)\n np.savetxt(\"abinspar_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", binspar)\n np.savetxt(\"abinsper_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", binsper)\n np.savetxt(\"atpcf_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", correl)\n np.savetxt(\"atpcferr_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", correlerr)\n print (correl, correlerr)\n return correl, correlerr\n\n\ndef aDDcalc(dat, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic DD...\\n DD=\")\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n for i in tqdm(range(len(dat))):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n # print(\"dist0\")\n # print dist0\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], 
permetric)[0]\n # print(\"dist1\")\n # print dist1\n # print np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n # print (\"rng\")\n # print rng\n # print(\"binspar\")\n # print binspar\n # print(\"binsper\")\n # print binsper\n # print dd\n dd[dd == 0] = 1.0\n # Nd = len(dat)\n # DD = dd/(Nd*(Nd-1.0))\n print (dd)\n return dd\n\n\ndef aRRcalc(datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic RR...\\n RR=\")\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0))\n print (rr)\n return rr\n\n\ndef aDRcalc(dat, datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic DR...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j[j>i]], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n dr[dr == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n print (dr)\n return dr\n\n\ndef aRDcalc(dat, datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic RD...\\n RD=\")\n rd = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j[j>i]], permetric)[0]\n rd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n rd[rd == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n print (rd)\n return rd\n\n\ndef aDDwcalc(dat, binspar, binsper, parmetric, permetric, rng, weights):\n print (\"Calculating anisotropic DD with weights...\\n DD=\")\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n # ddbt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], permetric)[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j[j>i]])[0]\n dd[dd == 0] = 1.0\n # Nd = len(dat)\n # DD = dd/(Nd*(Nd-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (dd)\n return dd\n\n\ndef aRRwcalc(datR, binspar, binsper, parmetric, permetric, rng, rweights):\n print (\"Calculating anisotropic RR with weights...\\n RR=\")\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, 
dist1, range=rng, bins=(binspar, binsper), weights=rweights[j[j>i]])[0]\n rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (rr)\n return rr\n\n\ndef aDRwcalc(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights):\n print (\"Calculating anisotropic DR with weights...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=rweights[j])[0]\n dr[dr == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n # print (dr/2.0)\n return dr/2.0\n\n\ndef aRDwcalc(dat, datR, binspar, binsper, parmetric, permetric, rng, weights):\n print (\"Calculating anisotropic RD with weights...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # bt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j])[0]\n dr[dr == 0] = 1.0\n # DR = dr/(Nd*Nr)\n # print (dr/2.0)\n return dr/2.0\n\n\ndef aDDwcalcp(dat, binspar, binsper, parmetric, permetric, rng, weights, rNd, multi=False, queue=0):\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n # ddbt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(rNd):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], permetric)[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j[j>i]])[0]\n if multi:\n queue.put(dd)\n else:\n return dd\n # print (DD)\n return dd\n\n\ndef aRRwcalcp(datR, binspar, binsper, parmetric, permetric, rng, rweights, rNr, multi=False, queue=0):\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(rNr):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=rweights[j[j>i]])[0]\n if multi:\n queue.put(rr)\n else:\n return rr\n # rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (RR)\n return rr\n\n\ndef aDRwcalcp(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, rNd, multi=False, queue=0):\n # print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(rNd):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), 
weights=rweights[j])[0]\n if multi:\n queue.put(dr)\n else:\n return dr\n # print (DR)\n return dr\n\n\ndef aRDwcalcp(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, rNr, multi=False, queue=0):\n # print (\"Calculating anisotropic RD with weights (parallelized)...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # bt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(rNr):\n ind = adbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j])[0]\n if multi:\n queue.put(dr)\n else:\n return dr\n return dr\n\n\ndef amulti_autocp(dat, binspar, binsper, parmetric, permetric, rng, weights, Nd, CORES=pcpus):\n\n DD = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, binspar, binsper, parmetric, permetric, rng, weights, range(int(Nd*i/CORES),int(Nd*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aDDwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: DD += q.get()\n for j in jobs: j.join()\n DD[DD == 0] = 1.0\n # DD = DD/(Nd*(Nd-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n print (DD)\n return DD\n\n\ndef amulti_autocpr(datR, binspar, binsper, parmetric, permetric, rng, rweights, Nr, CORES=pcpus):\n\n RR = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(datR, binspar, binsper, parmetric, permetric, rng, rweights, range(int(Nr*i/CORES),int(Nr*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aRRwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: RR += q.get()\n for j in jobs: j.join()\n RR[RR == 0] = 1.0\n # RR = RR/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n print (RR)\n return RR\n\n\ndef amulti_crosscp(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, Nr, CORES=pcpus):\n\n RD = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, range(int(Nr*i/CORES), int(Nr*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aRDwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: RD += q.get()\n for j in jobs: j.join()\n RD[RD == 0] = 1.0\n # Nd=len(dat)\n # DR = DR/(Nd*Nr)\n print (RD/2.0)\n return RD/2.0\n\n\ndef amulti_crosscpr(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, Nd, CORES=pcpus):\n\n DR = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, range(int(Nd*i/CORES), int(Nd*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aDRwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: DR += q.get()\n for j in jobs: j.join()\n DR[DR == 0] = 1.0\n # Nd=len(dat)\n # DR = DR/(Nd*Nr)\n print (DR/2.0)\n return DR/2.0\n\n\ndef ximonopole(correlsmu, mu):\n xi0 = np.sum(correlsmu*sp.special.legendre(0)(mu),axis=1)/len(mu)\n np.savetxt(\"xi0.txt\",xi0)\n return xi0\n\n\ndef xidipole(correlsmu, mu):\n xi2 = np.sum(5.0*correlsmu*sp.special.legendre(2)(mu),axis=1)/len(mu)\n 
np.savetxt(\"xi2.txt\",xi2)\n return xi2\n\n\ndef xiquadpole(correlsmu, mu):\n xi4 = np.sum(9.0*correlsmu*sp.special.legendre(4)(mu),axis=1)/len(mu)\n np.savetxt(\"xi4.txt\",xi4)\n return xi4\n\ndef beta(correlsmu, mu):\n xis0 = ximonopole(correlsmu,mu)\n xis2 = xidipole(correlsmu,mu)\n xis4 = xiquadpole(correlsmu,mu)\n xir = xis0*sp.special.legendre(0)(mu) + xis2*sp.special.legendre(2)(mu) + xis4*sp.special.legendre(4)(mu)\n r = xir/xis0\n return 5.0/3.0*(np.sqrt(1.8*r-0.8)-1.0)\n\n# def beta(correlsmu, mu):\n# betav =\n"
] | [
[
"scipy.special.legendre"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
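The only API recorded for this row, scipy.special.legendre, returns a numpy.poly1d that can be evaluated directly on an array of mu values; the multipole helpers at the end of the quoted file use it to weight the (s, mu) correlation grid before averaging over mu. A minimal sketch with a toy grid (the array sizes and values are assumptions, not survey data):

    import numpy as np
    import scipy as sp
    import scipy.special

    mu = np.linspace(0.05, 0.95, 10)       # mu bin centres (toy)
    correlsmu = np.ones((4, len(mu)))      # toy xi(s, mu) grid with 4 s-bins

    P2 = sp.special.legendre(2)            # polynomial object for P_2(x)
    xi2 = np.sum(5.0 * correlsmu * P2(mu), axis=1) / len(mu)
    print(xi2.shape)                       # (4,)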
davidcrowland/layer_vb_tagging | [
"83865d67b7a931a9eff4ba6fd4d033b2219225f1",
"83865d67b7a931a9eff4ba6fd4d033b2219225f1"
] | [
"extra/tsfresh_examples/tsfresh/feature_selection/feature_selector.py",
"extra/neurodsp/neurodsp/tests/test_cyclefeatures.py"
] | [
"# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\"\"\"\nContains a feature selection method that evaluates the importance of the different extracted features. To do so,\nfor every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.\nThe methods that calculate the p-values are called feature selectors.\n\nAfterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and\nwhich to cut off (solely based on the p-values).\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom functools import partial\n\nfrom builtins import zip\nfrom builtins import range\nimport os\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom multiprocessing import Pool\nfrom tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \\\n target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test\nfrom tsfresh import defaults\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef check_fs_sig_bh(X, y,\n n_processes=defaults.N_PROCESSES,\n chunksize=defaults.CHUNKSIZE,\n fdr_level=defaults.FDR_LEVEL,\n hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,\n test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE):\n \"\"\"\n The wrapper function that calls the significance test functions in this package.\n In total, for each feature from the input pandas.DataFrame an univariate feature significance test is conducted.\n Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to decide which features\n to keep and which to delete.\n\n We are testing\n \n :math:`H_0` = the Feature is not relevant and can not be added\n\n against\n\n :math:`H_1` = the Feature is relevant and should be kept\n \n or in other words\n \n :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target\n\n :math:`H_1` = Target and Feature are associated / dependent\n\n When the target is binary this becomes\n \n :math:`H_0 = \\\\left( F_{\\\\text{target}=1} = F_{\\\\text{target}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( F_{\\\\text{target}=1} \\\\neq F_{\\\\text{target}=0} \\\\right)`\n \n Where :math:`F` is the distribution of the target.\n\n In the same way we can state the hypothesis when the feature is binary\n \n :math:`H_0 = \\\\left( T_{\\\\text{feature}=1} = T_{\\\\text{feature}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( T_{\\\\text{feature}=1} \\\\neq T_{\\\\text{feature}=0} \\\\right)`\n\n Here :math:`T` is the distribution of the target.\n\n TODO: And for real valued?\n\n :param X: The DataFrame containing all the features and the target\n :type X: pandas.DataFrame\n\n :param y: The target vector\n :type y: pandas.Series\n\n :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature\n :type test_for_binary_target_real_feature: str\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. 
mean and median)\n :type hypotheses_independent: bool\n\n :param n_processes: Number of processes to use during the p-value calculation\n :type n_processes: int\n\n :param chunksize: Size of the chunks submitted to the worker processes\n :type chunksize: int\n\n :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance\n of this particular feature. The DataFrame has the columns\n \"Feature\",\n \"type\" (binary, real or const),\n \"p_value\" (the significance of this feature as a p-value, lower means more significant)\n \"rejected\" (if the Benjamini Hochberg procedure rejected this feature)\n :rtype: pandas.DataFrame\n\n \"\"\"\n target_is_binary = len(set(y)) == 2\n\n # todo: solve the multiclassification case. for a multi classification the algorithm considers the target to be\n # regression. Instead one could perform a binary one versus all classification.\n\n # Only allow entries for which the target is known!\n y = y.astype(np.float)\n X = X.copy().loc[~(y == np.NaN), :]\n\n # Create the DataFrame df_features containing the information about the different hypotheses\n # Every row contains information over one feature column from X\n df_features = pd.DataFrame()\n\n df_features['Feature'] = list(set(X.columns))\n df_features = df_features.set_index('Feature', drop=False)\n\n # Add relevant columns to df_features\n df_features[\"rejected\"] = np.nan\n df_features[\"type\"] = np.nan\n df_features[\"p_value\"] = np.nan\n\n # Calculate the feature significance in parallel\n pool = Pool(n_processes)\n\n # Helper function which wrapps the _calculate_p_value with many arguments already set\n f = partial(_calculate_p_value, y=y,\n target_is_binary=target_is_binary,\n test_for_binary_target_real_feature=test_for_binary_target_real_feature)\n results = pool.map(f, [X[feature] for feature in df_features['Feature']], chunksize=chunksize)\n p_values_of_features = pd.DataFrame(results)\n df_features.update(p_values_of_features)\n\n pool.close()\n pool.join()\n\n # Perform the real feature rejection\n if \"const\" in set(df_features.type):\n df_features_bh = benjamini_hochberg_test(df_features.loc[~(df_features.type == \"const\")],\n hypotheses_independent, fdr_level)\n df_features = pd.concat([df_features_bh, df_features.loc[df_features.type == \"const\"]])\n else:\n df_features = benjamini_hochberg_test(df_features, hypotheses_independent, fdr_level)\n \n # It is very important that we have a boolean \"rejected\" column, so we do a cast here to be sure\n df_features[\"rejected\"] = df_features[\"rejected\"].astype(\"bool\")\n\n if defaults.WRITE_SELECTION_REPORT:\n # Write results of BH - Test to file\n if not os.path.exists(defaults.RESULT_DIR):\n os.mkdir(defaults.RESULT_DIR)\n\n with open(os.path.join(defaults.RESULT_DIR, \"fs_bh_results.txt\"), 'w') as file_out:\n file_out.write((\"Performed BH Test to control the false discovery rate(FDR); \\n\"\n \"FDR-Level={0};Hypothesis independent={1}\\n\"\n ).format(fdr_level, hypotheses_independent))\n df_features.to_csv(index=False, path_or_buf=file_out, sep=';', float_format='%.4f')\n return df_features\n\n\ndef _calculate_p_value(feature_column, y, target_is_binary, test_for_binary_target_real_feature):\n \"\"\"\n Internal helper function to calculate the p-value of a given feature using one of the dedicated\n functions target_*_feature_*_test.\n\n :param feature_column: the feature column.\n :type feature_column: pandas.Series\n\n :param y: the binary target vector\n :type y: 
pandas.Series\n\n :param target_is_binary: Whether the target is binary or not\n :type target_is_binary: bool\n\n :param test_for_binary_target_real_feature: The significance test to be used for binary target and real valued\n features. Either ``'mann'`` for the Mann-Whitney-U test or ``'smir'``\n for the Kolmogorov-Smirnov test.\n :type test_for_binary_target_real_feature: str\n\n :return: the p-value of the feature significance test and the type of the tested feature as a Series.\n Lower p-values indicate a higher feature significance.\n :rtype: pd.Series\n \"\"\"\n # Do not process constant features\n if len(pd.unique(feature_column.values)) == 1:\n _logger.warning(\"[test_feature_significance] Feature {} is constant\".format(feature_column.name))\n return pd.Series({\"type\": \"const\", \"rejected\": False}, name=feature_column.name)\n\n else:\n if target_is_binary:\n # Decide if the current feature is binary or not\n if len(set(feature_column.values)) == 2:\n type = \"binary\"\n p_value = target_binary_feature_binary_test(feature_column, y)\n else:\n type = \"real\"\n p_value = target_binary_feature_real_test(feature_column, y, test_for_binary_target_real_feature)\n else:\n # Decide if the current feature is binary or not\n if len(set(feature_column.values)) == 2:\n type = \"binary\"\n p_value = target_real_feature_binary_test(feature_column, y)\n else:\n type = \"real\"\n p_value = target_real_feature_real_test(feature_column, y)\n\n return pd.Series({\"p_value\": p_value, \"type\": type}, name=feature_column.name)\n\n\ndef benjamini_hochberg_test(df_pvalues, hypotheses_independent, fdr_level):\n \"\"\"\n This is an implementation of the benjamini hochberg procedure that calculates which of the hypotheses belonging\n to the different p-Values from df_p to reject. While doing so, this test controls the false discovery rate,\n which is the ratio of false rejections by all rejections:\n\n .. math::\n\n FDR = \\\\mathbb{E} \\\\left [ \\\\frac{ |\\\\text{false rejections}| }{ |\\\\text{all rejections}|} \\\\right]\n\n\n References\n ----------\n\n .. [1] Benjamini, Yoav and Yekutieli, Daniel (2001).\n The control of the false discovery rate in multiple testing under dependency.\n Annals of statistics, 1165--1188\n\n\n :param df_pvalues: This DataFrame should contain the p_values of the different hypotheses in a column named\n \"p_values\".\n :type df_pvalues: pandas.DataFrame\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. 
mean and median)\n :type hypotheses_independent: bool\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :return: The same DataFrame as the input, but with an added boolean column \"rejected\".\n :rtype: pandas.DataFrame\n \"\"\"\n\n # Get auxiliary variables and vectors\n df_pvalues = df_pvalues.sort_values(by=\"p_value\")\n m = len(df_pvalues)\n K = list(range(1, m + 1))\n\n # Calculate the weight vector C\n if hypotheses_independent:\n # c(k) = 1\n C = [1] * m\n else:\n # c(k) = \\sum_{i=1}^m 1/i\n C = [sum([1.0 / i for i in range(1, k + 1)]) for k in K]\n\n # Calculate the vector T to compare to the p_value\n T = [fdr_level * k / m * 1.0 / c for k, c in zip(K, C)]\n\n # Get the last rejected p_value\n try:\n k_max = list(df_pvalues.p_value <= T).index(False)\n except ValueError:\n k_max = m\n\n # Add the column denoting if hypothesis was rejected\n df_pvalues[\"rejected\"] = [True] * k_max + [False] * (m - k_max)\n\n return df_pvalues\n",
"\"\"\"\ntest_cyclefeatures.py\nTest measurement of cycle-by-cycle features of oscillatory waveforms\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport neurodsp\nfrom neurodsp import shape\nfrom neurodsp.tests import _load_example_data\n\n\ndef test_cyclefeatures_consistent():\n \"\"\"\n Confirm consistency in peak finding\n \"\"\"\n # Load data\n data_idx = 1\n x = _load_example_data(data_idx=data_idx)\n Fs = 1000\n f_range = (13, 30)\n\n # Load ground truth lagged coherence\n df_true = pd.read_csv(os.path.dirname(neurodsp.__file__) +\n '/tests/data/sample_data_' + str(data_idx) + '_cyclefeatures.csv')\n\n # Compute lagged coherence\n true_oscillating_periods_kwargs = {'restrict_by_amplitude_consistency': False,\n 'restrict_by_period_consistency': False,\n 'amplitude_fraction_threshold': .3}\n\n df = shape.features_by_cycle(x, Fs, f_range, center_extrema='T',\n estimate_oscillating_periods=True,\n true_oscillating_periods_kwargs=true_oscillating_periods_kwargs)\n\n # Compute difference between calculated and ground truth values for each column\n for k in df.keys():\n signal_diff = df[k].values - df_true[k].values\n assert np.allclose(np.sum(np.abs(signal_diff)), 0, atol=10 ** -5)\n"
] | [
[
"pandas.concat",
"pandas.unique",
"pandas.Series",
"pandas.DataFrame"
],
[
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
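The APIs listed for this row cover two small patterns: the constant-feature guard in _calculate_p_value (pd.unique on the raw values, per-feature results returned as named pd.Series and stitched together with pd.concat) and the np.abs closeness check in the neurodsp test. A compact sketch of both, with invented feature names and values:

    import numpy as np
    import pandas as pd

    feature = pd.Series([1.0, 1.0, 1.0], name='feat_a')
    is_const = len(pd.unique(feature.values)) == 1    # True for a constant column

    rows = [pd.Series({'type': 'const' if is_const else 'real', 'rejected': False},
                      name=feature.name),
            pd.Series({'type': 'real', 'p_value': 0.03}, name='feat_b')]
    results = pd.concat([r.to_frame().T for r in rows])   # one row per feature
    print(results)

    diff = np.abs(np.array([0.1, -0.2]) - np.array([0.1, -0.2]))
    assert np.allclose(np.sum(diff), 0, atol=1e-5)        # same tolerance as the test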
prakass1/InteractiveSimilarityExplorer | [
"2fa5fb91c7df6424b9ed777ef4373ed7094c2348"
] | [
"machine_learning_model.py"
] | [
"import utility\nimport static_sim_functions as smf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\nfrom time_series_grp import TimeSeriesGroupProcessing\nfrom RandomNeighbors import RandomNeighbors\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.model_selection import KFold\n\nimport ml_modelling_ts as ml_ts\n\n'''\nThis is just a run of the approaches using the methodologies, save the neighborhood for UI.\n'''\n\n\ndef common_processing(df):\n # Getting percentage between 0 to 1 rather than score values\n df[\"tschq12\"] = df[\"tschq12\"].apply(lambda x: x / 100)\n df[\"tschq16\"] = df[\"tschq16\"].apply(lambda x: x / 100)\n df[\"tschq17\"] = df[\"tschq17\"].apply(lambda x: x / 100)\n\n # Feature engineering family history\n df[\"tschq04\"] = df.apply(smf.create_cols_family_hist, axis=1)\n\n return df\n\n\ndef get_common_cols(col1, col2):\n common_elements = set(col1).intersection(col2)\n return common_elements\n\nimport properties\nimport pandas as pd\ndef initial_processing():\n # Read the csv of the tschq data and make the necessary things\n tschq = pd.read_pickle(properties.data_location + \"/input_pckl/\" + \"3_q.pckl\")\n\n # Cleaning tschq05 question. There is an abstraction for a row we add common value\n\n def filter_age(x):\n if isinstance(x, int):\n # Append the most common value obtained\n return tschq[\"tschq05\"].value_counts().head(1).index[0]\n else:\n return x\n\n tschq[\"tschq05\"] = tschq[\"tschq05\"].apply(filter_age)\n\n # Drop the questionnaire_id and created_at\n tschq.drop([\"questionnaire_id\", \"created_at\"], axis=1, inplace=True)\n\n # Lets read and join two questionnaires tschq and hq\n hq = pd.read_pickle(\"data/input_pckl/4_q.pckl\")\n hq.isna().sum(axis=0)\n # By looking at the output we are sure that h5 and h6 do not contribute much and can be dropped\n hq.drop([\"hq05\", \"hq06\"], axis=1, inplace=True)\n hq_df = hq.set_index(\"user_id\")\n df = tschq.join(hq_df.iloc[:, 2:], on=\"user_id\")\n\n drop_cols = [\"tschq01\", \"tschq25\", \"tschq07-2\",\n \"tschq13\", \"tschq04-1\", \"tschq04-2\"]\n\n # Getting percentage between 0 to 1 rather than score values\n df[\"tschq12\"] = df[\"tschq12\"].apply(lambda x: x / 100)\n df[\"tschq16\"] = df[\"tschq16\"].apply(lambda x: x / 100)\n df[\"tschq17\"] = df[\"tschq17\"].apply(lambda x: x / 100)\n\n df[\"tschq04\"] = df.apply(smf.create_cols_family_hist, axis=1)\n\n df.drop(drop_cols, axis=1, inplace=True)\n\n # Set the heom object, while using the required similarity\n # Alternative\n # Categorical boolean mask\n categorical_feature_mask = df.iloc[:, 1:].infer_objects().dtypes == object\n other_feature_mask = df.iloc[:, 1:].infer_objects().dtypes != object\n # filter categorical columns using mask and turn it into a list\n categorical_cols = df.iloc[:, 1:].columns[categorical_feature_mask].tolist()\n num_cols = df.iloc[:, 1:].columns[other_feature_mask].tolist()\n cat_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in categorical_cols]\n num_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in num_cols]\n\n return cat_idx, num_idx, df\n\nimport os\nimport traceback\ndef save_data_objs(df, quest_cmbs=\"all\"):\n try:\n if not os.path.isdir(properties.model_location + quest_cmbs):\n os.makedirs(properties.model_location + quest_cmbs)\n utility.save_model(\"\".join(quest_cmbs + \"/\" + quest_cmbs + \"_stat_q_data\"), df)\n\n encoded_combined_df = smf.preprocess(df, quest_cmbs, age_bin=False,\n process_model_name=\"\".join(quest_cmbs + \"/\" 
+\n quest_cmbs + \"_stat_q_data_oe_model\"),\n prediction=False)\n\n # Save this encoded_data\n utility.save_model(\"\".join(quest_cmbs + \"/\" +\n quest_cmbs + \"_stat_q_data_encoded\"), encoded_combined_df)\n\n return encoded_combined_df\n\n # Use this data to build the data over static data.\n except Exception:\n print(traceback.print_exc())\n\n\ndef weighted_average(distress_list):\n average = np.asarray(distress_list, dtype=float).mean()\n return average\n\n\n\n# Function computes the weighted average as predictions for given prediction time point\ndef compute_weighted_avg(n_idx, encoded_data, pred_at_list, method=\"mean\", dist_nn=None, wt_flag=False):\n\n preds = list()\n # Prediction for four time points\n for pval in pred_at_list:\n distress_list = list()\n for vals in n_idx:\n u_id = encoded_data[\"user_id\"].iloc[vals]\n user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))\n # 3rd val of the series is s03 of the neighbor\n print(\"{}, {} Values \".format(int(pval), int(u_id)))\n if len(user_ts) > int(pval):\n value = user_ts[int(pval), :][3]\n elif len(user_ts) <= int(pval):\n value = user_ts[len(user_ts)-1, :][3]\n\n distress_list.append(value)\n\n\n if wt_flag:\n print(\"Calling by weighted distance prediction for distress\")\n preds.append(weighted_distance_prediction(distress_list, dist_nn))\n else:\n print(\"Calling weighted average to predict distress\")\n preds.append(weighted_average(distress_list))\n return preds\n\n\ndef weighted_distance_prediction(p_preds, distance):\n # Inverse distance so that highest weight is given to the nearest one and least to the farther\n inv_dist = np.divide(1, distance)\n\n #s03 - tinnitus distress weighted by distance is given as\n s03_pred = (np.sum(np.multiply(p_preds, inv_dist)) / (np.sum(inv_dist)))\n\n return s03_pred\n\n\ndef compute(test_nn, encoded_data,\n pred_list, method=\"mean\", dist_nn=None, wt_dist=False):\n from sklearn.linear_model import LinearRegression\n\n preds = list()\n for point in pred_list:\n nn_preds = list()\n intercepts_list = list()\n coeff_list = list()\n for nn in test_nn:\n u_id = encoded_data[\"user_id\"].iloc[nn]\n user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))\n # Obtain the time series until time point and fit the data for linear regression\n diff_arr = np.abs(np.subtract(point, user_ts[:, 1]))\n diff_near_idx = np.where(diff_arr == diff_arr.min())\n print(\"minimum to the time point is at -- \", diff_near_idx)\n # difference near index. Handling for the length of users\n usr_idx = diff_near_idx[0][0]\n\n user_ts_p = user_ts[:usr_idx]\n user_ts_df = pd.DataFrame(user_ts_p, columns=[\"day\", \"day_sess_index\",\n \"s02\", \"s03\", \"s04\",\n \"s05\", \"s06\", \"s07\"])\n X = user_ts_df[[\"day_sess_index\"]]\n # We show for tinnitus distress. This can be extended to other physiological variables as well.\n y = user_ts_df[[\"s03\"]]\n\n # Fit on X axis as time and Y as the s03 predictive value.\n reg_fit = LinearRegression(normalize=True)\n reg_fit.fit(X, y)\n\n # If weighted_distance is true, then predict by each of the nn_user and add to list. 
This will be used for\n # calculating weighted_distance_predictions.\n if wt_dist:\n nn_pred = reg_fit.predict(np.asarray(point).reshape(1, -1))\n nn_preds.append(nn_pred[0][0])\n else:\n intercepts_list.append(reg_fit.intercept_)\n coeff_list.append(reg_fit.coef_)\n\n if wt_dist:\n print(\"Predicting the value of s03 for the user by a weighted average weighted by distance\")\n preds.append(weighted_distance_prediction(nn_preds, dist_nn))\n else:\n print(\"Predicting the value of s3 over the averaged slope and intercepts of \"\n \"observations of the neighbors\")\n\n # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained.\n print(\"The equation to estimate s03 for the user is {}\".format(\"\".join(str(np.asarray(coeff_list).mean())) +\n \"* time_index + \" +\n str(np.asarray(intercepts_list).mean())))\n y = np.multiply(np.asarray(coeff_list).mean(), point) + np.asarray(intercepts_list).mean()\n preds.append(y)\n\n return preds\n\n\ndef compute_linear_regression(test_nn, encoded_data, pred_list, method=\"mean\"):\n #test_nn = test_user_nn\n #pred_list = prediction_at_list\n from sklearn.linear_model import LinearRegression\n preds = list()\n for point in pred_list:\n attr_list = list()\n intercepts_list = list()\n coeff_list = list()\n for nn in test_nn:\n u_id = encoded_data[\"user_id\"].iloc[nn]\n user_ts = tsg_data.get_m_day_ts_enumerate(int(11))\n diff_arr = np.abs(np.subtract(point, user_ts[:, 1]))\n diff_near_idx = np.where(diff_arr == diff_arr.min())\n print(diff_near_idx)\n # difference near index\n usr_vals = np.array([user_ts[n_id] for n_id in diff_near_idx[0]])\n if len(usr_vals) > 1:\n value = usr_vals.mean(axis=0)\n print(\"vavg\" + str(value))\n else:\n value = usr_vals[0]\n print(\"v\" + str(value))\n\n attr_list.append(value)\n\n\n df = pd.DataFrame(user_ts)\n df.columns = [\"day\", \"day_session_id\",\n \"s02\", \"s03\",\n \"s04\", \"s05\",\n \"s06\", \"s07\"]\n reg_model = LinearRegression(normalize=True)\n user_x = df[[\"day_session_id\", \"s04\", \"s05\", \"s06\"]].to_numpy()\n user_s03 = df[[\"s03\"]].to_numpy().ravel()\n reg_model.fit(user_x, user_s03)\n intercepts_list.append(reg_model.intercept_)\n coeff_list.append(reg_model.coef_)\n # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained.\n\n # convert coeff's to numpy for manipulations\n numpy_attr_list = np.array(attr_list)\n print(numpy_attr_list)\n avg_np_attr_list = numpy_attr_list[:, 4:].mean(axis=0)\n\n print(avg_np_attr_list)\n\n numpy_coeff_list = np.array(coeff_list)\n\n print(numpy_coeff_list)\n print(numpy_coeff_list.mean(axis=0))\n\n # Day_index, s02, s04, s05, s06 ,s07 - Use only the fit independent features to estimate the dependent\n y = np.multiply(numpy_coeff_list[:, 0].mean(), point) + \\\n np.multiply(numpy_coeff_list[:, 1].mean(), avg_np_attr_list[0]) + \\\n np.multiply(numpy_coeff_list[:, 2].mean(), avg_np_attr_list[1]) + \\\n np.multiply(numpy_coeff_list[:, 3].mean(), avg_np_attr_list[2]) + \\\n np.asarray(intercepts_list).mean()\n preds.append(y)\n print(preds)\n return preds\n\n\n# Create test label as ground truth at prediction point.\ndef create_y_labels(test_data, prediction_at, method=\"mean\"):\n y_test = list()\n for i in range(0, len(test_data)):\n test_ts_test1 = tsg_data.get_usr_mday_ts_predict(int(test_data.iloc[i][\"user_id\"]))\n # print(len(test_ts_test1))\n if len(test_ts_test1) >= prediction_at:\n y_test.append(test_ts_test1[prediction_at - 1][2])\n elif len(test_ts_test1) < 
prediction_at:\n y_test.append(test_ts_test1[len(test_ts_test1) - 1][2])\n return y_test\n\n\n# Create reference points for multiple reference predictions\ndef get_pred_ref_points(user_id, ndays, method=\"mean\"):\n # Using the default tsg which is mean observations of the user\n test_user_ts = tsg_data.get_usr_mday_ts_predict(user_id)\n\n user_ts_idx = test_user_ts[:, 1]\n # [\"date\", \"time_idx\", \"s02\", \"s03\", \"s04\", \"s05\", \"s06\", \"s07]\n user_distress = test_user_ts[:, 3]\n\n # Near evaluation. Change this for farther evaluations\n # Near -> 0.20, 0.10\n # Far -> 1 - (Near)\n\n # Near points are of the sequence of observation because we are sure all stay until here.\n #prediction_at = 10\n\n # Far prediction point is the last N% of the test user time series\n # It is tested for 0.75, 0.8, 0.9\n prediction_at = round(len(user_ts_idx) * 0.80)\n y_labels = user_distress[prediction_at:prediction_at + ndays].tolist()\n prediction_at_list = user_ts_idx[prediction_at:prediction_at + ndays].tolist()\n\n return y_labels, prediction_at_list\n\n\ndef do_test(test_data, out_writer, csv_out_writer,\n ndays, near_idxs, encoded_data, fold_count=\"final\",\n method=\"mean\", dist_nn=None, wt_dist_flag=False):\n for i in range(0, len(test_data)):\n user_id = int(test_data.iloc[i][\"user_id\"])\n print(\"User- Id \", user_id)\n y_labels, prediction_at_list = get_pred_ref_points(user_id, ndays, method=method)\n\n # y_labels = create_y_labels(X_test, preds, method=\"mean\")\n # Weighting by inverse of neighbor\n if wt_dist_flag:\n test_user_nn = near_idxs[i]\n test_user_dist = dist_nn[i]\n pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=test_user_dist, wt_flag=wt_dist_flag)\n\n pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=test_user_dist, wt_dist=wt_dist_flag)\n else:\n test_user_nn = near_idxs[i]\n pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=None, wt_flag=False)\n pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=None, wt_dist=False)\n\n\n # calculate\n if not fold_count == \"final\":\n print(\"Evaluating for the fold-\" + str(fold_count) + \" for the forecast reference points - \" +\n str(prediction_at_list))\n out_writer.write(\"Evaluating for the forecast reference points -- \" +\n str(prediction_at_list) + \"for the method evaluation -- \" + str(method) + \"\\n\")\n else:\n print(\"Evaluating for forecast reference points - \" +\n str(prediction_at_list))\n out_writer.write(\"Evaluating over the forecast reference points -- \" +\n str(prediction_at_list) + \"for the method evaluation -- \" + str(method) + \"\\n\")\n\n print(\"Computing RMSE for weighted average based predictions on the User -- \" + str(user_id))\n print(\"---------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n\n print(\"RMSE -- \", np.sqrt(mean_squared_error(y_labels, pred_weighted_average)))\n out_writer.write(\"RMSE -- \" + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \"\\n\")\n\n\n # Writing to csv file\n if not fold_count == \"final\":\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \",\" +\n \"weighted_average\" + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + 
str(y_labels[2])\n + \",\" + str(pred_weighted_average[0]) + \",\" + str(pred_weighted_average[1])\n + \",\" + str(pred_weighted_average[2]) + \"\\n\"))\n else:\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \",\" +\n \"weighted_average\" + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_weighted_average[0]) + \",\" + str(pred_weighted_average[1])\n + \",\" + str(pred_weighted_average[2]) + \"\\n\"))\n\n print(\"-----------------------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n print(\"Computing RMSE for {} {} based predictions for the user -- {}\"\n .format(str(\"weighted_distance\" + str(wt_dist_flag)), str(\"linear_regression\"), str(user_id)))\n out_writer.write(\"Computing RMSE for {} {} based predictions for the user -- {} \\n\"\n .format(str(\"weighted_distance\" + str(wt_dist_flag)), str(\"linear_regression\"), str(user_id)))\n print(\"RMSE -- \", np.sqrt(mean_squared_error(y_labels, pred_lr)))\n out_writer.write(\"RMSE -- \" + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \"\\n\")\n print(\"---------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n\n # Write to csv file\n if not fold_count == \"final\":\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \",\" +\n str(\"lr\") + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_lr[0]) + \",\" + str(pred_lr[1]) + \",\" + str(\n pred_lr[2]) + \"\\n\"))\n else:\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \",\" +\n str(\"lr\") + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_lr[0]) + \",\" + str(pred_lr[1]) + \",\" + str(\n pred_lr[2]) + \"\\n\"))\n\n\n# Change method and execute to get the predictions appropriately, these are configurations\neval_method = \"mean\"\n# Default day readings for all test users must be at mean and prediction are between min - mean - max\n\ntsg_data = TimeSeriesGroupProcessing(method=eval_method)\n# For all combinations evaluation it must be set to True\nquest_cmb_all = False\n# Same random state needs to be maintained to get consistent test data over all combinations and repeatable results\nrandom_state = 1220\n# It is the setting to get the ahead prediction for tinnitus distress, 3 here means for 3 days\n# min it is a day and max of about 60days between points which is not an usual scenario\nndays = 3\n\n# Build the default NN with all the combination.\nif not quest_cmb_all:\n for key, val in properties.quest_comb.items():\n # Build NN for each category\n print(\"Building NN for the question combination -- \" + str(key))\n\n cat_idx, num_idx, combined_df = smf.initial_processing(key, val, append_synthethic=False)\n # Build and get the knn NN for prediction over test instances.\n # Save the data objs\n\n encoded_data = save_data_objs(combined_df, key)\n\n out_writer = open(\"\".join(\"output/output_\" + str(key) + \"_\" + str(eval_method) + \"_heom_norm.txt\"), \"w+\")\n csv_out_writer = open(\"\".join(\"output/output_\" + str(key) + \"_\" + str(eval_method) + \"_heom_norm.csv\"), \"w+\")\n\n 
csv_out_writer.write(\"\".join(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\"))\n\n #Create a test set\n X, test = train_test_split(encoded_data,\n test_size=0.20,\n random_state=random_state)\n\n def filter_train_ids(x):\n # print(x)\n if x[\"user_id\"] in train_user_ids:\n return x\n\n def filter_test_ids(x):\n # print(x)\n if x[\"user_id\"] in test_user_ids:\n return x\n\n train_user_ids = X[\"user_id\"].to_list()\n\n X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type=\"broadcast\").dropna()\n X_train_data_ui[\"user_id\"] = X_train_data_ui[\"user_id\"].apply(int)\n # Save the non encoded train data for visualization purposes\n utility.save_model(\"\".join(key + \"/\" + key + \"_train_stat_q_data\"), X_train_data_ui)\n\n # filter and get the data to show to the UI for the test data.\n test_user_ids = test[\"user_id\"].to_list()\n\n X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type=\"broadcast\").dropna()\n\n X_test_data_ui[\"user_id\"] = X_test_data_ui[\"user_id\"].apply(int)\n\n # Save the data_ui object as json\n #test_data = {}\n #test_data[\"users\"] = X_test_data_ui.to_dict(\"r\")\n #utility.save_data(\"\".join(\"test_data_ui_\" + key), test_data)\n\n from HEOM import HEOM\n # Can be done at prediction too.\n from sklearn.metrics.pairwise import cosine_distances\n from sklearn.linear_model import LinearRegression\n from scipy.spatial.distance import pdist, squareform\n from scipy.stats import zscore\n\n heom = HEOM(X.to_numpy(), cat_idx, num_idx)\n sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)\n mean_heom_distance = sim_matrix.mean()\n\n knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)\n knn.fit(X.iloc[:, 1:])\n dist, test_idx = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)\n\n # Execute without any varying for saving the KNN as pickle to be used by UI\n do_test(test, out_writer, csv_out_writer, ndays, test_idx, X,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\n utility.save_model(\"\".join(key + \"/\" + \"knn_static\"), knn)\n utility.save_model(\"\".join(key + \"/\" + \"train_sim_data.pckl\"), X)\n\n out_writer.close()\n csv_out_writer.close()\n\n\n# All feature combinations\n\ncat_idx, num_idx, combined_df = initial_processing()\n\n\n# Build KNN for each category\nprint(\"Building KNN for the question combination -- \" + str(\"overall\"))\n\n# Save the data objs\nencoded_data = save_data_objs(combined_df, \"overall\")\n\n\nX, test = train_test_split(encoded_data,\n test_size=0.20,\n random_state=random_state)\n\n\ndef filter_train_ids(x):\n # print(x)\n if x[\"user_id\"] in train_user_ids:\n return x\n\n\ndef filter_test_ids(x):\n # print(x)\n if x[\"user_id\"] in test_user_ids:\n return x\n\n\ntrain_user_ids = X[\"user_id\"].to_list()\n\nX_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type=\"broadcast\").dropna()\nX_train_data_ui[\"user_id\"] = X_train_data_ui[\"user_id\"].apply(int)\n\n# Save in overall.\nutility.save_model(\"\".join(\"overall\" + \"/\" + \"overall\" + \"_train_stat_q_data\"), X_train_data_ui)\n\n# filter and get the data to show to the UI for the test data.\ntest_user_ids = test[\"user_id\"].to_list()\n\nX_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type=\"broadcast\").dropna()\n\nX_test_data_ui[\"user_id\"] = X_test_data_ui[\"user_id\"].apply(int)\n\n# Save the data_ui object as json\ntest_data = {}\ntest_data[\"users\"] = 
X_test_data_ui.to_dict(\"r\")\nutility.save_data(\"test_data_ui_x_test\", test_data)\n\n# Save the results to out_writer\nout_writer = open(\"output/overall_output_folds_\" + str(eval_method) + \".txt\", \"w+\")\ncsv_out_writer = open(\"output/overall_output_folds_\" + str(eval_method) + \".csv\", \"w+\")\n\n# First get the time series for a given test patient and the reference point and iterate to evaluate\ncsv_out_writer.write(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\")\n\n\n# Split the data into train and test\nfrom sklearn.model_selection import train_test_split\nimport utility\nfrom HEOM import HEOM\n#Can be done at prediction too.\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.stats import zscore\n\n\nheom = HEOM(X.to_numpy()[:, 1:], cat_idx, num_idx)\nsim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)\nmean_heom_distance = sim_matrix.mean()\n\nknn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)\nknn.fit(X.to_numpy()[:, 1:])\ndist, idx_test = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)\n\n# First get the time series for a given test patient and the reference point and iterate to evaluate\n\ndo_test(test, out_writer, csv_out_writer, ndays, idx_test, X,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\nout_writer.close()\ncsv_out_writer.close()\n\n# End save the nearest neighbor as data objects, so that can be used from the UI\nutility.save_model(\"\".join(\"overall/\" + \"knn_static\"), knn)\nutility.save_model(\"\".join(\"overall\" + \"/\" + \"train_sim_data.pckl\"), X)\n\n\n'''\n ML Modelling based on s02 - loudness.\n Note: This has to be run once the all feature execution is completed since we build upon a custom similarity matrix,\n it is essential that the same split of train test happen so that it can be verified from the application.\n'''\n\n# Create train and test containing same users in train and test as per static data. 
(Note: Run above code and then this\n# because same set of train test users are used)\n\ndef splitData(dataset, test_user_ids):\n train_data = dataset[~dataset[\"user_id\"].isin(test_user_ids)]\n test_data = dataset[dataset[\"user_id\"].isin(test_user_ids)]\n return train_data, test_data\n\n\n# Save both train and test matrix\ndef save_ts_objs(train, test, location_name):\n try:\n if not os.path.isdir(properties.model_location + location_name):\n os.makedirs(properties.model_location + location_name)\n utility.save_model(\"\".join(location_name + \"/\" + location_name + \"_train_data\"), train)\n utility.save_model(\"\".join(location_name + \"/\" + location_name + \"_test_data\"), test)\n\n except Exception:\n print(traceback.print_exc())\n\n\nX = ml_ts.process_data(grouping=\"day\")\n\n# Calculate pairwise distance and create a dataframe for the same\nfrom scipy.spatial.distance import pdist, squareform\n# Cross validate here based on the same split of static data here.\n# Note: Only one combination will be present\nC = np.zeros((X.shape[0], X.shape[0]))\nfor i in range(0, len(X)):\n for j in range(0, len(X)):\n dist = ml_ts.compute_dist(X[:, 1][i], X[:, 1][j])\n C[i][j] = dist\n\nC_df = pd.DataFrame(C)\n\n\n#C_df.to_csv(\"sim_ema.csv\")\n\n# Threshold overall distance for making within radius\nthreshold_distance = sum(C_df.mean())/len(C_df)\n\n\nuser_ids = []\nfor val in X:\n user_ids.append(val[0])\n\nC_df[\"user_id\"] = user_ids\n\n\ntrain_data, test_data = splitData(C_df, test_user_ids)\n# Save the time series data objects as dynamic_ts into model folder\nsave_ts_objs(train_data, test_data, \"dynamic_ts\")\n\nout_writer = open(\"\".join(\"output/output_ema_\" + str(eval_method) + \"_.txt\"), \"w+\")\ncsv_out_writer = open(\"\".join(\"output/output_ema_\" + str(eval_method) + \"_.csv\"), \"w+\")\n\ncsv_out_writer.write(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\")\n\n# Test on the final test set. Note there is no varying K just to save the NN here.\n# It should be noted we use NearesetNeighbors and not KNearestNeighbors classifier.\nknn_ema = NearestNeighbors(n_neighbors=5, metric=\"precomputed\", radius=threshold_distance)\nknn_ema.fit(train_data[train_data.index])\nema_dist, ema_idx = knn_ema.kneighbors(test_data[train_data.index], n_neighbors=5)\n# First get the time series for a given test patient and the reference point and iterate to evaluate\ndo_test(test_data, out_writer, csv_out_writer, ndays, ema_idx, encoded_data,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\n# Close the writers\nout_writer.close()\ncsv_out_writer.close()\n\n# Save the similarity search index KNN\nutility.save_model(\"\".join(\"dynamic_ts\" + \"/\" + \"dynamic_ts\" + \"_knn\"), knn_ema)\n"
] | [
[
"numpy.multiply",
"numpy.asarray",
"numpy.subtract",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.neighbors.NearestNeighbors",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"pandas.read_pickle",
"numpy.zeros",
"numpy.sum",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
felmoltor/kismet-heatmap | [
"d145a865f80db16ad8c6d0bb1dd35e0238706f3b"
] | [
"gpsxml2png.py"
] | [
"#!/usr/bin/env python2\n\n\"\"\"\nCopyright (c) 2016, Bliksem Labs B.V.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, \nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this \n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND \nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED \nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; \nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON \nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS \nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport sys\nimport numpy\nfrom scipy.interpolate import griddata\nimport matplotlib.pyplot as plt\n\n\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n # Python 2.5\n import xml.etree.cElementTree as etree\n except ImportError:\n try:\n # Python 2.5\n import xml.etree.ElementTree as etree\n except ImportError:\n try:\n # normal cElementTree install\n import cElementTree as etree\n except ImportError:\n try:\n # normal ElementTree install\n import elementtree.ElementTree as etree\n except ImportError:\n print(\"Failed to import ElementTree from any known place\")\n\n\n# Process the Kismet GPSXML into columns.\n\ndef parse_xml(filename):\n\ttree = etree.parse(open(filename, 'rb'))\n\n\tts = []\n\tbssid = []\n\tsignal = []\n\tlat = []\n\tlon = []\n\twalked_lon = []\n\twalked_lat = []\n\n\tfor z in tree.findall('.//gps-point'):\n\t\t# A lon/lat filter might be applied here\n\t\t# if float(z.get('lon')) < 3.942:\n\t\t#\tcontinue\n\n\t\tif z.get('bssid') == 'GP:SD:TR:AC:KL:OG':\n\t\t\twalked_lon.append(float(z.get('lon')))\n\t\t\twalked_lat.append(float(z.get('lat')))\n\n\t\telif z.get('signal_dbm') is not None:\n\t\t\tbssid.append(z.get('bssid'))\n\t\t\tts.append(int(z.get('time-sec')))\n\t\t\tlat.append(float(z.get('lat')))\n\t\t\tlon.append(float(z.get('lon')))\n\t\t\tsignal.append(int(z.get('signal_dbm')))\n\n\treturn (ts, bssid, signal, lat, lon, walked_lon, walked_lat,)\n\n\n# Draw parsed data on a surface\n\ndef draw_data(ts, bssid, signal, lat, lon, walked_lon, walked_lat):\n\n\t# We create a grid of 1000x1000\n\tgrid_x, grid_y = numpy.mgrid[min(walked_lon):max(walked_lon):1000j, min(walked_lat):max(walked_lat):1000j]\n\n\t# We want to draw all unique APs\n\tbssids = list(set(bssid))\n\n\t# For each BSSID...\n\tfor s in bssids:\n\t\tpoints_lon = []\n\t\tpoints_lat = []\n\t\tvalues = []\n\t\th = []\n\t\t\n\t\t# Apply all points on an intermediate surface\n\t\t# so we can distinct points where we were, without reception\n\t\tfor i in range(0, len(bssid)):\n\t\t\tif bssid[i] == s:\n\t\t\t\thc = hash((lon[i], lat[i]))\n\t\t\t\tif hc not in 
h:\n\t\t\t\t\tpoints_lon.append(lon[i])\n\t\t\t\t\tpoints_lat.append(lat[i])\n\t\t\t\t\tvalues.append(float(signal[i]))\n\t\t\t\t\th.append(hash((lon[i], lat[i])))\n\n\t\t# Optional: apply -100dBm where we don't have gathered data\n\t\tfor i in range(0, len(walked_lon)):\n\t\t\thc = hash((walked_lon[i], walked_lat[i]))\n\t\t\tif hc not in h:\n\t\t\t\tpoints_lon.append(walked_lon[i])\n\t\t\t\tpoints_lat.append(walked_lat[i])\n\t\t\t\tvalues.append(float(-100))\n\t\t\t\th.append(hash((walked_lon[i], walked_lat[i])))\n\n\t\t# Interpolate the data\n\t\tgrid = griddata((points_lon, points_lat), numpy.array(values), (grid_x, grid_y), method='cubic')\n\n\t\t# Store the bitmap in the current folder.\n\t\tplt.show()\n\t\tplt.imsave('%s.png' % (s), grid.T)\n\n\t\t# Calculate the World File for use in Qgis\n\t\ta = ((max(walked_lon)-min(walked_lon))/1000)\n\t\tb = 0\n\t\tc = 0\n\t\td = ((max(walked_lat)-min(walked_lat))/1000)\n\t\te = min(walked_lon)\n\t\tf = min(walked_lat)\n\n\t\t# Write the World File\n\t\topen('%s.pngw' % (s), 'w').write('%.16f\\n%d\\n%d\\n%.16f\\n%.16f\\n%.16f' % (a, b, c, d, e, f,))\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) != 2:\n\t\tprint(\"Usage %s << /path/to/Kismet.gpsxml >>\" % (sys.argv[0]))\n\t\tsys.exit(-1)\n\t\n\tdraw_data(*parse_xml(sys.argv[1]))\n\n\n\t\n"
] | [
[
"matplotlib.pyplot.imsave",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
keshabb/GamestonkTerminal | [
"419c3691db220c467d2979b19ca308b3b800c0bd",
"7a8a4f868c548505c36287d16f969e80daeed431"
] | [
"gamestonk_terminal/options/op_helpers.py",
"gamestonk_terminal/technical_analysis/volume.py"
] | [
"\"\"\"Option helper functions\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\n\nimport pandas as pd\nimport numpy as np\n\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n check_non_negative,\n)\n\n# pylint: disable=R1710\n\n\ndef load(other_args: List[str]) -> str:\n \"\"\"Load ticker into object\n\n Parameters\n ----------\n other_args: List[str]\n Agrparse arguments\n\n Returns\n -------\n str:\n Ticker\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"opload\",\n description=\"Load a ticker into option menu\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--ticker\",\n action=\"store\",\n dest=\"ticker\",\n required=\"-h\" not in other_args,\n help=\"Stock ticker\",\n )\n\n try:\n if other_args:\n if \"-t\" not in other_args and \"-h\" not in other_args:\n other_args.insert(0, \"-t\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return \"\"\n print(\"\")\n return ns_parser.ticker\n except Exception as e:\n print(e, \"\\n\")\n return \"\"\n except SystemExit:\n print(\"\")\n return \"\"\n\n\n# pylint: disable=no-else-return\n\n\ndef select_option_date(avalaiable_dates: List[str], other_args: List[str]) -> str:\n \"\"\"Select an option date out of a supplied list\n\n Parameters\n ----------\n avalaiable_dates: List[str]\n Possible date options\n other_args: List[str]\n Arparse arguments\n Returns\n -------\n expiry_date: str\n Selected expiry date\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"exp\",\n description=\"See and set expiration date\",\n )\n parser.add_argument(\n \"-d\",\n \"--date\",\n dest=\"n_date\",\n action=\"store\",\n type=int,\n default=-1,\n choices=range(len(avalaiable_dates)),\n help=\"Select index for expiry date.\",\n )\n\n parser.add_argument(\n \"-D\",\n dest=\"date\",\n type=str,\n choices=avalaiable_dates + [\"\"],\n help=\"Select date (YYYY-MM-DD)\",\n default=\"\",\n )\n\n try:\n if other_args:\n if \"-\" not in other_args[0]:\n other_args.insert(0, \"-d\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return \"\"\n\n # Print possible expiry dates\n if ns_parser.n_date == -1 and not ns_parser.date:\n print(\"\\nAvailable expiry dates:\")\n for i, d in enumerate(avalaiable_dates):\n print(f\" {(2 - len(str(i))) * ' '}{i}. 
{d}\")\n print(\"\")\n return \"\"\n\n # It means an expiry date was correctly selected\n else:\n if ns_parser.date:\n if ns_parser.date in avalaiable_dates:\n print(f\"Expiraration set to {ns_parser.date} \\n\")\n return ns_parser.date\n else:\n print(\"Expiration not an option\")\n return \"\"\n else:\n expiry_date = avalaiable_dates[ns_parser.n_date]\n print(f\"Expiraration set to {expiry_date} \\n\")\n return expiry_date\n\n except Exception as e:\n print(e, \"\\n\")\n return \"\"\n\n\ndef get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:\n \"\"\"Function to get the loss at the given expiry\n\n Parameters\n ----------\n strike: Union[int,float]\n Value to calculate total loss at\n chain: Dataframe:\n Dataframe containing at least strike and openInterest\n\n Returns\n -------\n loss: Union[float,int]\n Total loss\n \"\"\"\n\n itm_calls = chain[chain.index < strike][[\"OI_call\"]]\n itm_calls[\"loss\"] = (strike - itm_calls.index) * itm_calls[\"OI_call\"]\n call_loss = itm_calls[\"loss\"].sum()\n\n itm_puts = chain[chain.index > strike][[\"OI_put\"]]\n itm_puts[\"loss\"] = (itm_puts.index - strike) * itm_puts[\"OI_put\"]\n put_loss = itm_puts.loss.sum()\n loss = call_loss + put_loss\n\n return loss\n\n\ndef calculate_max_pain(chain: pd.DataFrame) -> int:\n \"\"\"Returns the max pain for a given call/put dataframe\n\n Parameters\n ----------\n chain: DataFrame\n Dataframe to calculate value from\n\n Returns\n -------\n max_pain : int\n Max pain value\n \"\"\"\n\n strikes = np.array(chain.index)\n if (\"OI_call\" not in chain.columns) or (\"OI_put\" not in chain.columns):\n print(\"Incorrect columns. Unable to parse max pain\")\n return np.nan\n\n loss = []\n for price_at_exp in strikes:\n loss.append(get_loss_at_strike(price_at_exp, chain))\n\n chain[\"loss\"] = loss\n max_pain = chain[\"loss\"].idxmin()\n\n return max_pain\n\n\ndef vol(other_args: List[str]):\n \"\"\"Parse volume argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"vol\",\n description=\"Plot volume. 
Volume refers to the number of contracts traded today.\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--min\",\n default=-1,\n type=check_non_negative,\n help=\"Min strike to plot\",\n dest=\"min\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n default=-1,\n type=check_non_negative,\n help=\"Max strike to plot\",\n dest=\"max\",\n )\n\n parser.add_argument(\n \"--calls\",\n action=\"store_true\",\n default=False,\n dest=\"calls\",\n help=\"Flag to plot call options only\",\n )\n\n parser.add_argument(\n \"--puts\",\n action=\"store_true\",\n default=False,\n dest=\"puts\",\n help=\"Flag to plot put options only\",\n )\n\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef voi(other_args: List[str]):\n \"\"\"Parse Volume + open interest argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"voi\",\n description=\"\"\"\n Plots Volume + Open Interest of calls vs puts.\n \"\"\",\n )\n parser.add_argument(\n \"-v\",\n \"--minv\",\n dest=\"min_vol\",\n type=check_non_negative,\n default=-1,\n help=\"minimum volume (considering open interest) threshold of the plot.\",\n )\n parser.add_argument(\n \"-m\",\n \"--min\",\n dest=\"min_sp\",\n type=check_non_negative,\n default=-1,\n help=\"minimum strike price to consider in the plot.\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n dest=\"max_sp\",\n type=check_non_negative,\n default=-1,\n help=\"maximum strike price to consider in the plot.\",\n )\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return None\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n return None\n\n\ndef oi(other_args: List[str]):\n \"\"\"Parse Open Interest argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"oi\",\n description=\"Plot open interest. 
Open interest represents the number of contracts that exist.\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--min\",\n default=-1,\n type=check_non_negative,\n help=\"Min strike to plot\",\n dest=\"min\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n default=-1,\n type=check_non_negative,\n help=\"Max strike to plot\",\n dest=\"max\",\n )\n\n parser.add_argument(\n \"--calls\",\n action=\"store_true\",\n default=False,\n dest=\"calls\",\n help=\"Flag to plot call options only\",\n )\n\n parser.add_argument(\n \"--puts\",\n action=\"store_true\",\n default=False,\n dest=\"puts\",\n help=\"Flag to plot put options only\",\n )\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n\n try:\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n\n if not ns_parser:\n return None\n\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n return None\n",
"\"\"\"Volume Technical Analysis\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\nfrom datetime import timedelta\n\nimport matplotlib.pyplot as plt\nimport pandas_ta as ta\nimport pandas as pd\n\nfrom pandas.plotting import register_matplotlib_converters\nfrom gamestonk_terminal.helper_funcs import (\n check_positive,\n parse_known_args_and_warn,\n plot_autoscale,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal import feature_flags as gtff\n\nregister_matplotlib_converters()\n\n\ndef ad(other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame):\n \"\"\"Accumulation Dictribution Line\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n s_ticker: str\n Ticker\n s_interval: str\n Stock data interval\n df_stock: pd.DataFrame\n Dataframe of stock prices\n \"\"\"\n\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"ad\",\n description=\"\"\"\n The Accumulation/Distribution Line is similar to the On Balance\n Volume (OBV), which sums the volume times +1/-1 based on whether the close is\n higher than the previous close. The Accumulation/Distribution indicator, however\n multiplies the volume by the close location value (CLV). The CLV is based on the\n movement of the issue within a single bar and can be +1, -1 or zero. \\n \\n\n The Accumulation/Distribution Line is interpreted by looking for a divergence in\n the direction of the indicator relative to price. If the Accumulation/Distribution\n Line is trending upward it indicates that the price may follow. Also, if the\n Accumulation/Distribution Line becomes flat while the price is still rising (or falling)\n then it signals an impending flattening of the price.\n \"\"\",\n )\n\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n parser.add_argument(\n \"--open\",\n action=\"store_true\",\n default=False,\n dest=\"b_use_open\",\n help=\"uses open value of stock\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n bar_colors = [\n \"r\" if x[1].Open < x[1].Close else \"g\" for x in df_stock.iterrows()\n ]\n\n if s_interval == \"1440min\":\n bar_width = timedelta(days=1)\n else:\n bar_width = timedelta(minutes=int(s_interval.split(\"m\")[0]))\n\n if ns_parser.b_use_open:\n df_ta = ta.ad(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Adj Close\"],\n volume=df_stock[\"Volume\"],\n offset=ns_parser.n_offset,\n open_=df_stock[\"Open\"],\n ).dropna()\n # Do not use open stock values\n else:\n # Note this should always be Close not Adj Close\n df_ta = ta.ad(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Close\"],\n volume=df_stock[\"Volume\"],\n offset=ns_parser.n_offset,\n ).dropna()\n\n fig, axes = plt.subplots(\n 3,\n 1,\n gridspec_kw={\"height_ratios\": [2, 1, 1]},\n figsize=plot_autoscale(),\n dpi=PLOT_DPI,\n )\n ax = axes[0]\n if s_interval == \"1440min\":\n ax.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n ax.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n ax.set_title(f\"{s_ticker} AD\")\n ax.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax.set_ylabel(\"Share Price ($)\")\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax.minorticks_on()\n ax.grid(b=True, which=\"minor\", color=\"#999999\", 
linestyle=\"-\", alpha=0.2)\n\n ax2 = axes[1]\n ax2.set_ylabel(\"Volume\")\n if s_interval == \"1440min\":\n ax2.bar(\n df_stock.index,\n df_stock[\"Volume\"].values,\n color=bar_colors,\n alpha=0.8,\n width=0.3,\n )\n else:\n ax2.bar(\n df_stock.index,\n df_stock[\"Volume\"].values,\n color=bar_colors,\n alpha=0.8,\n width=bar_width,\n )\n ax2.set_xlim(df_stock.index[0], df_stock.index[-1])\n\n ax3 = axes[2]\n ax3.plot(df_ta.index, df_ta.values, \"b\", lw=1)\n ax3.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax3.axhline(0, linewidth=2, color=\"k\", ls=\"--\")\n ax3.set_ylabel(\"A/D\")\n ax3.set_xlabel(\"Time\")\n ax3.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax3.minorticks_on()\n ax3.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.gcf().autofmt_xdate()\n fig.tight_layout(pad=1)\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef obv(other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame):\n \"\"\"On Balance Volume\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n s_ticker: str\n Ticker\n s_interval: str\n Stock data interval\n df_stock: pd.DataFrame\n Dataframe of stock prices\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"obv\",\n description=\"\"\"\n The On Balance Volume (OBV) is a cumulative total of the up and\n down volume. When the close is higher than the previous close, the volume is added\n to the running total, and when the close is lower than the previous close, the volume\n is subtracted from the running total. \\n \\n To interpret the OBV, look for the OBV\n to move with the price or precede price moves. If the price moves before the OBV,\n then it is a non-confirmed move. A series of rising peaks, or falling troughs, in the\n OBV indicates a strong trend. 
If the OBV is flat, then the market is not trending.\n \"\"\",\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n bar_colors = [\n \"r\" if x[1].Open < x[1].Close else \"g\" for x in df_stock.iterrows()\n ]\n\n if s_interval == \"1440min\":\n bar_width = timedelta(days=1)\n else:\n bar_width = timedelta(minutes=int(s_interval.split(\"m\")[0]))\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.obv(\n close=df_stock[\"Adj Close\"],\n volume=df_stock[\"Volume\"],\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.obv(\n close=df_stock[\"Close\"],\n volume=df_stock[\"Volume\"],\n offset=ns_parser.n_offset,\n ).dropna()\n\n fig, axes = plt.subplots(\n 3,\n 1,\n gridspec_kw={\"height_ratios\": [2, 1, 1]},\n figsize=plot_autoscale(),\n dpi=PLOT_DPI,\n )\n ax = axes[0]\n if s_interval == \"1440min\":\n ax.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n ax.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n\n ax.set_title(f\"{s_ticker} OBV\")\n ax.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax.set_ylabel(\"Share Price ($)\")\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax.minorticks_on()\n ax.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax2 = axes[1]\n ax2.set_xlim(df_stock.index[0], df_stock.index[-1])\n\n if s_interval == \"1440min\":\n ax2.bar(\n df_stock.index,\n df_stock[\"Volume\"].values,\n color=bar_colors,\n alpha=0.8,\n width=bar_width,\n )\n else:\n ax2.bar(\n df_stock.index,\n df_stock[\"Volume\"].values,\n color=bar_colors,\n alpha=0.8,\n width=bar_width,\n )\n ax3 = axes[2]\n ax3.plot(df_ta.index, df_ta.values, \"b\", lw=1)\n ax3.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax3.set_xlabel(\"Time\")\n ax3.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax3.minorticks_on()\n ax3.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.gcf().autofmt_xdate()\n fig.tight_layout(pad=1)\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n"
] | [
[
"numpy.array"
],
[
"matplotlib.pyplot.show",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.gcf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
skylian/flare | [
"c920abcae975cc49c052f5f2abb6bbee5c39a11e",
"33b71b4864812b89a28120b01fbc886e6e690376"
] | [
"flare/framework/agent.py",
"flare/examples/img_q_example.py"
] | [
"from abc import ABCMeta, abstractmethod\nfrom multiprocessing import Process, Value\nimport numpy as np\nfrom flare.common.log import GameLogEntry\nfrom flare.common.communicator import AgentCommunicator\nfrom flare.common.replay_buffer import NoReplacementQueue, ReplayBuffer, Experience\n\n\nclass AgentHelper(object):\n \"\"\"\n AgentHelper abstracts some part of Agent's data processing and the I/O\n communication between Agent and ComputationDataProcessor (CDP). It receives a\n Communicator from one CDP and uses it to send data to the CDP.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, name, communicator, sample_interval):\n assert isinstance(communicator, AgentCommunicator)\n self.name = name\n self.comm = communicator\n self.counter = 0\n assert sample_interval >= 2\n self.sample_interval = sample_interval\n\n def unpack_exps(self, exp_seqs):\n \"\"\"\n The input `exp_seqs` is always a list of sequences, each sequence\n containing multiple Experience instances.\n \"\"\"\n\n def concat_lists(lists):\n return [x for l in lists for x in l]\n\n def extract_key(seq, k):\n assert seq\n return [e.val(k) for e in seq]\n\n ret = dict(\n inputs={},\n next_inputs={},\n next_alive={},\n rewards={},\n actions={},\n next_actions={},\n states=None,\n next_states=None)\n\n for k in self.input_keys:\n ipt_seqs = [extract_key(exp_seq, k) for exp_seq in exp_seqs]\n ret[\"inputs\"][k] = [ipt_seq[:-1] for ipt_seq in ipt_seqs]\n ret[\"next_inputs\"][k] = [ipt_seq[1:] for ipt_seq in ipt_seqs]\n\n for k in self.action_keys:\n act_seqs = [extract_key(exp_seq, k) for exp_seq in exp_seqs]\n ret[\"actions\"][k] = [act_seq[:-1] for act_seq in act_seqs]\n ret[\"next_actions\"][k] = [act_seq[1:] for act_seq in act_seqs]\n\n for k in self.reward_keys:\n ret[\"rewards\"][\n k] = [extract_key(exp_seq[:-1], k) for exp_seq in exp_seqs]\n\n if self.state_keys:\n ret[\"states\"] = dict()\n ret[\"next_states\"] = dict()\n\n for k in self.state_keys:\n ## we only take the first(second) element of a seq for states(next_states)\n ret[\"states\"][\n k] = [extract_key(exp_seq[:1], k)[0] for exp_seq in exp_seqs]\n ret[\"next_states\"][k] = [\n extract_key(exp_seq[1:2], k)[0] for exp_seq in exp_seqs\n ]\n\n ret[\"next_alive\"][\"alive\"] \\\n = [extract_key(exp_seq[1:], \"alive\") for exp_seq in exp_seqs]\n\n ## HERE we decide whether the data are instances or seqs\n ## according to the existence of states\n if not self.state_keys:\n # sample instances\n for k in ret.keys():\n if ret[k] is not None:\n for kk in ret[k].keys():\n ret[k][kk] = concat_lists(ret[k][kk])\n\n return ret, len(exp_seqs)\n\n def predict(self, inputs, states=dict()):\n \"\"\"\n Process the input data (if necessary), send them to CDP for prediction,\n and receive the outcome.\n\n Args:\n inputs(dict): data used for prediction. 
It is caller's job\n to make sure inputs contains all data needed and they are in the\n right form.\n \"\"\"\n data = dict(inputs=inputs, states=states)\n self.comm.put_prediction_data(data, 1)\n ret = self.comm.get_prediction_return()\n return ret\n\n @abstractmethod\n def add_experience(self, e):\n \"\"\"\n Implements how to record an experience.\n Will be called by self.store_data()\n \"\"\"\n pass\n\n def _store_data(self, alive, data):\n \"\"\"\n Store the past experience for later use, e.g., experience replay.\n\n Args:\n data(dict): data to store.\n \"\"\"\n assert isinstance(data, dict)\n data[\"alive\"] = [alive]\n t = Experience(data)\n self.add_experience(t)\n self.counter += 1\n if self.counter % self.sample_interval == 0:\n return self.learn()\n\n @abstractmethod\n def sample_experiences(self):\n \"\"\"\n Implements how to retrieve experiences from past.\n Will be called by self.learn()\n \"\"\"\n pass\n\n def learn(self):\n \"\"\"\n Sample data from past experiences and send them to CDP for learning.\n Optionally, it receives learning outcomes sent back from CW and does\n some processing.\n\n Depends on users' need, this function can be called in three ways:\n 1. In Agent's run_one_episode\n 2. In store_data(), e.g., learning once every few steps\n 3. As a separate thread, e.g., using experience replay\n \"\"\"\n exp_seqs = self.sample_experiences()\n if not exp_seqs:\n return\n data, size = self.unpack_exps(exp_seqs)\n self.comm.put_training_data(data, size)\n ret = self.comm.get_training_return()\n return ret\n\n\nclass OnlineHelper(AgentHelper):\n \"\"\"\n Online helper. It calls `learn()` every `sample_interval`\n steps.\n\n While waiting for learning return, the calling `Agent` is blocked.\n \"\"\"\n\n def __init__(self, name, communicator, sample_interval=5):\n super(OnlineHelper, self).__init__(name, communicator, sample_interval)\n # NoReplacementQueue used to store past experience.\n self.exp_queue = NoReplacementQueue()\n\n @staticmethod\n def exp_replay():\n return False\n\n def add_experience(self, e):\n self.exp_queue.add(e)\n\n def sample_experiences(self):\n return self.exp_queue.sample()\n\n\nclass ExpReplayHelper(AgentHelper):\n \"\"\"\n Example of applying experience replay. It starts a separate threads to\n run learn().\n \"\"\"\n\n def __init__(self,\n name,\n communicator,\n buffer_capacity,\n num_experiences,\n sample_interval=5,\n num_seqs=1):\n super(ExpReplayHelper, self).__init__(name, communicator,\n sample_interval)\n # replay buffer for experience replay\n self.replay_buffer = ReplayBuffer(buffer_capacity)\n self.num_experiences = num_experiences\n self.num_seqs = num_seqs\n\n @staticmethod\n def exp_replay():\n return True\n\n def add_experience(self, e):\n self.replay_buffer.add(e)\n\n def sample_experiences(self):\n return self.replay_buffer.sample(self.num_experiences, self.num_seqs)\n\n\nclass Agent(Process):\n \"\"\"\n Agent implements the control flow and logics of how Robot interacts with\n the environment and does computation. It is a subclass of Process. 
The entry\n function of the Agent process is run().\n\n Some members:\n env: the environment\n num_games: number of games to run\n learning: Whether learn or not (only do testing)\n helpers: a dictionary of `AgentHelper`, each corresponds to one\n `ComputationTask`\n log_q: communication channel between `Agent` and the centralized logger\n running: the `Agent` will keep running as long as `running` is True.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, num_games, actrep, learning):\n super(Agent, self).__init__()\n self.id = -1 # just created, not added to the Robot yet\n self.num_games = num_games\n self.learning = learning\n self.state_specs = None\n self.helpers = {}\n self.log_q = None\n self.running = Value('i', 0)\n self.daemon = True ## Process member\n self.alive = 1\n self.env_f = None\n self.actrep = actrep\n\n def set_env(self, env_class, *args, **kwargs):\n \"\"\"\n Set the environment for the agent. For now, only create a lambda\n function. Once the agent process starts running, we will call this\n function.\n\n env_class: The environment class to create\n args, kwargs: The arguments for creating the class\n \"\"\"\n self.env_f = lambda: env_class(*args, **kwargs)\n\n def add_agent_helper(self, helper, input_keys, action_keys, state_keys,\n reward_keys):\n \"\"\"\n Add an AgentHelper, with its name (also the name of its\n correspoding `ComputationTask`) as key.\n \"\"\"\n assert isinstance(helper, AgentHelper)\n helper.input_keys = input_keys\n helper.action_keys = action_keys\n helper.state_keys = state_keys\n helper.reward_keys = reward_keys\n self.helpers[helper.name] = helper\n\n def _make_zero_states(self, prop):\n dtype = prop[\"dtype\"] if \"dtype\" in prop else \"float32\"\n return np.zeros(prop[\"shape\"]).astype(dtype)\n\n ## The following three functions hide the `AgentHelper` from the users of\n ## `Agent`.\n def predict(self, alg_name, inputs, states=dict()):\n ## Convert single instances to batches of size 1\n ## The reason for this conversion is that we want to reuse the\n ## _pack_data() and _unpack_data() of the CDP for handling both training\n ## and prediction data. 
These two functions assume that data are stored\n ## as mini batches instead of single instances in the prediction and learning\n ## queues.\n inputs_ = {k: [v] for k, v in inputs.items()}\n states_ = {k: [v] for k, v in states.items()}\n prediction, next_states = self.helpers[alg_name].predict(inputs_,\n states_)\n ## convert back to single instances\n prediction = {k: v[0] for k, v in prediction.items()}\n next_states = {k: v[0] for k, v in next_states.items()}\n return prediction, next_states\n\n def run(self):\n \"\"\"\n Default entry function of Agent process.\n \"\"\"\n assert self.env_f is not None, \"You should first call self.set_env()!\"\n ## Only call the env function now to make sure there is only one\n ## environment (OpenGL context) in each process\n self.env = self.env_f()\n self.running.value = 1\n for i in range(self.num_games):\n self._run_one_episode()\n if not self.running.value:\n return\n self.running.value = 0\n\n def _store_data(self, alg_name, data):\n if self.learning: ## only store when the agent is learning\n return self.helpers[alg_name]._store_data(self.alive, data)\n\n def _run_one_episode(self):\n def __store_data(observations, actions, states, rewards):\n learning_ret = self._cts_store_data(observations, actions, states,\n rewards) ## written by user\n if learning_ret is not None:\n for k, v in learning_ret.items():\n self.log_entry.add_key(k, v)\n\n observations = self._reset_env()\n states = self._get_init_states() ## written by user\n\n while self.alive and (not self.env.time_out()):\n actions, next_states = self._cts_predict(\n observations, states) ## written by user\n assert isinstance(actions, dict)\n assert isinstance(next_states, dict)\n next_observations, rewards, next_game_over = self._step_env(\n actions)\n __store_data(observations, actions, states, rewards)\n\n observations = next_observations\n states = next_states\n ## next_game_over == 1: success\n ## next_game_over == -1: failure\n self.alive = 1 - abs(next_game_over)\n\n ## self.alive: 0 -- success/failure\n ## 1 -- normal\n ## -1 -- timeout\n if self.env.time_out():\n self.alive = -1\n actions, _ = self._cts_predict(observations, states)\n zero_rewards = {k: [0] * len(v) for k, v in rewards.items()}\n __store_data(observations, actions, states, zero_rewards)\n\n ## Record success. For games that do not have a defintion of\n ## 'success' (e.g., 'breakout' never ends), this quantity will\n ## always be zero\n self.log_entry.add_key(\"success\", next_game_over > 0)\n return self._total_reward()\n\n def _reset_env(self):\n self.alive = 1\n ## currently we only support a single logger for all CTs\n self.log_entry = GameLogEntry(self.id, 'All')\n obs = self.env.reset()\n assert isinstance(obs, dict)\n return obs\n\n def _step_env(self, actions):\n next_observations, rewards, next_game_over = self.env.step(actions,\n self.actrep)\n assert isinstance(next_observations, dict)\n assert isinstance(rewards, dict)\n self.log_entry.add_key(\"num_steps\", 1)\n self.log_entry.add_key(\"total_reward\", sum(map(sum, rewards.values())))\n return next_observations, rewards, next_game_over\n\n def _total_reward(self):\n self.log_q.put(self.log_entry)\n return self.log_entry.total_reward\n\n def _get_init_states(self):\n \"\"\"\n By default, there is no state. 
The user needs to override this function\n to return a dictionary of init states if necessary.\n \"\"\"\n return dict()\n\n @abstractmethod\n def _cts_predict(self, observations, states):\n \"\"\"\n The user needs to override this function to specify how different CTs\n make predictions given observations and states.\n\n Output: actions: a dictionary of actions, each action being a vector\n If the action is discrete, then it is a length-one\n list of an integer.\n states (optional): a dictionary of states, each state being a floating vector\n \"\"\"\n pass\n\n @abstractmethod\n def _cts_store_data(self, observations, actions, states, rewards):\n \"\"\"\n The user needs to override this function to specify how different CTs\n store their corresponding experiences, by calling self._store_data().\n Each input should be a dictionary.\n \"\"\"\n pass\n",
"import torch.nn as nn\nimport numpy as np\nimport torch.optim as optim\nfrom flare.algorithm_zoo.simple_algorithms import SimpleQ\nfrom flare.model_zoo.simple_models import SimpleModelQ\nfrom flare.framework.manager import Manager\nfrom flare.agent_zoo.simple_rl_agents import SimpleRLAgent\nfrom flare.framework.agent import ExpReplayHelper\nfrom flare.env_zoo.gym_env import GymEnvImage\nfrom flare.framework.common_functions import Flatten\n\nif __name__ == '__main__':\n \"\"\"\n A demo of how to train from image inputs\n \"\"\"\n game = \"Breakout-v0\"\n\n num_agents = 16\n num_games = 8000\n\n im_height, im_width = 84, 84\n env = GymEnvImage(\n game, contexts=4, height=im_height, width=im_width, gray=True)\n d, h, w = env.observation_dims()[\"sensor\"]\n num_actions = env.action_dims()[\"action\"]\n\n # 1. Spawn one agent for each instance of environment.\n # Agent's behavior depends on the actual algorithm being used. Since we\n # are using SimpleAC, a proper type of Agent is SimpleRLAgent.\n agents = []\n for _ in range(num_agents):\n agent = SimpleRLAgent(num_games, reward_shaping_f=np.sign)\n agent.set_env(\n GymEnvImage,\n game_name=game,\n contexts=4,\n height=im_height,\n width=im_width,\n gray=True)\n agents.append(agent)\n\n # 2. Construct the network and specify the algorithm.\n # Here we use a small CNN as the perception net for the Actor-Critic algorithm\n cnn = nn.Sequential(\n nn.Conv2d(\n d, 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(\n 32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(\n 64, 64, kernel_size=3, stride=1),\n nn.ReLU(),\n Flatten(), # flatten the CNN cube to a vector\n nn.Linear(7 * 7 * 64, 512),\n nn.ReLU())\n\n alg = SimpleQ(\n model=SimpleModelQ(\n dims=(d, h, w), num_actions=num_actions, perception_net=cnn),\n gpu_id=0,\n optim=(optim.RMSprop, dict(lr=1e-4)),\n grad_clip=5.0,\n exploration_end_steps=500000 // num_agents,\n update_ref_interval=100)\n\n # 3. Specify the settings for learning: data sampling strategy\n # (ExpReplayHelper here) and other settings used by\n # ComputationTask.\n ct_settings = {\n \"RL\": dict(\n alg=alg,\n # sampling\n agent_helper=ExpReplayHelper,\n buffer_capacity=200000 // num_agents,\n num_experiences=4, # num per agent\n num_seqs=0, # sample instances\n sample_interval=5)\n }\n\n # 4. Create Manager that handles the running of the whole pipeline\n manager = Manager(ct_settings)\n manager.add_agents(agents)\n manager.start()\n"
] | [
[
"numpy.zeros"
],
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
c-w-m/pyldpc | [
"c7eb471359086b7336d7b40f11cc912f0daf0476",
"c7eb471359086b7336d7b40f11cc912f0daf0476"
] | [
"pyldpc/decoder.py",
"pyldpc/utils_img.py"
] | [
"\"\"\"Decoding module.\"\"\"\nimport numpy as np\nimport warnings\nfrom . import utils\n\nfrom numba import njit, int64, types, float64\n\n\ndef decode(H, y, snr, maxiter=1000):\n \"\"\"Decode a Gaussian noise corrupted n bits message using BP algorithm.\n\n Decoding is performed in parallel if multiple codewords are passed in y.\n\n Parameters\n ----------\n H: array (n_equations, n_code). Decoding matrix H.\n y: array (n_code, n_messages) or (n_code,). Received message(s) in the\n codeword space.\n maxiter: int. Maximum number of iterations of the BP algorithm.\n\n Returns\n -------\n x: array (n_code,) or (n_code, n_messages) the solutions in the\n codeword space.\n\n \"\"\"\n m, n = H.shape\n\n bits_hist, bits_values, nodes_hist, nodes_values = utils._bitsandnodes(H)\n\n _n_bits = np.unique(H.sum(0))\n _n_nodes = np.unique(H.sum(1))\n\n if _n_bits * _n_nodes == 1:\n solver = _logbp_numba_regular\n bits_values = bits_values.reshape(n, -1)\n nodes_values = nodes_values.reshape(m, -1)\n\n else:\n solver = _logbp_numba\n\n var = 10 ** (-snr / 10)\n\n if y.ndim == 1:\n y = y[:, None]\n # step 0: initialization\n\n Lc = 2 * y / var\n _, n_messages = y.shape\n\n Lq = np.zeros(shape=(m, n, n_messages))\n\n Lr = np.zeros(shape=(m, n, n_messages))\n for n_iter in range(maxiter):\n Lq, Lr, L_posteriori = solver(bits_hist, bits_values, nodes_hist,\n nodes_values, Lc, Lq, Lr, n_iter)\n x = np.array(L_posteriori <= 0).astype(int)\n product = utils.incode(H, x)\n if product:\n break\n if n_iter == maxiter - 1:\n warnings.warn(\"\"\"Decoding stopped before convergence. You may want\n to increase maxiter\"\"\")\n return x.squeeze()\n\n\noutput_type_log2 = types.Tuple((float64[:, :, :], float64[:, :, :],\n float64[:, :]))\n\n\n@njit(output_type_log2(int64[:], int64[:], int64[:], int64[:], float64[:, :],\n float64[:, :, :], float64[:, :, :], int64), cache=True)\ndef _logbp_numba(bits_hist, bits_values, nodes_hist, nodes_values, Lc, Lq, Lr,\n n_iter):\n \"\"\"Perform inner ext LogBP solver.\"\"\"\n m, n, n_messages = Lr.shape\n # step 1 : Horizontal\n\n bits_counter = 0\n nodes_counter = 0\n for i in range(m):\n # ni = bits[i]\n ff = bits_hist[i]\n ni = bits_values[bits_counter: bits_counter + ff]\n bits_counter += ff\n for j in ni:\n nij = ni[:]\n\n X = np.ones(n_messages)\n if n_iter == 0:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lc[nij[kk]])\n else:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lq[i, nij[kk]])\n num = 1 + X\n denom = 1 - X\n for ll in range(n_messages):\n if num[ll] == 0:\n Lr[i, j, ll] = -1\n elif denom[ll] == 0:\n Lr[i, j, ll] = 1\n else:\n Lr[i, j, ll] = np.log(num[ll] / denom[ll])\n\n # step 2 : Vertical\n for j in range(n):\n # mj = nodes[j]\n ff = nodes_hist[j]\n mj = nodes_values[nodes_counter: nodes_counter + ff]\n nodes_counter += ff\n for i in mj:\n mji = mj[:]\n Lq[i, j] = Lc[j]\n\n for kk in range(len(mji)):\n if mji[kk] != i:\n Lq[i, j] += Lr[mji[kk], j]\n\n # LLR a posteriori:\n L_posteriori = np.zeros((n, n_messages))\n nodes_counter = 0\n for j in range(n):\n ff = nodes_hist[j]\n mj = nodes_values[nodes_counter: nodes_counter + ff]\n nodes_counter += ff\n L_posteriori[j] = Lc[j] + Lr[mj, j].sum(axis=0)\n\n return Lq, Lr, L_posteriori\n\n\n@njit(output_type_log2(int64[:], int64[:, :], int64[:], int64[:, :],\n float64[:, :], float64[:, :, :], float64[:, :, :],\n int64), cache=True)\ndef _logbp_numba_regular(bits_hist, bits_values, nodes_hist, nodes_values, Lc,\n Lq, Lr, n_iter):\n \"\"\"Perform inner ext LogBP 
solver.\"\"\"\n m, n, n_messages = Lr.shape\n # step 1 : Horizontal\n for i in range(m):\n ni = bits_values[i]\n for j in ni:\n nij = ni[:]\n\n X = np.ones(n_messages)\n if n_iter == 0:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lc[nij[kk]])\n else:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lq[i, nij[kk]])\n num = 1 + X\n denom = 1 - X\n for ll in range(n_messages):\n if num[ll] == 0:\n Lr[i, j, ll] = -1\n elif denom[ll] == 0:\n Lr[i, j, ll] = 1\n else:\n Lr[i, j, ll] = np.log(num[ll] / denom[ll])\n\n # step 2 : Vertical\n for j in range(n):\n mj = nodes_values[j]\n for i in mj:\n mji = mj[:]\n Lq[i, j] = Lc[j]\n\n for kk in range(len(mji)):\n if mji[kk] != i:\n Lq[i, j] += Lr[mji[kk], j]\n\n # LLR a posteriori:\n L_posteriori = np.zeros((n, n_messages))\n for j in range(n):\n mj = nodes_values[j]\n L_posteriori[j] = Lc[j] + Lr[mj, j].sum(axis=0)\n\n return Lq, Lr, L_posteriori\n\n\ndef get_message(tG, x):\n \"\"\"Compute the original `n_bits` message from a `n_code` codeword `x`.\n\n Parameters\n ----------\n tG: array (n_code, n_bits) coding matrix tG.\n x: array (n_code,) decoded codeword of length `n_code`.\n\n Returns\n -------\n message: array (n_bits,). Original binary message.\n\n \"\"\"\n n, k = tG.shape\n\n rtG, rx = utils.gausselimination(tG, x)\n\n message = np.zeros(k).astype(int)\n\n message[k - 1] = rx[k - 1]\n for i in reversed(range(k - 1)):\n message[i] = rx[i]\n message[i] -= utils.binaryproduct(rtG[i, list(range(i+1, k))],\n message[list(range(i+1, k))])\n\n return abs(message)\n",
"import numpy as np\nfrom . import utils\n\n\ndef gray2bin(img):\n \"\"\"Convert a GrayScale Image to a binary array.\"\"\"\n if not len(img.shape) == 2:\n raise ValueError(\"\"\"{} must have 2 dimensions.\n Make sure it\\'s a grayscale image.\"\"\")\n\n height, width = img.shape\n\n img_bin = np.zeros(shape=(height, width, 8), dtype=int)\n\n for i in range(height):\n for j in range(width):\n img_bin[i, j, :] = utils.int2bitarray(img[i, j], 8)\n\n return img_bin\n\n\ndef bin2gray(img_bin):\n \"\"\"Convert a binary Image to a grayscale image.\"\"\"\n height, width, k = img_bin.shape\n img_grayscale = np.zeros(shape=(height, width), dtype=np.uint8)\n\n for i in range(height):\n for j in range(width):\n img_grayscale[i, j] = utils.bitarray2int(img_bin[i, j, :])\n\n return img_grayscale\n\n\ndef rgb2bin(img):\n \"\"\"Convert an RGB Image to a binary array.\"\"\"\n height, width, depth = img.shape\n\n if not depth == 3:\n raise ValueError(\"\"\"{}\\'s 3rd dimension must be equal to 3 (RGB).\n Make sure it\\'s an RGB image.\"\"\")\n\n img_bin = np.zeros(shape=(height, width, 24), dtype=int)\n\n for i in range(height):\n for j in range(width):\n r = utils.int2bitarray(img[i, j, 0], 8)\n g = utils.int2bitarray(img[i, j, 1], 8)\n b = utils.int2bitarray(img[i, j, 2], 8)\n\n img_bin[i, j, :] = np.concatenate((r, g, b))\n\n return img_bin\n\n\ndef bin2rgb(img_bin):\n \"\"\"Convert a binary image to RGB.\"\"\"\n height, width, depth = img_bin.shape\n img_rgb = np.zeros(shape=(height, width, 3), dtype=np.uint8)\n\n for i in range(height):\n for j in range(width):\n r = utils.bitarray2int(img_bin[i, j, :8])\n g = utils.bitarray2int(img_bin[i, j, 8:16])\n b = utils.bitarray2int(img_bin[i, j, 16:])\n\n img_rgb[i, j] = np.array([r, g, b], dtype=np.uint8)\n\n return img_rgb\n"
] | [
[
"numpy.log",
"numpy.ones",
"numpy.array",
"numpy.tanh",
"numpy.zeros"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
code-lab-org/sys611 | [
"3b8c46788dee629a9f2d6b7f84373e041b918ff0",
"3b8c46788dee629a9f2d6b7f84373e041b918ff0",
"3b8c46788dee629a9f2d6b7f84373e041b918ff0"
] | [
"previous/week12/object-oriented/FactorySystemOO.py",
"previous/week7/discreteTimeModels.py",
"previous/week9/inventoryModel.py"
] | [
"\"\"\"\nSYS-611: Example factory model in SimPy (object-oriented).\n\n@author: Paul T. Grogan, [email protected]\n\"\"\"\n\n# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the simpy package \n# see https://simpy.readthedocs.io/en/latest/api_reference for documentation\nimport simpy\n\n# import the numpy package and refer to it as `np`\n# see http://docs.scipy.org/doc/numpy/reference/ for documentation\nimport numpy as np\n\n# import the matplotlib pyplot package and refer to it as `plt`\n# see http://matplotlib.org/api/pyplot_api.html for documentation\nimport matplotlib.pyplot as plt\n\n#%% SECTION TO CONFIGURE SIMULATION\n\n# number of simulation runs to perform\nNUM_RUNS = 1\n# simulation duration (hours)\nSIM_DURATION = 5*8*52\n# number of spares to purchase (S)\nNUM_SPARES = 20\n# number of repairers to hire (R)\nNUM_REPAIRERS = 5\n\n#%% SECTION TO DEFINE SIMULATION\n\nclass Factory(object):\n \"\"\" Defines a factory simulation. \"\"\"\n def __init__(self, env, num_repairers, num_spares):\n \"\"\" Initializes this factory.\n \n Args:\n env (simpy.Environment): the simulation environment\n num_repairers (int): the number of repairers to hire\n num_spares (int): the number of spares to purchase\n \"\"\"\n self.repairers = simpy.Resource(env, capacity=num_repairers) \n self.spares = simpy.Container(env, init=num_spares, capacity=num_spares)\n self.env = env\n self.cost = 0\n self.daily_cost = 3.75*8*num_repairers + 30*num_spares\n \n def run(self):\n \"\"\" Process to run this simulation. \"\"\"\n # launch the 50 machine processes\n for i in range(50):\n self.env.process(factory.operate_machine(i+1))\n # update the daily costs each day\n while True:\n self.cost += self.daily_cost\n yield self.env.timeout(8.0)\n \n def operate_machine(self, machine):\n \"\"\" Process to operate a machine.\n \n Args:\n machine (int): the machine number\n \"\"\"\n while True:\n # wait until the machine breaks\n yield self.env.timeout(np.random.uniform(132,182))\n time_broken = self.env.now\n if NUM_RUNS <= 1:\n print('machine {} broke at {:.2f} ({} spares available)'.format(\n machine, time_broken, self.spares.level))\n # launch the repair process\n self.env.process(self.repair_machine())\n # wait for a spare to become available\n yield self.spares.get(1)\n time_replaced = self.env.now\n if NUM_RUNS <= 1:\n print('machine {} replaced at {:.2f}'.format(machine, time_replaced))\n # update the cost for being out of service\n self.cost += 20*(time_replaced-time_broken)\n \n def repair_machine(self):\n \"\"\" Process to repair a machine. 
\"\"\"\n with self.repairers.request() as request:\n # wait for a repairer to become available\n yield request\n # perform the repair\n yield self.env.timeout(np.random.uniform(4,10))\n # put the machine back in the spares pool\n yield self.spares.put(1)\n if NUM_RUNS <= 1:\n print('repair complete at {:.2f} ({} spares available)'.format(\n self.env.now, self.spares.level))\n\n# arrays to record data\nobs_time = []\nobs_cost = []\nobs_spares = []\n\ndef observe(env, factory):\n \"\"\" Process to observe the factory during a simulation.\n \n Args:\n env (simpy.Environment): the simulation environment\n factory (Factory): the factory\n \"\"\"\n while True:\n obs_time.append(env.now)\n obs_cost.append(factory.cost)\n obs_spares.append(factory.spares.level)\n yield env.timeout(1.0)\n\n#%% SECTION TO RUN ANALYSIS\n\n# array to store outputs\nCOST = []\n\nfor i in range(NUM_RUNS):\n # set the random number seed\n np.random.seed(i)\n \n # create the simpy environment\n env = simpy.Environment()\n # create the factory\n factory = Factory(env, NUM_REPAIRERS, NUM_SPARES)\n # add the factory run process\n env.process(factory.run())\n # add the observation process\n env.process(observe(env, factory))\n # run simulation\n env.run(until=SIM_DURATION)\n # record the final observed cost\n COST.append(obs_cost[-1])\n \n if NUM_RUNS <= 1:\n # output the total cost\n print('Total cost: {:.2f}'.format(factory.cost))\n \n # plot the number of spares available\n plt.figure()\n plt.step(obs_time, obs_spares, where='post')\n plt.xlabel('Time (hour)')\n plt.ylabel('Number Spares Available')\n \n # plot the total cost accumulation\n plt.figure()\n plt.step(obs_time, obs_cost, where='post')\n plt.xlabel('Time (hour)')\n plt.ylabel('Total Cost')\n\n # print final results to console\n print('Factory costs for N={:} runs with R={:} repairers and S={:} spares:'.format(\n NUM_RUNS, NUM_REPAIRERS, NUM_SPARES))\n print('\\n'.join('{:.2f}'.format(i) for i in COST))\n\n#%% SECTION TO WRITE RESULTS TO CSV FILE\n\nimport csv\n\nwith open('factory.csv', 'w') as output:\n writer = csv.writer(output)\n for sample in COST:\n writer.writerow([sample])",
"\"\"\"\nSYS-611 Discrete Time Models.\n\n@author: Paul T. Grogan, [email protected]\n\"\"\"\n\n# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the matplotlib pyplot package and refer to it as `plt`\n# see http://matplotlib.org/api/pyplot_api.html for documentation\nimport matplotlib.pyplot as plt\n\n#%% delay system example\n\n# define the input trajectory\nx = [1,1,0,0,1,0,0,0,1]\n\n# define the state update function\ndef _delta(q, x): \n return x\n \n# define the output function\ndef _lambda(q, x): \n return x\n \n# define the output and state trajectories\ny = [0,0,0,0,0,0,0,0,0]\nq = [0,0,0,0,0,0,0,0,0,0]\n\n# initialize the simulation\nt = 0\nq[0] = 0\n\n# execute the simulation\nwhile t <= 8:\n # record output value\n y[t] = _lambda(q[t], x[t])\n # record state update\n q[t+1] = _delta(q[t], x[t])\n # advance time\n t += 1\n\nplt.figure()\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\nax1.bar(range(9), x, color='k')\nax1.set_ylabel('Input ($x$)')\nax2.bar(range(9), q[:-1], color='k')\nax2.set_ylabel('State ($q$)')\nax3.bar(range(9), y, color='k')\nax3.set_ylabel('Output ($y$)')\nplt.xlabel('Time (ticks)')\nplt.suptitle('Delay System Model')\n\n#%% binary counter example\n\n# define the input trajectory\nx = [1,1,0,0,1,0,0,0,1]\n\n# define the state update function\ndef _delta(q, x): \n return q != x\n \n# define the output function\ndef _lambda(q, x): \n return q and x\n \n# define the output and state trajectories\ny = [0,0,0,0,0,0,0,0,0]\nq = [0,0,0,0,0,0,0,0,0,0]\n\n# initialize the simulation\nt = 0\nq[0] = 0\n\n# execute the simulation\nwhile t <= 8:\n # record output value\n y[t] = _lambda(q[t], x[t])\n # record state update\n q[t+1] = _delta(q[t], x[t])\n # advance time\n t += 1\n\nplt.figure()\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\nax1.bar(range(9), x, color='k')\nax1.set_ylabel('Input ($x$)')\nax2.bar(range(9), q[:-1], color='k')\nax2.set_ylabel('State ($q$)')\nax3.bar(range(9), y, color='k')\nax3.set_ylabel('Output ($y$)')\nplt.xlabel('Time (ticks)')\nplt.suptitle('Binary Counter Model')\n\n#%% delay flip-flop example\n\n# define the input trajectory\nx = [1,1,0,0,1,0,0,0,1]\n\n# define the state update function\ndef _delta(q, x): \n return x\n \n# define the output function\ndef _lambda(q): \n return q\n \n# define the output and state trajectories\ny = [0,0,0,0,0,0,0,0,0]\nq = [0,0,0,0,0,0,0,0,0,0]\n\n# initialize the simulation\nt = 0\nq[0] = 0\n\n# execute the simulation\nwhile t <= 8:\n # record output value\n y[t] = _lambda(q[t])\n # record state update\n q[t+1] = _delta(q[t], x[t])\n # advance time\n t += 1\n\nplt.figure()\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\nax1.bar(range(9), x, color='k')\nax1.set_ylabel('Input ($x$)')\nax2.bar(range(9), q[:-1], color='k')\nax2.set_ylabel('State ($q$)')\nax3.bar(range(9), y, color='k')\nax3.set_ylabel('Output ($y$)')\nplt.xlabel('Time (ticks)')\nplt.suptitle('Delay Flip-Flop Model')",
"\"\"\"\nSYS-611 Inventory Model\n\n@author: Paul T. Grogan, [email protected]\n\"\"\"\n\n# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the numpy package and refer to it as `np`\nimport numpy as np\n# import the matplotlib.pyplot package and refer to it as `plt`\nimport matplotlib.pyplot as plt\n\n# define simulation parameters\nproduct_price = 100.00 # dollars per item\nproduct_cost = 50.00 # dollars per item\nholding_cost = 2.00 # dollars per item per day\narrival_rate = 5 # customers per day\ndemand_lb = 1 # items per customer\ndemand_ub = 4 # items per customer\norder_trigger = 15 # items\norder_target = 20 # items\ndelivery_delay = 2 # days\n\ndef generate_interarrival():\n # generates a customer inter-arrival time\n return np.random.exponential(scale=1./arrival_rate)\n \ndef generate_demand():\n # generates a customer demand\n return np.random.randint(demand_lb, demand_ub+1)\n\n# initialize simulation variables\nt = 0.0\ninventory = order_target\nnum_ordered = 0\ncost_orders = 0.0\ncost_holding = 0.0\nrevenue = 0.0\nt_customer = generate_interarrival()\nt_delivery = float('inf')\n\n# initialize data lists for plotting\nplot_t = []\nplot_P = []\n\ndef print_state():\n \"\"\"Prints this simulation state.\"\"\"\n print('Time = {:.2f}'.format(t))\n print('Inventory on Hand = {:d}'.format(inventory))\n print('Amount on Order = {:d}'.format(num_ordered))\n print('Event list = C: {:.1f}, D: {:.1f}'.format(\n t_customer, t_delivery))\n print('Order Costs = {:.2f}'.format(cost_orders))\n print('Holding Costs = {:.2f}'.format(cost_holding))\n print('Revenue = {:.2f}'.format(revenue))\n print('')\n \n# print the simulation state\nprint_state()\n\n# iterate over the first 14.0 days\nwhile t < 14.0:\n # compute the holding costs for current inventory since last time\n cost_holding += holding_cost*inventory*(min(t_customer, t_delivery) - t)\n # update the simulation time\n t = min(t_customer, t_delivery)\n \n # check if this is a customer event (the tie-breaker if same time)\n if t == t_customer:\n # generate the customer demand\n demand = generate_demand()\n \n # check if inventory exceeds demands - can meet all demand\n if inventory > demand:\n # update revenue and inventory levels after sale\n revenue += product_price*demand\n inventory -= demand\n # otherwise can only meet partial demand\n else:\n # update revenue and inventory levels after sale\n revenue += product_price*inventory\n inventory = 0\n \n # check if inventory falls below order trigger and no order in progress\n if inventory < order_trigger and num_ordered == 0:\n # place an order, update costs and delivery event time\n num_ordered = order_target - inventory\n cost_orders += product_cost*num_ordered\n t_delivery = t + delivery_delay\n \n # generate the next customer arrival\n t_customer += generate_interarrival()\n # check if this is a delivery event\n else:\n # add the ordered products to the inventory \n inventory += num_ordered\n \n # reset the number ordered and delivery time\n num_ordered = 0\n t_delivery = float('inf')\n \n # print the simulation state for first 2.0 days\n if t < 2.0:\n print_state()\n \n # append data for plotting\n plot_t.append(t)\n plot_P.append(revenue-cost_orders-cost_holding)\n \nplt.figure()\nplt.plot(plot_t, plot_P, '-r')\nplt.xlabel('Time ($t$)')\nplt.ylabel('Profit')"
] | [
[
"numpy.random.seed",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.step",
"numpy.random.uniform",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure"
],
[
"numpy.random.exponential",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"numpy.random.randint",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ozgurozkan123/deepchem | [
"7b6248db5f7172ff2a833a1c7c99f48565befe67",
"7b6248db5f7172ff2a833a1c7c99f48565befe67",
"7b6248db5f7172ff2a833a1c7c99f48565befe67"
] | [
"deepchem/dock/binding_pocket.py",
"examples/hiv/hiv_irv.py",
"deepchem/feat/nnscore_utils.py"
] | [
"\"\"\"\nComputes putative binding pockets on protein.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n__author__ = \"Bharath Ramsundar\"\n__copyright__ = \"Copyright 2017, Stanford University\"\n__license__ = \"MIT\"\n\nimport os\nimport tempfile\nimport numpy as np\nfrom subprocess import call\nfrom scipy.spatial import ConvexHull\nfrom deepchem.feat.binding_pocket_features import BindingPocketFeaturizer\nfrom deepchem.feat.fingerprints import CircularFingerprint\nfrom deepchem.models.sklearn_models import SklearnModel\nfrom deepchem.utils import rdkit_util\n\n\ndef extract_active_site(protein_file, ligand_file, cutoff=4):\n \"\"\"Extracts a box for the active site.\"\"\"\n protein_coords = rdkit_util.load_molecule(\n protein_file, add_hydrogens=False)[0]\n ligand_coords = rdkit_util.load_molecule(\n ligand_file, add_hydrogens=True, calc_charges=True)[0]\n num_ligand_atoms = len(ligand_coords)\n num_protein_atoms = len(protein_coords)\n pocket_inds = []\n pocket_atoms = set([])\n for lig_atom_ind in range(num_ligand_atoms):\n lig_atom = ligand_coords[lig_atom_ind]\n for protein_atom_ind in range(num_protein_atoms):\n protein_atom = protein_coords[protein_atom_ind]\n if np.linalg.norm(lig_atom - protein_atom) < cutoff:\n if protein_atom_ind not in pocket_atoms:\n pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))\n # Should be an array of size (n_pocket_atoms, 3)\n pocket_atoms = list(pocket_atoms)\n n_pocket_atoms = len(pocket_atoms)\n pocket_coords = np.zeros((n_pocket_atoms, 3))\n for ind, pocket_ind in enumerate(pocket_atoms):\n pocket_coords[ind] = protein_coords[pocket_ind]\n\n x_min = int(np.floor(np.amin(pocket_coords[:, 0])))\n x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))\n y_min = int(np.floor(np.amin(pocket_coords[:, 1])))\n y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))\n z_min = int(np.floor(np.amin(pocket_coords[:, 2])))\n z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))\n return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms,\n pocket_coords)\n\n\ndef compute_overlap(mapping, box1, box2):\n \"\"\"Computes overlap between the two boxes.\n\n Overlap is defined as % atoms of box1 in box2. 
Note that\n overlap is not a symmetric measurement.\n \"\"\"\n atom1 = set(mapping[box1])\n atom2 = set(mapping[box2])\n return len(atom1.intersection(atom2)) / float(len(atom1))\n\n\ndef get_all_boxes(coords, pad=5):\n \"\"\"Get all pocket boxes for protein coords.\n\n We pad all boxes the prescribed number of angstroms.\n\n TODO(rbharath): It looks like this may perhaps be non-deterministic?\n \"\"\"\n hull = ConvexHull(coords)\n boxes = []\n for triangle in hull.simplices:\n # coords[triangle, 0] gives the x-dimension of all triangle points\n # Take transpose to make sure rows correspond to atoms.\n points = np.array(\n [coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T\n # We voxelize so all grids have integral coordinates (convenience)\n x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])\n x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad\n y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])\n y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad\n z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])\n z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad\n boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))\n return boxes\n\n\ndef boxes_to_atoms(atom_coords, boxes):\n \"\"\"Maps each box to a list of atoms in that box.\n\n TODO(rbharath): This does a num_atoms x num_boxes computations. Is\n there a reasonable heuristic we can use to speed this up?\n \"\"\"\n mapping = {}\n for box_ind, box in enumerate(boxes):\n box_atoms = []\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = box\n print(\"Handing box %d/%d\" % (box_ind, len(boxes)))\n for atom_ind in range(len(atom_coords)):\n atom = atom_coords[atom_ind]\n x_cont = x_min <= atom[0] and atom[0] <= x_max\n y_cont = y_min <= atom[1] and atom[1] <= y_max\n z_cont = z_min <= atom[2] and atom[2] <= z_max\n if x_cont and y_cont and z_cont:\n box_atoms.append(atom_ind)\n mapping[box] = box_atoms\n return mapping\n\n\ndef merge_boxes(box1, box2):\n \"\"\"Merges two boxes.\"\"\"\n (x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1\n (x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2\n x_min = min(x_min1, x_min2)\n y_min = min(y_min1, y_min2)\n z_min = min(z_min1, z_min2)\n x_max = max(x_max1, x_max2)\n y_max = max(y_max1, y_max2)\n z_max = max(z_max1, z_max2)\n return ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n\n\ndef merge_overlapping_boxes(mapping, boxes, threshold=.8):\n \"\"\"Merge boxes which have an overlap greater than threshold.\n\n TODO(rbharath): This merge code is terribly inelegant. It's also quadratic\n in number of boxes. It feels like there ought to be an elegant divide and\n conquer approach here. 
Figure out later...\n \"\"\"\n num_boxes = len(boxes)\n outputs = []\n for i in range(num_boxes):\n box = boxes[0]\n new_boxes = []\n new_mapping = {}\n # If overlap of box with previously generated output boxes, return\n contained = False\n for output_box in outputs:\n # Carry forward mappings\n new_mapping[output_box] = mapping[output_box]\n if compute_overlap(mapping, box, output_box) == 1:\n contained = True\n if contained:\n continue\n # We know that box has at least one atom not in outputs\n unique_box = True\n for merge_box in boxes[1:]:\n overlap = compute_overlap(mapping, box, merge_box)\n if overlap < threshold:\n new_boxes.append(merge_box)\n new_mapping[merge_box] = mapping[merge_box]\n else:\n # Current box has been merged into box further down list.\n # No need to output current box\n unique_box = False\n merged = merge_boxes(box, merge_box)\n new_boxes.append(merged)\n new_mapping[merged] = list(\n set(mapping[box]).union(set(mapping[merge_box])))\n if unique_box:\n outputs.append(box)\n new_mapping[box] = mapping[box]\n boxes = new_boxes\n mapping = new_mapping\n return outputs, mapping\n\n\nclass BindingPocketFinder(object):\n \"\"\"Abstract superclass for binding pocket detectors\"\"\"\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Finds potential binding pockets in proteins.\"\"\"\n raise NotImplementedError\n\n\nclass ConvexHullPocketFinder(BindingPocketFinder):\n \"\"\"Implementation that uses convex hull of protein to find pockets.\n\n Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf\n \"\"\"\n\n def __init__(self, pad=5):\n self.pad = pad\n\n def find_all_pockets(self, protein_file):\n \"\"\"Find list of binding pockets on protein.\"\"\"\n # protein_coords is (N, 3) tensor\n coords = rdkit_util.load_molecule(protein_file)[0]\n return get_all_boxes(coords, self.pad)\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Find list of suitable binding pockets on protein.\"\"\"\n protein_coords = rdkit_util.load_molecule(\n protein_file, add_hydrogens=False, calc_charges=False)[0]\n ligand_coords = rdkit_util.load_molecule(\n ligand_file, add_hydrogens=False, calc_charges=False)[0]\n boxes = get_all_boxes(protein_coords, self.pad)\n mapping = boxes_to_atoms(protein_coords, boxes)\n pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)\n pocket_coords = []\n for pocket in pockets:\n atoms = pocket_atoms_map[pocket]\n coords = np.zeros((len(atoms), 3))\n for ind, atom in enumerate(atoms):\n coords[ind] = protein_coords[atom]\n pocket_coords.append(coords)\n return pockets, pocket_atoms_map, pocket_coords\n\n\nclass RFConvexHullPocketFinder(BindingPocketFinder):\n \"\"\"Uses pre-trained RF model + ConvexHulPocketFinder to select pockets.\"\"\"\n\n def __init__(self, pad=5):\n self.pad = pad\n self.convex_finder = ConvexHullPocketFinder(pad)\n\n # Load binding pocket model\n self.base_dir = tempfile.mkdtemp()\n print(\"About to download trained model.\")\n # TODO(rbharath): Shift refined to full once trained.\n call((\n \"wget -nv -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz\"\n ).split())\n call((\"tar -zxvf pocket_random_refined_RF.tar.gz\").split())\n call((\"mv pocket_random_refined_RF %s\" % (self.base_dir)).split())\n self.model_dir = os.path.join(self.base_dir, \"pocket_random_refined_RF\")\n\n # Fit model on dataset\n self.model = SklearnModel(model_dir=self.model_dir)\n self.model.reload()\n\n # Create featurizers\n 
self.pocket_featurizer = BindingPocketFeaturizer()\n self.ligand_featurizer = CircularFingerprint(size=1024)\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Compute features for a given complex\n\n TODO(rbharath): This has a log of code overlap with\n compute_binding_pocket_features in\n examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor\n to avoid code duplication.\n \"\"\"\n # if not ligand_file.endswith(\".sdf\"):\n # raise ValueError(\"Only .sdf ligand files can be featurized.\")\n # ligand_basename = os.path.basename(ligand_file).split(\".\")[0]\n # ligand_mol2 = os.path.join(\n # self.base_dir, ligand_basename + \".mol2\")\n #\n # # Write mol2 file for ligand\n # obConversion = ob.OBConversion()\n # conv_out = obConversion.SetInAndOutFormats(str(\"sdf\"), str(\"mol2\"))\n # ob_mol = ob.OBMol()\n # obConversion.ReadFile(ob_mol, str(ligand_file))\n # obConversion.WriteFile(ob_mol, str(ligand_mol2))\n #\n # # Featurize ligand\n # mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)\n # if mol is None:\n # return None, None\n # # Default for CircularFingerprint\n # n_ligand_features = 1024\n # ligand_features = self.ligand_featurizer.featurize([mol])\n #\n # # Featurize pocket\n # pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(\n # protein_file, ligand_file)\n # n_pockets = len(pockets)\n # n_pocket_features = BindingPocketFeaturizer.n_features\n #\n # features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))\n # pocket_features = self.pocket_featurizer.featurize(\n # protein_file, pockets, pocket_atoms_map, pocket_coords)\n # # Note broadcast operation\n # features[:, :n_pocket_features] = pocket_features\n # features[:, n_pocket_features:] = ligand_features\n # dataset = NumpyDataset(X=features)\n # pocket_preds = self.model.predict(dataset)\n # pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))\n #\n # # Find pockets which are active\n # active_pockets = []\n # active_pocket_atoms_map = {}\n # active_pocket_coords = []\n # for pocket_ind in range(len(pockets)):\n # #################################################### DEBUG\n # # TODO(rbharath): For now, using a weak cutoff. Fix later.\n # #if pocket_preds[pocket_ind] == 1:\n # if pocket_pred_proba[pocket_ind][1] > .15:\n # #################################################### DEBUG\n # pocket = pockets[pocket_ind]\n # active_pockets.append(pocket)\n # active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]\n # active_pocket_coords.append(pocket_coords[pocket_ind])\n # return active_pockets, active_pocket_atoms_map, active_pocket_coords\n # # TODO(LESWING)\n raise ValueError(\"Karl Implement\")\n",
"\"\"\"\nScript that trains multitask models on hiv dataset.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport deepchem as dc\nfrom hiv_datasets import load_hiv\n\n# Only for debug!\nnp.random.seed(123)\n\n# Load hiv dataset\nn_features = 512\nhiv_tasks, hiv_datasets, transformers = load_hiv()\ntrain_dataset, valid_dataset, test_dataset = hiv_datasets\n\n# Fit models\nmetric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)\n\ntransformer = dc.trans.IRVTransformer(10, len(hiv_tasks), train_dataset)\ntrain_dataset = transformer.transform(train_dataset)\nvalid_dataset = transformer.transform(valid_dataset)\n\nmodel = dc.models.TensorflowMultiTaskIRVClassifier(\n len(hiv_tasks), K=10, batch_size=50, learning_rate=0.001)\n\n# Fit trained model\nmodel.fit(train_dataset)\nmodel.save()\n\nprint(\"Evaluating model\")\ntrain_scores = model.evaluate(train_dataset, [metric], transformers)\nvalid_scores = model.evaluate(valid_dataset, [metric], transformers)\n\nprint(\"Train scores\")\nprint(train_scores)\n\nprint(\"Validation scores\")\nprint(valid_scores)\n",
"\"\"\"\nHelper Classes and Functions for docking fingerprint computation.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n__author__ = \"Bharath Ramsundar and Jacob Durrant\"\n__license__ = \"GNU General Public License\"\n\nimport math\nimport os\nimport subprocess\nimport numpy as np\nimport deepchem.utils.rdkit_util as rdkit_util\n\n\ndef force_partial_charge_computation(mol):\n \"\"\"Force computation of partial charges for molecule.\n\n Parameters\n ----------\n mol: Rdkit Mol\n Molecule on which we compute partial charges.\n \"\"\"\n rdkit_util.compute_charges(mol)\n\n\ndef pdbqt_to_pdb(input_file, output_directory):\n \"\"\"Convert pdbqt file to pdb file.\n\n Parameters\n ----------\n input_file: String\n Path to input file.\n output_directory: String\n Path to desired output directory.\n \"\"\"\n print(input_file, output_directory)\n raise ValueError(\"Not yet implemented\")\n\n\ndef hydrogenate_and_compute_partial_charges(input_file,\n input_format,\n hyd_output=None,\n pdbqt_output=None,\n protein=True,\n verbose=True):\n \"\"\"Outputs a hydrogenated pdb and a pdbqt with partial charges.\n\n Takes an input file in specified format. Generates two outputs:\n\n -) A pdb file that contains a hydrogenated (at pH 7.4) version of\n original compound.\n -) A pdbqt file that has computed Gasteiger partial charges. This pdbqt\n file is build from the hydrogenated pdb.\n\n TODO(rbharath): Can do a bit of refactoring between this function and\n pdbqt_to_pdb.\n\n Parameters\n ----------\n input_file: String\n Path to input file.\n input_format: String\n Name of input format.\n \"\"\"\n mol = rdkit_util.load_molecule(\n input_file, add_hydrogens=True, calc_charges=True)[1]\n if verbose:\n print(\"Create pdb with hydrogens added\")\n rdkit_util.write_molecule(mol, str(hyd_output), is_protein=protein)\n if verbose:\n print(\"Create a pdbqt file from the hydrogenated pdb above.\")\n rdkit_util.write_molecule(mol, str(pdbqt_output), is_protein=protein)\n\n if protein:\n print(\"Removing ROOT/ENDROOT/TORSDOF\")\n with open(pdbqt_output) as f:\n pdbqt_lines = f.readlines()\n filtered_lines = []\n for line in pdbqt_lines:\n\n filtered_lines.append(line)\n with open(pdbqt_output, \"w\") as f:\n f.writelines(filtered_lines)\n\n\nclass AromaticRing(object):\n \"\"\"Holds information about an aromatic ring.\"\"\"\n\n def __init__(self, center, indices, plane_coeff, radius):\n \"\"\"\n Initializes an aromatic.\n\n Parameters\n ----------\n center: float\n Center of the ring.\n indices: list\n List of the atom indices for ring atoms.\n plane_coeff: list\n A list of elements [a, b, c, d] that define a plane by equation\n a x + b y + c z = d.\n radius: float\n Ring radius from center.\n \"\"\"\n self.center = center\n self.indices = indices\n # a*x + b*y + c*z = dI think that\n self.plane_coeff = plane_coeff\n self.radius = radius\n\n\ndef average_point(points):\n \"\"\"Returns the point with averaged coordinates of arguments.\n\n Parameters\n ----------\n points: list\n List of point objects.\n Returns\n -------\n pavg: Point object\n Has coordinates the arithmetic average of those of p1 and p2.\n \"\"\"\n coords = np.array([0, 0, 0])\n for point in points:\n coords += point.as_array().astype(coords.dtype)\n if len(points) > 0:\n return Point(coords=coords / len(points))\n else:\n return Point(coords=coords)\n\n\nclass Point(object):\n \"\"\"\n Simple implementation for a point in 3-space.\n \"\"\"\n\n def __init__(self, x=None, 
y=None, z=None, coords=None):\n \"\"\"\n Inputs can be specified either by explicitly providing x, y, z coords\n or by providing a numpy array of length 3.\n\n Parameters\n ----------\n x: float\n X-coord.\n y: float\n Y-coord.\n z: float\n Z-coord.\n coords: np.ndarray\n Should be of length 3 in format np.array([x, y, z])\n Raises\n ------\n ValueError: If no arguments are provided.\n \"\"\"\n if x and y and z:\n #self.x, self.y, self.z = x, y, z\n self.coords = np.array([x, y, z])\n elif coords is not None: # Implicit eval doesn't work on numpy arrays.\n #self.x, self.y, self.z = coords[0], coords[1], coords[2]\n self.coords = coords\n else:\n raise ValueError(\"Must specify coordinates for Point!\")\n\n # TODO(bramsundar): Should this be __copy__?\n def copy_of(self):\n \"\"\"Return a copy of this point.\"\"\"\n return Point(coords=np.copy(self.coords))\n\n def dist_to(self, point):\n \"\"\"Distance (in 2-norm) from this point to another.\"\"\"\n return np.linalg.norm(self.coords - point.coords)\n\n def magnitude(self):\n \"\"\"Magnitude of this point (in 2-norm).\"\"\"\n return np.linalg.norm(self.coords)\n #return self.dist_to(Point(coords=np.array([0, 0, 0])))\n\n def as_array(self):\n \"\"\"Return the coordinates of this point as array.\"\"\"\n #return np.array([self.x, self.y, self.z])\n return self.coords\n\n\nclass Atom(object):\n \"\"\"\n Implements a container class for atoms. This class contains useful\n annotations about the atom.\n \"\"\"\n\n def __init__(self,\n atomname=\"\",\n residue=\"\",\n coordinates=Point(coords=np.array([99999, 99999, 99999])),\n element=\"\",\n pdb_index=\"\",\n line=\"\",\n atomtype=\"\",\n indices_of_atoms_connecting=None,\n charge=0,\n resid=0,\n chain=\"\",\n structure=\"\",\n comment=\"\"):\n \"\"\"\n Initializes an atom.\n\n Assumes that atom is loaded from a PDB file.\n\n Parameters\n ----------\n atomname: string\n Name of atom. Note that atomname is not the same as residue since\n atomnames often have extra annotations (e.g., CG, NZ, etc).\n residue: string:\n Name of protein residue this atom belongs to.\n element: string\n Name of atom's element.\n coordinate: point\n A point object (x, y, z are in Angstroms).\n pdb_index: string\n Index of the atom in source PDB file.\n line: string\n The line in the PDB file which specifies this atom.\n atomtype: string\n Element of atom. This differs from atomname which typically has extra\n annotations (e.g. CA, OA, HD, etc)\n IndicesOfAtomConnecting: list\n The indices (in a PDB object) of all atoms bonded to this one.\n charge: float\n Associated electrostatic charge.\n resid: int\n The residue number in the receptor (listing the protein as a chain from\n N-Terminus to C-Terminus). Assumes this is a protein atom.\n chain: string\n Chain identifier for molecule. 
See PDB spec.\n structure: string\n One of ALPHA, BETA, or OTHER for the type of protein secondary\n structure this atom resides in (assuming this is a receptor atom).\n comment: string\n Either LIGAND or RECEPTOR depending on whether this is a ligand or\n receptor atom.\n \"\"\"\n self.atomname = atomname\n self.residue = residue\n self.coordinates = coordinates\n self.element = element\n self.pdb_index = pdb_index\n self.line = line\n self.atomtype = atomtype\n if indices_of_atoms_connecting is not None:\n self.indices_of_atoms_connecting = indices_of_atoms_connecting\n else:\n self.indices_of_atoms_connecting = []\n self.charge = charge\n self.resid = resid\n self.chain = chain\n self.structure = structure\n self.comment = comment\n\n def copy_of(self):\n \"\"\"Make a copy of this atom.\"\"\"\n theatom = Atom()\n theatom.atomname = self.atomname\n theatom.residue = self.residue\n theatom.coordinates = self.coordinates.copy_of()\n theatom.element = self.element\n theatom.pdb_index = self.pdb_index\n theatom.line = self.line\n theatom.atomtype = self.atomtype\n theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]\n theatom.charge = self.charge\n theatom.resid = self.resid\n theatom.chain = self.chain\n theatom.structure = self.structure\n theatom.comment = self.comment\n\n return theatom\n\n def create_pdb_line(self, index):\n \"\"\"\n Generates appropriate ATOM line for pdb file.\n\n Parameters\n ----------\n index: int\n Index in associated PDB file.\n \"\"\"\n output = \"ATOM \"\n output = (\n output + str(index).rjust(6) + self.atomname.rjust(5) +\n self.residue.rjust(4) + self.chain.rjust(2) + str(self.resid).rjust(4))\n coords = self.coordinates.as_array() # [x, y, z]\n output = output + (\"%.3f\" % coords[0]).rjust(12)\n output = output + (\"%.3f\" % coords[1]).rjust(8)\n output = output + (\"%.3f\" % coords[2]).rjust(8)\n output = output + self.element.rjust(24)\n return output\n\n def number_of_neighbors(self):\n \"\"\"Reports number of neighboring atoms.\"\"\"\n return len(self.indices_of_atoms_connecting)\n\n def add_neighbor_atom_indices(self, indices):\n \"\"\"\n Adds atoms with provided PDB indices as neighbors.\n\n Parameters\n ----------\n index: list\n List of indices of neighbors in PDB object.\n \"\"\"\n for index in indices:\n if index not in self.indices_of_atoms_connecting:\n self.indices_of_atoms_connecting.append(index)\n\n def side_chain_or_backbone(self):\n \"\"\"Determine whether receptor atom belongs to residue sidechain or backbone.\n \"\"\"\n # TODO(rbharath): Should this be an atom function?\n if (self.atomname.strip() == \"CA\" or self.atomname.strip() == \"C\" or\n self.atomname.strip() == \"O\" or self.atomname.strip() == \"N\"):\n return \"BACKBONE\"\n else:\n return \"SIDECHAIN\"\n\n def read_atom_pdb_line(self, line):\n \"\"\"\n TODO(rbharath): This method probably belongs in the PDB class, and not\n in the Atom class.\n\n Reads an ATOM or HETATM line from PDB and instantiates fields.\n\n Atoms in PDBs are represented by ATOM or HETATM statements. 
ATOM and\n HETATM statements follow the following record format:\n\n (see ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_Letter.pdf)\n\n COLUMNS DATA TYPE FIELD DEFINITION\n -------------------------------------------------------------------------------------\n 1 - 6 Record name \"ATOM \"/\"HETATM\"\n 7 - 11 Integer serial Atom serial number.\n 13 - 16 Atom name Atom name.\n 17 Character altLoc Alternate location indicator.\n 18 - 20 Residue name resName Residue name.\n 22 Character chainID Chain identifier.\n 23 - 26 Integer resSeq Residue sequence number.\n 27 AChar iCode Code for insertion of residues.\n 31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.\n 39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.\n 47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.\n 55 - 60 Real(6.2) occupancy Occupancy.\n 61 - 66 Real(6.2) tempFactor Temperature factor.\n 77 - 78 LString(2) element Element symbol, right-justified.\n 79 - 80 LString(2) charge Charge on the atom.\n \"\"\"\n self.line = line\n self.atomname = line[11:16].strip()\n\n if len(self.atomname) == 1:\n self.atomname = self.atomname + \" \"\n elif len(self.atomname) == 2:\n self.atomname = self.atomname + \" \"\n elif len(self.atomname) == 3:\n # This line is necessary for babel to work, though many PDBs in\n # the PDB would have this line commented out\n self.atomname = self.atomname + \" \"\n\n self.coordinates = Point(coords=np.array(\n [float(line[30:38]), float(line[38:46]), float(line[46:54])]))\n\n # now atom type (for pdbqt)\n if line[77:79].strip():\n self.atomtype = line[77:79].strip().upper()\n elif self.atomname:\n # If atomtype is not specified, but atomname is, set atomtype to the\n # first letter of atomname. This heuristic suffices for proteins,\n # since no two-letter elements appear in standard amino acids.\n self.atomtype = self.atomname[:1]\n else:\n self.atomtype = \"\"\n\n if line[69:76].strip() != \"\":\n self.charge = float(line[69:76])\n else:\n self.charge = 0.0\n\n if self.element == \"\": # try to guess at element from name\n two_letters = self.atomname[0:2].strip().upper()\n valid_two_letters = [\n \"BR\", \"CL\", \"BI\", \"AS\", \"AG\", \"LI\", \"HG\", \"MG\", \"MN\", \"RH\", \"ZN\", \"FE\"\n ]\n if two_letters in valid_two_letters:\n self.element = two_letters\n else: #So, just assume it's the first letter.\n # Any number needs to be removed from the element name\n self.element = self.atomname\n self.element = self.element.replace('0', '')\n self.element = self.element.replace('1', '')\n self.element = self.element.replace('2', '')\n self.element = self.element.replace('3', '')\n self.element = self.element.replace('4', '')\n self.element = self.element.replace('5', '')\n self.element = self.element.replace('6', '')\n self.element = self.element.replace('7', '')\n self.element = self.element.replace('8', '')\n self.element = self.element.replace('9', '')\n self.element = self.element.replace('@', '')\n\n self.element = self.element[0:1].strip().upper()\n\n self.pdb_index = line[6:12].strip()\n self.residue = line[16:20]\n # this only uses the rightmost three characters, essentially\n # removing unique rotamer identification\n self.residue = \" \" + self.residue[-3:]\n\n if line[23:26].strip() != \"\":\n self.resid = int(line[23:26])\n else:\n self.resid = 1\n\n self.chain = line[21:22]\n if self.residue.strip() == \"\":\n self.residue = \" MOL\"\n\n\nclass Charged(object):\n \"\"\"\n A class that represeents a charged atom.\n \"\"\"\n\n def 
__init__(self, coordinates, indices, positive):\n \"\"\"\n Parameters\n ----------\n coordinates: point\n Coordinates of atom.\n indices: list\n Contains boolean true or false entries for self and neighbors to\n specify if positive or negative charge\n positive: bool\n Whether this atom is positive or negative.\n \"\"\"\n self.coordinates = coordinates\n self.indices = indices\n self.positive = positive\n\n\ndef vector_subtraction(point1, point2): # point1 - point2\n \"\"\"Subtracts the coordinates of the provided points.\"\"\"\n return Point(coords=point1.as_array() - point2.as_array())\n\n\ndef cross_product(point1, point2): # never tested\n \"\"\"Calculates the cross-product of provided points.\"\"\"\n return Point(coords=np.cross(point1.as_array(), point2.as_array()))\n\n\ndef vector_scalar_multiply(point, scalar):\n \"\"\"Multiplies the provided point by scalar.\"\"\"\n return Point(coords=scalar * point.as_array())\n\n\ndef dot_product(point1, point2):\n \"\"\"Dot product of points.\"\"\"\n return np.dot(point1.as_array(), point2.as_array())\n\n\ndef dihedral(point1, point2, point3, point4): # never tested\n \"\"\"Compute dihedral angle between 4 points.\n\n TODO(rbharath): Write a nontrivial test for this.\n \"\"\"\n\n b1 = vector_subtraction(point2, point1)\n b2 = vector_subtraction(point3, point2)\n b3 = vector_subtraction(point4, point3)\n\n b2Xb3 = cross_product(b2, b3)\n b1Xb2 = cross_product(b1, b2)\n\n b1XMagb2 = vector_scalar_multiply(b1, b2.magnitude())\n radians = math.atan2(dot_product(b1XMagb2, b2Xb3), dot_product(b1Xb2, b2Xb3))\n return radians\n\n\ndef angle_between_three_points(point1, point2, point3):\n \"\"\"Computes the angle (in radians) between the three provided points.\"\"\"\n return angle_between_points(\n vector_subtraction(point1, point2), vector_subtraction(point3, point2))\n\n\ndef angle_between_points(point1, point2):\n \"\"\"Computes the angle (in radians) between two points.\"\"\"\n return math.acos(\n dot_product(point1, point2) / (point1.magnitude() * point2.magnitude()))\n\n\ndef normalized_vector(point):\n \"\"\"Normalize provided point.\"\"\"\n return Point(coords=point.as_array() / np.linalg.norm(point.as_array()))\n\n\ndef distance(point1, point2):\n \"\"\"Computes distance between two points.\"\"\"\n return point1.dist_to(point2)\n\n\ndef project_point_onto_plane(point, plane_coefficients):\n \"\"\"Finds nearest point on specified plane to given point.\n\n Parameters\n ----------\n point: Point\n Given point\n plane_coefficients: list\n [a, b, c, d] where place equation is ax + by + cz = d\n \"\"\"\n # The normal vector to plane is n = [a, b, c]\n offset = plane_coefficients[3]\n normal = np.array(plane_coefficients[:3])\n # We first shift by basepoint (a point on given plane) to make math\n # simpler. basepoint is given by d/||n||^2 * n\n basepoint = (offset / np.linalg.norm(normal)**2) * normal\n diff = point.as_array() - basepoint\n # The perpendicular component of diff to plane is\n # (n^T diff / ||n||^2) * n\n perp = (np.dot(normal, diff) / np.linalg.norm(normal)**2) * normal\n closest = basepoint + (diff - perp)\n return Point(coords=np.array(closest))\n"
] | [
[
"numpy.amax",
"numpy.amin",
"numpy.linalg.norm",
"numpy.ceil",
"numpy.floor",
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.seed"
],
[
"numpy.dot",
"numpy.copy",
"numpy.array",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shanyi15/tensorflow | [
"ebb3429856441149e41388dfbea59496f8dbf17b"
] | [
"tensorflow/python/saved_model/load_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for checkpointable object SavedModel loading.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.training.checkpointable import tracking\n\n\nclass LoadTest(test.TestCase):\n\n def cycle(self, obj):\n path = tempfile.mkdtemp(prefix=self.get_temp_dir())\n save.save(obj, path, signatures={})\n return load.load(path)\n\n def test_structure_import(self):\n root = tracking.Checkpointable()\n root.dep_one = tracking.Checkpointable()\n root.dep_two = tracking.Checkpointable()\n root.dep_two.dep = tracking.Checkpointable()\n root.dep_three = root.dep_two.dep\n imported = self.cycle(root)\n self.assertIs(imported.dep_three, imported.dep_two.dep)\n self.assertIsNot(imported.dep_one, imported.dep_two)\n\n def test_variables(self):\n root = tracking.Checkpointable()\n root.v1 = variables.Variable(1., trainable=True)\n root.v2 = variables.Variable(2., trainable=False)\n imported = self.cycle(root)\n self.assertEquals(imported.v1.numpy(), 1.0)\n self.assertTrue(imported.v1.trainable)\n self.assertEquals(imported.v2.numpy(), 2.0)\n self.assertFalse(imported.v2.trainable)\n\n def test_capture_variables(self):\n root = tracking.Checkpointable()\n root.weights = variables.Variable(2.)\n root.f = def_function.function(\n lambda x: root.weights * x,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n imported = self.cycle(root)\n self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())\n imported.weights.assign(4.0)\n self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())\n\n def _make_asset(self, contents):\n filename = tempfile.mktemp(prefix=self.get_temp_dir())\n with open(filename, \"w\") as f:\n f.write(contents)\n return filename\n\n def test_assets(self):\n file1 = self._make_asset(\"contents 1\")\n file2 = self._make_asset(\"contents 2\")\n\n root = tracking.Checkpointable()\n root.asset1 = tracking.TrackableAsset(file1)\n root.asset2 = tracking.TrackableAsset(file2)\n\n save_dir = os.path.join(self.get_temp_dir(), \"save_dir\")\n save.save(root, save_dir, signatures={})\n\n file_io.delete_file(file1)\n file_io.delete_file(file2)\n load_dir = os.path.join(self.get_temp_dir(), \"load_dir\")\n file_io.rename(save_dir, load_dir)\n\n imported = load.load(load_dir)\n with 
open(imported.asset1.asset_path.numpy(), \"r\") as f:\n self.assertEquals(\"contents 1\", f.read())\n with open(imported.asset2.asset_path.numpy(), \"r\") as f:\n self.assertEquals(\"contents 2\", f.read())\n\n def test_capture_assets(self):\n root = tracking.Checkpointable()\n root.vocab = tracking.TrackableAsset(self._make_asset(\"contents\"))\n root.f = def_function.function(\n lambda: root.vocab.asset_path,\n input_signature=[])\n imported = self.cycle(root)\n origin_output = root.f().numpy()\n imported_output = imported.f().numpy()\n self.assertNotEqual(origin_output, imported_output)\n with open(imported_output, \"r\") as f:\n self.assertEquals(\"contents\", f.read())\n\n def test_dedup_assets(self):\n vocab = self._make_asset(\"contents\")\n root = tracking.Checkpointable()\n root.asset1 = tracking.TrackableAsset(vocab)\n root.asset2 = tracking.TrackableAsset(vocab)\n imported = self.cycle(root)\n self.assertEqual(imported.asset1.asset_path.numpy(),\n imported.asset2.asset_path.numpy())\n\n def test_implicit_input_signature(self):\n @def_function.function\n def func(x):\n return 2 * x\n\n root = tracking.Checkpointable()\n root.f = func\n\n # Add two traces.\n root.f(constant_op.constant(1.))\n root.f(constant_op.constant(1))\n\n imported = self.cycle(root)\n\n self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())\n self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())\n\n def test_explicit_input_signature(self):\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def func(x):\n return 2 * x\n\n root = tracking.Checkpointable()\n root.f = func\n\n imported = self.cycle(root)\n self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())\n\n def test_nested_functions(self):\n f = def_function.function(\n lambda x: x*2.0,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n g = def_function.function(\n lambda x: f(x) + 1.0,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n\n root = tracking.Checkpointable()\n root.g = g\n imported = self.cycle(root)\n imported.g(constant_op.constant([1.0]))\n\n def test_function_with_default_bool_input(self):\n\n def func(x, training=False):\n if training:\n return 2 * x\n else:\n return 7\n\n root = tracking.Checkpointable()\n root.f = def_function.function(func)\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(7, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())\n\n def test_positional_arguments(self):\n def func(x, training=False, abc=7.1, defg=7.7):\n del abc\n if training:\n return 2 * x\n if defg == 7:\n return 6\n else:\n return 7\n\n root = tracking.Checkpointable()\n root.f = def_function.function(func)\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(7, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())\n self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())\n\n def test_member_function(self):\n class 
CheckpointableWithMember(tracking.Checkpointable):\n\n def __init__(self):\n super(CheckpointableWithMember, self).__init__()\n self._some_value = 20\n\n @def_function.function\n def f(self, x, training=False):\n if training:\n return 2 * x\n else:\n return 7 + self._some_value\n\n root = CheckpointableWithMember()\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(27, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())\n\n def test_side_effect_listing(self):\n class M(tracking.Checkpointable):\n\n def __init__(self):\n super(M, self).__init__()\n self.var = None\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def f(self, x):\n if self.var is None:\n self.var = variables.Variable(2.)\n return x * self.var\n\n m = M()\n self.cycle(m)\n self.assertEquals(4.0, m.f(constant_op.constant(2.0)).numpy())\n\n def test_basic_backprop(self):\n weight = variables.Variable(1., trainable=True)\n bias = variables.Variable(0., trainable=True)\n g = def_function.function(\n lambda x: x*weight + bias,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n\n root = tracking.Checkpointable()\n root.weight = weight\n root.bias = bias\n root.g = g\n imported = self.cycle(root)\n with backprop.GradientTape(watch_accessed_variables=True) as t:\n x = constant_op.constant([3.5])\n loss = imported.g(x)\n grad = t.gradient(loss, [imported.weight, imported.bias])\n self.assertAllClose(grad, [3.5, 1.0])\n\n def test_callable(self):\n class M1(tracking.Checkpointable):\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def __call__(self, x):\n return x\n\n root = tracking.Checkpointable()\n root.m1 = M1()\n root.m2 = tracking.Checkpointable()\n root.m2.__call__ = def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(\n lambda x: x*3.0)\n imported = self.cycle(root)\n x = constant_op.constant(1.0)\n\n self.assertTrue(callable(imported.m1))\n self.assertAllEqual(root.m1(x), imported.m1(x))\n\n # Note: `root.m2` was not callable since `__call__` attribute was set\n # into the instance and not on the class. But after a serialization cycle\n # that starts to work.\n self.assertTrue(callable(imported.m2))\n self.assertAllEqual(root.m2.__call__(x), imported.m2(x))\n\n # Verify that user objects without `__call__` attribute are not callable.\n self.assertFalse(callable(imported))\n\n def test_chain_callable(self):\n func = def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(\n lambda x: x*3.0)\n root = tracking.Checkpointable()\n root.__call__ = tracking.Checkpointable()\n root.__call__.__call__ = tracking.Checkpointable()\n root.__call__.__call__.__call__ = func\n\n imported = self.cycle(root)\n self.assertTrue(callable(imported))\n x = constant_op.constant(1.0)\n self.assertAllEqual(imported(x).numpy(), 3.0)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.lib.io.file_io.rename",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.training.checkpointable.tracking.Checkpointable",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.eager.test.main",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.lib.io.file_io.delete_file",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.training.checkpointable.tracking.TrackableAsset",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
}
] |
joytianya/google_bert | [
"06f131241163a745747da33c5f563abe4413897b"
] | [
"zuo/bert/tokenization.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tokenization classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport unicodedata\nimport six\nimport tensorflow as tf\n\n\ndef validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. 
If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n #print(\"items:\",items) #['[CLS]', '日', '##期', ',', '但', '被', '##告', '金', '##东', '##福', '载', '##明', '[MASK]', 'U', '##N', '##K', ']', '保', '##证', '本', '##月', '1', '##4', '[MASK]', '到', '##位', ',', '2', '##0', '##1', '##5', '年', '6', '[MASK]', '1', '##1', '日', '[', 'U', '##N', '##K', ']', ',', '原', '##告', '[MASK]', '认', '##可', '于', '2', '##0', '##1', '##5', '[MASK]', '6', '月', '[MASK]', '[MASK]', '日', '##向', '被', '##告', '主', '##张', '权', '##利', '。', '而', '[MASK]', '[MASK]', '自', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '年', '6', '月', '1', '##1', '日', '[SEP]', '原', '##告', '于', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '日', '起', '##诉', ',', '主', '##张', '保', '##证', '责', '##任', ',', '已', '超', '##过', '保', '##证', '期', '##限', '[MASK]', '保', '##证', '人', '依', '##法', '不', '##再', '承', '##担', '保', '##证', '[MASK]', '[MASK]', '[MASK]', '[SEP]']\n for i,item in enumerate(items):\n #print(i,\"item:\",item) # ##期\n output.append(vocab[item])\n return output\n\n\ndef convert_tokens_to_ids(vocab, tokens):\n return convert_by_vocab(vocab, tokens)\n\n\ndef convert_ids_to_tokens(inv_vocab, ids):\n return convert_by_vocab(inv_vocab, ids)\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer(object):\n \"\"\"Runs end-to-end tokenziation.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True):\n self.vocab = load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n 
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n return convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n return convert_by_vocab(self.inv_vocab, ids)\n\n\nclass BasicTokenizer(object):\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer(object):\n \"\"\"Runs WordPiece tokenziation.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if 
cat.startswith(\"P\"):\n return True\n return False\n"
] | [
[
"tensorflow.gfile.GFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
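A minimal usage sketch for the FullTokenizer defined in zuo/bert/tokenization.py above; it assumes a TF 1.x environment (the vocab loader relies on tf.gfile.GFile), that the module is importable, and that "vocab.txt" stands in for a real BERT vocabulary file.

```python
import tokenization  # zuo/bert/tokenization.py, assumed to be on sys.path

# "vocab.txt" is a placeholder: one wordpiece token per line, as in the BERT releases.
tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)

tokens = tokenizer.tokenize("unaffable text")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)  # e.g. ['un', '##aff', '##able', 'text'] if those pieces are in the vocab
print(ids)
```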
imandr/RLpy | [
"f01cf7af47b6054e4e52d663ceafc463df6f6166",
"f01cf7af47b6054e4e52d663ceafc463df6f6166",
"f01cf7af47b6054e4e52d663ceafc463df6f6166"
] | [
"rlpy/gradnet/samples/single_agent/single_agent_ttt_env.py",
"rlpy/gradnet/samples/single_agent/tank_target_env.py",
"rlpy/keras/samples/single_agent/train.py"
] | [
"#\n# Tic Tac Toe\n#\n\nimport numpy as np\nfrom gym import spaces\n\nWinMasks = [\n [\n [1,0,0],\n [1,0,0],\n [1,0,0],\n ],\n [\n [0,1,0],\n [0,1,0],\n [0,1,0],\n ],\n [\n [0,0,1],\n [0,0,1],\n [0,0,1],\n ],\n \n [\n [1,1,1],\n [0,0,0],\n [0,0,0],\n ],\n [\n [0,0,0],\n [1,1,1],\n [0,0,0],\n ],\n [\n [0,0,0],\n [0,0,0],\n [1,1,1],\n ],\n\n [\n [1,0,0],\n [0,1,0],\n [0,0,1],\n ],\n [\n [0,0,1],\n [0,1,0],\n [1,0,0],\n ]\n]\n\nWinMasks = np.array(WinMasks).reshape((-1,9))\n\nclass SingleAgentTicTacToeEnv(object):\n \n NActions = 9\n ObservationShape = (9,)\n NState = 9\n \n def __init__(self):\n self.Board = np.zeros((9,))\n self.action_space = spaces.Discrete(self.NActions)\n high = np.ones((self.NActions,))\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n \n def reset(self):\n self.Done = False\n self.Board[...] = 0.0\n self.BoardHistory = []\n self.Side = 1\n self.FirstMove = True\n return self.observation(self.Side), {\"valid_actions\":np.array([1,1,0,0,1,0,0,0,0], dtype=np.float32)}\n \n def observation(self, side):\n return self.Board * side\n \n def step(self, action):\n win = False\n draw = False\n side = self.Side\n other_side = -side\n color = side\n \n reward = 0.0\n done = False\n\n if self.Board[action] != 0:\n # invalid move\n reward = -1.0\n done = True\n else:\n self.Board[action] = side\n self.BoardHistory.append(self.Board.reshape((3,3)).copy())\n \n for win_mask in WinMasks:\n masked = self.Board*color*win_mask\n if np.sum(masked) == 3:\n reward = 1.0\n done = True\n break\n \n if np.all(self.Board != 0):\n done = True # draw\n self.Side = other_side\n self.Done = done\n self.Reward = reward\n return self.observation(self.Side), reward, done, {\"valid_actions\":np.asarray(self.Board==0, dtype=np.float32)}\n \n def render(self):\n if self.Done:\n last_move = -self.Side\n history = self.BoardHistory\n sep = \"+---\"*len(history) + \"+\"\n lines = [sep]\n for irow in (0,1,2):\n line = \"|\"\n for b in history:\n row = \"\".join(\".xo\"[int(c)] for c in b[irow])\n line += row + \"|\"\n lines.append(line)\n outcome = \"draw\"\n if self.Reward:\n outcome = \"%s won\" % (\".xo\"[int(last_move)])\n lines.append(sep + \" \" + outcome)\n print(\"\\n\".join(lines))\n \nif __name__ == \"__main__\":\n \n import random\n \n def show_board(board):\n sep = \"+---\"*3 + \"+\"\n out = [sep]\n for row in board.reshape((3,3)):\n line = \"| \"\n for x in row:\n line += \" OX\"[int(x)] + \" | \"\n out.append(line)\n out.append(sep)\n return \"\\n\".join(out)\n \n class Agent(object):\n \n def __init__(self, side):\n self.Side = side\n self.Sign = \"XO\"[side]\n self.Color = side*2-1\n \n def reset(self):\n pass\n \n def action(self, reward, observation, available_actions):\n print(f\"{self.Sign}: action:\", reward, observation, available_actions)\n choices = [i for i, x in enumerate(available_actions) if x]\n i = random.choice(choices)\n return i\n \n def reward(self, r):\n #print(f\"{self.Sign}: reward: {r}\")\n pass\n \n def done(self, r, last_observation):\n if r > 0:\n print(f\"===== {self.Sign} won\")\n elif r < 0:\n print(f\"===== {self.Sign} lost\")\n else:\n print(\"===== draw\")\n \n class Callback(object):\n \n def end_turn(self, agents, data):\n print(show_board(data[\"board\"]))\n \n def end_episode(self, agents, data):\n print(\"--- game over ---\")\n print(env.show_history(data[\"board_history\"]))\n \n x_agent = Agent(0)\n y_agent = Agent(1)\n \n env = TicTacToeEnv()\n env.run([x_agent, y_agent], [Callback])\n \n \n \n ",
"import random\nimport numpy as np\nimport math, time\nfrom gym import spaces\nfrom draw2d import Viewer, Frame, Line, Polygon, Circle, Text\n\n\nclass TankTargetEnv(object):\n \n FireRange = 0.1\n Speed = 0.02\n RotSpeed = math.pi*2/50\n Width = 0.01\n TimeHorizon = 100\n GasReward = 0.0\n IdleReward = 0.0\n MissReward = -0.02\n HitReward = 10.0\n \n X0 = 0.0\n X1 = 1.0\n Y0 = 0.0\n Y1 = 1.0\n Margin = 0.1\n \n FIRE = 0\n FWD = 1\n FFWD = 2\n LEFT = 3\n RIGHT = 4\n NActions = 5\n NState = 6\n \n \n\n def __init__(self):\n self.Viewer=None\n self.Hit = False\n self.Fire = False\n self.EpisodeReward = 0.0\n self.T = self.TimeHorizon\n \n high = np.array([1.0]*self.NState, dtype=np.float32)\n self.action_space = spaces.Discrete(self.NActions)\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n \n \n def bind_angle(self, a):\n while a < -math.pi:\n a += math.pi*2\n while a >= math.pi:\n a -= math.pi*2\n return a\n \n def observation(self):\n obs = np.empty((self.NState,))\n obs[0] = self.X\n obs[1] = self.Y\n obs[2] = self.Angle\n dx = self.TargetX - self.X\n dy = self.TargetY - self.Y\n obs[3] = math.sqrt(dx*dx + dy*dy)\n c = math.atan2(dy, dx)\n obs[4] = self.bind_angle(c-self.Angle)\n obs[5] = self.T/self.TimeHorizon\n return obs\n \n def seed(self, x):\n pass\n \n def reset(self):\n self.TargetX = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.TargetY = self.Margin + random.random()*(self.Y1-self.Y0-self.Margin*2)\n self.X = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.Y = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.Angle = self.bind_angle(random.random()*2*math.pi - math.pi)\n self.EpisodeReward = 0.0\n self.T = self.TimeHorizon\n \n return self.observation()\n \n def step(self, action):\n self.Hit = self.Fire = False\n self.Reward = 0.0\n done = False\n reward = self.IdleReward\n if action in (self.FWD, self.FFWD):\n d = self.Speed/2 if action == self.FWD else self.Speed*2\n reward = self.GasReward/4 if action == self.FWD else self.GasReward*2\n x = self.X + math.cos(self.Angle)*d\n y = self.Y + math.sin(self.Angle)*d\n x1 = max(self.X0, min(self.X1, x))\n y1 = max(self.Y0, min(self.Y1, y))\n if x1 != x or y1 != y: # bump ?\n reward = -1.0\n done = True\n self.X, self.Y = x1, y1\n #self.Reward += 0.001\n elif action == self.FIRE:\n self.Fire = True\n dx = self.TargetX - self.X\n dy = self.TargetY - self.Y\n a = math.atan2(dy, dx)\n distance = math.sqrt(dx*dx + dy*dy)\n delta = distance * math.sin(abs(a-self.Angle))\n self.Hit = abs(self.Angle - a) < math.pi/4 and delta < self.Width and distance < self.FireRange + self.Width\n if self.Hit:\n print(\"hit\")\n done = True\n reward = self.HitReward\n else:\n reward = self.MissReward\n elif action == self.LEFT:\n self.Angle += self.RotSpeed\n self.Angle = self.bind_angle(self.Angle)\n elif action == self.RIGHT:\n self.Angle -= self.RotSpeed\n self.Angle = self.bind_angle(self.Angle)\n \n self.T -= 1\n if self.T <= 0:\n done = True\n self.Reward = reward\n self.EpisodeReward += self.Reward\n \n return self.observation(), reward, done, {}\n \n def render(self):\n if self.Viewer is None:\n self.Viewer = Viewer(600, 600)\n self.Frame = self.Viewer.frame(0.0, 1.0, 0.0, 1.0)\n \n self.Tank = Frame()\n self.Tank.add(\n Polygon([(-0.02, -0.01), (0.02, 0.0), (-0.02, 0.01)]).color(0.0, 0.5, 0.1)\n )\n self.Beam = Line(end=(self.FireRange, 0)).color(1.0, 0.5, 0.0)\n self.Tank.add(self.Beam)\n self.Frame.add(self.Tank)\n\n self.Target = Circle(self.Width, 
filled=False)\n self.Frame.add(self.Target)\n \n self.ScoreText = Text(\"\", anchor_x=\"left\", size=8).color(0.5, 0.5, 0.5)\n self.Frame.add(self.ScoreText, at=(0.01, 0.01))\n \n self.Tank.move_to(self.X, self.Y)\n self.Tank.rotate_to(self.Angle)\n self.Beam.hidden = not self.Fire\n self.Target.move_to(self.TargetX, self.TargetY)\n if self.Hit:\n self.Target.color(1.0, 1.0, 0.5)\n else:\n self.Target.color(0.5, 0.5, 0.5)\n \n self.ScoreText.Text = \"r:%.3f R:%.3f %s\" % (self.Reward, self.EpisodeReward, self.observation())\n \n self.Viewer.render()\n \n if self.Hit:\n time.sleep(0.2)\n \n\n \n \n \n \n \n\n",
"from tank_target_env import TankTargetEnv\nfrom walker_env import WalkerEnv\nfrom cartpole_env import CartPoleEnv\nfrom rlpy.keras.AC import Brain\nfrom rlpy import Agent, Trainer\nfrom util import Monitor\nimport numpy as np\nfrom tensorflow import keras\nimport sys, getopt, math\n\nUsage = \"\"\"\npython train.py [-l <file>] [-s <file>] [-w <file>] <environment name>\n -s <file> - save weights into the file\n -l <file> - load weights from the file in the beginning\n -w <file> - equivalent to -s <file> -l <file>\n environment name - \"tanks\", \"walker\", \"cartpole\" or any suitable gym environment name\n\"\"\"\n\nclass MovingAverage(object):\n def __init__(self, alpha=0.1):\n self.Alpha = alpha\n self.Value = None\n \n def __call__(self, x):\n if self.Value is None:\n self.Value = x\n self.Value += self.Alpha*(x-self.Value)\n return self.Value\n\n\nEnvParams = {\n \"tanks\": {\n \"target\": 9.5,\n \"beta\": 0.5,\n \"cutoff\": None\n },\n \"walker\": {\n \"gamma\": 0.9,\n \"cutoff\": None,\n \"beta\": 0.5,\n \"entropy_weight\": 0.002,\n \"target\": 9.5,\n \"max_episodes\": 10000\n },\n \"CartPole-v0\": {\n \"target\": 195.0,\n \"max_steps_per_episode\": 200\n },\n \"cartpole\": {\n \"gamma\": 0.9,\n \"learning_rate\": 0.01,\n \"entropy_weight\": 0.001,\n \"target\": -0.01,\n \"max_steps_per_episode\": 200,\n \"max_episodes\": 10000\n },\n \"*\": { # default parameters\n \"gamma\": 0.99,\n \"epsilon\": 0.0,\n \"cutoff\": 1,\n \"beta\": None,\n \"learning_rate\": 0.01,\n \"entropy_weight\": 0.01,\n \"critic_weight\": 0.5,\n \"max_steps_per_episode\": 100,\n \"max_episodes\": 2000\n }\n}\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=200)\n\nopts, args = getopt.getopt(sys.argv[1:], \"w:s:l:\")\nif not args:\n print(Usage)\n sys.exit(2)\n \nopts = dict(opts)\nload_from = opts.get(\"-l\") or opts.get(\"-w\")\nsave_to = opts.get(\"-s\") or opts.get(\"-w\")\nenv_name = args[0]\n\nparams = EnvParams[\"*\"]\nparams.update(EnvParams.get(env_name, {}))\n\nprint(f\"Running {env_name} environment with the following parameters:\")\nfor k, v in sorted(params.items()):\n print(\" \", k,\"=\",v)\n\ngamma = params[\"gamma\"]\ncomment = params.get(\"comment\", \"\")\nlearning_rate = params[\"learning_rate\"]\ncutoff = params[\"cutoff\"]\nmax_steps_per_episode = params[\"max_steps_per_episode\"]\nport = 8989\nhidden = 200\nbeta = params[\"beta\"]\nepsilon = params[\"epsilon\"]\ntarget = params.get(\"target\")\nmax_episodes = params[\"max_episodes\"]\n\n\nentropy_weight = params[\"entropy_weight\"]\ncritic_weight = params[\"critic_weight\"]\n\n#optimizer = keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.5)\noptimizer = keras.optimizers.Adagrad(learning_rate=learning_rate) #, momentum=0.5)\n\nif env_name == \"tanks\":\n env = TankTargetEnv()\nelif env_name == \"walker\":\n env = WalkerEnv()\nelif env_name == \"cartpole\":\n env = CartPoleEnv()\nelse:\n import gym\n env = gym.make(env_name)\n \nmonitor = Monitor(\"monitor.csv\", \n title = \"Actor-Criric Reinforced Learning\",\n metadata = dict(\n gamma=gamma,\n comment = comment,\n environment = env_name,\n environment_reward = \"\",\n learning_rate = learning_rate,\n brain = \"AC\",\n cutoff = cutoff,\n beta = beta,\n steps_per_episode = max_steps_per_episode,\n optimizer = optimizer.__class__.__name__,\n entropy_weight = entropy_weight,\n critic_weight = critic_weight,\n hidden_layers = hidden,\n max_steps_per_episode = max_steps_per_episode\n ),\n plots=[\n [\n {\n \"label\": \"running average training score\",\n \"line_width\": 
2.0\n } \n ],\n [\n { \"label\": \"critic loss\" },\n { \"label\": \"actor loss\" }\n ],\n [\n { \"label\": \"entropy\", \"line_width\": 1.0 },\n { \"label\": \"entropy MA\" }\n ],\n [\n { \"label\": \"average reward\"},\n { \"label\": \"average return\"},\n { \"label\": \"average value\"},\n { \"label\": \"average advantage\"}\n ]\n ]\n)\n\nmonitor.start_server(port)\n\nclass SaveCallback(object):\n \n def __init__(self, save_to):\n self.BestReward = None\n self.SaveTo = save_to\n\n def train_batch_end(self, brain, agent, batch_episodes, total_steps, losses):\n running_reward = agent.RunningReward\n if self.BestReward is None:\n self.BestReward = running_reward\n elif running_reward > self.BestReward and self.SaveTo:\n brain.save(self.SaveTo)\n print(\"Model weights saved to\", self.SaveTo, \"with best running reward\", self.BestReward)\n self.BestReward = running_reward\n\nclass UpdateMonitorCallback(object):\n \n PlayInterval = 200\n ReportInterval = 10\n \n def __init__(self, monitor):\n self.NextPlay = self.PlayInterval\n self.NextReport = self.ReportInterval\n self.Episodes = 0\n self.Monitor = monitor\n self.EntropyMA = MovingAverage()\n self.AvgValueMA = MovingAverage()\n self.AvgReturnMA = MovingAverage()\n self.AvgRewardMA = MovingAverage()\n self.AvgAdvantageMA = MovingAverage()\n\n def train_batch_end(self, brain, agent, batch_episodes, total_steps, stats):\n self.Episodes += batch_episodes\n running_reward = agent.RunningReward\n entropy = -stats[\"entropy\"]\n self.Monitor.add(self.Episodes, {\n \"running average training score\": running_reward,\n \"critic loss\": stats[\"critic\"],\n \"actor loss\": stats[\"actor\"],\n \"entropy\": entropy,\n \"entropy MA\": self.EntropyMA(entropy),\n \"invalid action loss\": stats[\"invalid_action\"],\n \"average value\": self.AvgValueMA(stats[\"average_value\"]),\n \"average return\": self.AvgReturnMA(stats[\"average_return\"]),\n \"average reward\": self.AvgRewardMA(stats[\"average_reward\"]),\n \"average advantage\": self.AvgAdvantageMA(stats[\"average_advantage\"])\n })\n\nclass Callback(object):\n \n PlayInterval = 50\n ReportInterval = 1\n \n def __init__(self):\n self.NextPlay = self.PlayInterval\n self.NextReport = self.ReportInterval\n self.Episodes = 0\n\n def train_episode_end(self, agent, episode_reward, history):\n print(\"train episode ended: reward=\", episode_reward, \" episode duration:\", len(history[\"actions\"]))\n \n def train_batch_end(self, brain, agent, batch_episodes, total_steps, stats):\n running_reward = agent.RunningReward\n self.Episodes += batch_episodes\n if self.Episodes >= self.NextReport:\n print(\n (\"Episode: %6d running reward: %8.4f. 
Losses: actor: %8.4f, critic:%8.4f, entropy:%8.4f.\" +\n \" Average: value:%8.4f, return:%8.4f, advantage:%8.4f\") % (self.Episodes, running_reward,\n stats[\"actor\"], stats[\"critic\"], stats[\"entropy\"],\n stats[\"average_value\"], stats[\"average_return\"], stats[\"average_advantage\"]\n )\n )\n self.NextReport += self.ReportInterval\n if self.Episodes >= self.NextPlay:\n for _ in range(3):\n data = agent.play_episode(env, max_steps=max_steps_per_episode, render=True, training=False)\n test_reward = agent.EpisodeReward\n print(\"test reward:\", test_reward)\n self.NextPlay += self.PlayInterval\n\n \n \n\nnum_inputs = env.observation_space.shape[0]\nnum_actions = env.action_space.n\n\nmodel = None\nif hasattr(env, \"create_model\"):\n model = env.create_model(hidden)\n print(\"Using model created by the environment\")\n model.summary()\n \nbrain = Brain((num_inputs,), num_actions, model=model, \n learning_rate=learning_rate, \n cutoff=cutoff, beta=beta, gamma=gamma,\n optimizer=optimizer, hidden=hidden,\n critic_weight = critic_weight,\n entropy_weight = entropy_weight\n )\n\nif load_from:\n brain.load(load_from)\n print(\"Model weights loaded from\", load_from)\n\nagent = Agent(brain, num_actions)\ncb = Callback()\nmcb = UpdateMonitorCallback(monitor)\nsave_cb = SaveCallback(save_to)\ntrainer = Trainer(agent, replay_ratio=0.0)\n\ntrainer.train(env, target, max_episodes=max_episodes, max_steps_per_episode=max_steps_per_episode, callbacks=[cb, save_cb, mcb])\n\nprint(\"--- training ended ---\")\n\n\n\n\n"
] | [
[
"numpy.asarray",
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.array",
"numpy.empty"
],
[
"numpy.set_printoptions",
"tensorflow.keras.optimizers.Adagrad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
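The first file in this row defines SingleAgentTicTacToeEnv with a gym-style reset/step interface that reports the currently valid moves through the info dict. A short driver that plays random valid moves until the episode ends, assuming numpy and gym are installed and the script runs next to single_agent_ttt_env.py:

```python
import numpy as np

from single_agent_ttt_env import SingleAgentTicTacToeEnv  # file from this row

env = SingleAgentTicTacToeEnv()
obs, info = env.reset()
done = False
while not done:
    valid = np.flatnonzero(info["valid_actions"])  # indices of playable cells
    action = int(np.random.choice(valid))          # random legal move
    obs, reward, done, info = env.step(action)
env.render()  # prints the board history and the outcome
print("final reward:", reward)
```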
CentraleNantesRobotics/ping360_sonar_python | [
"f461594aa0345a417f5bb711b8f4500fb4b4727d"
] | [
"ping360_sonar/ping360_sonar/sonar_interface.py"
] | [
"#!/usr/bin/env python\n\nfrom ping360_sonar.sensor import Ping360\nfrom numpy import pi, sqrt, tan, cos, sign\nfrom brping import definitions\n\nclass SonarInterface:\n \n samplePeriodTickDuration = 25e-9\n firmwareMinTransmitDuration = 5\n firmwareMaxTransmitDuration = 500\n firmwareMaxSamples = 1200\n firmwareMinSamplePeriod = 80\n maxDurationRatio = 64e6\n \n def __init__(self, port, baudrate, fallback_emulated):\n \n self.angle = 0\n try:\n self.sonar = Ping360(port, baudrate)\n if self.sonar.initialize():\n return\n except:\n pass\n \n if not fallback_emulated:\n raise RuntimeError('Cannot initialize sonar')\n print('Using emulated sonar')\n self.sonar = None\n \n def configureAngles(self, aperture_deg, step_deg, ensure_divisor):\n # to gradians\n target_half_aperture = int(aperture_deg*200/360+0.5)\n best_half_aperture = target_half_aperture\n self.angle_step = int(round(step_deg*400/360))\n\n # ensure angle_step is a divisor of max-min in gradians, necessary for LaserScan messages\n if ensure_divisor: \n # look around step, allow increased aperture\n target_step = self.angle_step\n \n # not too far from requested aperture, as close as possible to requested step (impacts turn duration)\n computeCost = lambda step,half_aperture: 1000 if half_aperture%step != 0 else abs(step-target_step) + abs(half_aperture-target_half_aperture)\n \n best_cost = computeCost(self.angle_step, target_half_aperture)\n if best_cost != 0: \n for step in range(1, target_step*2):\n for half_aperture in range(target_half_aperture, min(target_half_aperture+10, 200)+1):\n cost = computeCost(step, half_aperture)\n if cost < best_cost:\n best_cost = cost\n self.angle_step = step\n best_half_aperture = half_aperture\n \n self.angle_min = -best_half_aperture\n self.angle_max = best_half_aperture\n if self.angle_max == 200: \n self.angle_max -= self.angle_step\n if self.angle < self.angle_min or self.angle > self.angle_max or (self.angle-self.angle_min) % self.angle_step != 0:\n self.angle = 0\n \n @staticmethod\n def grad2rad(grad):\n return grad*pi/200\n \n def angleMin(self):\n return self.grad2rad(self.angle_min)\n def angleMax(self):\n return self.grad2rad(self.angle_max)\n def angleStep(self):\n return self.grad2rad(self.angle_step)\n def currentAngle(self):\n return self.grad2rad(self.angle)\n def angleCount(self):\n return (self.angle_max-self.angle_min)//self.angle_step\n def angleIndex(self):\n if self.angle_step > 0:\n return (self.angle-self.angle_min)//self.angle_step\n return (self.angle-self.angle_max)//self.angle_step\n def rangeFrom(self, index):\n return (index+1)*self.max_range/self.samples\n \n def configureTransducer(self, gain, frequency, speed_of_sound, max_range):\n \n self.gain = gain\n self.frequency = frequency\n \n self.samples = int(min(self.firmwareMaxSamples,2*max_range/(self.firmwareMinSamplePeriod*speed_of_sound*self.samplePeriodTickDuration)))\n \n self.sample_period = int((2.*max_range)/\n (self.samples*speed_of_sound*self.samplePeriodTickDuration));\n \n\n #* Per firmware engineer:\n #* 1. Starting point is TxPulse in usec = ((one-way range in metres) * 8000) / (Velocity of sound in metres\n #* per second)\n #* 2. Then check that TxPulse is wide enough for currently selected sample interval in usec, i.e.,\n #* if TxPulse < (2.5 * sample interval) then TxPulse = (2.5 * sample interval)\n #* 3. 
Perform limit checking\n\n #1\n one_way_duration_us = (8000.*max_range)/speed_of_sound\n # 2 (transmit duration is microseconds, sample_period_ns is nanoseconds) \n sample_period_ns = self.sample_period * self.samplePeriodTickDuration\n self.transmit_duration = max(2.5*sample_period_ns/1000, one_way_duration_us)\n # 3 ensure bounds \n if self.transmit_duration < self.firmwareMinTransmitDuration:\n self.transmit_duration = self.firmwareMinTransmitDuration\n else:\n max_duration = min(self.firmwareMaxTransmitDuration, sample_period_ns*self.maxDurationRatio)\n if self.transmit_duration > max_duration:\n self.transmit_duration = max_duration\n self.transmit_duration = int(self.transmit_duration)\n \n def transmitDuration(self):\n # microseconds to seconds\n return self.transmit_duration/1e6\n \n def updateAngle(self):\n self.angle += self.angle_step\n \n if self.angle_min == -200:\n # full scan\n end_turn = self.angle + self.angle_step > self.angle_max\n if self.angle > self.angle_max:\n self.angle = self.angle_min\n return end_turn\n \n # sector scan, check near end of sector\n if self.angle + self.angle_step >= self.angle_max or self.angle + self.angle_step <= self.angle_min:\n self.angle_step *= -1\n return True\n return False\n \n def read(self):\n # update angle before transmit\n end_turn = self.updateAngle()\n \n if self.sonar is not None:\n print(f'transmit: {self.transmit_duration}')\n \n self.sonar.control_transducer(\n 0, # reserved\n self.gain,\n self.angle,\n self.transmit_duration,\n self.sample_period,\n self.frequency,\n self.samples,\n 1,\n 0)\n self.sonar.wait_message([definitions.PING360_DEVICE_DATA, definitions.COMMON_NACK], 4.0)\n self.data = bytearray(self.sonar._data)\n return (len(self.data) != 0, end_turn)\n \n # emulated sonar\n from random import randint\n from time import sleep \n self.data = [0 for _ in range(self.samples)]\n scale = 5*abs((self.angle+400) % 400 - 200)\n for i in range(self.samples):\n if randint(self.samples,2*self.samples) < 1.1*i + scale:\n self.data[i] = randint(220, 255)\n # emulate transmit duration in microseconds\n #sleep(self.transmit_duration/1000000)\n return (True, end_turn)\n\n\n\n# handles an angular sector of the image\nclass Bound:\n radius = 0\n def __init__(self, x, tm, tM):\n self.x = x\n if type(tM) == int:\n self.low = Bound.clamp(tm*x)\n self.up = int(tM*sqrt(Bound.radius**2-x**2-1))\n else:\n self.low = Bound.clamp(x*tm)\n self.up = Bound.clamp(x*tM)\n \n if self.up**2 + x**2 > Bound.radius**2:\n self.up = int(sign(self.up) * sqrt(Bound.radius**2-x**2-1))\n \n if self.up < self.low:\n self.low,self.up = self.up,self.low\n \n #staticmethod\n def clamp(coord):\n if coord < -Bound.radius+1:\n return -Bound.radius+1\n elif coord > Bound.radius-1:\n return Bound.radius-1\n return int(coord)\n \nclass Sector:\n def __init__(self):\n self.dr = None\n \n def configure(self, samples, radius):\n self.dr = radius/samples\n Bound.radius = radius\n \n def init(self, angle, step):\n angle_min = angle-step/2\n angle_max = angle+step/2\n xmin, xmax,same_side = self.xLimits(angle_min, angle_max)\n tm, tM = tan(angle_min), tan(angle_max) \n self.bounds = []\n\n if same_side:\n # same side\n if abs(tm) > abs(tM):\n tm,tM = tM,tm\n for x in range(xmin, xmax+1):\n self.bounds.append(Bound(x,tm,tM))\n else:\n f = 1 if abs(angle-pi/2) < abs(angle+pi/2) else -1\n \n if f == -1:\n tm,tM = tM,tm\n \n for x in range(xmin, 0):\n self.bounds.append(Bound(x, tM,f))\n for x in range(0, xmax+1):\n self.bounds.append(Bound(x, tm,f))\n \n self.cur = -1\n \n 
def xLimits(self, angle_min, angle_max):\n cm = cos(angle_min)\n cM = cos(angle_max)\n if cM < cm:\n cm,cM = cM,cm\n if cm*cM > 0:\n if cM < 0:\n cM = 0\n else:\n cm = 0\n return Bound.clamp(round(Bound.radius*cm)), Bound.clamp(round(Bound.radius*cM)), cm*cM >= 0\n \n def nextPoint(self, x, y):\n if self.cur == -1:\n self.cur = 0\n x = self.bounds[0].x\n y = self.bounds[0].low\n elif y < self.bounds[self.cur].up:\n y += 1\n else:\n self.cur += 1\n if self.cur == len(self.bounds):\n return False, 0, 0, 0\n x = self.bounds[self.cur].x\n y = self.bounds[self.cur].low\n return True, x, y, int(round(sqrt(x*x+y*y)/self.dr))\n"
] | [
[
"numpy.tan",
"numpy.sqrt",
"numpy.cos",
"numpy.sign"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
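SonarInterface above falls back to an emulated device when the serial link cannot be opened, which makes it possible to exercise the configuration and read loop without hardware. The sketch below assumes the ping360_sonar package (and its brping dependency) is installed; the port, baudrate and transducer settings are arbitrary example values.

```python
from ping360_sonar.sonar_interface import SonarInterface

# A bogus port plus fallback_emulated=True drops into the emulated sonar.
sonar = SonarInterface(port="/dev/ttyUSB0", baudrate=115200, fallback_emulated=True)
sonar.configureAngles(aperture_deg=360, step_deg=1, ensure_divisor=True)
sonar.configureTransducer(gain=0, frequency=740, speed_of_sound=1500, max_range=2.0)

valid, end_turn = sonar.read()  # one emulated ping at the current angle
if valid:
    print("angle (rad):", sonar.currentAngle(), "samples:", len(sonar.data))
```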
aknckaan/scrl | [
"bff485e27d8785628e35d2cb73dce06f10065b1f"
] | [
"torchlars/wrapper.py"
] | [
"import torch\r\nfrom torch.optim import Optimizer\r\n\r\nclass OptimWrapper(Optimizer):\r\n\r\n # Mixin class that defines convenient functions for writing Optimizer Wrappers\r\n\r\n def __init__(self, optim):\r\n self.optim = optim\r\n\r\n def __getstate__(self):\r\n return self.optim.__getstate__()\r\n\r\n def __setstate__(self, state):\r\n self.optim.__setstate__(state)\r\n\r\n @property\r\n def state(self):\r\n return self.optim.state\r\n\r\n @property\r\n def param_groups(self):\r\n return self.optim.param_groups\r\n\r\n @param_groups.setter\r\n def param_groups(self, value):\r\n self.optim.param_groups = value\r\n\r\n def state_dict(self):\r\n return self.optim.state_dict()\r\n\r\n def load_state_dict(self, state_dict):\r\n self.optim.load_state_dict(state_dict)\r\n\r\n def zero_grad(self):\r\n self.optim.zero_grad()\r\n\r\n def add_param_group(self, param_group):\r\n self.optim.add_param_group(param_group)\r\n\r\n @property\r\n def defaults(self):\r\n return self.optim.defaults\r\n\r\n @defaults.setter\r\n def defaults(self, defaults):\r\n self.optim.defaults = defaults\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n self.optim.step(closure=closure)\r\n\r\n def __repr__(self):\r\n return \"%s(%r)\" % (self.__class__.__name__, self.optim)"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
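OptimWrapper above is a delegation mixin: every Optimizer entry point is forwarded to the wrapped optimizer, with step() additionally executed under torch.no_grad(). A small sketch wrapping a stock SGD optimizer around a toy model, showing that the usual zero_grad/backward/step cycle is unchanged:

```python
import torch
from torch import nn

from torchlars.wrapper import OptimWrapper  # file from this row

model = nn.Linear(4, 1)                      # toy model for illustration
base = torch.optim.SGD(model.parameters(), lr=0.1)
optim = OptimWrapper(base)                   # delegates everything to the wrapped SGD

x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = nn.functional.mse_loss(model(x), y)

optim.zero_grad()
loss.backward()
optim.step()                                 # SGD.step() runs under torch.no_grad()
print(optim.param_groups[0]["lr"])           # property forwards to base.param_groups
```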
schmitse/zfit | [
"d42588f1d43532a34a81f31e602d2471780690e2",
"d42588f1d43532a34a81f31e602d2471780690e2"
] | [
"zfit/models/basic.py",
"zfit/__init__.py"
] | [
"\"\"\"Basic PDFs are provided here.\n\nGauss, exponential... that can be used together with Functors to build larger models.\n\"\"\"\n\n# Copyright (c) 2021 zfit\nimport contextlib\n\nimport numpy as np\nimport tensorflow as tf\n\nimport zfit.z.numpy as znp\nfrom zfit import z\n\nfrom ..core.basepdf import BasePDF\nfrom ..core.space import ANY_LOWER, ANY_UPPER, Space\nfrom ..util import ztyping\nfrom ..util.exception import (AnalyticIntegralNotImplemented,\n BreakingAPIChangeError)\nfrom ..util.warnings import warn_advanced_feature\n\n\nclass Exponential(BasePDF):\n _N_OBS = 1\n\n def __init__(self, lam=None, obs: ztyping.ObsTypeInput = None, name: str = \"Exponential\", lambda_=None):\n \"\"\"Exponential function exp(lambda * x).\n\n The function is normalized over a finite range and therefore a pdf. So the PDF is precisely\n defined as :math:`\\\\frac{ e^{\\\\lambda \\\\cdot x}}{ \\\\int_{lower}^{upper} e^{\\\\lambda \\\\cdot x} dx}`\n\n Args:\n lam: Accessed as parameter \"lambda\".\n obs: The :py:class:`~zfit.Space` the pdf is defined in.\n name: Name of the pdf.\n dtype:\n \"\"\"\n if lambda_ is not None:\n if lam is None:\n lam = lambda_\n else:\n raise BreakingAPIChangeError(\"The 'lambda' parameter has been renamed from 'lambda_' to 'lam'.\")\n params = {'lambda': lam}\n super().__init__(obs, name=name, params=params)\n\n self._calc_numerics_data_shift = lambda: z.constant(0.)\n\n if not self.space.has_limits:\n warn_advanced_feature(\"Exponential pdf relies on a shift of the input towards 0 to keep the numerical \"\n f\"stability high. The space {self.space} does not have limits set and no shift\"\n f\" will occure. To set it manually, set _numerics_data_shift to the expected\"\n f\" average values given to this function _in case you want things to be set_.\"\n f\"If this sounds unfamiliar, regard this as an error and use a normalization range.\",\n identifier='exp_shift')\n self._set_numerics_data_shift(self.space)\n\n def _unnormalized_pdf(self, x):\n lambda_ = self.params['lambda']\n x = x.unstack_x()\n probs = znp.exp(lambda_ * (self._shift_x(x)))\n tf.debugging.assert_all_finite(probs, f\"Exponential PDF {self} has non valid values. This is likely caused\"\n f\" by numerical problems: if the exponential is too steep, this will\"\n f\" yield NaNs or infs. Make sure that your lambda is small enough and/or\"\n f\" the initial space is in the same\"\n f\" region as your data (and norm_range, if explicitly set differently).\"\n f\" If this issue still persists, please oben an issue on Github:\"\n f\" https://github.com/zfit/zfit\")\n return probs # Don't use exp! will overflow.\n\n def _shift_x(self, x):\n return x - self._calc_numerics_data_shift()\n\n @contextlib.contextmanager\n def _set_numerics_data_shift(self, limits):\n if limits:\n def calc_numerics_data_shift():\n lower, upper = [], []\n for limit in limits:\n low, up = limit.rect_limits\n lower.append(z.convert_to_tensor(low[:, 0]))\n upper.append(z.convert_to_tensor(up[:, 0]))\n lower = z.convert_to_tensor(lower)\n upper = z.convert_to_tensor(upper)\n lower_val = znp.min(lower, axis=0)\n upper_val = znp.max(upper, axis=0)\n\n return (upper_val + lower_val) / 2\n\n old_value = self._calc_numerics_data_shift\n\n self._calc_numerics_data_shift = calc_numerics_data_shift\n yield\n self._calc_numerics_data_shift = old_value\n else:\n yield\n\n # All hooks are needed to set the right shift when \"entering\" the pdf. The norm range is taken where both are\n # available. 
No special need needs to be taken for sampling (it samples from the correct region, the limits, and\n # uses the predictions by the `unnormalized_prob` -> that is shifted correctly\n def _single_hook_integrate(self, limits, norm_range, x):\n with self._set_numerics_data_shift(norm_range):\n return super()._single_hook_integrate(limits, norm_range, x=x)\n\n def _single_hook_analytic_integrate(self, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_analytic_integrate(limits, norm_range)\n\n def _single_hook_numeric_integrate(self, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_numeric_integrate(limits, norm_range)\n\n def _single_hook_partial_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_integrate(x, limits, norm_range)\n\n def _single_hook_partial_analytic_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_analytic_integrate(x, limits, norm_range)\n\n def _single_hook_partial_numeric_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_numeric_integrate(x, limits, norm_range)\n\n # def _single_hook_normalization(self, limits):\n # with self._set_numerics_data_shift(limits=limits):\n # return super()._single_hook_normalization(limits)\n\n #\n # # TODO: remove component_norm_range? But needed for integral?\n # def _single_hook_unnormalized_pdf(self, x, name):\n # if component_norm_range.limits_are_false:\n # component_norm_range = self.space\n # if component_norm_range.limits_are_set:\n # with self._set_numerics_data_shift(limits=component_norm_range):\n # return super()._single_hook_unnormalized_pdf(x, name)\n # else:\n # return super()._single_hook_unnormalized_pdf(x, name)\n #\n def _single_hook_pdf(self, x, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_pdf(x, norm_range)\n\n #\n def _single_hook_log_pdf(self, x, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_log_pdf(x, norm_range)\n\n def _single_hook_sample(self, n, limits, x=None):\n with self._set_numerics_data_shift(limits=limits):\n return super()._single_hook_sample(n, limits, x)\n\n\ndef _exp_integral_from_any_to_any(limits, params, model):\n lambda_ = params['lambda']\n lower, upper = limits.rect_limits\n # if any(np.isinf([lower, upper])):\n # raise AnalyticIntegralNotImplemented\n\n integral = _exp_integral_func_shifting(lambd=lambda_, lower=lower, upper=upper, model=model)\n return integral[0]\n\n\ndef _exp_integral_func_shifting(lambd, lower, upper, model):\n def raw_integral(x):\n return z.exp(lambd * (model._shift_x(x))) / lambd # needed due to overflow in exp otherwise\n\n lower_int = raw_integral(x=lower)\n upper_int = raw_integral(x=upper)\n integral = (upper_int - lower_int)\n return integral\n\n\ndef exp_icdf(x, params, model):\n lambd = params['lambda']\n x = z.unstack_x(x)\n x = model._shift_x(x)\n return znp.log(lambd * x) / lambd\n\n\n# Exponential.register_inverse_analytic_integral(exp_icdf) # TODO: register icdf for exponential\n# TODO: cleanup, make cdf registrable _and_ inverse integral, but real\n\nlimits = Space(axes=0, limits=(ANY_LOWER, ANY_UPPER))\nExponential.register_analytic_integral(func=_exp_integral_from_any_to_any, 
limits=limits)\n",
"\"\"\"Top-level package for zfit.\"\"\"\n\n# Copyright (c) 2021 zfit\nimport inspect\nimport sys\nimport warnings\n\nfrom pkg_resources import get_distribution\n\n__version__ = get_distribution(__name__).version\n\n__license__ = \"BSD 3-Clause\"\n__copyright__ = \"Copyright 2018, zfit\"\n__status__ = \"Beta\"\n\n__author__ = (\"Jonas Eschle <[email protected]>,\"\n \"Albert Puig <[email protected]>, \"\n \"Rafael Silva Coutinho <[email protected]>, \"\n \"Matthieu Marinangeli <[email protected]>\")\n__maintainer__ = \"zfit\"\n__email__ = '[email protected]'\n__credits__ = \"Chris Burr, Martina Ferrillo, Abhijit Mathad, Oliver Lantwin, Johannes Lade\"\n\n__all__ = [\"z\", \"constraint\", \"pdf\", \"minimize\", \"loss\", \"core\", \"data\", \"func\", \"dimension\", \"exception\",\n \"sample\",\n \"Parameter\", \"ComposedParameter\", \"ComplexParameter\", \"convert_to_parameter\",\n \"Space\", \"convert_to_space\", \"supports\",\n \"run\", \"settings\"]\n\n# Copyright (c) 2019 zfit\n\n\ndef _maybe_disable_warnings():\n import os\n disable_warnings = os.environ.get(\"ZFIT_DISABLE_TF_WARNINGS\")\n if disable_warnings is None:\n warnings.warn(\"TensorFlow warnings are by default suppressed by zfit.\"\n \" In order to show them,\"\n \" set the environment variable ZFIT_DISABLE_TF_WARNINGS=0.\"\n \" In order to suppress the TensorFlow warnings AND this warning,\"\n \" set ZFIT_DISABLE_TF_WARNINGS=1.\")\n elif disable_warnings == '0':\n return\n\n os.environ[\"KMP_AFFINITY\"] = \"noverbose\"\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n import tensorflow as tf\n\n tf.get_logger().setLevel('ERROR')\n\n\n_maybe_disable_warnings()\n\nimport tensorflow as tf\n\nif int(tf.__version__[0]) < 2:\n raise RuntimeError(f\"You are using TensorFlow version {tf.__version__}. This zfit version ({__version__}) works\"\n f\" only with TF >= 2\")\n\nfrom . import z # initialize first\nfrom . import (constraint, core, data, dimension, exception, func, loss,\n minimize, param, pdf, sample)\nfrom .core.data import Data\nfrom .core.parameter import (ComplexParameter, ComposedParameter, Parameter,\n convert_to_parameter)\nfrom .core.space import Space, convert_to_space, supports\nfrom .settings import run, ztypes\nfrom .util.graph import jit as _jit\n\n\ndef _maybe_disable_jit():\n import os\n arg1 = os.environ.get(\"ZFIT_DO_JIT\")\n arg2 = os.environ.get(\"ZFIT_EXPERIMENTAL_DO_JIT\")\n arg3 = os.environ.get(\"ZFIT_MODE_GRAPH\")\n if arg3 is not None:\n warnings.warn(\"Depreceated to use `ZFIT_MODE_GRAPH`, use `ZFIT_GRAPH_MODE` instead.\",\n DeprecationWarning)\n\n if arg1 is not None and arg2 is None:\n warnings.warn(\"Depreceated to use `ZFIT_EXPERIMENTAL_DO_JIT`, use `ZFIT_GRAPH_MODE` instead.\",\n DeprecationWarning)\n arg = arg2 if arg1 is None else arg1\n if arg is not None and not int(arg):\n run.set_graph_mode(False)\n\n graph = os.environ.get(\"ZFIT_GRAPH_MODE\")\n if graph is not None and not int(graph):\n run.set_graph_mode(False)\n\n\n# experimental flags\n\n\n_maybe_disable_jit()\n\n# EOF\n"
] | [
[
"tensorflow.debugging.assert_all_finite"
],
[
"tensorflow.get_logger"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
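zfit/models/basic.py above defines the Exponential pdf with a lam parameter and registers an analytic integral over any finite range. A sketch of building and evaluating it through the public zfit API; the observable name, limits and starting value of lam are arbitrary, and the constructor signatures of zfit.Space and zfit.Parameter are assumed from the public documentation rather than from this file.

```python
import numpy as np
import zfit

obs = zfit.Space("x", limits=(0.0, 10.0))     # finite range so the pdf can normalize
lam = zfit.Parameter("lam", -0.5)             # decay constant, arbitrary start value
pdf = zfit.pdf.Exponential(lam=lam, obs=obs)  # class defined in the file above

x = np.linspace(0.0, 10.0, 5)
print(pdf.pdf(x))  # densities normalized over [0, 10] via the registered analytic integral
```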
bobub/distil_labse | [
"ad587d7e4e49101a22fb1459b724b25733715caa"
] | [
"distil_labse_repo/distil_funcs.py"
] | [
"# Imports\nimport torch\nfrom labml_nn.transformers.switch import SwitchTransformer, SwitchTransformerLayer, SwitchFeedForward\nfrom labml_nn.transformers import MultiHeadAttention\nfrom labml_nn.transformers.feed_forward import FeedForward\nimport numpy as np\nfrom transformers import AutoConfig, AutoModel\nimport torch.nn as nn\nimport math\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics import mean_squared_error\nfrom random import choice\nfrom sklearn.decomposition import PCA\nfrom copy import deepcopy\nfrom transformers import BertModel, BertConfig\n\n\n# Custom dataset function to store Open Subtitles data\nclass CustomDataset(torch.utils.data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, input_ids, token_type_ids, attention_masks):\n 'Initialization'\n self.input_ids = input_ids\n self.token_type_ids = token_type_ids\n self.attention_masks = attention_masks\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.input_ids)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n\n input_id = self.input_ids[index]\n token_type_ID = self.token_type_ids[index]\n attention_mask = self.attention_masks[index]\n sample = {'input_ids':input_id, 'token_type_ids':token_type_ID , 'attention_mask':attention_mask}\n\n return sample\n\n# Weights init and switch init initialise the weights for the model as desribed in Switch Transformer paper\ndef weights_init(tensor: torch.Tensor):\n if isinstance(tensor, nn.Linear):\n switch_init(tensor.weight.data)\n torch.nn.init.zeros_(tensor.bias.data)\n if isinstance(tensor, nn.LayerNorm):\n torch.nn.init.zeros_(tensor.weight.data)\n torch.nn.init.zeros_(tensor.bias.data)\n\ndef switch_init(tensor: torch.Tensor, s: float = 0.1, mean: float=0) -> torch.Tensor:\n fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor)\n std = math.sqrt(s/fan_in)\n\n return torch.nn.init.trunc_normal_(tensor=tensor, mean=mean, std=std)\n\n\nclass LaBSE_Switch(nn.Module):\n \"\"\"\n Torch module for to create a Switch Transformer for LaBSE. 
\n Can be used for other BERT based models too, just change the input_id\n tokenization and word_embedding module.\n\n Inputs:\n config = dictionary of configuration\n word_embeddings_module = torch module mapping token ids to word embeddings\n\n Forward:\n Input_ids = ids using labse tokenizer \n attention_mask = binary, indicates to model which tokens should be attended to,\n and which should not.\n\n Outputs:\n outputs = a dictionary containing x, counts, route_prob, n_dropped, logits, attention, values\n\n See Switch Transformer paper to understand all except:\n attention, values and logits, which are used during knowledge distillation.\n \n \"\"\"\n\n def __init__(self, config, word_embeddings_module):\n\n super().__init__()\n # set the switch transformer as the actual neural net\n self.switch_model = SwitchTransformer(\n \n SwitchTransformerLayer(\n d_model=config['d_model'],\n attn=MultiHeadAttention(config['heads'], config['d_model'], config['dropout']),\n\n feed_forward=SwitchFeedForward(\n capacity_factor=config['capacity_factor'],\n drop_tokens=config['drop_tokens'],\n is_scale_prob=config['is_scale_prob'],\n n_experts=config['n_experts'],\n expert=FeedForward(config['d_model'], config['d_ff'], config['dropout_ffn']),\n d_model=config['d_model']),\n dropout_prob=config['dropout']),\n config['n_layers'],\n d_out = int(768),\n dropout_prob = config['dropout'])\n # initialise weights\n # self.switch_model.apply(weights_init)\n \n # module that maps input tokens into embedding vectors\n self.word_embeddings = word_embeddings_module\n\n # get attention weights from teacher\n # self.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)\n \n def weight_init_from_teacher(self, teacher_model, int_matches):\n \n \n \"\"\"\n Initialises attention modules of student with those of the teacher for the --- specific to LaBSE and DistilSwitch\n int_matches should be a list of tuples of [(teacher_layer, student_layer),...]\n e.g. 
int_matches = [(5,0),(11,2)] --> give attention weights of teacher layer 5 to student layer 0 \n \"\"\"\n # teacher_model=load_teacher(device=torch.device('cuda'))\n self.switch_model.layers[int_matches[1]].attn.query.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.query.weight\n self.switch_model.layers[int_matches[1]].attn.query.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.query.bias\n self.switch_model.layers[int_matches[1]].attn.key.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.key.weight\n self.switch_model.layers[int_matches[1]].attn.key.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.key.bias\n self.switch_model.layers[int_matches[1]].attn.value.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.value.weight\n self.switch_model.layers[int_matches[1]].attn.value.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.value.bias\n self.switch_model.layers[int_matches[1]].attn.output.weight = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.weight\n self.switch_model.layers[int_matches[1]].attn.output.bias = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.bias\n# self.switch_model.layers[int_matches[1]].norm_ff.weight = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.weight\n# self.switch_model.layers[int_matches[1]].norm_ff.bias = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.bias\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None):\n \n # masks and token type ids not used, as we're just creating sentence embeddings for classification tasks\n \n # word embeddings of shape [batch, seq_len, d_model]\n input_embeddings = self.word_embeddings(input_ids)\n\n # model input on shape [seq_len, batch, d_model] and mask\n _batch,_seq_len,_n_hid = input_embeddings.shape\n #print(_n_hid)\n\n # call switch transformer\n outputs = self.switch_model(torch.reshape(input_embeddings, (_seq_len, _batch, _n_hid)),\n attention_mask=None)\n \n return outputs\n\n# function to blackbox load the student for distillation - can be switch or bert based\ndef load_student(name, student_config, device, teacher_model, int_matches, N_LAYERS):\n\n if name!='switch':\n \n # for pretrained bert models - setup config\n student_config = BertConfig.from_pretrained(name)\n student_config.num_hidden_layers = N_LAYERS\n student_config.output_hidden_states = True\n student_config.output_attentions = True\n student_config.use_cache = True\n student_config.is_decoder = True\n \n # load model and set input embeddings\n student_model = BertModel.from_pretrained(name, config=student_config)\n student_model.set_input_embeddings(teacher_model.get_input_embeddings())\n student_model = student_model.float()\n student_model.to(device=device)\n \n return student_model\n\n if name=='switch':\n \n # create compressed word embeddings from those of the teacher\n word_embeddings = deepcopy(teacher_model.get_input_embeddings())\n compressed_word_embeddings = word_embedding_compression(word_embeddings, student_config['d_model'])\n \n # create student model\n student_model = LaBSE_Switch(config=student_config, word_embeddings_module=compressed_word_embeddings)\n \n # initialise weights\n student_model.switch_model.apply(weights_init)\n student_model.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)\n \n # convert model to float32 and move to device\n student_model = 
student_model.float() \n student_model.to(device=device)\n \n return student_model\n\n# loads teacher model from Huggingface\ndef load_teacher(device):\n teacher_config = AutoConfig.from_pretrained('sentence-transformers/LaBSE')\n teacher_config.output_hidden_states = True\n teacher_config.output_attentions = True\n teacher_config.use_cache = True\n teacher_config.is_decoder = True\n teacher_model = AutoModel.from_pretrained('sentence-transformers/LaBSE', config=teacher_config)\n teacher_model.float() # needs to be 32 bit precision to get decent results from distillation\n teacher_model.to(device=device)\n \n return teacher_model\n\n# Adaptor for BERT based models\ndef simple_adaptor(batch, model_outputs):\n \n # values need to be reformatted from Huggingface 'past_key_values' output\n values = []\n for i in model_outputs['past_key_values']:\n values.append(i[1])\n values = torch.stack(values)\n \n attentions = []\n for j in model_outputs['attentions']:\n attentions.append(inv_softmax(j))\n attentions = torch.stack(attentions)\n \n # we use pooler output as logits\n return {'logits': model_outputs['pooler_output'],\n 'hidden': model_outputs['hidden_states'],\n #'attention': model_outputs['attentions'],\n 'attention':attentions,\n 'inputs_mask': batch['attention_mask'],\n 'value_relation': values,\n 'pooler_output':model_outputs['pooler_output']}\n\ndef inv_softmax(x,C=-50):\n # reverses softmax operation - used in teacher_adaptor\n # C variable sets the min value of the scores, -50 works well.\n result = torch.log(x)\n result = torch.where(result <= float('-inf'), torch.full_like(result,C), result)\n return result\n\ndef teacher_adaptor(batch, model_outputs):\n # selects relevant model and batch outputs used for distillation loss calculation\n values = []\n for i in model_outputs['past_key_values']:\n values.append(i[1])\n values = torch.stack(values)\n \n attentions = []\n for j in model_outputs['attentions']:\n attentions.append(inv_softmax(j))\n attentions = torch.stack(attentions)\n \n # print(model_outputs['pooler_output'].requires_grad)\n\n return {#'logits': model_outputs['last_hidden_state'],\n 'logits':model_outputs['pooler_output'],\n 'hidden': model_outputs['hidden_states'],\n #'attention': model_outputs['attentions'],\n 'attention': attentions,\n 'inputs_mask': batch['attention_mask'],\n 'value_relation': values,\n 'pooler_output':model_outputs['pooler_output']}\n\n# adaptor for switch model\ndef switch_student_adaptor(batch, model_outputs):\n # selects relevant model and batch outputs and reformats them\n # needs to have same shapes as teacher adaptor\n\n # reformat attention\n layers, len, len, batch_size, heads = model_outputs['attention'].shape\n attention = model_outputs['attention'].reshape(layers, batch_size, heads, len, len)\n\n # reformat logits\n len, batch_size, d_model = model_outputs['logits'].shape\n logits = model_outputs['logits'].reshape(batch_size, len, d_model)\n # print(model_outputs['pooler_output'].requires_grad)\n\n # reformat values\n layers, len, batch_size, heads, embedding_per_head = model_outputs['values'].shape\n values = model_outputs['values'].reshape(layers, batch_size, heads, len, embedding_per_head)\n\n return {#'logits': logits,\n 'logits':model_outputs['pooler_output'],\n 'counts': model_outputs['counts'],\n 'attention': attention,\n 'inputs_mask': batch['attention_mask'],\n 'route_prob': model_outputs['route_prob'],\n 'n_dropped': model_outputs['n_dropped'],\n 'value_relation': values}\n\n# Predict function evaluates model every epoch to 
show training progress\ndef predict(model, teacher_model, eval_dataset, step, device, STUDENT, BATCH_SIZE, eval_metric='cosine_similarity', feedback=True):\n '''\n model = student_model\n teacher_model = labse\n eval_dataset = num of dev set samples to test model on per callback\n device = cuda or cpu\n student = switch or !switch\n eval_metric = metric to evaluate the model - mse or cosine_similarity\n '''\n model.eval()\n student_logits = []\n teacher_logits =[]\n batch_counts = []\n batch_n_dropped = []\n batch_route_prob = []\n \n dataloader = DataLoader(eval_dataset,batch_size=BATCH_SIZE)\n print('Running callback function on {} dev set samples...'.format(len(eval_dataset)))\n for batch in dataloader:\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n\n with torch.no_grad():\n model_outputs = model(input_ids=input_ids, attention_mask=attention_mask)\n logits_S = model_outputs['pooler_output']\n logits_T = teacher_model(input_ids=input_ids, attention_mask=attention_mask)['pooler_output']\n cpu_logits_S = logits_S.detach().cpu()\n cpu_logits_T = logits_T.detach().cpu()\n \n if STUDENT=='switch' and feedback==True:\n counts = model_outputs['counts'].detach().cpu()\n n_dropped = model_outputs['n_dropped']\n route_prob = model_outputs['route_prob'].detach().cpu()\n\n for i in range(len(cpu_logits_S)):\n student_logits.append(cpu_logits_S[i].numpy())\n teacher_logits.append(cpu_logits_T[i].numpy())\n \n if STUDENT=='switch' and feedback==True:\n for i in range(len(counts)):\n batch_counts.append(counts[i].numpy())\n batch_n_dropped.append(n_dropped[i])\n batch_route_prob.append(route_prob[i].numpy())\n \n model.train()\n student_logits = np.array(student_logits)\n teacher_logits = np.array(teacher_logits)\n\n if eval_metric=='cosine_similarity':\n \n similarities = np.diag(cosine_similarity(student_logits, teacher_logits))\n print (\"Average cosine similarity for these samples: \", np.mean(similarities))\n \n if eval_metric=='mse':\n mse_error = mean_squared_error(student_logits, teacher_logits)\n print (\"Average mean squared error for these samples: \", mse_error)\n \n if STUDENT=='switch' and feedback==True:\n switch_counts = np.array(batch_counts)\n switch_n_dropped = np.array(batch_n_dropped)\n switch_route_prob = np.array(batch_route_prob)\n print('SWITCH BEHAVIOUR:')\n print('Counts Shape: \\n', switch_counts.shape)\n print('Counts: \\n', switch_counts)\n print('N_dropped: \\n', switch_n_dropped)\n print('Route Prob: \\n', switch_route_prob)\n\n return torch.Tensor([np.mean(similarities)])\n\n# generates random parameters for hyperparam tuning\ndef generate_random_params(params):\n # input: params dictionary containing lists of possible values\n chosen_params = {}\n for param in params:\n chosen_params[param] = choice(params[param])\n return chosen_params\n\ndef word_embedding_compression(word_embedding_module, d_model):\n \n \"\"\"\n Compresses a given word_embedding_module (type torch.Embedding) into a module of d_model dimensionality.\n \"\"\"\n word_embedding_matrix = word_embedding_module.weight\n assert word_embedding_matrix.shape[1]>=d_model, 'The desired word embedding dimensionality is greater than the teacher word embeddings. That is not compression! 
Make d_model smaller.'\n # return the module if it's the same dimensionality\n if word_embedding_matrix.shape[1]==d_model:\n return word_embedding_module\n # else compress\n pca = PCA(n_components = d_model)\n compressed_word_embedding_matrix = pca.fit_transform(word_embedding_matrix.detach().cpu().numpy())\n compressed_word_embedding_matrix = torch.from_numpy(compressed_word_embedding_matrix)\n word_embedding_module.weight = torch.nn.parameter.Parameter(compressed_word_embedding_matrix)\n return word_embedding_module"
] | [
[
"torch.nn.init.trunc_normal_",
"torch.reshape",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.mean_squared_error",
"torch.full_like",
"torch.log",
"torch.no_grad",
"numpy.mean",
"torch.nn.parameter.Parameter",
"torch.stack",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.init.zeros_",
"numpy.array",
"sklearn.decomposition.PCA"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
StanfordVL/bullet3_ik | [
"52da668d60b32bfe6eea96d3ef3b9d442b2b8926",
"52da668d60b32bfe6eea96d3ef3b9d442b2b8926",
"52da668d60b32bfe6eea96d3ef3b9d442b2b8926"
] | [
"examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_logging.py",
"examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_derpy.py",
"examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env_example.py"
] | [
"\"\"\"A proto buffer based logging system for minitaur experiments.\n\nThe logging system records the time since reset, base position, orientation,\nangular velocity and motor information (joint angle, speed, and torque) into a\nproto buffer. See minitaur_logging.proto for more details. The episode_proto is\nupdated per time step by the environment and saved onto disk for each episode.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport time\n\nimport tensorflow as tf\nimport minitaur_logging_pb2\n\nNUM_MOTORS = 8\n\n\ndef _update_base_state(base_state, values):\n base_state.x = values[0]\n base_state.y = values[1]\n base_state.z = values[2]\n\n\ndef preallocate_episode_proto(episode_proto, max_num_steps):\n \"\"\"Preallocate the memory for proto buffer.\n\n Dynamically allocating memory as the protobuf expands causes unexpected delay\n that is not tolerable with locomotion control.\n\n Args:\n episode_proto: The proto that holds the state/action data for the current\n episode.\n max_num_steps: The max number of steps that will be recorded in the proto.\n The state/data over max_num_steps will not be stored in the proto.\n \"\"\"\n for _ in range(max_num_steps):\n step_log = episode_proto.state_action.add()\n step_log.info_valid = False\n step_log.time.seconds = 0\n step_log.time.nanos = 0\n for _ in range(NUM_MOTORS):\n motor_state = step_log.motor_states.add()\n motor_state.angle = 0\n motor_state.velocity = 0\n motor_state.torque = 0\n motor_state.action = 0\n _update_base_state(step_log.base_position, [0, 0, 0])\n _update_base_state(step_log.base_orientation, [0, 0, 0])\n _update_base_state(step_log.base_angular_vel, [0, 0, 0])\n\n\ndef update_episode_proto(episode_proto, minitaur, action, step):\n \"\"\"Update the episode proto by appending the states/action of the minitaur.\n\n Note that the state/data over max_num_steps preallocated\n (len(episode_proto.state_action)) will not be stored in the proto.\n Args:\n episode_proto: The proto that holds the state/action data for the current\n episode.\n minitaur: The minitaur instance. See envs.minitaur for details.\n action: The action applied at this time step. 
The action is an 8-element\n numpy floating-point array.\n step: The current step index.\n \"\"\"\n max_num_steps = len(episode_proto.state_action)\n if step >= max_num_steps:\n tf.logging.warning(\n \"{}th step is not recorded in the logging since only {} steps were \"\n \"pre-allocated.\".format(step, max_num_steps))\n return\n step_log = episode_proto.state_action[step]\n step_log.info_valid = minitaur.IsObservationValid()\n time_in_seconds = minitaur.GetTimeSinceReset()\n step_log.time.seconds = int(time_in_seconds)\n step_log.time.nanos = int((time_in_seconds - int(time_in_seconds)) * 1e9)\n\n motor_angles = minitaur.GetMotorAngles()\n motor_velocities = minitaur.GetMotorVelocities()\n motor_torques = minitaur.GetMotorTorques()\n for i in range(minitaur.num_motors):\n step_log.motor_states[i].angle = motor_angles[i]\n step_log.motor_states[i].velocity = motor_velocities[i]\n step_log.motor_states[i].torque = motor_torques[i]\n step_log.motor_states[i].action = action[i]\n\n _update_base_state(step_log.base_position, minitaur.GetBasePosition())\n _update_base_state(step_log.base_orientation, minitaur.GetBaseRollPitchYaw())\n _update_base_state(step_log.base_angular_vel,\n minitaur.GetBaseRollPitchYawRate())\n\n\nclass MinitaurLogging(object):\n \"\"\"A logging system that records the states/action of the minitaur.\"\"\"\n\n def __init__(self, log_path=None):\n self._log_path = log_path\n\n # TODO(jietan): Consider using recordio to write the logs.\n def save_episode(self, episode_proto):\n \"\"\"Save episode_proto to self._log_path.\n\n self._log_path is the directory name. A time stamp is the file name of the\n log file. For example, when self._log_path is \"/tmp/logs/\", the actual\n log file would be \"/tmp/logs/yyyy-mm-dd-hh:mm:ss\".\n\n Args:\n episode_proto: The proto that holds the states/action for the current\n episode that needs to be save to disk.\n Returns:\n The full log path, including the directory name and the file name.\n \"\"\"\n if not self._log_path or not episode_proto.state_action:\n return self._log_path\n if not tf.gfile.Exists(self._log_path):\n tf.gfile.MakeDirs(self._log_path)\n ts = time.time()\n time_stamp = datetime.datetime.fromtimestamp(ts).strftime(\n \"%Y-%m-%d-%H:%M:%S\")\n log_path = os.path.join(self._log_path,\n \"minitaur_log_{}\".format(time_stamp))\n with tf.gfile.Open(log_path, \"w\") as f:\n f.write(episode_proto.SerializeToString())\n return log_path\n\n def restore_episode(self, log_path):\n \"\"\"Restore the episodic proto from the log path.\n\n Args:\n log_path: The full path of the log file.\n Returns:\n The minitaur episode proto.\n \"\"\"\n with tf.gfile.Open(log_path) as f:\n content = f.read()\n episode_proto = minitaur_logging_pb2.MinitaurEpisode()\n episode_proto.ParseFromString(content)\n return episode_proto\n",
"\"\"\"This file implements the functionalities of a minitaur derpy using pybullet.\n\nIt is the result of first pass system identification for the derpy robot. The\n\n\n\"\"\"\nimport math\n\nimport numpy as np\nimport minitaur\n\nKNEE_CONSTRAINT_POINT_LONG = [0, 0.0055, 0.088]\nKNEE_CONSTRAINT_POINT_SHORT = [0, 0.0055, 0.100]\n\n\nclass MinitaurDerpy(minitaur.Minitaur):\n \"\"\"The minitaur class that simulates a quadruped robot from Ghost Robotics.\n\n \"\"\"\n\n def Reset(self, reload_urdf=True, default_motor_angles=None, reset_time=3.0):\n \"\"\"Reset the minitaur to its initial states.\n\n Args:\n reload_urdf: Whether to reload the urdf file. If not, Reset() just place\n the minitaur back to its starting position.\n default_motor_angles: The default motor angles. If it is None, minitaur\n will hold a default pose (motor angle math.pi / 2) for 100 steps. In\n torque control mode, the phase of holding the default pose is skipped.\n reset_time: The duration (in seconds) to hold the default motor angles. If\n reset_time <= 0 or in torque control mode, the phase of holding the\n default pose is skipped.\n \"\"\"\n if self._on_rack:\n init_position = minitaur.INIT_RACK_POSITION\n else:\n init_position = minitaur.INIT_POSITION\n if reload_urdf:\n if self._self_collision_enabled:\n self.quadruped = self._pybullet_client.loadURDF(\n \"%s/quadruped/minitaur_derpy.urdf\" % self._urdf_root,\n init_position,\n useFixedBase=self._on_rack,\n flags=(\n self._pybullet_client.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT))\n else:\n self.quadruped = self._pybullet_client.loadURDF(\n \"%s/quadruped/minitaur_derpy.urdf\" % self._urdf_root,\n init_position,\n useFixedBase=self._on_rack)\n self._BuildJointNameToIdDict()\n self._BuildUrdfIds()\n if self._remove_default_joint_damping:\n self._RemoveDefaultJointDamping()\n self._BuildMotorIdList()\n self._RecordMassInfoFromURDF()\n self._RecordInertiaInfoFromURDF()\n self.ResetPose(add_constraint=True)\n else:\n self._pybullet_client.resetBasePositionAndOrientation(\n self.quadruped, init_position, minitaur.INIT_ORIENTATION)\n self._pybullet_client.resetBaseVelocity(self.quadruped, [0, 0, 0],\n [0, 0, 0])\n self.ResetPose(add_constraint=False)\n\n self._overheat_counter = np.zeros(self.num_motors)\n self._motor_enabled_list = [True] * self.num_motors\n self._step_counter = 0\n\n # Perform reset motion within reset_duration if in position control mode.\n # Nothing is performed if in torque control mode for now.\n # TODO(jietan): Add reset motion when the torque control is fully supported.\n self._observation_history.clear()\n if not self._torque_control_enabled and reset_time > 0.0:\n self.ReceiveObservation()\n for _ in range(100):\n self.ApplyAction([math.pi / 2] * self.num_motors)\n self._pybullet_client.stepSimulation()\n self.ReceiveObservation()\n if default_motor_angles is not None:\n num_steps_to_reset = int(reset_time / self.time_step)\n for _ in range(num_steps_to_reset):\n self.ApplyAction(default_motor_angles)\n self._pybullet_client.stepSimulation()\n self.ReceiveObservation()\n self.ReceiveObservation()\n\n def _ResetPoseForLeg(self, leg_id, add_constraint):\n \"\"\"Reset the initial pose for the leg.\n\n Args:\n leg_id: It should be 0, 1, 2, or 3, which represents the leg at\n front_left, back_left, front_right and back_right.\n add_constraint: Whether to add a constraint at the joints of two feet.\n \"\"\"\n knee_friction_force = 0\n half_pi = math.pi / 2.0\n knee_angle = -2.1834\n\n leg_position = minitaur.LEG_POSITION[leg_id]\n 
self._pybullet_client.resetJointState(\n self.quadruped,\n self._joint_name_to_id[\"motor_\" + leg_position + \"L_joint\"],\n self._motor_direction[2 * leg_id] * half_pi,\n targetVelocity=0)\n self._pybullet_client.resetJointState(\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"L_joint\"],\n self._motor_direction[2 * leg_id] * knee_angle,\n targetVelocity=0)\n self._pybullet_client.resetJointState(\n self.quadruped,\n self._joint_name_to_id[\"motor_\" + leg_position + \"R_joint\"],\n self._motor_direction[2 * leg_id + 1] * half_pi,\n targetVelocity=0)\n self._pybullet_client.resetJointState(\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"R_joint\"],\n self._motor_direction[2 * leg_id + 1] * knee_angle,\n targetVelocity=0)\n if add_constraint:\n if leg_id < 2:\n self._pybullet_client.createConstraint(\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"R_joint\"],\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"L_joint\"],\n self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0],\n KNEE_CONSTRAINT_POINT_SHORT, KNEE_CONSTRAINT_POINT_LONG)\n else:\n self._pybullet_client.createConstraint(\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"R_joint\"],\n self.quadruped,\n self._joint_name_to_id[\"knee_\" + leg_position + \"L_joint\"],\n self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0],\n KNEE_CONSTRAINT_POINT_LONG, KNEE_CONSTRAINT_POINT_SHORT)\n\n if self._accurate_motor_model_enabled or self._pd_control_enabled:\n # Disable the default motor in pybullet.\n self._pybullet_client.setJointMotorControl2(\n bodyIndex=self.quadruped,\n jointIndex=(\n self._joint_name_to_id[\"motor_\" + leg_position + \"L_joint\"]),\n controlMode=self._pybullet_client.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n self._pybullet_client.setJointMotorControl2(\n bodyIndex=self.quadruped,\n jointIndex=(\n self._joint_name_to_id[\"motor_\" + leg_position + \"R_joint\"]),\n controlMode=self._pybullet_client.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n\n else:\n self._SetDesiredMotorAngleByName(\n \"motor_\" + leg_position + \"L_joint\",\n self._motor_direction[2 * leg_id] * half_pi)\n self._SetDesiredMotorAngleByName(\n \"motor_\" + leg_position + \"R_joint\",\n self._motor_direction[2 * leg_id + 1] * half_pi)\n\n self._pybullet_client.setJointMotorControl2(\n bodyIndex=self.quadruped,\n jointIndex=(self._joint_name_to_id[\"knee_\" + leg_position + \"L_joint\"]),\n controlMode=self._pybullet_client.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n self._pybullet_client.setJointMotorControl2(\n bodyIndex=self.quadruped,\n jointIndex=(self._joint_name_to_id[\"knee_\" + leg_position + \"R_joint\"]),\n controlMode=self._pybullet_client.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n",
"\"\"\"An example to run of the minitaur gym environment with standing up goal.\n\n\"\"\"\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom pybullet_envs.minitaur.envs import minitaur_stand_gym_env\n\n\ndef StandUpExample():\n \"\"\"An example that the minitaur stands up.\"\"\"\n steps = 1000\n environment = minitaur_stand_gym_env.MinitaurStandGymEnv(\n render=True,\n motor_velocity_limit=np.inf)\n action = [0.5]\n _, _, done, _ = environment.step(action)\n for t in xrange(steps):\n # A policy that oscillates between -1 and 1\n action = [math.sin(t * math.pi * 0.01)]\n _, _, done, _ = environment.step(action)\n if done:\n break\n\n\ndef main(unused_argv):\n StandUpExample()\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run()\n"
] | [
[
"tensorflow.gfile.Exists",
"tensorflow.gfile.MakeDirs",
"tensorflow.gfile.Open"
],
[
"numpy.zeros"
],
[
"tensorflow.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
zacjohnston/pyburst | [
"f7d5ae9a229704d1cbcf656afb9fb3e29fb71c0c",
"f7d5ae9a229704d1cbcf656afb9fb3e29fb71c0c",
"f7d5ae9a229704d1cbcf656afb9fb3e29fb71c0c"
] | [
"pyburst/mcmc/mcmc_plot.py",
"pyburst/misc/alpha.py",
"pyburst/kepler/kepler_tools.py"
] | [
"import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport chainconsumer\nfrom math import ceil\n\n# pyburst\nfrom . import mcmc_versions\nfrom . import mcmc_tools\nfrom . import burstfit\nfrom . import mcmc_params\nfrom pyburst.observations import obs_tools\nfrom pyburst.plotting import plot_tools\nfrom pyburst.grids.grid_strings import get_source_path, print_warning\nfrom pyburst.misc.pyprint import printv\n\nGRIDS_PATH = os.environ['KEPLER_GRIDS']\n\n\ndef default_plt_options():\n \"\"\"Initialise default plot parameters\"\"\"\n params = {'mathtext.default': 'regular',\n 'font.family': 'serif',\n 'text.usetex': False}\n plt.rcParams.update(params)\n\n\ndefault_plt_options()\n\n\ndef save_plot(fig, prefix, save, source, version, display, chain=None, n_dimensions=None,\n n_walkers=None, n_steps=None, label=None, extension='.png',\n enforce_chain_info=True):\n \"\"\"Handles saving/displaying of a figure passed to it\n \"\"\"\n if enforce_chain_info and (None in (n_dimensions, n_walkers, n_steps)):\n if chain is None:\n raise ValueError('Must provide chain, or specify each of '\n '(n_dimensions, n_walkers, n_steps)')\n else:\n n_walkers, n_steps, n_dimensions = chain.shape\n\n if save:\n filename = mcmc_tools.get_mcmc_string(source=source, version=version,\n n_walkers=n_walkers, n_steps=n_steps,\n prefix=prefix, label=label,\n extension=extension)\n source_path = get_source_path(source)\n filepath = os.path.join(source_path, 'plots', prefix, f'{filename}')\n fig.savefig(filepath)\n\n if display:\n plt.show(block=False)\n else:\n plt.close(fig)\n\n\ndef save_multiple_synth(series, source, version, n_steps, discard, n_walkers=960,\n walkers=True, posteriors=True, contours=False,\n display=False, mass_radius=True,\n synth=True, compressed=False):\n \"\"\"Save plots for multiple series in a synthetic data batch\n \"\"\"\n # TODO reuse max_lhood point\n default_plt_options()\n for ser in series:\n if synth:\n full_source = f'{source}_{ser}'\n else:\n full_source = source\n\n chain = mcmc_tools.load_chain(full_source, n_walkers=n_walkers, n_steps=n_steps,\n version=version, compressed=compressed)\n\n if walkers:\n plot_walkers(chain, source=full_source, save=True,\n display=display, version=version)\n\n if posteriors:\n plot_posteriors(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n if contours:\n plot_contours(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n if mass_radius:\n plot_mass_radius(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n\ndef save_all_plots(source, version, discard, n_steps, n_walkers=1000, display=False,\n save=True, cap=None, posteriors=True, contours=True,\n redshift=True, mass_radius=True, verbose=True, compressed=False):\n \"\"\"Saves (and/or displays) main MCMC plots\n \"\"\"\n chain = mcmc_tools.load_chain(source, version=version, n_steps=n_steps,\n n_walkers=n_walkers, verbose=verbose,\n compressed=compressed)\n if posteriors:\n printv('Plotting posteriors', verbose=verbose)\n plot_posteriors(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n if contours:\n printv('Plotting contours', verbose=verbose)\n plot_contours(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n if mass_radius:\n printv('Plotting mass-radius', verbose=verbose)\n plot_mass_radius(chain, source=source, save=save, discard=discard, cap=cap,\n 
display=display, version=version)\n\n if redshift:\n printv('Plotting redshift', verbose=verbose)\n plot_redshift(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n\ndef plot_contours(chain, discard, source, version, cap=None,\n display=True, save=False, truth_values=None, parameters=None,\n sigmas=np.linspace(0, 2, 5), cc=None, summary=False, fontsize=14,\n max_ticks=4):\n \"\"\"Plots posterior contours of mcmc chain\n\n parameters : [str]\n specify which parameters to plot\n \"\"\"\n default_plt_options()\n\n if cc is None:\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)\n cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,\n discard=discard, cap=cap, sigmas=sigmas,\n summary=summary, fontsize=fontsize,\n max_ticks=max_ticks)\n if parameters is not None:\n parameters = plot_tools.convert_mcmc_labels(param_keys=parameters)\n\n # TODO: figsize\n if truth_values is not None:\n fig = cc.plotter.plot(truth=truth_values, parameters=parameters)\n else:\n fig = cc.plotter.plot(parameters=parameters)\n\n save_plot(fig, prefix='contours', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_posteriors(chain, discard, source, version, cap=None,\n display=True, save=False, truth_values=None,\n cc=None):\n \"\"\"Plots posterior distributions of mcmc chain\n\n truth_values : list|dict\n Specify parameters of point (e.g. the true value) to draw on the distributions.\n \"\"\"\n default_plt_options()\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)\n if cc is None:\n cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,\n discard=discard, cap=cap)\n height = 3 * ceil(len(pkeys) / 4)\n\n if truth_values is not None:\n fig = cc.plotter.plot_distributions(figsize=[10, height],\n truth=truth_values)\n else:\n fig = cc.plotter.plot_distributions(figsize=[10, height])\n\n plt.tight_layout()\n save_plot(fig, prefix='posteriors', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_mass_radius(chain, discard, source, version, cap=None,\n display=True, save=False, summary=False,\n sigmas=np.linspace(0, 2, 5), fontsize=18, figsize='column'):\n \"\"\"Plots contours of mass versus radius from a given chain\n \"\"\"\n default_plt_options()\n mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)\n mass_radius_chain = mcmc_params.get_mass_radius_chain(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, mass_nw=mass_nw,\n mass_gr=mass_gr)\n\n cc = mcmc_tools.setup_custom_chainconsumer(mass_radius_chain, parameters=['R', 'M'],\n sigmas=sigmas, summary=summary,\n fontsize=fontsize)\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.16, bottom=0.15)\n\n save_plot(fig, prefix='mass-radius', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_redshift(chain, discard, source, version, cap=None, display=True, save=False):\n \"\"\"Plots posterior distribution of redshift given a chain\n \"\"\"\n mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)\n redshift_chain = mcmc_params.get_redshift_chain(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, mass_nw=mass_nw,\n mass_gr=mass_gr)\n\n cc = 
mcmc_tools.setup_custom_chainconsumer(redshift_chain, parameters=['1+z'])\n fig = cc.plotter.plot_distributions(figsize=[5, 5])\n plt.tight_layout()\n\n save_plot(fig, prefix='redshift', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_gravitational_contours(chain, discard, source, version, cap=None, display=True,\n save=False, r_nw=10, sigmas=np.linspace(0, 2, 5),\n summary=False, unit_labels=True, fontsize=16,\n fixed_grav=False, figsize=None):\n \"\"\"Plots contours of gravitational parameters\n \"\"\"\n cc = mcmc_tools.setup_gravitational_chainconsumer(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, fixed_grav=fixed_grav,\n summary=summary, r_nw=r_nw,\n unit_labels=unit_labels,\n sigmas=sigmas, fontsize=fontsize)\n if fixed_grav:\n fig = cc.plotter.plot_distributions(figsize=figsize)\n plt.tight_layout()\n else:\n fig = cc.plotter.plot()\n\n save_plot(fig, prefix='gravitational', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_inclination(chain, discard, source, version, cap=None, display=True,\n save=False, disc_model='he16_a', sigmas=np.linspace(0, 2, 5),\n summary=False, unit_labels=True, figsize=(4, 4), fontsize=18):\n \"\"\"Plots contours of parameters derived using disc model\n \"\"\"\n disc_chain = mcmc_params.get_disc_chain(chain=chain, discard=discard, cap=cap,\n source=source, version=version,\n disc_model=disc_model)\n\n cc = mcmc_tools.setup_custom_chainconsumer(disc_chain, parameters=['d', 'i'],\n sigmas=sigmas, summary=summary,\n unit_labels=unit_labels, fontsize=fontsize)\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.15, bottom=0.15)\n save_plot(fig, prefix='disc', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_distance_anisotropy(chain, discard, source, version, cap=None, display=True,\n save=False, sigmas=np.linspace(0, 2, 5), summary=False,\n figsize=(4, 4), unit_labels=True, fontsize=18):\n \"\"\"Plots contours of MCMC parameters d_b, xi_ratio\n \"\"\"\n d_b_chain = mcmc_params.get_param_chain(chain, param='d_b', discard=discard,\n source=source, version=version, cap=cap)\n xi_ratio_chain = mcmc_params.get_param_chain(chain, param='xi_ratio', discard=discard,\n source=source, version=version, cap=cap)\n\n flat_chain = np.column_stack([d_b_chain, xi_ratio_chain])\n cc = mcmc_tools.setup_custom_chainconsumer(flat_chain, parameters=['d_b', 'xi_ratio'],\n sigmas=sigmas, summary=summary,\n unit_labels=unit_labels, fontsize=fontsize)\n\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.2, bottom=0.2)\n save_plot(fig, prefix='distance', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_xedd(chain, discard, source, version, cap=None, display=True,\n save=False, cloud=True, sigmas=np.linspace(0, 2, 10), figsize=(5, 5)):\n \"\"\"Plots posterior for Eddington hydrogen composition (X_Edd)\n \"\"\"\n default_plt_options()\n xedd_chain = mcmc_params.get_xedd_chain(chain=chain, discard=discard, source=source,\n version=version, cap=cap)\n\n label = plot_tools.quantity_label('xedd')\n cc = mcmc_tools.setup_custom_chainconsumer(xedd_chain, parameters=[label],\n sigmas=sigmas, cloud=cloud)\n fig = cc.plotter.plot(figsize=figsize)\n\n save_plot(fig, prefix='xedd', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_walkers(chain, source, version, 
params=None, n_lines=30, xlim=-1,\n display=True, save=False, label=''):\n \"\"\"Plots walkers vs steps (i.e. \"time\")\n\n Parameters\n ----------\n source : str\n version : int\n chain : np.array\n chain as returned by load_chain()\n params : [str]\n parameter(s) of which to plot walkers.\n n_lines : int\n approx number of lines/walkers to plot on parameter\n xlim : int\n x-axis limit to plot (n_steps), i.e. ax.set_xlim((0, xlim))\n label : str\n optional label to add to filename when saving\n display : bool\n save : bool\n \"\"\"\n default_plt_options()\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n\n # ===== Default to splitting all params into 2 plots =====\n if params is None:\n half = int(len(pkeys) / 2)\n for i, param_split in enumerate((pkeys[:half], pkeys[half:])):\n plot_walkers(chain=chain, source=source, version=version,\n params=param_split, n_lines=n_lines, xlim=xlim,\n display=display, save=save, label=f'P{i + 1}')\n return\n\n n_walkers, n_steps, n_dim = chain.shape\n n_params = len(params)\n\n jump_size = round(n_walkers / n_lines)\n steps = np.arange(n_steps)\n walker_idxs = np.arange(0, n_walkers, jump_size)\n\n # noinspection PyTypeChecker\n fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(10, 12))\n\n for i in range(n_params):\n p_idx = pkeys.index(params[i])\n\n for j in walker_idxs:\n walker = chain[j, :, p_idx]\n ax[i].plot(steps, walker, linewidth=0.5, color='black')\n ax[i].set_ylabel(params[i])\n\n if xlim == -1:\n xlim = n_steps\n\n ax[-1].set_xlabel('Step')\n ax[-1].set_xlim([0, xlim])\n plt.tight_layout()\n\n if display:\n plt.show(block=False)\n\n save_plot(fig, prefix='walkers', chain=chain, save=save, source=source,\n version=version, display=display,\n label=label, extension='.png')\n\n\ndef plot_qb_mdot(chain, source, version, discard, cap=None, display=True, save=False,\n figsize=(5, 5), fontsize=16, sigmas=(1, 2)):\n \"\"\"Plots 2D contours of Qb versus Mdot for each epoch (from multi-epoch chain)\n \"\"\"\n mv = mcmc_versions.McmcVersion(source=source, version=version)\n chain_flat = mcmc_tools.slice_chain(chain, discard=discard, cap=cap, flatten=True)\n\n system_table = obs_tools.load_summary(mv.system)\n epochs = list(system_table.epoch)\n cc = chainconsumer.ChainConsumer()\n\n param_labels = []\n for param in ['mdot', 'qb']:\n param_labels += [plot_tools.full_label(param)]\n\n for i, epoch in enumerate(epochs):\n mdot_idx = mv.param_keys.index(f'mdot{i + 1}')\n qb_idx = mv.param_keys.index(f'qb{i + 1}')\n param_idxs = [mdot_idx, qb_idx]\n\n cc.add_chain(chain_flat[:, param_idxs], parameters=param_labels,\n name=str(epoch))\n\n cc.configure(kde=False, smooth=0, label_font_size=fontsize,\n tick_font_size=fontsize-2, sigmas=sigmas)\n fig = cc.plotter.plot(display=False, figsize=figsize)\n fig.subplots_adjust(left=0.2, bottom=0.2)\n\n save_plot(fig, prefix='qb', save=save, source=source, version=version,\n display=display, chain=chain)\n return fig\n\n\ndef plot_epoch_posteriors(master_cc, source, version, display=True, save=False,\n col_wrap=None, alt_params=True, unit_labels=True,\n add_text=True, fontsize=16):\n \"\"\"Plot posteriors for multiiple epoch chains\n\n parameters\n ----------\n master_cc : ChainConsumer\n Contains the multi-epoch chain, created with setup_master_chainconsumer()\n source : str\n version : int\n display : bool (optional)\n save : bool (optional)\n col_wrap : int (optional)\n \"\"\"\n param_order = {\n 'grid5': ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'm_nw',\n 'm_gr', 
'd_b', 'xi_ratio'],\n 'he2': ['mdot1', 'mdot2', 'qb1', 'qb2', 'm_gr', 'd_b', 'xi_ratio'],\n }\n\n param_keys = param_order[source]\n\n # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n # quick and dirty patch!\n if alt_params:\n param_keys = ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'g',\n 'M', 'd_b', 'xi_ratio']\n # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n formatted_params = plot_tools.convert_mcmc_labels(param_keys, unit_labels=unit_labels)\n n_epochs = len(master_cc.chains) - 1\n\n if col_wrap is None:\n col_wrap = n_epochs\n\n height = 3 * ceil(len(param_keys) / n_epochs)\n fig = master_cc.plotter.plot_distributions(parameters=formatted_params,\n col_wrap=col_wrap,\n figsize=[8, height],\n display=False)\n if add_text:\n add_epoch_text(fig, fontsize=fontsize)\n\n plt.tight_layout()\n\n save_plot(fig, prefix='multi_posteriors', save=save, source=source, version=version,\n display=display, enforce_chain_info=False)\n return fig\n\n\ndef plot_max_lhood(source, version, n_walkers, n_steps, verbose=True, re_interp=False,\n display=True, save=False):\n default_plt_options()\n max_params, max_lhood = mcmc_tools.get_max_lhood_params(source, version=version,\n n_walkers=n_walkers,\n n_steps=n_steps,\n verbose=verbose,\n return_lhood=True)\n bfit = burstfit.BurstFit(source=source, version=version, verbose=False, re_interp=re_interp)\n lhood, fig = bfit.lhood(max_params, plot=True)\n\n if lhood != max_lhood:\n print_warning(f'lhoods do not match (original={max_lhood:.2f}, current={lhood:.2f}). '\n + 'BurstFit (e.g. lhood, lnhood) or interpolator may have changed')\n\n save_plot(fig, prefix='compare', n_dimensions=len(max_params),\n n_walkers=n_walkers, n_steps=n_steps, save=save, source=source,\n version=version, display=display)\n\n\ndef plot_bprop_sample(bp_sample, source, version, bprops=None, legend=True,\n subplot_figsize=(3, 2.5), bfit=None, fontsize=14,\n vlines=True):\n \"\"\"Plot burst properties from large sample against observations\n\n bprop_sample : np.array\n obtained using mcmc_tools.bprop_sample()\n \"\"\"\n if bfit is None:\n bfit = burstfit.BurstFit(source=source, version=version, verbose=False)\n\n if bprops is None:\n bprops = bfit.mcmc_version.bprops\n\n cc = mcmc_tools.setup_bprop_chainconsumer(chain=None, n=None, discard=None,\n source=source, version=version,\n bp_sample=bp_sample)\n bp_summary = mcmc_tools.extract_bprop_summary(cc, source=source, version=version)\n\n n_bprops = len(bprops)\n n_rows = int(np.ceil(n_bprops / 2))\n n_cols = {False: 1, True: 2}.get(n_bprops > 1)\n\n figsize = (n_cols * subplot_figsize[0], n_rows * subplot_figsize[1])\n fig, ax = plt.subplots(n_rows, n_cols, sharex=False, figsize=figsize)\n\n if n_bprops % 2 == 1 and n_bprops > 1: # blank odd-numbered subplot\n ax[-1, -1].axis('off')\n\n for i, bprop in enumerate(bprops):\n subplot_row = int(np.floor(i / 2))\n subplot_col = i % 2\n if n_cols > 1:\n axis = ax[subplot_row, subplot_col]\n else:\n axis = ax\n u_model = np.diff(bp_summary[:, :, i], axis=0)\n bfit.plot_compare(model=bp_summary[1, :, i], u_model=u_model,\n bprop=bprop, fontsize=fontsize,\n ax=axis, display=False, vlines=vlines,\n legend=True if (i == 0 and legend) else False,\n xlabel=True if (i in [n_bprops-1, ]) else False)\n\n fig.subplots_adjust(wspace=0.4)\n plt.show(block=False)\n return fig\n\n\ndef plot_autocorrelation(chain, source, version, n_points=10, load=True, save_tau=True,\n ylims=None):\n \"\"\"Plots estimated integrated autocorrelation time\n\n Note: Adapted from https://dfm.io/posts/autocorr/\n \"\"\"\n 
mv = mcmc_versions.McmcVersion(source=source, version=version)\n params_fmt = plot_tools.convert_mcmc_labels(mv.param_keys)\n\n if load:\n sample_steps, autoc = mcmc_tools.load_autocorrelation(source, version=version,\n n_steps=chain.shape[1])\n else:\n sample_steps, autoc = mcmc_tools.get_autocorrelation(chain, source=source,\n version=version,\n n_points=n_points,\n save=save_tau)\n fig, ax = plt.subplots()\n\n for i, param in enumerate(mv.param_keys):\n ax.loglog(sample_steps, autoc[i], \"o-\", label=rf\"{params_fmt[i]}\")\n\n ax.plot(sample_steps, sample_steps / 10.0, \"--k\", label=r\"$\\tau = N/10$\")\n\n if ylims is None:\n xlim = ax.get_xlim()\n ylims = [5, xlim[1] / 10]\n\n ax.set_ylim(ylims)\n ax.set_xlabel(\"N steps\")\n ax.set_ylabel(r\"$\\tau$ estimate (N)\")\n ax.legend(fontsize=14, ncol=2, labelspacing=0.3)\n plt.show(block=False)\n\n return fig\n\n\ndef add_epoch_text(fig, fontsize, epochs=(1998, 2000, 2007),\n colours=('C0', 'C2', 'C3')):\n \"\"\"Adds text of epoch to figure subplots\n \"\"\"\n for i, epoch in enumerate(epochs):\n ax = fig.axes[i]\n ax.text(0.95, 0.95, str(epoch), color=colours[i], fontsize=fontsize,\n transform=ax.transAxes, va='top', ha='right')\n",
"import numpy as np\nfrom astropy import units\n\nfrom pyburst.grids import grid_analyser\nfrom pyburst.physics import gravity\n\n\ndef add_alpha(kgrid):\n \"\"\"Adds alpha column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n add_redshift(kgrid)\n add_phi(kgrid)\n add_lum_acc(kgrid)\n add_acc_energy(kgrid)\n\n summ = kgrid.summ\n kgrid.summ['alpha'] = summ.acc_energy / summ.fluence\n kgrid.summ['u_alpha'] = summ.alpha * np.sqrt((summ.u_acc_energy / summ.acc_energy)**2\n + (summ.u_fluence / summ.fluence)**2)\n\n\ndef add_lum_acc(kgrid):\n \"\"\"Adds accretion luminosity column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n mdot_edd = 1.75e-8 # M_sun / yr\n msunyr_to_gramsec = (units.M_sun / units.year).to(units.g / units.s)\n check_column(kgrid.params, column='phi', label='params', remedy='add_phi()')\n\n mdot = kgrid.params.accrate * mdot_edd * msunyr_to_gramsec\n lum_acc = -mdot * kgrid.params.phi\n kgrid.params['lum_acc'] = lum_acc\n\n\ndef add_acc_energy(kgrid):\n \"\"\"Adds accretion energy column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n check_column(kgrid.params, column='lum_acc', label='params', remedy='add_lum_acc()')\n kgrid.summ['acc_energy'] = kgrid.params.lum_acc * kgrid.summ.dt\n kgrid.summ['u_acc_energy'] = kgrid.params.lum_acc * kgrid.summ.u_dt\n\n\ndef add_redshift(kgrid, m_ratio=1.0):\n \"\"\"Adds redshift (1+z) column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n m_ratio : flt (optional)\n mass ratio, M_gr / M_newton\n \"\"\"\n default_radius = 10\n\n if 'radius' not in kgrid.params.columns:\n print('Using default radius=10km')\n kgrid.params['radius'] = default_radius\n\n radii = np.array(kgrid.params.radius)\n masses = np.array(kgrid.params.mass)\n\n r_ratios, redshifts = gravity.gr_corrections(r=radii, m=masses, phi=m_ratio)\n kgrid.params['radius_gr'] = radii * r_ratios\n kgrid.params['mass_gr'] = masses * m_ratio\n kgrid.params['redshift'] = redshifts\n\n\ndef add_phi(kgrid):\n \"\"\"Adds phi (gravitational potential) column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n check_column(kgrid.params, column='redshift', label='params', remedy='add_redshift()')\n\n phi = gravity.get_potential_gr(redshift=kgrid.params.redshift)\n kgrid.params['phi'] = phi\n\n\ndef check_column(table, column, label, remedy):\n \"\"\"Checks if column exists in table\n\n parameters\n ----------\n table : pd.DataFrame\n table to check for columns\n column : str\n name of column to check for\n label : str\n name of table\n remedy : str\n suggested function to use\n \"\"\"\n if column not in table.columns:\n raise ValueError(f'No {column} column in kgrid.{label}, try using {remedy}')\n",
"import numpy as np\nimport pandas as pd\nimport os\nimport sys\nfrom scipy.interpolate import interp1d\n\n# kepler\ntry:\n import kepdump\nexcept ModuleNotFoundError:\n print('Kepler python module \"kepdump\" not found. Some functionality disabled.')\n\n# kepler_grids\nfrom pyburst.grids import grid_strings\nfrom pyburst.misc.pyprint import printv\n\nGRIDS_PATH = os.environ['KEPLER_GRIDS']\nMODELS_PATH = os.environ['KEPLER_MODELS']\n\n\ndef load_dumps(run, batch, source, cycles=None, basename='xrb'):\n \"\"\"Returns dict of dumpfiles, in form {cycle: dump_object}\n \"\"\"\n dumpfiles = {}\n cycles = check_cycles(cycles=cycles, run=run, batch=batch, source=source)\n for i, cycle in enumerate(cycles):\n print_cycle_progress(cycle, cycles=cycles, i=i, prefix=f'Loading dumpfiles: ')\n dumpfiles[cycle] = load_dump(cycle, run=run, batch=batch, source=source,\n basename=basename, verbose=False)\n return dumpfiles\n\ndef load_dump(cycle, run, batch, source, basename='xrb',\n prefix='', verbose=False):\n filename = get_dump_filename(cycle, run, basename, prefix=prefix)\n model_path = grid_strings.get_model_path(run=run, batch=batch, source=source,\n basename=basename)\n filepath = os.path.join(model_path, filename)\n printv(f'Loading: {filepath}', verbose=verbose)\n return kepdump.load(filepath, graphical=False, silent=True)\n\n\ndef get_dump_filename(cycle, run, basename, prefix=''):\n return f'{prefix}{basename}{run}#{cycle}'\n\n\ndef extract_dump_table(run, batch, source, cycles=None, dumps=None, basename='xrb'):\n \"\"\"Returns pandas table of summary dump values\n\n cycles : [int] (optional)\n list of cycles to extract. If None, uses all available\n dumps : {cycle: dump_object} (optional)\n Pre-loaded dumpfiles\n \"\"\"\n cycles = check_cycles(cycles=cycles, run=run, batch=batch, source=source)\n if dumps is None:\n dumps = load_dumps(run, batch=batch, source=source, cycles=cycles,\n basename=basename)\n table = pd.DataFrame()\n table['cycle'] = cycles\n\n for row in table.itertuples():\n table.loc[row.Index, 'time'] = dumps[row.cycle].time\n return table\n\n\ndef dump_dict(dump):\n \"\"\"Returns dict of common profiles (radial quantities)\n \"\"\"\n return {'y': dump.y,\n 'tn': dump.tn,\n 'xkn': dump.xkn,\n 'abar': dump.abar,\n 'zbar': dump.zbar,\n }\n\n\ndef check_cycles(cycles, run, batch, source):\n \"\"\"Get available cycles if none provided\n \"\"\"\n if cycles is None:\n return get_cycles(run=run, batch=batch, source=source)\n else:\n return cycles\n\n\ndef get_cycles(run, batch, source):\n \"\"\"Returns list of dump cycles available for given model\n \"\"\"\n path = grid_strings.get_model_path(run, batch, source=source)\n file_list = os.listdir(path)\n\n cycles = []\n for file in file_list:\n if '#' in file:\n idx = file.find('#')\n cyc = file[idx+1:]\n if cyc == 'nstop':\n continue\n else:\n cycles += [int(cyc)]\n return np.sort(cycles)\n\n\ndef get_cycle_times(cycles, run, batch, source, basename='xrb', prefix=''):\n \"\"\"Returns array of timestep values (s) for given cycles\n \"\"\"\n times = np.zeros(len(cycles))\n for i, cycle in enumerate(cycles):\n print_cycle_progress(cycle=cycle, cycles=cycles,\n i=i, prefix='Getting cycle times: ')\n dump = load_dump(cycle, run=run, batch=batch, source=source,\n basename=basename, prefix=prefix)\n times[i] = dump.time\n return times\n\n\ndef extract_temps(run, batch, source, depths, cycles=None, basename='xrb'):\n \"\"\"Extracts temperature versus time from mode dumps (at given depth)\n Returns as [t (s), T_1 .. 
T_n (K)], where n=len(depths)\n\n cycles : [int] (optional)\n specifiy which dump cycles to load. If None, load all available\n depths : array\n column depth(s) (g/cm^2) at which to extract temperatures\n \"\"\"\n if cycles is None:\n cycles = get_cycles(run, batch, source)\n\n temps = np.zeros((len(cycles), 1+len(depths)))\n for i, cycle in enumerate(cycles):\n print_cycle_progress(cycle, cycles, i, prefix='Extracting temperature: ')\n dump = load_dump(cycle, run=run, batch=batch, source=source, basename=basename)\n depth_temps = get_depth_temps(dump=dump, depths=depths)\n temps[i] = np.concatenate(([dump.time], depth_temps))\n return temps\n\n\ndef get_depth_temps(dump, depths):\n \"\"\"Returns temperature at given depth(s) (g/cm^2)\n \"\"\"\n linear = interp_temp(dump)\n return linear(depths)\n\n\ndef get_substrate_zone(dump):\n \"\"\"Returns column depth (g/cm^2) of transition to substrate\n \"\"\"\n mass_coord = dump.ymb\n substrate_mass = dump.parm('bmasslow')\n idx = np.searchsorted(np.sort(mass_coord), substrate_mass)\n return len(mass_coord) - idx\n\ndef interp_temp(dump, i0=1, i2=-2):\n \"\"\"Returns a linear interpolation function for given temperature profile\n \"\"\"\n return interp1d(dump.y[i0:i2], dump.tn[i0:i2])\n\n\ndef print_cycle_progress(cycle, cycles, i, prefix=''):\n sys.stdout.write(f'\\r{prefix}cycle {cycle}/{cycles[-1]} '\n f'({(i+1) / len(cycles) * 100:.1f}%)')\n if cycle == cycles[-1]:\n sys.stdout.write('\\n')\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.ceil",
"numpy.diff",
"matplotlib.pyplot.close",
"numpy.column_stack",
"numpy.floor",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show"
],
[
"numpy.array",
"numpy.sqrt"
],
[
"numpy.concatenate",
"numpy.sort",
"scipy.interpolate.interp1d",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
vishalbelsare/pyjanitor | [
"9c5ff2c4ad5969ee4bc683ba82010b55b55fd2bb"
] | [
"tests/functions/test_process_text.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\n\[email protected]\ndef process_test_df():\n \"Base DataFrame\"\n return pd.DataFrame(\n {\"text\": [\"a_b_c\", \"c_d_e\", np.nan, \"f_g_h\"], \"numbers\": range(1, 5)}\n )\n\n\[email protected]\ndef test_returns_dataframe():\n \"Base DataFrame\"\n return pd.DataFrame(\n {\"text\": [\"a1a2\", \"b1\", \"c1\"], \"numbers\": [1, 2, 3]},\n index=[\"A\", \"B\", \"C\"],\n )\n\n\ndef test_column_name_type(process_test_df):\n \"\"\"Raise TypeError if `column_name` type is not `str`.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text([\"text\"])\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_type(process_test_df):\n \"\"\"Raise TypeError if `new_column_names` type is not string or list.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text(\n column_name=\"text\", new_column_names={\"nutext\": \"rar\"}\n )\n\n\ndef test_column_name_presence(process_test_df):\n \"\"\"Raise ValueError if `column_name` is not in dataframe.\"\"\"\n with pytest.raises(ValueError):\n process_test_df.process_text(\n column_name=\"Test\", string_function=\"lower\"\n )\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_presence_str(test_returns_dataframe):\n \"\"\"\n Raise ValueError if `new_column_names` is a str\n and is in the dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=\"text\",\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n )\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_presence_list(test_returns_dataframe):\n \"\"\"\n Raise ValueError if `new_column_names` is a list and at least\n one of the new names is in the dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=[\"numbers\", \"newtext\"],\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n )\n\n\[email protected](reason=\"merge_frame is deprecated.\")\ndef test_merge_frame_type(test_returns_dataframe):\n \"\"\"\n Raise TypeError if `merge_frame` type is not bool.\"\"\"\n with pytest.raises(TypeError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=[\"number\", \"newtext\"],\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n merge_frame=\"True\",\n )\n\n\[email protected](reason=\"string_function must be present.\")\ndef test_string_function_is_None(process_test_df):\n \"\"\"Test that dataframe is returned if string_function is None.\"\"\"\n result = process_test_df.process_text(column_name=\"text\")\n assert_frame_equal(result, process_test_df)\n\n\ndef test_str_split(process_test_df):\n \"\"\"Test wrapper for Pandas `str.split()` method.\"\"\"\n\n expected = process_test_df.assign(\n text=process_test_df[\"text\"].str.split(\"_\")\n )\n\n result = process_test_df.process_text(\n column_name=\"text\", string_function=\"split\", pat=\"_\"\n )\n\n assert_frame_equal(result, expected)\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_new_column_names(process_test_df):\n \"\"\"\n Test that a new column name is created when\n `new_column_name` is not None.\n \"\"\"\n result = process_test_df.process_text(\n column_name=\"text\",\n new_column_names=\"new_text\",\n string_function=\"slice\",\n start=2,\n )\n expected = 
process_test_df.assign(\n new_text=process_test_df[\"text\"].str.slice(start=2)\n )\n assert_frame_equal(result, expected)\n\n\[email protected]\ndef no_nulls_df():\n return pd.DataFrame({\"text\": [\"a\", \"b\", \"c\", \"d\"], \"numbers\": range(1, 5)})\n\n\ndef test_str_cat(no_nulls_df):\n \"\"\"Test outcome for Pandas `.str.cat()` method.\"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\",\n string_function=\"cat\",\n others=[\"A\", \"B\", \"C\", \"D\"],\n )\n\n expected = no_nulls_df.assign(\n text=no_nulls_df[\"text\"].str.cat(others=[\"A\", \"B\", \"C\", \"D\"])\n )\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_cat_result_is_a_string(no_nulls_df):\n \"\"\"\n Test wrapper for Pandas `.str.cat()` method\n when the outcome is a string.\n \"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\",\n string_function=\"cat\",\n )\n\n expected = no_nulls_df.assign(text=no_nulls_df[\"text\"].str.cat())\n\n assert_frame_equal(result, expected)\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_str_cat_result_is_a_string_and_new_column_names(no_nulls_df):\n \"\"\"\n Test wrapper for Pandas `.str.cat()` method when the outcome is a string,\n and `new_column_names` is not None.\n \"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\", string_function=\"cat\", new_column_names=\"combined\"\n )\n\n expected = no_nulls_df.assign(combined=no_nulls_df[\"text\"].str.cat())\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_get():\n \"\"\"Test outcome for Pandas `.str.get()` method.\"\"\"\n\n df = pd.DataFrame(\n {\"text\": [\"aA\", \"bB\", \"cC\", \"dD\"], \"numbers\": range(1, 5)}\n )\n\n expected = df.assign(text=df[\"text\"].str.get(1))\n\n result = df.process_text(column_name=\"text\", string_function=\"get\", i=-1)\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_lower():\n \"\"\"Test string conversion to lowercase using `.str.lower()`.\"\"\"\n\n df = pd.DataFrame(\n {\n \"codes\": range(1, 7),\n \"names\": [\n \"Graham Chapman\",\n \"John Cleese\",\n \"Terry Gilliam\",\n \"Eric Idle\",\n \"Terry Jones\",\n \"Michael Palin\",\n ],\n }\n )\n\n expected = df.assign(names=df[\"names\"].str.lower())\n\n result = df.process_text(column_name=\"names\", string_function=\"lower\")\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_wrong(process_test_df):\n \"\"\"Test that an invalid Pandas string method raises an exception.\"\"\"\n with pytest.raises(KeyError):\n process_test_df.process_text(\n column_name=\"text\", string_function=\"invalid_function\"\n )\n\n\ndef test_str_wrong_parameters(process_test_df):\n \"\"\"Test that invalid argument for Pandas string method raises an error.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text(\n column_name=\"text\", string_function=\"split\", pattern=\"_\"\n )\n\n\[email protected]\ndef returns_frame_1():\n return pd.DataFrame(\n {\n \"ticker\": [\n \"spx 5/25/2001 p500\",\n \"spx 5/25/2001 p600\",\n \"spx 5/25/2001 p700\",\n ]\n }\n )\n\n\[email protected](reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_None(returns_frame_1):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is None\n is the result of the text processing, and is not merged to\n the original dataframe.\n \"\"\"\n\n expected_output = returns_frame_1[\"ticker\"].str.split(\" \", expand=True)\n result = returns_frame_1.process_text(\n column_name=\"ticker\", string_function=\"split\", expand=True, pat=\" \"\n )\n 
assert_frame_equal(result, expected_output)\n\n\[email protected](reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_not_None(returns_frame_1):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is not None\n is a merger of the original dataframe, and the dataframe\n generated from the text processing.\n \"\"\"\n expected_output = pd.concat(\n [\n returns_frame_1,\n returns_frame_1[\"ticker\"]\n .str.split(\" \", expand=True)\n .add_prefix(\"new_\"),\n ],\n axis=\"columns\",\n )\n result = returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=\"new_\",\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n assert_frame_equal(result, expected_output)\n\n\[email protected](reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_not_None_new_column_names_is_a_list(\n returns_frame_1,\n):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is not None\n is a merger of the original dataframe, and the dataframe\n generated from the text processing. Also, the `new_column_names`\n is a list.\n \"\"\"\n\n expected_output = pd.concat(\n [\n returns_frame_1,\n returns_frame_1[\"ticker\"]\n .str.split(\" \", expand=True)\n .set_axis([\"header1\", \"header2\", \"header3\"], axis=\"columns\"),\n ],\n axis=\"columns\",\n )\n result = returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=[\"header1\", \"header2\", \"header3\"],\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n assert_frame_equal(result, expected_output)\n\n\[email protected](reason=\"new_column_names is deprecated.\")\ndef test_return_dataframe_new_column_names_is_a_list_len_unequal(\n returns_frame_1,\n):\n \"\"\"\n Raise error if text processing returns a dataframe,\n `new_column_names` is not None, and the length of\n `new_column_names` is not equal to the length of the\n new dataframe's columns.\n \"\"\"\n\n with pytest.raises(ValueError):\n returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=[\"header1\", \"header2\"],\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n\n\ndef test_output_extractall(test_returns_dataframe):\n \"\"\"\n Raise ValueError if the output is a dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n string_function=\"extractall\",\n pat=r\"(?P<letter>[ab])?(?P<digit>\\d)\",\n )\n\n\[email protected](reason=\"merge_frame is deprecated.\")\ndef test_output_extractall_merge_frame_is_not_None(test_returns_dataframe):\n \"\"\"\n Test output when `string_function` is \"extractall\"\n and `merge_frame` is not None.\n \"\"\"\n expected_output = test_returns_dataframe[\"text\"].str.extractall(\n r\"(?P<letter>[ab])?(?P<digit>\\d)\"\n )\n expected_output = test_returns_dataframe.join(\n expected_output.reset_index(\"match\"), how=\"outer\"\n ).set_index(\"match\", append=True)\n result = test_returns_dataframe.process_text(\n column_name=\"text\",\n merge_frame=True,\n string_function=\"extractall\",\n pat=r\"(?P<letter>[ab])?(?P<digit>\\d)\",\n )\n assert_frame_equal(result, expected_output)\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
docileninja/Calvin-and-Hobbes-Viewer | [
"74ff2e090e040517e2b445432a5ea6c0f87df49b"
] | [
"comic_classify/comic_detect.py"
] | [
"from sklearn import svm, cluster\nfrom PIL import Image, ImageDraw\nimport os\nimport sys\nimport random\n\n\n\ndef load_images(dirname):\n\timages = []\n\tfor image_name in os.listdir(dirname):\n\t\tif image_name.startswith('.'):\n\t\t\tcontinue\n\t\timage = Image.open(dirname + '/' + image_name).convert('1')\n\t\tx, y = image.size\n\t\timage = image.resize((x, 280), Image.ANTIALIAS)\n\t\tdata = [0 if pixel == 0 else 1 for pixel in image.getdata()]\n\t\timages.append(data)\n\treturn images\n\nmin_len = 10000000\ndef normalize(X):\n\tglobal min_len\n\tmin_len = min(min_len, min(len(x) for x in X))\n\treturn [x[:min_len] for x in X]\n\ndef crossvalidate(edges, nonedges):\n\trandom.shuffle(edges)\n\trandom.shuffle(nonedges)\n\ttrain_edge_len, train_nonedge_len = len(edges) * 7 // 10, len(nonedges) * 7 // 10\n\tcross_edge_len, cross_nonedge_len = len(edges) - train_edge_len, len(nonedges) - train_nonedge_len\n\n\tX_train = normalize(nonedges[:train_nonedge_len] + \n\t\t\t\t\t\tedges[:train_edge_len])\n\ty_train = [0] * train_nonedge_len + [1] * train_edge_len\n\n\tX_cross = normalize(nonedges[train_nonedge_len:] + \n\t\t\t\t\t\tedges[train_edge_len:])\n\ty_cross = [0] * cross_nonedge_len + [1] * cross_edge_len\n\n\tclf = svm.SVC(gamma=.001, C=100.)\n\tclf.fit(X_train, y_train)\n\tprint(\"prediction: {}\".format(list(clf.predict(X_cross))))\n\tprint(\"actuallity: {}\".format(y_cross))\n\tprint(clf.score(X_cross, y_cross))\n\ndef get_column(img, i):\n\tw, h = img.size\n\tcolumn = []\n\tfor j in range(h):\n\t\t\tcolumn.append(0 if img.getpixel((i, j)) == 0 else 1)\n\treturn column\n\ndef search_picture(clf, image_name):\n\timage = Image.open(image_name).convert('1')\n\tx, y = image.size\n\timage = image.resize((x, 280), Image.ANTIALIAS)\n\tw, h = image.size\n\n\tcolumns = [get_column(image, i) for i in range(25)]\n\tdatas = []\n\tfor i in range(25, w):\n\t\tcolumns = columns[1:] + [get_column(image, i)]\n\t\tdata = [columns[i][j] for j in range(len(columns[0])) for i in range(len(columns))]\n\t\tdatas.append(data)\n\tdatas = normalize(datas)\n\tmatches = [[i] for i, m in enumerate(clf.predict(datas)) if m == 1]\n\tif len(matches) == 0:\n\t\treturn [], matches\n\tclst = cluster.DBSCAN(eps=20, min_samples=1)\n\tclst.fit(matches)\n\ttrimmed = [idx for idx in clst.components_ if idx > w // 6 and idx < w * 5 // 6]\n\tclst = cluster.KMeans(3, init='k-means++')\n\tclst.fit(trimmed)\n\tseps = list(sorted([int(v[0]) + 25//2 for v in clst.cluster_centers_]))\n\tfinal_seps = []\n\tfor start, end in zip(seps, seps[1:]):\n\t\tif (end - start) > w // 6:\n\t\t\tfinal_seps.append(start)\n\tfinal_seps.append(seps[-1])\n\treturn final_seps, matches\n\ndef train(edges, nonedges):\n\tclf = svm.SVC(gamma=.001, C=100.)\n\tX = normalize(nonedges + edges)\n\ty = [0] * len(nonedges) + [1] * len(edges)\n\tclf.fit(X, y)\n\treturn clf\n\n\ndef main(edge_dir, non_edge_dir):\n\tedges = load_images(edge_dir)\n\tnonedges = load_images(non_edge_dir)\n\n\tcrossvalidate(edges, nonedges)\n\n\tclf = train(edges, nonedges)\n\n\tfor comic in os.listdir('test'):\n\t\tprint(comic)\n\t\tpanels, matches = search_picture(clf, 'test/' + comic)\n\t\tprint(\"\\tpanels: {}\".format(panels))\n\t\timage = Image.open('test/' + comic).convert('RGBA')\n\t\tdraw = ImageDraw.Draw(image)\n\t\tw, h = image.size\n\t\tfor match in matches:\n\t\t\tmatch = match[0]\n\t\t\tdraw.line((match, 0) + (match, h), fill=(0,0,255,0))\n\t\tfor sep in panels:\n\t\t\tdraw.line((sep, 0) + (sep, h), fill=(255,0,0), width=3)\n\t\timage.show()\n\n\treturn clf\n\nif 
__name__ == '__main__':\n\tif len(sys.argv) != 3:\n\t\tprint('Usage: {} <edges-dir> <non-edges-dir>'.format(sys.argv[0]))\n\t\tsys.exit(1)\n\tedge_dir = sys.argv[1]\n\tnon_edge_dir = sys.argv[2]\n\tmain(edge_dir, non_edge_dir)\n\t"
] | [
[
"sklearn.cluster.DBSCAN",
"sklearn.cluster.KMeans",
"sklearn.svm.SVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UKPLab/cdcr-beyond-corpus-tailored | [
"52bf98692c7464f25628baea24addd1a988f9a1f",
"52bf98692c7464f25628baea24addd1a988f9a1f",
"52bf98692c7464f25628baea24addd1a988f9a1f"
] | [
"python/handwritten_baseline/pipeline/data/loader/ecb_reader_utils.py",
"python/handwritten_baseline/pipeline/model/feature_extr/tfidf.py",
"python/handwritten_baseline/pipeline/model/feature_extr/embedding_distance/sentence_embedding_distance.py"
] | [
"import logging\nimport os\nimport re\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nfrom typing import Any, Tuple, Optional\n\nimport pandas as pd\n\nfrom python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \\\n TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION\n\nlogger = logging.getLogger()\n\n\ndef read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]:\n tree = ET.parse(xml_path)\n\n # 1: read document info\n root = tree.getroot()\n assert root.tag == \"Document\"\n doc_filename = root.attrib[\"doc_name\"]\n doc_id = root.attrib[\"doc_id\"]\n m = re.match(r\"(?P<topic_id>\\d+)_(?P<document_number>\\d+)(?P<subtopic>\\w+)\\.xml\", doc_filename)\n\n topic_id = m.group(\"topic_id\")\n subtopic = m.group(\"subtopic\")\n document_number = int(m.group(\"document_number\"))\n\n documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)],\n names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID])\n documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index),\n DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)})\n\n # 2: read document content\n contents_rows = []\n contents_index = []\n for token_elmt in root.iter(\"token\"):\n # index content\n sentence_idx = int(token_elmt.attrib[\"sentence\"])\n token_idx = int(token_elmt.attrib[\"number\"])\n contents_index.append((doc_id, sentence_idx, token_idx))\n\n # content\n token = token_elmt.text\n contents_rows.append({TOKEN: token})\n contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX])\n contents = pd.DataFrame(contents_rows, index=contents_index)\n\n # 3: read markables / mentions and entity/event descriptions\n mentions_rows = []\n mentions_index = []\n entities_events = []\n for markable in root.find(\"Markables\").getchildren():\n # Don't know what this is, skip it\n if markable.tag == \"UNKNOWN_INSTANCE_TAG\":\n continue\n\n mention_id = int(markable.attrib[\"m_id\"])\n\n # there are markables without spans, these are descriptions of entities / events which we want to keep\n if \"TAG_DESCRIPTOR\" in markable.attrib.keys():\n if \"instance_id\" in markable.attrib.keys():\n entities_events.append({\n EVENT: markable.attrib[\"instance_id\"],\n DESCRIPTION: markable.attrib[\"TAG_DESCRIPTOR\"]\n })\n continue\n\n token_ids = [int(anchor.attrib[\"t_id\"]) for anchor in markable.iter(\"token_anchor\")]\n token_ids_from, token_ids_to = min(token_ids), max(token_ids)\n\n # the token_ids are cumulative token indexes, remove their cumulative nature\n token_indexes = contents.index.get_level_values(TOKEN_IDX).values\n token_idx_from = token_indexes[\n token_ids_from - 1] # -1 because token_ids start at 1, so we need to access index 0 in the dataframe to find t_id 1\n token_idx_to = token_indexes[\n token_ids_to - 1] + 1 # additionally +1 here because we want mention spans represented as intervals [from, to[\n\n sentence_idx = contents.index.get_level_values(SENTENCE_IDX).values[token_ids_from - 1]\n\n # resolve non-contiguous mentions\n is_non_contiguous_mention = len(token_ids) < token_idx_from - token_idx_to\n if is_non_contiguous_mention:\n logger.info(\"Converted non-contiguous mention to contiguous mention.\")\n\n mentions_index.append((doc_id, mention_id))\n mentions_rows.append({SENTENCE_IDX: sentence_idx,\n TOKEN_IDX_FROM: token_idx_from,\n TOKEN_IDX_TO: token_idx_to,\n MENTION_TYPE: markable.tag})\n mentions_index = 
pd.MultiIndex.from_tuples(mentions_index, names=[DOCUMENT_ID, MENTION_ID])\n mentions = pd.DataFrame(mentions_rows, index=mentions_index)\n entities_events = pd.DataFrame(entities_events).set_index(EVENT)\n\n # 4. read relations (clusters)\n clusters_rows = []\n for relation in root.find(\"Relations\").getchildren():\n tags_of_interest = [\"CROSS_DOC_COREF\", \"INTRA_DOC_COREF\"]\n if not relation.tag in tags_of_interest:\n logger.info(\"Unexpected tag \" + relation.tag)\n raise NotImplementedError\n\n # There are relations with tags INTRA_DOC_COREF and CROSS_DOC_COREF. The cross-doc ones have a \"note\" attribute.\n if \"note\" in relation.attrib:\n # this is the case for CROSS_DOC_COREF tags\n relation_id = relation.attrib[\"note\"]\n else:\n # this is the case for INTRA_DOC_COREF tags\n relation_id = doc_id + \"_\" + relation.attrib[\"r_id\"]\n\n for mention in relation.iter(\"source\"):\n mention_id = int(mention.attrib[\"m_id\"])\n clusters_rows.append({EVENT: relation_id, DOCUMENT_ID: doc_id, MENTION_ID: mention_id})\n clusters = pd.DataFrame(clusters_rows)\n\n # 5. create relations for singletons\n # In ECB plus, there are ACTION_OCCURRENCE markables which are not assigned to a relation. These are singletons. We\n # add one entry for each singleton to `clusters` to ensure consistency. Note that the opposite also exists:\n # singleton mentions which are marked as participating in a cross-doc coref relation, but there is no second\n # mention for this relation.\n if clusters.empty:\n singletons = mentions.index.to_frame().reset_index(drop=True)\n else:\n # This can most likely be done in a nicer way using some index difference...\n outer = pd.merge(mentions, clusters, left_index=True, right_on=[DOCUMENT_ID, MENTION_ID], how=\"outer\")\n singletons = outer.loc[outer[EVENT].isna(), [DOCUMENT_ID, MENTION_ID]]\n singletons[EVENT] = \"SINGLETON_\" + singletons.astype(str).apply(\"_\".join, axis=1)\n clusters = clusters.append(singletons, sort=False).reset_index(drop=True)\n\n return documents, contents, mentions, clusters, entities_events\n\n\ndef read_split_data(root: Path, sentence_filter_csv: Optional[Path]):\n documents = []\n contents = []\n mentions = []\n clusters = []\n entities_events = []\n\n # enumerate files\n for root, dirs, files in os.walk(str(root.absolute())):\n for file in files:\n path = os.path.abspath(os.path.join(root, file))\n f_documents, f_contents, f_mentions, f_clusters, f_entities_events = read_xml(path)\n\n documents.append(f_documents)\n contents.append(f_contents)\n mentions.append(f_mentions)\n clusters.append(f_clusters)\n entities_events.append(f_entities_events)\n\n documents = pd.concat(documents).sort_index()\n contents = pd.concat(contents).sort_index()\n mentions = pd.concat(mentions).sort_index()\n clusters = pd.concat(clusters, sort=False)\n entities_events = pd.concat(entities_events).sort_index()\n\n # assert that every mention participates only in one cluster -> meaning we can just add an 'EVENT' column to each mention\n assert clusters.duplicated(subset=[DOCUMENT_ID, MENTION_ID]).value_counts().get(True, 0) == 0\n\n clusters = clusters.set_index([DOCUMENT_ID, MENTION_ID])\n mentions = pd.merge(mentions, clusters, left_index=True, right_index=True).sort_index()\n\n # read file which tells us from which sentences we should keep event mentions\n if sentence_filter_csv is not None:\n sent_filter = pd.read_csv(sentence_filter_csv)\n doc_number_and_subtopic = sent_filter[\"File\"].str.split(\"ecb\", expand=True)\n doc_number_and_subtopic.columns = 
[DOCUMENT_NUMBER, SUBTOPIC]\n doc_number_and_subtopic[DOCUMENT_NUMBER] = doc_number_and_subtopic[DOCUMENT_NUMBER].astype(int)\n doc_number_and_subtopic[SUBTOPIC].replace({\"plus\": \"ecbplus\", \"\": \"ecb\"}, inplace=True)\n sent_filter = pd.concat([sent_filter.drop(columns=\"File\"), doc_number_and_subtopic], axis=1)\n sent_filter.rename(columns={\"Topic\": TOPIC_ID, \"Sentence Number\": SENTENCE_IDX}, inplace=True)\n sent_filter[TOPIC_ID] = sent_filter[TOPIC_ID].astype(str)\n sent_filter = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, SENTENCE_IDX]]\n\n # the sentence filter file applies to all splits, remove those topics that we don't have in the split we're loading\n topics_in_split = documents.index.get_level_values(TOPIC_ID).unique()\n sent_filter = sent_filter.loc[sent_filter[TOPIC_ID].isin(topics_in_split)].copy()\n\n # obtain doc-id from topic+subtopic+document number\n documents_with_doc_number_in_index = documents.set_index(DOCUMENT_NUMBER, append=True).reset_index(level=DOCUMENT_ID, drop=True).sort_index()\n sent_filter[DOCUMENT_ID] = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER]].apply(lambda row: documents_with_doc_number_in_index[DOCUMENT_ID].loc[tuple(row.values)], axis=1)\n\n all_mentions_to_keep = []\n for doc_id, df in mentions.groupby(DOCUMENT_ID):\n sentences_to_keep = sent_filter.loc[sent_filter[DOCUMENT_ID] == doc_id]\n\n # we only remove action phrases and leave the other mentions in place, so that we can potentially mask them for\n # analysis, see python.handwritten_baseline.pipeline.data.processing.masking.MentionMaskingStage\n is_official_evaluation_sentence = df[SENTENCE_IDX].isin(sentences_to_keep[SENTENCE_IDX])\n is_action_mention = df[MENTION_TYPE].isin(MENTION_TYPES_ACTION)\n mentions_to_keep = df.loc[is_official_evaluation_sentence | (~is_action_mention)]\n all_mentions_to_keep.append(mentions_to_keep)\n mentions = pd.concat(all_mentions_to_keep).sort_index()\n\n return documents, contents, mentions, entities_events",
"import pprint\nfrom typing import Optional, List, Tuple, Set, Any, Dict\n\nimport numpy as np\nfrom overrides import overrides\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom python import TOKEN, DOCUMENT_ID, SENTENCE_IDX\nfrom python.handwritten_baseline import LEMMA\nfrom python.handwritten_baseline.pipeline.data.base import Dataset\nfrom python.handwritten_baseline.pipeline.model.feature_extr import TFIDF_EXTR\nfrom python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin\nfrom python.handwritten_baseline.pipeline.model.feature_extr.util import batch_cosine_similarity\n\n\nclass TfidfFeatureExtractor(FeatureExtractorMixin):\n \"\"\"\n Computes the TF-IDF similarity between a mention pair. Three variants: (1) TF-IDF between sentence containing the\n mention, (2) TF-IDF between the extended sentence context of a mention and (3) TF-IDF between the full documents the\n mentions are coming from.\n \"\"\"\n\n def __init__(self,\n lowercase: bool,\n use_lemmas: bool,\n num_sentence_context: int,\n use_cache: bool,\n features_to_select: Optional[List[str]]):\n \"\"\"\n\n :param lowercase: apply lowercasing yes or no\n :param use_lemmas: use lemmas or surface forms\n :param num_sentence_context: number of sentences left and right which define the sentence context -> results in\n a window of 2*self._num_sentence_context + 1 sentences\n \"\"\"\n super(TfidfFeatureExtractor, self).__init__(TFIDF_EXTR, use_cache, features_to_select)\n\n self.lowercase = lowercase\n self.use_lemmas = use_lemmas\n self.num_sentence_context = num_sentence_context\n\n @staticmethod\n def get_tfidf_of_doc(doc_id: Any, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:\n tokens = dataset.tokens.loc[doc_id, TOKEN].values\n detokenized = \" \".join(tokens)\n return vectorizer_.transform([detokenized]).toarray()\n\n @staticmethod\n def get_tfidf_of_mention_sentence(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer) -> np.array:\n doc_id, _ = idx\n sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]\n tokens = dataset.tokens.loc[(doc_id, sent_idx), TOKEN].values\n detokenized = \" \".join(tokens)\n return vectorizer_.transform([detokenized]).toarray()\n\n @staticmethod\n def get_tfidf_of_mention_context(idx: Tuple, dataset: Dataset, vectorizer_: TfidfVectorizer, num_sentence_context: int) -> np.array:\n doc_id, _ = idx\n sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]\n\n # determine how many preceding and following sentences there are for the mention sentence in this document\n document = dataset.tokens.loc[doc_id, TOKEN]\n sent_idx_start = max(sent_idx - num_sentence_context, 0)\n sent_idx_end = min(sent_idx + num_sentence_context,\n document.index.get_level_values(SENTENCE_IDX).max())\n\n tokens = document.loc[slice(sent_idx_start, sent_idx_end)].values\n detokenized = \" \".join(tokens)\n return vectorizer_.transform([detokenized]).toarray()\n\n @overrides\n def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):\n # TFIDF vectorization is an unsupervised transformation, therefore apply it in transform(), not in fit(). 
It\n # would not make much sense anyway to use a TF-IDF vectorizer trained on train and apply it on test.\n # The recommended way to handle pretokenized text according to the docs is to join with spaces and use\n # whitespace tokenization, see https://scikit-learn.org/stable/modules/feature_extraction.html#customizing-the-vectorizer-classes\n vectorizer_ = TfidfVectorizer(tokenizer=str.split, lowercase=self.lowercase, token_pattern=None, min_df=3, stop_words=\"english\")\n\n tokens_df = dataset.tokens\n tokens = tokens_df[LEMMA] if self.use_lemmas else tokens_df[TOKEN]\n\n docs = []\n for doc_id, df in tokens.groupby(DOCUMENT_ID):\n tokens = df.values.tolist()\n docs.append(\" \".join(tokens))\n vectorizer_.fit(docs)\n\n # precompute relevant information per document and mention\n unique_documents = {doc_id for doc_id, _ in unique_mentions}\n precomp_documents = {doc_id: self.get_tfidf_of_doc(doc_id, dataset, vectorizer_) for doc_id in unique_documents}\n\n precomp_surrounding_sentence = {}\n precomp_context = {}\n for mention_idx in unique_mentions:\n assert len(mention_idx) == 2 # (doc_id, mention_id)\n\n # features for the mention sentence: check if mentions were detected for both sentences\n surrounding_sentence = self.get_tfidf_of_mention_sentence(mention_idx, dataset, vectorizer_)\n context = self.get_tfidf_of_mention_context(mention_idx, dataset, vectorizer_, self.num_sentence_context)\n\n precomp_surrounding_sentence[mention_idx] = surrounding_sentence\n precomp_context[mention_idx] = context\n\n # compute cosine similarity between each pair of vectors to obtain features\n feature_columns = []\n for vectors, feature_desc in [(precomp_documents, \"document\"),\n (precomp_surrounding_sentence, \"sentence\"),\n (precomp_context, \"context\")]:\n if feature_desc == \"document\":\n pairs_transform = lambda tup: tup[0] # our document vectors map from doc-id to np.array\n else:\n pairs_transform = None\n\n feature_column = batch_cosine_similarity(pairs, vectors, pairs_transform=pairs_transform, desc=f\"{self.name} {feature_desc}\")\n feature_columns.append(feature_column)\n feature_matrix = np.hstack(feature_columns)\n return feature_matrix\n\n @overrides\n def _get_plain_names_of_all_features(self) -> List[str]:\n return [\"document-similarity\", \"surrounding-sentence-similarity\", \"context-similarity\"]\n\n @classmethod\n @overrides\n def from_params(cls, config: Dict):\n # Tested all four combinations in a small CV-experiment, this combination performed best by a small margin.\n lowercase = config.pop(\"lowercase\", True)\n use_lemmas = config.pop(\"use_lemmas\", False)\n\n num_sentence_context = config.pop(\"num_sentence_context\", 2)\n\n use_cache = config.pop(\"use_cache\", False)\n features_to_select = config.pop(\"features_to_select\", None)\n obj = TfidfFeatureExtractor(lowercase=lowercase,\n use_lemmas=use_lemmas,\n num_sentence_context=num_sentence_context,\n use_cache=use_cache,\n features_to_select=features_to_select)\n if config:\n raise ValueError(\"Leftover configuration: \" + pprint.pformat(config))\n return obj",
"import pprint\nfrom typing import Optional, List, Tuple, Set, Dict\n\nimport numpy as np\nfrom overrides import overrides\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import make_pipeline\n\nfrom python import SENTENCE_IDX\nfrom python.handwritten_baseline import SENTENCE_EMBEDDINGS\nfrom python.handwritten_baseline.pipeline.data.base import Dataset\nfrom python.handwritten_baseline.pipeline.model.feature_extr import SENTENCE_EMBEDDING_EXTR\nfrom python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin\nfrom python.handwritten_baseline.pipeline.model.feature_extr.embedding_distance import create_feature_names\nfrom python.handwritten_baseline.pipeline.model.feature_extr.util import batch_cosine_similarity\n\n\nclass SentenceEmbeddingDistanceFeatureExtractorPipelineCreator:\n\n @classmethod\n def from_params(cls, config: Dict):\n extractor = SentenceEmbeddingDistanceFeature.from_params(config)\n imputer = SimpleImputer(missing_values=np.nan, strategy=\"median\")\n return make_pipeline(extractor, imputer)\n\n\nSURROUNDING_SENTENCE = \"surrounding-sentence\"\nDOC_START = \"doc-start\"\n\n\nclass SentenceEmbeddingDistanceFeature(FeatureExtractorMixin):\n\n def __init__(self,\n use_cache: bool,\n features_to_select: Optional[List[str]]):\n super(SentenceEmbeddingDistanceFeature, self).__init__(SENTENCE_EMBEDDING_EXTR, use_cache, features_to_select)\n\n @overrides\n def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):\n # obtain embeddings\n assert dataset.has(SENTENCE_EMBEDDINGS)\n sentence_embeddings = dataset.get(SENTENCE_EMBEDDINGS) # type: Tuple[Dict[Tuple[str, int], int], np.array]\n embedding_index, embedding_mat = sentence_embeddings\n\n mentions_action = dataset.mentions_action\n\n # compute a mean embedding in case we need to pad somewhere\n mean_embedding = embedding_mat.mean(axis=0)\n\n # precompute embedding matrices for each action mention\n precomputed_sentence = {}\n precomputed_doc_start = {}\n for mention_idx in unique_mentions:\n assert len(mention_idx) == 2\n doc_id, mention_id = mention_idx\n\n # look up sentence embedding of the sentence containing the action mention\n sent_idx_of_action = mentions_action.loc[mention_idx, SENTENCE_IDX]\n surrounding_sent_embedding = embedding_mat[embedding_index[(doc_id, sent_idx_of_action)]]\n\n # for the document start, take n sentences from the start of the document and concatenate their embeddings\n NUM_SENTENCES_DOC_START = 3\n doc_start_sent_embeddings = []\n for i in range(NUM_SENTENCES_DOC_START):\n # there might be documents shorter than NUM_SENTENCES_DOC_START, therefore check: if there are not\n # enough sentences, pad with the mean embedding\n if (doc_id, i) in embedding_index:\n sent_embedding = embedding_mat[embedding_index[(doc_id, i)]]\n else:\n sent_embedding = mean_embedding\n doc_start_sent_embeddings.append(sent_embedding)\n doc_start_embedding = np.hstack(doc_start_sent_embeddings)\n\n precomputed_sentence[mention_idx] = surrounding_sent_embedding\n precomputed_doc_start[mention_idx] = doc_start_embedding\n\n feature_columns = []\n for vectors, feature_desc in [(precomputed_sentence, SURROUNDING_SENTENCE), (precomputed_doc_start, DOC_START)]:\n feature_column = batch_cosine_similarity(pairs, vectors, desc=f\"{self.name} {feature_desc}\")\n feature_columns.append(feature_column)\n feature_matrix = np.hstack(feature_columns)\n return feature_matrix\n\n @overrides\n def _get_plain_names_of_all_features(self) -> 
List[str]:\n return create_feature_names([SURROUNDING_SENTENCE, DOC_START], [])\n\n @classmethod\n @overrides\n def from_params(cls, config: Dict):\n use_cache = config.pop(\"use_cache\", False)\n features_to_select = config.pop(\"features_to_select\", None)\n obj = SentenceEmbeddingDistanceFeature(use_cache, features_to_select)\n if config:\n raise ValueError(\"Leftover configuration: \" + pprint.pformat(config))\n return obj"
] | [
[
"pandas.concat",
"pandas.merge",
"pandas.read_csv",
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame"
],
[
"numpy.hstack",
"sklearn.feature_extraction.text.TfidfVectorizer"
],
[
"sklearn.pipeline.make_pipeline",
"sklearn.impute.SimpleImputer",
"numpy.hstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nbro/probability | [
"07a6378155f0ed720b5aaccf5387e3f9a432bd10",
"07a6378155f0ed720b5aaccf5387e3f9a432bd10",
"07a6378155f0ed720b5aaccf5387e3f9a432bd10",
"07a6378155f0ed720b5aaccf5387e3f9a432bd10",
"07a6378155f0ed720b5aaccf5387e3f9a432bd10"
] | [
"tensorflow_probability/python/distributions/generalized_pareto_test.py",
"tensorflow_probability/python/internal/hypothesis_testlib.py",
"tensorflow_probability/python/layers/dense_variational.py",
"tensorflow_probability/python/sts/local_level.py",
"tensorflow_probability/python/bijectors/softfloor.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Generalized Pareto distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\n# Dependency imports\nimport hypothesis as hp\nimport hypothesis.strategies as hps\nimport numpy as np\nfrom scipy import stats as sp_stats\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n# Pylint doesn't understand hps.composite.\n# pylint: disable=no-value-for-parameter\n\n\[email protected]\ndef generalized_paretos(draw, batch_shape=None):\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n\n constraints = dict(\n loc=tfp_hps.identity_fn,\n scale=tfp_hps.softplus_plus_eps(),\n concentration=lambda x: tf.math.tanh(x) * 0.24) # <.25==safe for variance\n\n params = draw(\n tfp_hps.broadcasting_params(\n batch_shape,\n params_event_ndims=dict(loc=0, scale=0, concentration=0),\n constraint_fn_for=constraints.get))\n dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)\n if dist.batch_shape != batch_shape:\n raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(\n batch_shape, dist))\n return dist\n\n\n@test_util.test_all_tf_execution_regimes\nclass GeneralizedParetoTest(test_util.TestCase):\n\n @hp.given(generalized_paretos())\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testShape(self, dist):\n # batch_shape == dist.batch_shape asserted in generalized_paretos()\n self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([]), dist.event_shape)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testLogPDF(self, dist):\n xs = self.evaluate(dist.sample())\n\n logp = dist.log_prob(xs)\n self.assertEqual(dist.batch_shape, logp.shape)\n p = dist.prob(xs)\n self.assertEqual(dist.batch_shape, p.shape)\n\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs)\n actual_logp = self.evaluate(logp)\n self.assertAllClose(expected_logp, actual_logp, rtol=1e-5)\n self.assertAllClose(np.exp(expected_logp), self.evaluate(p), rtol=1e-5)\n\n def testLogPDFBoundary(self):\n # When loc = concentration = 0, we have an exponential distribution. 
Check\n # that at 0 we have finite log prob.\n scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)\n dist = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=0)\n log_pdf = dist.log_prob(0.)\n self.assertAllClose(-np.log(scale), self.evaluate(log_pdf), rtol=1e-5)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testCDF(self, dist):\n xs = self.evaluate(dist.sample())\n cdf = dist.cdf(xs)\n self.assertEqual(dist.batch_shape, cdf.shape)\n\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs)\n self.assertAllClose(expected_cdf, self.evaluate(cdf), rtol=5e-5)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testMean(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.mean().shape)\n if np.abs(conc) < 1e-5 and conc != 0:\n return # scipy does badly at small nonzero concentrations.\n expected = sp_stats.genpareto(conc, loc=loc, scale=scale).mean()\n actual = self.evaluate(dist.mean())\n self.assertAllClose(expected, actual, rtol=5e-4)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testVariance(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.variance().shape)\n expected = sp_stats.genpareto(conc, loc=loc, scale=scale).var()\n if np.abs(conc) < 1e-4 and conc != 0:\n return # scipy does badly at small nonzero concentrations.\n if expected <= 0:\n return # scipy sometimes returns nonsense zero or negative variances.\n actual = self.evaluate(dist.variance())\n print('var', loc, scale, conc, expected, actual, file=sys.stderr)\n self.assertAllClose(expected, actual, rtol=.01)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testEntropy(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.entropy().shape)\n expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale)\n actual = self.evaluate(dist.entropy())\n self.assertAllClose(expected, actual)\n\n def testSample(self):\n loc = np.float32(-7.5)\n scale = np.float32(3.5)\n conc = np.float32(0.07)\n n = 100000\n dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)\n samples = dist.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n,), samples.shape)\n self.assertEqual((n,), sample_values.shape)\n self.assertTrue(self._kstest(loc, scale, conc, sample_values))\n self.assertAllClose(\n sp_stats.genpareto.mean(conc, loc=loc, scale=scale),\n sample_values.mean(),\n rtol=.005)\n self.assertAllClose(\n sp_stats.genpareto.var(conc, loc=loc, scale=scale),\n sample_values.var(),\n rtol=.01)\n\n def testFullyReparameterized(self):\n loc = tf.constant(4.0)\n scale = tf.constant(3.0)\n conc = tf.constant(2.0)\n _, grads = tfp.math.value_and_gradient(\n lambda *args: tfd.GeneralizedPareto(*args).sample(100),\n [loc, scale, conc])\n self.assertLen(grads, 3)\n self.assertAllNotNone(grads)\n\n def testSampleKolmogorovSmirnovMultiDimensional(self):\n loc = np.linspace(-10, 10, 3).reshape(3, 1, 1)\n scale = np.linspace(1e-6, 7, 5).reshape(5, 1)\n conc = np.linspace(-1.3, 1.3, 7)\n\n dist = 
tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)\n n = 10000\n samples = dist.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 3, 5, 7), samples.shape)\n self.assertEqual((n, 3, 5, 7), sample_values.shape)\n\n fails = 0\n trials = 0\n for li, l in enumerate(loc.reshape(-1)):\n for si, s in enumerate(scale.reshape(-1)):\n for ci, c in enumerate(conc.reshape(-1)):\n samps = sample_values[:, li, si, ci]\n trials += 1\n fails += 0 if self._kstest(l, s, c, samps) else 1\n self.assertLess(fails, trials * 0.01)\n\n def _kstest(self, loc, scale, conc, samples):\n # Uses the Kolmogorov-Smirnov test for goodness of fit.\n ks, _ = sp_stats.kstest(samples,\n sp_stats.genpareto(conc, loc=loc, scale=scale).cdf)\n # Return True when the test passes.\n return ks < 0.02\n\n def testPdfOfSampleMultiDims(self):\n dist = tfd.GeneralizedPareto(\n loc=0, scale=[[2.], [3.]], concentration=[-.37, .11])\n num = 50000\n samples = dist.sample(num, seed=test_util.test_seed())\n pdfs = dist.prob(samples)\n sample_vals, pdf_vals = self.evaluate([samples, pdfs])\n self.assertEqual((num, 2, 2), samples.shape)\n self.assertEqual((num, 2, 2), pdfs.shape)\n self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)\n\n def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):\n s_p = zip(sample_vals, pdf_vals)\n prev = (0, 0)\n total = 0\n for k in sorted(s_p, key=lambda x: x[0]):\n pair_pdf = (k[1] + prev[1]) / 2\n total += (k[0] - prev[0]) * pair_pdf\n prev = k\n self.assertNear(1., total, err=err)\n\n def testNonPositiveInitializationParamsRaises(self):\n scale = tf.constant(0.0, name='scale')\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n dist = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=1, validate_args=True)\n self.evaluate(dist.mean())\n\n def testGradientThroughConcentration(self):\n concentration = tf.Variable(3.)\n d = tfd.GeneralizedPareto(loc=0, scale=1, concentration=concentration)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n def testAssertsPositiveScale(self):\n scale = tf.Variable([1., 2., -3.])\n self.evaluate(scale.initializer)\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n d = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=1, validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveScaleAfterMutation(self):\n scale = tf.Variable([1., 2., 3.])\n self.evaluate(scale.initializer)\n d = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=0.25, validate_args=True)\n self.evaluate(d.mean())\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n with tf.control_dependencies([scale.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\n def testGradientThroughLocScale(self):\n loc = tf.Variable(1.)\n scale = tf.Variable(2.5)\n d = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=.15)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grads = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grads, 2)\n self.assertAllNotNone(grads)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Hypothesis strategies for TFP.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport traceback\n\n# Dependency imports\nimport hypothesis as hp\nfrom hypothesis.extra import numpy as hpnp\nimport hypothesis.strategies as hps\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions.deprecated_linalg import matrix_diag_transform\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.util.deferred_tensor import DeferredTensor\n\n\ndef randomize_hypothesis():\n # Use --test_env=TFP_RANDOMIZE_HYPOTHESIS=1 to get random coverage.\n return bool(int(os.environ.get('TFP_RANDOMIZE_HYPOTHESIS', 0)))\n\n\ndef hypothesis_max_examples(default=None):\n # Use --test_env=TFP_HYPOTHESIS_MAX_EXAMPLES=1000 to get fuller coverage.\n return int(os.environ.get('TFP_HYPOTHESIS_MAX_EXAMPLES', default or 20))\n\n\ndef tfp_hp_settings(default_max_examples=None, **kwargs):\n \"\"\"Default TFP-specific Hypothesis settings.\"\"\"\n # Rationales for deviating from Hypothesis default settings\n # - Derandomize by default because flaky tests are horrible\n # - Turn off example database because\n # - It makes tests flaky on our cluster even if derandomized at the current\n # internal Hypothesis version (3.65)\n # - In the future, derandomization will imply ignoring the database setting\n # anyway\n # - Having one can't make example runs any faster\n # - No deadline because our test functions are too slow\n # - No too_slow health check for the same reason\n # - Fewer examples by default for the same reason\n # - Always print `@reproduce_failure` blobs because one never doesn't want\n # them in the logs\n kwds = dict(\n derandomize=not randomize_hypothesis(),\n database=None,\n deadline=None,\n suppress_health_check=[hp.HealthCheck.too_slow],\n max_examples=hypothesis_max_examples(default=default_max_examples),\n print_blob=hp.PrintSettings.ALWAYS)\n kwds.update(kwargs)\n return hp.settings(**kwds)\n\n\nVAR_USAGES = {}\n\n\ndef usage_counting_identity(var):\n key = (id(var), var.name)\n VAR_USAGES[key] = VAR_USAGES.get(key, []) + [traceback.format_stack(limit=25)]\n return tf.identity(var)\n\n\ndef defer_and_count_usage(var):\n return DeferredTensor(var, usage_counting_identity)\n\n\[email protected]\ndef assert_no_excessive_var_usage(name, max_permissible=2):\n \"\"\"Fails if a tagged DeferredTensor is convert_to_tensor'd too much.\n\n To set this up, wrap some Variables in `defer_and_count_usage`. 
Then, if any\n of them is accessed more than `max_permissible` times in the wrapped block,\n this will signal an informative error.\n\n Args:\n name: Python `str` naming this var usage counter.\n max_permissible: Python `int` giving the maximum OK number of times\n each tagged DeferredTensor may be read.\n\n Yields:\n Nothing (it's a context manager).\n \"\"\"\n VAR_USAGES.clear()\n yield\n # TODO(jvdillon): Reduce max_permissible to 1?\n var_nusages = {var_id_and_name: len(usages) for var_id_and_name,\n usages in VAR_USAGES.items()}\n if any(len(usages) > max_permissible for usages in VAR_USAGES.values()):\n for (_, var_name), usages in VAR_USAGES.items():\n if len(usages) > max_permissible:\n print('While executing {}, saw {} Tensor conversions of {}:'.format(\n name, len(usages), var_name))\n for i, usage in enumerate(usages):\n print('Conversion {} of {}:\\n{}'.format(i + 1, len(usages),\n ''.join(usage)))\n raise AssertionError(\n 'More than {} tensor conversions detected for {}: {}'.format(\n max_permissible, name, var_nusages))\n\n\nclass Support(object):\n \"\"\"Classification of sample spaces and bijector domains and codomains.\"\"\"\n SCALAR_UNCONSTRAINED = 'SCALAR_UNCONSTRAINED'\n SCALAR_NON_NEGATIVE = 'SCALAR_NON_NEGATIVE'\n SCALAR_NON_ZERO = 'SCALAR_NON_ZERO'\n SCALAR_POSITIVE = 'SCALAR_POSITIVE'\n SCALAR_GT_NEG1 = 'SCALAR_GT_NEG1'\n SCALAR_IN_NEG1_1 = 'SCALAR_IN_NEG1_1'\n SCALAR_IN_0_1 = 'SCALAR_IN_0_1'\n VECTOR_UNCONSTRAINED = 'VECTOR_UNCONSTRAINED'\n VECTOR_SIZE_TRIANGULAR = 'VECTOR_SIZE_TRIANGULAR'\n VECTOR_WITH_L1_NORM_1_SIZE_GT1 = 'VECTOR_WITH_L1_NORM_1_SIZE_GT1'\n VECTOR_STRICTLY_INCREASING = 'VECTOR_STRICTLY_INCREASING'\n MATRIX_UNCONSTRAINED = 'MATRIX_UNCONSTRAINED'\n MATRIX_LOWER_TRIL = 'MATRIX_LOWER_TRIL'\n MATRIX_LOWER_TRIL_POSITIVE_DEFINITE = 'MATRIX_LOWER_TRIL_POSITIVE_DEFINITE'\n MATRIX_POSITIVE_DEFINITE = 'MATRIX_POSITIVE_DEFINITE'\n CORRELATION_CHOLESKY = 'CORRELATION_CHOLESKY'\n OTHER = 'OTHER'\n\nALL_SUPPORTS = None\n\n\ndef all_supports():\n global ALL_SUPPORTS\n cls = Support\n ALL_SUPPORTS = [attr for attr in dir(cls)\n if not callable(getattr(cls, attr))\n and not attr.startswith('__')]\nall_supports()\ndel all_supports\n\n\ndef _scalar_constrainer(support):\n \"\"\"Helper for `constrainer` for scalar supports.\"\"\"\n\n def nonzero(x):\n return tf.where(tf.equal(x, 0), 1e-6, x)\n\n constrainers = {\n Support.SCALAR_IN_0_1: tf.math.sigmoid,\n Support.SCALAR_GT_NEG1: softplus_plus_eps(-1 + 1e-6),\n Support.SCALAR_NON_ZERO: nonzero,\n Support.SCALAR_IN_NEG1_1: lambda x: tf.math.tanh(x) * (1 - 1e-6),\n Support.SCALAR_NON_NEGATIVE: tf.math.softplus,\n Support.SCALAR_POSITIVE: softplus_plus_eps(),\n Support.SCALAR_UNCONSTRAINED: tf.identity,\n }\n if support not in constrainers:\n raise NotImplementedError(support)\n return constrainers[support]\n\n\ndef _vector_constrainer(support):\n \"\"\"Helper for `constrainer` for vector supports.\"\"\"\n\n def l1norm(x):\n x = tf.concat([x, tf.ones_like(x[..., :1]) * 1e-6], axis=-1)\n x = x / tf.linalg.norm(x, ord=1, axis=-1, keepdims=True)\n return x\n\n constrainers = {\n Support.VECTOR_UNCONSTRAINED:\n identity_fn,\n Support.VECTOR_STRICTLY_INCREASING:\n lambda x: tf.cumsum(tf.abs(x) + 1e-3, axis=-1),\n Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1:\n l1norm,\n Support.VECTOR_SIZE_TRIANGULAR:\n identity_fn,\n }\n if support not in constrainers:\n raise NotImplementedError(support)\n return constrainers[support]\n\n\ndef _matrix_constrainer(support):\n \"\"\"Helper for `constrainer` for matrix supports.\"\"\"\n constrainers 
= {\n Support.MATRIX_UNCONSTRAINED:\n identity_fn,\n Support.MATRIX_POSITIVE_DEFINITE:\n positive_definite,\n Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE:\n lower_tril_positive_definite,\n Support.MATRIX_LOWER_TRIL:\n lower_tril,\n }\n if support not in constrainers:\n raise NotImplementedError(support)\n return constrainers[support]\n\n\ndef constrainer(support):\n \"\"\"Determines a constraining transformation into the given support.\"\"\"\n if support.startswith('SCALAR_'):\n return _scalar_constrainer(support)\n if support.startswith('VECTOR_'):\n return _vector_constrainer(support)\n if support.startswith('MATRIX_'):\n return _matrix_constrainer(support)\n raise NotImplementedError(support)\n\n\ndef min_rank_for_support(support):\n \"\"\"Reports the minimum rank of a Tensor in the given support.\"\"\"\n if support.startswith('SCALAR_'):\n return 0\n if support.startswith('VECTOR_'):\n return 1\n if support.startswith('MATRIX_'):\n return 2\n raise NotImplementedError(support)\n\n\ndef constrained_tensors(constraint_fn, shape, dtype=np.float32):\n \"\"\"Strategy for drawing a constrained Tensor.\n\n Args:\n constraint_fn: Function mapping the unconstrained space to the desired\n constrained space.\n shape: Shape of the desired Tensors as a Python list.\n dtype: Dtype for constrained Tensors.\n\n Returns:\n tensors: A strategy for drawing constrained Tensors of the given shape.\n \"\"\"\n # TODO(bjp): Allow a wider range of floats.\n # float32s = hps.floats(\n # np.finfo(np.float32).min / 2, np.finfo(np.float32).max / 2,\n # allow_nan=False, allow_infinity=False)\n floats = hps.floats(-200, 200, allow_nan=False, allow_infinity=False)\n\n def mapper(x):\n x = constraint_fn(tf.convert_to_tensor(x, dtype_hint=dtype))\n if dtype_util.is_floating(x.dtype) and tf.executing_eagerly():\n # We'll skip this check in graph mode; too expensive.\n if not np.all(np.isfinite(x.numpy())):\n raise AssertionError('{} generated non-finite param value: {}'.format(\n constraint_fn, x.numpy()))\n return x\n\n return hpnp.arrays(dtype=dtype, shape=shape, elements=floats).map(mapper)\n\n\n# pylint: disable=no-value-for-parameter\n\n\[email protected]\ndef tensors_in_support(draw, support, batch_shape=None, event_dim=None):\n \"\"\"Strategy for drawing Tensors in the given support.\n\n Supports have a notion of event shape, which is the trailing dimensions in\n which the support region may not be axis-aligned (e.g., the event ndims of\n `VECTOR_STRICTLY_INCREASING` is 1). This strategy produces Tensors with at\n least the support's event rank, and also an optional batch shape.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n support: The `Support` in which the Tensor should live.\n batch_shape: Optional shape. The returned Tensors will have this batch\n shape. Hypothesis will pick one if omitted.\n event_dim: Optional Python int giving the size of each event dimension.\n This is shared across all event dimensions, permitting square event\n matrices, etc. 
If omitted, Hypothesis will choose one.\n\n Returns:\n tensors: A strategy for drawing such Tensors.\n \"\"\"\n if event_dim is None:\n event_dim = draw(hps.integers(min_value=2, max_value=6))\n if batch_shape is None:\n batch_shape = tensorshape_util.as_list(draw(shapes()))\n shape = batch_shape + [event_dim] * min_rank_for_support(support)\n constraint_fn = constrainer(support)\n return draw(constrained_tensors(constraint_fn, shape))\n\n\[email protected]\ndef shapes(draw, min_ndims=0, max_ndims=3, min_lastdimsize=1, max_side=None):\n \"\"\"Strategy for drawing TensorShapes with some control over rank/dim sizes.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n min_ndims: Python `int` giving the minimum rank.\n max_ndims: Python `int` giving the maximum rank.\n min_lastdimsize: Python `int`. The trailing dimension will always be at\n least this large. Ignored if the rank turns out to be 0.\n max_side: Python `int` giving the maximum size of each dimension\n\n Returns:\n shapes: A strategy for drawing fully-specified TensorShapes obeying\n these constraints.\n \"\"\"\n rank = draw(hps.integers(min_value=min_ndims, max_value=max_ndims))\n shape = tf.TensorShape(None).with_rank(rank)\n if rank > 0:\n\n def resize_lastdim(x):\n return x[:-1] + (max(x[-1], min_lastdimsize),)\n\n if max_side is None:\n # Apparently we can't pass an explicit None to the Hypothesis strategy?\n shps = hpnp.array_shapes(min_dims=rank, max_dims=rank)\n else:\n shps = hpnp.array_shapes(min_dims=rank, max_dims=rank, max_side=max_side)\n shape = draw(shps.map(resize_lastdim).map(tf.TensorShape))\n return shape\n\n\ndef identity_fn(x):\n return x\n\n\[email protected]\ndef broadcasting_params(draw,\n batch_shape,\n params_event_ndims,\n event_dim=None,\n enable_vars=False,\n constraint_fn_for=lambda param: identity_fn,\n mutex_params=(),\n dtype=np.float32):\n \"\"\"Streategy for drawing parameters which jointly have the given batch shape.\n\n Specifically, the batch shapes of the returned parameters will broadcast to\n the requested batch shape.\n\n The dtypes of the returned parameters are determined by their respective\n constraint functions.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: A `TensorShape`. The returned parameters' batch shapes will\n broadcast to this.\n params_event_ndims: Python `dict` mapping the name of each parameter to a\n Python `int` giving the event ndims for that parameter.\n event_dim: Optional Python int giving the size of each parameter's event\n dimensions (except where overridden by any applicable constraint\n functions). This is shared across all parameters, permitting square event\n matrices, compatible location and scale Tensors, etc. If omitted,\n Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}.\n constraint_fn_for: Python callable mapping parameter name to constraint\n function. The latter is itself a Python callable which converts an\n unconstrained Tensor (currently with float32 values from -200 to +200)\n into one that meets the parameter's validity constraints.\n mutex_params: Python iterable of Python sets. Each set gives a clique of\n mutually exclusive parameters (e.g., the 'probs' and 'logits' of a\n Categorical). 
At most one parameter from each set will appear in the\n result.\n dtype: Dtype for generated parameters.\n\n Returns:\n params: A Hypothesis strategy for drawing Python `dict`s mapping parameter\n name to a `tf.Tensor`, `tf.Variable`, `tfp.util.DeferredTensor`, or\n `tfp.util.TransformedVariable`. The batch shapes of the returned\n parameters broadcast together to the supplied `batch_shape`. Only\n parameters whose names appear as keys in `params_event_ndims` will appear\n (but possibly not all of them, depending on `mutex_params`).\n \"\"\"\n if event_dim is None:\n event_dim = draw(hps.integers(min_value=2, max_value=6))\n\n params_event_ndims = params_event_ndims or {}\n remaining_params = set(params_event_ndims.keys())\n params_to_use = []\n while remaining_params:\n param = draw(hps.sampled_from(sorted(remaining_params)))\n params_to_use.append(param)\n remaining_params.remove(param)\n for mutex_set in mutex_params:\n if param in mutex_set:\n remaining_params -= mutex_set\n\n param_batch_shapes = draw(\n broadcasting_named_shapes(batch_shape, params_to_use))\n params_kwargs = dict()\n for param in params_to_use:\n param_batch_shape = param_batch_shapes[param]\n param_event_rank = params_event_ndims[param]\n param_shape = (tensorshape_util.as_list(param_batch_shape) +\n [event_dim] * param_event_rank)\n\n # Reduce our risk of exceeding TF kernel broadcast limits.\n hp.assume(len(param_shape) < 6)\n\n # TODO(axch): Can I replace `params_event_ndims` and `constraint_fn_for`\n # with a map from params to `Suppport`s, and use `tensors_in_support` here\n # instead of this explicit `constrained_tensors` function?\n param_strategy = constrained_tensors(\n constraint_fn_for(param), param_shape, dtype=dtype)\n params_kwargs[param] = draw(maybe_variable(\n param_strategy, enable_vars=enable_vars, dtype=dtype, name=param))\n return params_kwargs\n\n\[email protected]\ndef maybe_variable(draw,\n strategy,\n enable_vars=False,\n dtype=None,\n name=None):\n \"\"\"Strategy for drawing objects that should sometimes be tf.Variables.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n strategy: Hypothesis strategy for drawing suitable values\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n never {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}.\n dtype: Dtype for generated parameters.\n name: Name for the produced `Tensor`s and `Variable`s, if any.\n\n Returns:\n strategy: A Hypothesis strategy for drawing a value, `tf.Variable`,\n `tfp.util.DeferredTensor`, or `tfp.util.TransformedVariable`. 
The\n `DeferredTensor`s are sometimes instrumented to count how many times they\n are concretized.\n \"\"\"\n result = tf.convert_to_tensor(draw(strategy), dtype_hint=dtype, name=name)\n if enable_vars and draw(hps.booleans()):\n result = tf.Variable(result, name=name)\n if name is None:\n alt_name = None\n else:\n alt_name = '{}_alt_value'.format(name)\n alt_value = tf.convert_to_tensor(\n draw(strategy), dtype_hint=dtype, name=alt_name)\n # This field provides an acceptable alternate value, to enable tests that\n # mutate the Variable (once).\n setattr(result, '_tfp_alt_value', alt_value)\n if draw(hps.booleans()):\n result = defer_and_count_usage(result)\n return result\n\n\[email protected]\ndef broadcasting_named_shapes(draw, batch_shape, param_names):\n \"\"\"Strategy for drawing a set of batch shapes that broadcast to `batch_shape`.\n\n For each parameter we need to choose its batch rank, and whether or not each\n axis i is 1 or batch_shape[i]. This function chooses a set of shapes that\n have possibly mismatched ranks, and possibly broadcasting axes, with the\n promise that the broadcast of the set of all shapes matches `batch_shape`.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: `tf.TensorShape`, the target (fully-defined) batch shape.\n param_names: Iterable of `str`, the parameters whose batch shapes need\n determination.\n\n Returns:\n param_batch_shapes: A strategy for drawing `dict`s of `str->tf.TensorShape`\n where the set of shapes broadcast to `batch_shape`. The shapes are fully\n defined.\n \"\"\"\n n = len(param_names)\n return dict(\n zip(draw(hps.permutations(param_names)),\n draw(broadcasting_shapes(batch_shape, n))))\n\n\ndef _compute_rank_and_fullsize_reqd(draw, target_shape, current_shape, is_last):\n \"\"\"Returns a param rank and a list of bools for full-size-required by axis.\n\n Args:\n draw: Hypothesis data sampler.\n target_shape: `tf.TensorShape`, the target broadcasted shape.\n current_shape: `tf.TensorShape`, the broadcasted shape of the shapes\n selected thus far. This is ignored for non-last shapes.\n is_last: bool indicator of whether this is the last shape (in which case, we\n must achieve the target shape).\n\n Returns:\n next_rank: Sampled rank for the next shape.\n force_fullsize_dim: `next_rank`-sized list of bool indicating whether the\n corresponding axis of the shape must be full-sized (True) or is allowed to\n be 1 (i.e., broadcast) (False).\n \"\"\"\n target_rank = target_shape.ndims\n if is_last:\n # We must force full size dim on any mismatched axes, and proper rank.\n full_rank_current = tf.broadcast_static_shape(\n current_shape, tf.TensorShape([1] * target_rank))\n # Identify axes in which the target shape is not yet matched.\n axis_is_mismatched = [\n full_rank_current[i] != target_shape[i] for i in range(target_rank)\n ]\n min_rank = target_rank\n if current_shape.ndims == target_rank:\n # Current rank might be already correct, but we could have a case like\n # batch_shape=[4,3,2] and current_batch_shape=[4,1,2], in which case\n # we must have at least 2 axes on this param's batch shape.\n min_rank -= (axis_is_mismatched + [True]).index(True)\n next_rank = draw(hps.integers(min_value=min_rank, max_value=target_rank))\n # Get the last param_batch_rank (possibly 0!) 
items.\n force_fullsize_dim = axis_is_mismatched[target_rank - next_rank:]\n else:\n # There are remaining params to be drawn, so we will be able to force full\n # size axes on subsequent params.\n next_rank = draw(hps.integers(min_value=0, max_value=target_rank))\n force_fullsize_dim = [False] * next_rank\n return next_rank, force_fullsize_dim\n\n\ndef broadcast_compatible_shape(shape):\n \"\"\"Strategy for drawing shapes broadcast-compatible with `shape`.\"\"\"\n # broadcasting_shapes draws a sequence of shapes, so that the last \"completes\"\n # the broadcast to fill out batch_shape. Here we just draw two and take the\n # first (incomplete) one.\n return broadcasting_shapes(shape, 2).map(lambda shapes: shapes[0])\n\n\[email protected]\ndef broadcasting_shapes(draw, target_shape, n):\n \"\"\"Strategy for drawing a set of `n` shapes that broadcast to `target_shape`.\n\n For each shape we need to choose its rank, and whether or not each axis i is 1\n or target_shape[i]. This function chooses a set of `n` shapes that have\n possibly mismatched ranks, and possibly broadcasting axes, with the promise\n that the broadcast of the set of all shapes matches `target_shape`.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n target_shape: The target (fully-defined) batch shape.\n n: Python `int`, the number of shapes to draw.\n\n Returns:\n shapes: A strategy for drawing sequences of `tf.TensorShape` such that the\n set of shapes in each sequence broadcast to `target_shape`. The shapes are\n fully defined.\n \"\"\"\n target_shape = tf.TensorShape(target_shape)\n target_rank = target_shape.ndims\n result = []\n current_shape = tf.TensorShape([])\n for is_last in [False] * (n - 1) + [True]:\n next_rank, force_fullsize_dim = _compute_rank_and_fullsize_reqd(\n draw, target_shape, current_shape, is_last=is_last)\n\n # Get the last next_rank (possibly 0!) dimensions.\n next_shape = target_shape[target_rank - next_rank:].as_list()\n for i, force_fullsize in enumerate(force_fullsize_dim):\n if not force_fullsize and draw(hps.booleans()):\n # Choose to make this param broadcast against some other param.\n next_shape[i] = 1\n next_shape = tf.TensorShape(next_shape)\n current_shape = tf.broadcast_static_shape(current_shape, next_shape)\n result.append(next_shape)\n return result\n\n\n# Utility functions for constraining parameters and/or domain/codomain members.\n\n\ndef softplus_plus_eps(eps=1e-6):\n return lambda x: tf.nn.softplus(x) + eps\n\n\ndef symmetric(x):\n return (x + tf.linalg.matrix_transpose(x)) / 2\n\n\ndef positive_definite(x):\n shp = tensorshape_util.as_list(x.shape)\n psd = (\n tf.matmul(x, x, transpose_b=True) +\n .1 * tf.linalg.eye(shp[-1], batch_shape=shp[:-2]))\n return symmetric(psd)\n\n\ndef lower_tril_positive_definite(x):\n return tf.linalg.band_part(\n matrix_diag_transform(x, softplus_plus_eps()), -1, 0)\n\n\ndef lower_tril(x):\n return tf.linalg.band_part(x, -1, 0)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Dense variational layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import independent as independent_lib\nfrom tensorflow_probability.python.distributions import kullback_leibler as kl_lib\nfrom tensorflow_probability.python.distributions import normal as normal_lib\nfrom tensorflow_probability.python.internal import docstring_util\nfrom tensorflow_probability.python.layers import util as tfp_layers_util\nfrom tensorflow_probability.python.math import random_rademacher\nfrom tensorflow_probability.python.util import SeedStream\n\n\n__all__ = [\n 'DenseFlipout',\n 'DenseLocalReparameterization',\n 'DenseReparameterization',\n]\n\n\ndoc_args = \"\"\"units: Integer or Long, dimensionality of the output space.\n activation: Activation function (`callable`). Set it to None to maintain a\n linear activation.\n activity_regularizer: Regularizer function for the output.\n kernel_posterior_fn: Python `callable` which creates\n `tfd.Distribution` instance representing the surrogate\n posterior of the `kernel` parameter. Default value:\n `default_mean_field_normal_fn()`.\n kernel_posterior_tensor_fn: Python `callable` which takes a\n `tfd.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n kernel_prior_fn: Python `callable` which creates `tfd`\n instance. See `default_mean_field_normal_fn` docstring for required\n parameter signature.\n Default value: `tfd.Normal(loc=0., scale=1.)`.\n kernel_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. The\n distributions are `tfd.Distribution`-like instances and the\n sample is a `Tensor`.\n bias_posterior_fn: Python `callable` which creates\n `tfd.Distribution` instance representing the surrogate\n posterior of the `bias` parameter. Default value:\n `default_mean_field_normal_fn(is_singular=True)` (which creates an\n instance of `tfd.Deterministic`).\n bias_posterior_tensor_fn: Python `callable` which takes a\n `tfd.Distribution` instance and returns a representative\n value. Default value: `lambda d: d.sample()`.\n bias_prior_fn: Python `callable` which creates `tfd` instance.\n See `default_mean_field_normal_fn` docstring for required parameter\n signature. Default value: `None` (no prior, no variational inference)\n bias_divergence_fn: Python `callable` which takes the surrogate posterior\n distribution, prior distribution and random variate sample(s) from the\n surrogate posterior and computes or approximates the KL divergence. 
The\n distributions are `tfd.Distribution`-like instances and the\n sample is a `Tensor`.\"\"\"\n\n\nclass _DenseVariational(tf.keras.layers.Layer):\n \"\"\"Abstract densely-connected class (private, used as implementation base).\n\n This layer implements the Bayesian variational inference analogue to\n a dense layer by assuming the `kernel` and/or the `bias` are drawn\n from distributions. By default, the layer implements a stochastic\n forward pass via sampling from the kernel and bias posteriors,\n\n ```none\n kernel, bias ~ posterior\n outputs = activation(matmul(inputs, kernel) + bias)\n ```\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n distributions.\n \"\"\"\n\n @docstring_util.expand_docstring(args=doc_args)\n def __init__(\n self,\n units,\n activation=None,\n activity_regularizer=None,\n kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=tfp_layers_util.default_multivariate_normal_fn,\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(\n is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n **kwargs):\n # pylint: disable=g-doc-args\n \"\"\"Construct layer.\n\n Args:\n ${args}\n \"\"\"\n # pylint: enable=g-doc-args\n super(_DenseVariational, self).__init__(\n activity_regularizer=activity_regularizer,\n **kwargs)\n self.units = units\n self.activation = tf.keras.activations.get(activation)\n self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)\n self.kernel_posterior_fn = kernel_posterior_fn\n self.kernel_posterior_tensor_fn = kernel_posterior_tensor_fn\n self.kernel_prior_fn = kernel_prior_fn\n self.kernel_divergence_fn = kernel_divergence_fn\n self.bias_posterior_fn = bias_posterior_fn\n self.bias_posterior_tensor_fn = bias_posterior_tensor_fn\n self.bias_prior_fn = bias_prior_fn\n self.bias_divergence_fn = bias_divergence_fn\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n in_size = tf.compat.dimension_value(input_shape.with_rank_at_least(2)[-1])\n if in_size is None:\n raise ValueError('The last dimension of the inputs to `Dense` '\n 'should be defined. 
Found `None`.')\n self._input_spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: in_size})\n\n # If self.dtype is None, build weights using the default dtype.\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n\n # Must have a posterior kernel.\n self.kernel_posterior = self.kernel_posterior_fn(\n dtype, [in_size, self.units], 'kernel_posterior',\n self.trainable, self.add_variable)\n\n if self.kernel_prior_fn is None:\n self.kernel_prior = None\n else:\n self.kernel_prior = self.kernel_prior_fn(\n dtype, [in_size, self.units], 'kernel_prior',\n self.trainable, self.add_variable)\n\n if self.bias_posterior_fn is None:\n self.bias_posterior = None\n else:\n self.bias_posterior = self.bias_posterior_fn(\n dtype, [self.units], 'bias_posterior',\n self.trainable, self.add_variable)\n\n if self.bias_prior_fn is None:\n self.bias_prior = None\n else:\n self.bias_prior = self.bias_prior_fn(\n dtype, [self.units], 'bias_prior',\n self.trainable, self.add_variable)\n\n self.built = True\n\n def call(self, inputs):\n inputs = tf.convert_to_tensor(value=inputs, dtype=self.dtype)\n\n outputs = self._apply_variational_kernel(inputs)\n outputs = self._apply_variational_bias(outputs)\n if self.activation is not None:\n outputs = self.activation(outputs) # pylint: disable=not-callable\n self._apply_divergence(\n self.kernel_divergence_fn,\n self.kernel_posterior,\n self.kernel_prior,\n self.kernel_posterior_tensor,\n name='divergence_kernel')\n self._apply_divergence(\n self.bias_divergence_fn,\n self.bias_posterior,\n self.bias_prior,\n self.bias_posterior_tensor,\n name='divergence_bias')\n return outputs\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n Args:\n input_shape: Shape tuple (tuple of integers) or list of shape tuples\n (one per output tensor of the layer). Shape tuples can include None for\n free dimensions, instead of an integer.\n\n Returns:\n output_shape: A tuple representing the output shape.\n\n Raises:\n ValueError: If innermost dimension of `input_shape` is not defined.\n \"\"\"\n input_shape = tf.TensorShape(input_shape)\n input_shape = input_shape.with_rank_at_least(2)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\n 'The innermost dimension of `input_shape` must be defined, '\n 'but saw: {}'.format(input_shape))\n return input_shape[:-1].concatenate(self.units)\n\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable) containing the\n configuration of a layer. 
The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n Returns:\n config: A Python dictionary of class keyword arguments and their\n serialized values.\n \"\"\"\n config = {\n 'units': self.units,\n 'activation': (tf.keras.activations.serialize(self.activation)\n if self.activation else None),\n 'activity_regularizer':\n tf.keras.initializers.serialize(self.activity_regularizer),\n }\n function_keys = [\n 'kernel_posterior_fn',\n 'kernel_posterior_tensor_fn',\n 'kernel_prior_fn',\n 'kernel_divergence_fn',\n 'bias_posterior_fn',\n 'bias_posterior_tensor_fn',\n 'bias_prior_fn',\n 'bias_divergence_fn',\n ]\n for function_key in function_keys:\n function = getattr(self, function_key)\n if function is None:\n function_name = None\n function_type = None\n else:\n function_name, function_type = tfp_layers_util.serialize_function(\n function)\n config[function_key] = function_name\n config[function_key + '_type'] = function_type\n base_config = super(_DenseVariational, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same layer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of `get_config`.\n\n Returns:\n layer: A layer instance.\n \"\"\"\n config = config.copy()\n function_keys = [\n 'kernel_posterior_fn',\n 'kernel_posterior_tensor_fn',\n 'kernel_prior_fn',\n 'kernel_divergence_fn',\n 'bias_posterior_fn',\n 'bias_posterior_tensor_fn',\n 'bias_prior_fn',\n 'bias_divergence_fn',\n ]\n for function_key in function_keys:\n serial = config[function_key]\n function_type = config.pop(function_key + '_type')\n if serial is not None:\n config[function_key] = tfp_layers_util.deserialize_function(\n serial,\n function_type=function_type)\n return cls(**config)\n\n def _apply_variational_bias(self, inputs):\n if self.bias_posterior is None:\n self.bias_posterior_tensor = None\n return inputs\n self.bias_posterior_tensor = self.bias_posterior_tensor_fn(\n self.bias_posterior)\n return tf.nn.bias_add(inputs, self.bias_posterior_tensor)\n\n def _apply_divergence(self, divergence_fn, posterior, prior,\n posterior_tensor, name):\n if (divergence_fn is None or\n posterior is None or\n prior is None):\n divergence = None\n return\n divergence = tf.identity(\n divergence_fn(\n posterior, prior, posterior_tensor),\n name=name)\n self.add_loss(divergence)\n\n\nclass DenseReparameterization(_DenseVariational):\n \"\"\"Densely-connected layer class with reparameterization estimator.\n\n This layer implements the Bayesian variational inference analogue to\n a dense layer by assuming the `kernel` and/or the `bias` are drawn\n from distributions. 
By default, the layer implements a stochastic\n forward pass via sampling from the kernel and bias posteriors,\n\n ```none\n kernel, bias ~ posterior\n outputs = activation(matmul(inputs, kernel) + bias)\n ```\n\n It uses the reparameterization estimator [(Kingma and Welling, 2014)][1],\n which performs a Monte Carlo approximation of the distribution integrating\n over the `kernel` and `bias`.\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n distributions.\n\n Upon being built, this layer adds losses (accessible via the `losses`\n property) representing the divergences of `kernel` and/or `bias` surrogate\n posteriors and their respective priors. When doing minibatch stochastic\n optimization, make sure to scale this loss such that it is applied just once\n per epoch (e.g. if `kl` is the sum of `losses` for each element of the batch,\n you should pass `kl / num_examples_per_epoch` to your optimizer).\n\n You can access the `kernel` and/or `bias` posterior and prior distributions\n after the layer is built via the `kernel_posterior`, `kernel_prior`,\n `bias_posterior` and `bias_prior` properties.\n\n #### Examples\n\n We illustrate a Bayesian neural network with [variational inference](\n https://en.wikipedia.org/wiki/Variational_Bayesian_methods),\n assuming a dataset of `features` and `labels`.\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n model = tf.keras.Sequential([\n tfp.layers.DenseReparameterization(512, activation=tf.nn.relu),\n tfp.layers.DenseReparameterization(10),\n ])\n\n logits = model(features)\n neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels, logits=logits)\n kl = sum(model.losses)\n loss = neg_log_likelihood + kl\n train_op = tf.train.AdamOptimizer().minimize(loss)\n ```\n\n It uses reparameterization gradients to minimize the\n Kullback-Leibler divergence up to a constant, also known as the\n negative Evidence Lower Bound. It consists of the sum of two terms:\n the expected negative log-likelihood, which we approximate via\n Monte Carlo; and the KL divergence, which is added via regularizer\n terms which are arguments to the layer.\n\n #### References\n\n [1]: Diederik Kingma and Max Welling. Auto-Encoding Variational Bayes. 
In\n _International Conference on Learning Representations_, 2014.\n https://arxiv.org/abs/1312.6114\n \"\"\"\n\n @docstring_util.expand_docstring(args=doc_args)\n def __init__(\n self,\n units,\n activation=None,\n activity_regularizer=None,\n trainable=True,\n kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=tfp_layers_util.default_multivariate_normal_fn,\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(\n is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n **kwargs):\n # pylint: disable=g-doc-args\n \"\"\"Construct layer.\n\n Args:\n ${args}\n \"\"\"\n # pylint: enable=g-doc-args\n super(DenseReparameterization, self).__init__(\n units=units,\n activation=activation,\n activity_regularizer=activity_regularizer,\n trainable=trainable,\n kernel_posterior_fn=kernel_posterior_fn,\n kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,\n kernel_prior_fn=kernel_prior_fn,\n kernel_divergence_fn=kernel_divergence_fn,\n bias_posterior_fn=bias_posterior_fn,\n bias_posterior_tensor_fn=bias_posterior_tensor_fn,\n bias_prior_fn=bias_prior_fn,\n bias_divergence_fn=bias_divergence_fn,\n **kwargs)\n\n def _apply_variational_kernel(self, inputs):\n self.kernel_posterior_tensor = self.kernel_posterior_tensor_fn(\n self.kernel_posterior)\n self.kernel_posterior_affine = None\n self.kernel_posterior_affine_tensor = None\n return tf.matmul(inputs, self.kernel_posterior_tensor)\n\n\nclass DenseLocalReparameterization(_DenseVariational):\n \"\"\"Densely-connected layer class with local reparameterization estimator.\n\n This layer implements the Bayesian variational inference analogue to\n a dense layer by assuming the `kernel` and/or the `bias` are drawn\n from distributions. By default, the layer implements a stochastic\n forward pass via sampling from the kernel and bias posteriors,\n\n ```none\n kernel, bias ~ posterior\n outputs = activation(matmul(inputs, kernel) + bias)\n ```\n\n It uses the local reparameterization estimator [(Kingma et al., 2015)][1],\n which performs a Monte Carlo approximation of the distribution on the hidden\n units induced by the `kernel` and `bias`. The default `kernel_posterior_fn`\n is a normal distribution which factorizes across all elements of the weight\n matrix and bias vector. Unlike [1]'s multiplicative parameterization, this\n distribution has trainable location and scale parameters which is known as\n an additive noise parameterization [(Molchanov et al., 2017)][2].\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n distributions.\n\n Upon being built, this layer adds losses (accessible via the `losses`\n property) representing the divergences of `kernel` and/or `bias` surrogate\n posteriors and their respective priors. When doing minibatch stochastic\n optimization, make sure to scale this loss such that it is applied just once\n per epoch (e.g. 
if `kl` is the sum of `losses` for each element of the batch,\n you should pass `kl / num_examples_per_epoch` to your optimizer).\n\n You can access the `kernel` and/or `bias` posterior and prior distributions\n after the layer is built via the `kernel_posterior`, `kernel_prior`,\n `bias_posterior` and `bias_prior` properties.\n\n #### Examples\n\n We illustrate a Bayesian neural network with [variational inference](\n https://en.wikipedia.org/wiki/Variational_Bayesian_methods),\n assuming a dataset of `features` and `labels`.\n\n ```python\n import tensorflow_probability as tfp\n\n model = tf.keras.Sequential([\n tfp.layers.DenseLocalReparameterization(512, activation=tf.nn.relu),\n tfp.layers.DenseLocalReparameterization(10),\n ])\n\n logits = model(features)\n neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels, logits=logits)\n kl = sum(model.losses)\n loss = neg_log_likelihood + kl\n train_op = tf.train.AdamOptimizer().minimize(loss)\n ```\n\n It uses local reparameterization gradients to minimize the\n Kullback-Leibler divergence up to a constant, also known as the\n negative Evidence Lower Bound. It consists of the sum of two terms:\n the expected negative log-likelihood, which we approximate via\n Monte Carlo; and the KL divergence, which is added via regularizer\n terms which are arguments to the layer.\n\n #### References\n\n [1]: Diederik Kingma, Tim Salimans, and Max Welling. Variational Dropout and\n the Local Reparameterization Trick. In _Neural Information Processing\n Systems_, 2015. https://arxiv.org/abs/1506.02557\n [2]: Dmitry Molchanov, Arsenii Ashukha, Dmitry Vetrov. Variational Dropout\n Sparsifies Deep Neural Networks. In _International Conference on Machine\n Learning_, 2017. https://arxiv.org/abs/1701.05369\n \"\"\"\n\n @docstring_util.expand_docstring(args=doc_args)\n def __init__(\n self,\n units,\n activation=None,\n activity_regularizer=None,\n trainable=True,\n kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=tfp_layers_util.default_multivariate_normal_fn,\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(\n is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n **kwargs):\n # pylint: disable=g-doc-args\n \"\"\"Construct layer.\n\n Args:\n ${args}\n \"\"\"\n # pylint: enable=g-doc-args\n super(DenseLocalReparameterization, self).__init__(\n units=units,\n activation=activation,\n activity_regularizer=activity_regularizer,\n trainable=trainable,\n kernel_posterior_fn=kernel_posterior_fn,\n kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,\n kernel_prior_fn=kernel_prior_fn,\n kernel_divergence_fn=kernel_divergence_fn,\n bias_posterior_fn=bias_posterior_fn,\n bias_posterior_tensor_fn=bias_posterior_tensor_fn,\n bias_prior_fn=bias_prior_fn,\n bias_divergence_fn=bias_divergence_fn,\n **kwargs)\n\n def _apply_variational_kernel(self, inputs):\n if (not isinstance(self.kernel_posterior, independent_lib.Independent) or\n not isinstance(self.kernel_posterior.distribution, normal_lib.Normal)):\n raise TypeError(\n '`DenseLocalReparameterization` requires '\n '`kernel_posterior_fn` produce an instance of '\n '`tfd.Independent(tfd.Normal)` '\n '(saw: \\\"{}\\\").'.format(self.kernel_posterior.name))\n self.kernel_posterior_affine = normal_lib.Normal(\n 
loc=tf.matmul(inputs, self.kernel_posterior.distribution.loc),\n scale=tf.sqrt(tf.matmul(\n tf.square(inputs),\n tf.square(self.kernel_posterior.distribution.scale))))\n self.kernel_posterior_affine_tensor = (\n self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))\n self.kernel_posterior_tensor = None\n return self.kernel_posterior_affine_tensor\n\n\nclass DenseFlipout(_DenseVariational):\n \"\"\"Densely-connected layer class with Flipout estimator.\n\n This layer implements the Bayesian variational inference analogue to\n a dense layer by assuming the `kernel` and/or the `bias` are drawn\n from distributions. By default, the layer implements a stochastic\n forward pass via sampling from the kernel and bias posteriors,\n\n ```none\n kernel, bias ~ posterior\n outputs = activation(matmul(inputs, kernel) + bias)\n ```\n\n It uses the Flipout estimator [(Wen et al., 2018)][1], which performs a Monte\n Carlo approximation of the distribution integrating over the `kernel` and\n `bias`. Flipout uses roughly twice as many floating point operations as the\n reparameterization estimator but has the advantage of significantly lower\n variance.\n\n The arguments permit separate specification of the surrogate posterior\n (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`\n distributions.\n\n Upon being built, this layer adds losses (accessible via the `losses`\n property) representing the divergences of `kernel` and/or `bias` surrogate\n posteriors and their respective priors. When doing minibatch stochastic\n optimization, make sure to scale this loss such that it is applied just once\n per epoch (e.g. if `kl` is the sum of `losses` for each element of the batch,\n you should pass `kl / num_examples_per_epoch` to your optimizer).\n\n #### Examples\n\n We illustrate a Bayesian neural network with [variational inference](\n https://en.wikipedia.org/wiki/Variational_Bayesian_methods),\n assuming a dataset of `features` and `labels`.\n\n ```python\n import tensorflow_probability as tfp\n\n model = tf.keras.Sequential([\n tfp.layers.DenseFlipout(512, activation=tf.nn.relu),\n tfp.layers.DenseFlipout(10),\n ])\n\n logits = model(features)\n neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels, logits=logits)\n kl = sum(model.losses)\n loss = neg_log_likelihood + kl\n train_op = tf.train.AdamOptimizer().minimize(loss)\n ```\n\n It uses the Flipout gradient estimator to minimize the\n Kullback-Leibler divergence up to a constant, also known as the\n negative Evidence Lower Bound. It consists of the sum of two terms:\n the expected negative log-likelihood, which we approximate via\n Monte Carlo; and the KL divergence, which is added via regularizer\n terms which are arguments to the layer.\n\n #### References\n\n [1]: Yeming Wen, Paul Vicol, Jimmy Ba, Dustin Tran, and Roger Grosse. Flipout:\n Efficient Pseudo-Independent Weight Perturbations on Mini-Batches. 
In\n _International Conference on Learning Representations_, 2018.\n https://arxiv.org/abs/1803.04386\n \"\"\"\n\n @docstring_util.expand_docstring(args=doc_args)\n def __init__(\n self,\n units,\n activation=None,\n activity_regularizer=None,\n trainable=True,\n kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(),\n kernel_posterior_tensor_fn=lambda d: d.sample(),\n kernel_prior_fn=tfp_layers_util.default_multivariate_normal_fn,\n kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n bias_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(\n is_singular=True),\n bias_posterior_tensor_fn=lambda d: d.sample(),\n bias_prior_fn=None,\n bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),\n seed=None,\n **kwargs):\n # pylint: disable=g-doc-args\n \"\"\"Construct layer.\n\n Args:\n ${args}\n seed: Python scalar `int` which initializes the random number\n generator. Default value: `None` (i.e., use global seed).\n \"\"\"\n # pylint: enable=g-doc-args\n super(DenseFlipout, self).__init__(\n units=units,\n activation=activation,\n activity_regularizer=activity_regularizer,\n trainable=trainable,\n kernel_posterior_fn=kernel_posterior_fn,\n kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,\n kernel_prior_fn=kernel_prior_fn,\n kernel_divergence_fn=kernel_divergence_fn,\n bias_posterior_fn=bias_posterior_fn,\n bias_posterior_tensor_fn=bias_posterior_tensor_fn,\n bias_prior_fn=bias_prior_fn,\n bias_divergence_fn=bias_divergence_fn,\n **kwargs)\n # Set additional attributes which do not exist in the parent class.\n self.seed = seed\n\n def _apply_variational_kernel(self, inputs):\n if (not isinstance(self.kernel_posterior, independent_lib.Independent) or\n not isinstance(self.kernel_posterior.distribution, normal_lib.Normal)):\n raise TypeError(\n '`DenseFlipout` requires '\n '`kernel_posterior_fn` produce an instance of '\n '`tfd.Independent(tfd.Normal)` '\n '(saw: \\\"{}\\\").'.format(self.kernel_posterior.name))\n self.kernel_posterior_affine = normal_lib.Normal(\n loc=tf.zeros_like(self.kernel_posterior.distribution.loc),\n scale=self.kernel_posterior.distribution.scale)\n self.kernel_posterior_affine_tensor = (\n self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))\n self.kernel_posterior_tensor = None\n\n input_shape = tf.shape(input=inputs)\n batch_shape = input_shape[:-1]\n\n seed_stream = SeedStream(self.seed, salt='DenseFlipout')\n\n sign_input = random_rademacher(\n input_shape,\n dtype=inputs.dtype,\n seed=seed_stream())\n sign_output = random_rademacher(\n tf.concat([batch_shape,\n tf.expand_dims(self.units, 0)], 0),\n dtype=inputs.dtype,\n seed=seed_stream())\n perturbed_inputs = tf.matmul(\n inputs * sign_input, self.kernel_posterior_affine_tensor) * sign_output\n\n outputs = tf.matmul(inputs, self.kernel_posterior.distribution.loc)\n outputs += perturbed_inputs\n return outputs\n\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable) containing the\n configuration of a layer. The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n Returns:\n config: A Python dictionary of class keyword arguments and their\n serialized values.\n \"\"\"\n config = {\n 'seed': self.seed,\n }\n base_config = super(DenseFlipout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Local Level model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python import util\nfrom tensorflow_probability.python.distributions import linear_gaussian_ssm\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.sts.internal import util as sts_util\nfrom tensorflow_probability.python.sts.structural_time_series import Parameter\nfrom tensorflow_probability.python.sts.structural_time_series import StructuralTimeSeries\n\n\nclass LocalLevelStateSpaceModel(tfd.LinearGaussianStateSpaceModel):\n \"\"\"State space model for a local level.\n\n A state space model (SSM) posits a set of latent (unobserved) variables that\n evolve over time with dynamics specified by a probabilistic transition model\n `p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an\n observation model conditioned on the current state, `p(x[t] | z[t])`. The\n special case where both the transition and observation models are Gaussians\n with mean specified as a linear function of the inputs, is known as a linear\n Gaussian state space model and supports tractable exact probabilistic\n calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for\n details.\n\n The local level model is a special case of a linear Gaussian SSM, in which the\n latent state posits a `level` evolving via a Gaussian random walk:\n\n ```python\n level[t] = level[t-1] + Normal(0., level_scale)\n ```\n\n The latent state is `[level]` and `[level]` is observed (with noise) at each\n timestep.\n\n The parameters `level_scale` and `observation_noise_scale` are each (a batch\n of) scalars. 
The batch shape of this `Distribution` is the broadcast batch\n shape of these parameters and of the `initial_state_prior`.\n\n #### Mathematical Details\n\n The local level model implements a\n `tfp.distributions.LinearGaussianStateSpaceModel` with `latent_size = 1` and\n `observation_size = 1`, following the transition model:\n\n ```\n transition_matrix = [[1.]]\n transition_noise ~ N(loc=0., scale=diag([level_scale]))\n ```\n\n which implements the evolution of `level` described above, and the observation\n model:\n\n ```\n observation_matrix = [[1.]]\n observation_noise ~ N(loc=0, scale=observation_noise_scale)\n ```\n\n #### Examples\n\n A simple model definition:\n\n ```python\n local_level_model = LocalLevelStateSpaceModel(\n num_timesteps=50,\n level_scale=0.5,\n initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1.]))\n\n y = local_level_model.sample() # y has shape [50, 1]\n lp = local_level_model.log_prob(y) # log_prob is scalar\n ```\n\n Passing additional parameter dimensions constructs a batch of models. The\n overall batch shape is the broadcast batch shape of the parameters:\n\n ```python\n local_level_model = LocalLevelStateSpaceModel(\n num_timesteps=50,\n level_scale=tf.ones([10]),\n initial_state_prior=tfd.MultivariateNormalDiag(\n scale_diag=tf.ones([10, 10, 1])))\n\n y = local_level_model.sample(5) # y has shape [5, 10, 10, 50, 1]\n lp = local_level_model.log_prob(y) # has shape [5, 10, 10]\n ```\n \"\"\"\n\n def __init__(self,\n num_timesteps,\n level_scale,\n initial_state_prior,\n observation_noise_scale=0.,\n initial_step=0,\n validate_args=False,\n allow_nan_stats=True,\n name=None):\n \"\"\"Build a state space model implementing a local level.\n\n Args:\n num_timesteps: Scalar `int` `Tensor` number of timesteps to model\n with this distribution.\n level_scale: Scalar (any additional dimensions are treated as batch\n dimensions) `float` `Tensor` indicating the standard deviation of the\n level transitions.\n initial_state_prior: instance of `tfd.MultivariateNormal`\n representing the prior distribution on latent states. Must have\n event shape `[1]` (as `tfd.LinearGaussianStateSpaceModel` requires a\n rank-1 event shape).\n observation_noise_scale: Scalar (any additional dimensions are\n treated as batch dimensions) `float` `Tensor` indicating the standard\n deviation of the observation noise.\n initial_step: Optional scalar `int` `Tensor` specifying the starting\n timestep.\n Default value: 0.\n validate_args: Python `bool`. Whether to validate input\n with asserts. If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n allow_nan_stats: Python `bool`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. 
If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n Default value: `True`.\n name: Python `str` name prefixed to ops created by this class.\n Default value: \"LocalLevelStateSpaceModel\".\n \"\"\"\n\n with tf1.name_scope(name, 'LocalLevelStateSpaceModel',\n [level_scale]) as name:\n\n # The initial state prior determines the dtype of sampled values.\n # Other model parameters must have the same dtype.\n dtype = initial_state_prior.dtype\n\n level_scale = tf.convert_to_tensor(\n value=level_scale, name='level_scale', dtype=dtype)\n observation_noise_scale = tf.convert_to_tensor(\n value=observation_noise_scale,\n name='observation_noise_scale',\n dtype=dtype)\n\n self._level_scale = level_scale\n self._observation_noise_scale = observation_noise_scale\n\n # Construct a linear Gaussian state space model implementing the\n # local level model. See \"Mathematical Details\" in the\n # class docstring for further explanation.\n super(LocalLevelStateSpaceModel, self).__init__(\n num_timesteps=num_timesteps,\n transition_matrix=tf.constant(\n [[1.]], dtype=dtype, name='transition_matrix'),\n transition_noise=tfd.MultivariateNormalDiag(\n scale_diag=level_scale[..., tf.newaxis], name='transition_noise'),\n observation_matrix=tf.constant(\n [[1.]], dtype=dtype, name='observation_matrix'),\n observation_noise=tfd.MultivariateNormalDiag(\n scale_diag=observation_noise_scale[..., tf.newaxis],\n name='observation_noise'),\n initial_state_prior=initial_state_prior,\n initial_step=initial_step,\n allow_nan_stats=allow_nan_stats,\n validate_args=validate_args,\n name=name)\n\n @property\n def level_scale(self):\n \"\"\"Standard deviation of the level transitions.\"\"\"\n return self._level_scale\n\n @property\n def observation_noise_scale(self):\n \"\"\"Standard deviation of the observation noise.\"\"\"\n return self._observation_noise_scale\n\n def _joint_sample_n(self, n, seed=None):\n \"\"\"Draw a joint sample from the prior over latents and observations.\n\n This sampler is specific to LocalLevel models and is faster than the\n generic LinearGaussianStateSpaceModel implementation.\n\n Args:\n n: `int` `Tensor` number of samples to draw.\n seed: Optional `int` `Tensor` seed for the random number generator.\n Returns:\n latents: `float` `Tensor` of shape `concat([[n], self.batch_shape,\n [self.num_timesteps, self.latent_size]], axis=0)` representing samples\n of latent trajectories.\n observations: `float` `Tensor` of shape `concat([[n], self.batch_shape,\n [self.num_timesteps, self.observation_size]], axis=0)` representing\n samples of observed series generated from the sampled `latents`.\n \"\"\"\n with tf.name_scope('joint_sample_n'):\n strm = util.SeedStream(\n seed, 'LocalLevelStateSpaceModel_joint_sample_n')\n\n if self.batch_shape.is_fully_defined():\n batch_shape = self.batch_shape.as_list()\n else:\n batch_shape = self.batch_shape_tensor()\n sample_and_batch_shape = tf.cast(\n prefer_static.concat([[n], batch_shape], axis=0), tf.int32)\n\n # Sample the initial timestep from the prior. 
Since we want\n # this sample to have full batch shape (not just the batch shape\n # of the self.initial_state_prior object which might in general be\n # smaller), we augment the sample shape to include whatever\n # extra batch dimensions are required.\n initial_level = self.initial_state_prior.sample(\n linear_gaussian_ssm._augment_sample_shape( # pylint: disable=protected-access\n self.initial_state_prior,\n sample_and_batch_shape,\n self.validate_args), seed=strm())\n\n # Sample the latent random walk and observed noise, more efficiently than\n # the generic loop in `LinearGaussianStateSpaceModel`.\n level_jumps = (tf.random.normal(\n prefer_static.concat([sample_and_batch_shape,\n [self.num_timesteps - 1]], axis=0),\n dtype=self.dtype, seed=strm()) * self.level_scale[..., tf.newaxis])\n prior_level_sample = tf.cumsum(tf.concat(\n [initial_level, level_jumps], axis=-1), axis=-1)\n prior_observation_sample = prior_level_sample + ( # Sample noise.\n tf.random.normal(prefer_static.shape(prior_level_sample),\n dtype=self.dtype, seed=strm()) *\n self.observation_noise_scale[..., tf.newaxis])\n\n return (prior_level_sample[..., tf.newaxis],\n prior_observation_sample[..., tf.newaxis])\n\n\nclass LocalLevel(StructuralTimeSeries):\n \"\"\"Formal representation of a local level model.\n\n The local level model posits a `level` evolving via a Gaussian random walk:\n\n ```\n level[t] = level[t-1] + Normal(0., level_scale)\n ```\n\n The latent state is `[level]`. We observe a noisy realization of the current\n level: `f[t] = level[t] + Normal(0., observation_noise_scale)` at each\n timestep.\n \"\"\"\n\n def __init__(self,\n level_scale_prior=None,\n initial_level_prior=None,\n observed_time_series=None,\n name=None):\n \"\"\"Specify a local level model.\n\n Args:\n level_scale_prior: optional `tfd.Distribution` instance specifying a prior\n on the `level_scale` parameter. If `None`, a heuristic default prior is\n constructed based on the provided `observed_time_series`.\n Default value: `None`.\n initial_level_prior: optional `tfd.Distribution` instance specifying a\n prior on the initial level. If `None`, a heuristic default prior is\n constructed based on the provided `observed_time_series`.\n Default value: `None`.\n observed_time_series: optional `float` `Tensor` of shape\n `batch_shape + [T, 1]` (omitting the trailing unit dimension is also\n supported when `T > 1`), specifying an observed time series.\n Any priors not explicitly set will be given default values according to\n the scale of the observed time series (or batch of time series). May\n optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes\n a mask `Tensor` to specify timesteps with missing observations.\n Default value: `None`.\n name: the name of this model component.\n Default value: 'LocalLevel'.\n \"\"\"\n\n with tf1.name_scope(\n name, 'LocalLevel', values=[observed_time_series]) as name:\n\n dtype = dtype_util.common_dtype([level_scale_prior, initial_level_prior])\n\n if level_scale_prior is None or initial_level_prior is None:\n if observed_time_series is not None:\n _, observed_stddev, observed_initial = (\n sts_util.empirical_statistics(observed_time_series))\n else:\n observed_stddev, observed_initial = (tf.convert_to_tensor(\n value=1., dtype=dtype), tf.convert_to_tensor(\n value=0., dtype=dtype))\n\n # Heuristic default priors. 
Overriding these may dramatically\n # change inference performance and results.\n if level_scale_prior is None:\n level_scale_prior = tfd.LogNormal(\n loc=tf.math.log(.05 * observed_stddev),\n scale=3.,\n name='level_scale_prior')\n if initial_level_prior is None:\n self._initial_state_prior = tfd.MultivariateNormalDiag(\n loc=observed_initial[..., tf.newaxis],\n scale_diag=(\n tf.abs(observed_initial) + observed_stddev)[..., tf.newaxis],\n name='initial_level_prior')\n else:\n self._initial_state_prior = tfd.MultivariateNormalDiag(\n loc=initial_level_prior.mean()[..., tf.newaxis],\n scale_diag=initial_level_prior.stddev()[..., tf.newaxis])\n\n super(LocalLevel, self).__init__(\n parameters=[\n Parameter('level_scale', level_scale_prior,\n tfb.Chain([tfb.AffineScalar(scale=observed_stddev),\n tfb.Softplus()])),\n ],\n latent_size=1,\n name=name)\n\n @property\n def initial_state_prior(self):\n \"\"\"Prior distribution on the initial latent state (level and scale).\"\"\"\n return self._initial_state_prior\n\n def _make_state_space_model(self,\n num_timesteps,\n param_map,\n initial_state_prior=None,\n initial_step=0):\n\n if initial_state_prior is None:\n initial_state_prior = self.initial_state_prior\n\n return LocalLevelStateSpaceModel(\n num_timesteps,\n initial_state_prior=initial_state_prior,\n initial_step=initial_step,\n **param_map)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Softfloor bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import math as tfp_math\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensor_util\n\n\n__all__ = [\n 'Softfloor',\n]\n\n\nclass Softfloor(bijector.Bijector):\n \"\"\"Compute a differentiable approximation to `tf.math.floor`.\n\n Given `x`, compute a differentiable approximation to `tf.math.floor(x)`.\n It is parameterized by a temperature parameter `t` to control the closeness\n of the approximation at the cost of numerical stability of the inverse.\n\n This `Bijector` has the following properties:\n * This `Bijector` is a map between `R` to `R`.\n * For `t` close to `0`, this bijector mimics the identity function.\n * For `t` approaching `infinity`, this bijector converges pointwise\n to `tf.math.floor` (except at integer points).\n\n Note that for lower temperatures `t`, this bijector becomes more numerically\n unstable. In particular, the inverse for this bijector is not numerically\n stable at lower temperatures, because flooring is not a bijective function (\n and hence any pointwise limit towards the floor function will start to have a\n non-numerically stable inverse).\n\n #### Mathematical details\n\n Let `x` be in `[0.5, 1.5]`. We would like to simulate the floor function on\n this interval. We will do this via a shifted and rescaled `sigmoid`.\n\n `floor(x) = 0` for `x < 1` and `floor(x) = 1` for `x >= 1`.\n If we take `f(x) = sigmoid((x - 1.) / t)`, where `t > 0`, we can see that\n when `t` goes to zero, we get that when `x > 1`, the `f(x)` tends towards `1`\n while `f(x)` tends to `0` when `x < 1`, thus giving us a function that looks\n like the floor function. If we shift `f(x)` by `-sigmoid(-0.5 / t)` and\n rescale by `1 / (sigmoid(0.5 / t) - sigmoid(-0.5 / t))`, we preserve the\n pointwise limit, but also fix `f(0.5) = 0.` and `f(1.5) = 1.`.\n\n Thus we can define `softfloor(x, t) = a * sigmoid((x - 1.) / t) + b`\n\n where\n * `a = 1 / (sigmoid(0.5 / t) - sigmoid(-0.5 / t))`\n * `b = -sigmoid(-0.5 / t) / (sigmoid(0.5 / t) - sigmoid(-0.5 / t))`\n\n\n The implementation of the `Softfloor` bijector follows this, with the caveat\n that we extend the function to all of the real line, by appropriately shifting\n this function for each integer.\n\n #### Examples\n\n Example use:\n\n ```python\n # High temperature.\n soft_floor = Softfloor(temperature=100.)\n x = [2.1, 3.2, 5.5]\n soft_floor.forward(x)\n\n # Low temperature. 
This acts like a floor.\n soft_floor = Softfloor(temperature=0.01)\n soft_floor.forward(x) # Should be close to [2., 3., 5.]\n\n # Ceiling is just a shifted floor at non-integer points.\n soft_ceiling = tfb.Chain(\n [tfb.AffineScalar(1.),\n tfb.Softfloor(temperature=1.)])\n soft_ceiling.forward(x) # Should be close to [3., 5., 6.]\n ```\n \"\"\"\n\n def __init__(self,\n temperature,\n validate_args=False,\n name='softfloor'):\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype(\n [temperature], dtype_hint=tf.float32)\n self._temperature = tensor_util.convert_nonref_to_tensor(\n temperature, name='temperature', dtype=dtype)\n super(Softfloor, self).__init__(\n forward_min_event_ndims=0,\n validate_args=validate_args,\n dtype=dtype,\n name=name)\n\n @classmethod\n def _is_increasing(cls):\n return True\n\n def _forward(self, x):\n # This has a well defined derivative with respect to x.\n # This is because in the range [0.5, 1.5] this is just a rescaled\n # logit function and hence has a derivative. At the end points, because\n # the logit function satisfies 1 - sigma(-x) = sigma(x), we have that\n # the derivative is symmetric around the center of the interval=1.,\n # and hence is continuous at the endpoints.\n t = tf.convert_to_tensor(self.temperature)\n fractional_part = x - tf.math.floor(x)\n # First, because our function is defined on the interval [0.5, 1.5]\n # repeated, we need to rescale our input to reflect that. x - floor(x)\n # will map our input to [0, 1]. However, we need to map inputs whose\n # fractional part is < 0.5 to the right hand portion of the interval.\n # We'll also need to adjust the integer part to reflect this.\n integer_part = tf.math.floor(x)\n integer_part = tf.where(\n fractional_part < 0.5, integer_part - 1., integer_part)\n fractional_part = tf.where(\n fractional_part < 0.5, fractional_part + 0.5, fractional_part - 0.5)\n\n # Rescale so the left tail is 0., and the right tail is 1. This\n # will also guarantee us continuity. Differentiability comes from the\n # fact that the derivative of the sigmoid is symmetric, and hence\n # the two endpoints will have the same value for derivatives.\n # The below calculations are just\n # (sigmoid((f - 0.5) / t) - sigmoid(-0.5 / t)) /\n # (sigmoid(0.5 / t) - sigmoid(0.5 / t))\n # We use log_sum_exp and log_sub_exp to make this calculation more\n # numerically stable.\n\n log_numerator = tfp_math.log_sub_exp(\n (0.5 + fractional_part) / t, 0.5 / t)\n # If fractional_part == 0, then we'll get log(0).\n log_numerator = tf.where(\n tf.equal(fractional_part, 0.),\n dtype_util.as_numpy_dtype(self.dtype)(-np.inf), log_numerator)\n log_denominator = tfp_math.log_sub_exp(\n (0.5 + fractional_part) / t, fractional_part / t)\n # If fractional_part == 0, then we'll get log(0).\n log_denominator = tf.where(\n tf.equal(fractional_part, 0.),\n dtype_util.as_numpy_dtype(self.dtype)(-np.inf), log_denominator)\n log_denominator = tfp_math.log_add_exp(\n log_denominator, tfp_math.log_sub_exp(1. / t, 0.5 / t))\n rescaled_part = tf.math.exp(log_numerator - log_denominator)\n return integer_part + rescaled_part\n\n def _inverse(self, y):\n # We undo the transformation from [0, 1] -> [0, 1].\n # The inverse of the transformation will look like a shifted and scaled\n # logit function. We rewrite this to be more numericaly stable, and will\n # produce a term log(a / b). 
log_{numerator, denominator} below is log(a)\n # and log(b) respectively.\n t = tf.convert_to_tensor(self.temperature)\n fractional_part = y - tf.math.floor(y)\n log_f = tf.math.log(fractional_part)\n log_numerator = tfp_math.log_sub_exp(0.5 / t + log_f, log_f)\n log_numerator = tfp_math.log_add_exp(0., log_numerator)\n # When the fractional part is zero, the numerator is 1.\n log_numerator = tf.where(\n tf.equal(fractional_part, 0.),\n dtype_util.as_numpy_dtype(self.dtype)(0.), log_numerator)\n log_denominator = tfp_math.log_sub_exp(0.5 / t, log_f + 0.5 / t)\n log_denominator = tfp_math.log_add_exp(log_f, log_denominator)\n # When the fractional part is zero, the denominator is 0.5 / t.\n log_denominator = tf.where(\n tf.equal(fractional_part, 0.), 0.5 / t, log_denominator)\n\n new_fractional_part = t * (log_numerator - log_denominator) + 0.5\n # We finally shift this up since the original transformation was from\n # [0.5, 1.5] to [0, 1].\n new_fractional_part = new_fractional_part + 0.5\n return tf.math.floor(y) + new_fractional_part\n\n def _forward_log_det_jacobian(self, x):\n t = tf.convert_to_tensor(self.temperature)\n fractional_part = x - tf.math.floor(x)\n # Because our function is from [0.5, 1.5], we need to transform our\n # fractional_part to that domain like in the forward transformation.\n fractional_part = tf.where(\n fractional_part < 0.5, fractional_part + 0.5, fractional_part - 0.5)\n inner_part = (fractional_part - 0.5) / t\n\n offset = (tf.math.log(t) - tf.math.softplus(0.5 / t) +\n tfp_math.softplus_inverse(0.5 / t))\n\n return (-tf.math.softplus(-inner_part) -\n tf.math.softplus(inner_part) -\n offset)\n\n @property\n def temperature(self):\n return self._temperature\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if (self.temperature is not None and\n is_init != tensor_util.is_ref(self.temperature)):\n assertions.append(assert_util.assert_positive(\n self._temperature,\n message='Argument `temperature` was not positive.'))\n return assertions\n"
] | [
[
"tensorflow.compat.v2.math.tanh",
"tensorflow.compat.v2.Variable",
"numpy.log",
"tensorflow.compat.v2.test.main",
"scipy.stats.genpareto.entropy",
"tensorflow.compat.v2.constant",
"numpy.linspace",
"numpy.abs",
"tensorflow.compat.v2.GradientTape",
"scipy.stats.genpareto",
"scipy.stats.genpareto.mean",
"numpy.float32",
"numpy.exp",
"tensorflow.compat.v2.TensorShape",
"scipy.stats.genpareto.var",
"numpy.array"
],
[
"tensorflow.compat.v2.math.tanh",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.linalg.matrix_transpose",
"tensorflow.compat.v2.linalg.eye",
"tensorflow.compat.v2.linalg.norm",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.linalg.band_part",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.nn.softplus"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.nn.bias_add",
"tensorflow.compat.v2.keras.backend.floatx",
"tensorflow.compat.v2.keras.activations.get",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.keras.initializers.serialize",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.keras.activations.serialize",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.compat.dimension_value",
"tensorflow.compat.v2.keras.layers.InputSpec"
],
[
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.compat.v2.math.floor",
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.softplus",
"tensorflow.compat.v2.math.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Aaronga19/MachineLearning-A-Z | [
"e8e27f0a31ac3e3c05d4029e6e5e14ac8a911153"
] | [
"Apuntes/Preprocess/Missing Data.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 13 20:30:46 2020\n\n@author: Aaronga\n\"\"\"\n\n# Datos faltantes\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv(\"Data.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Tratamiento de los NaN \nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values=\"NaN\", strategy=\"mean\", axis = 0)\nimputer = imputer.fit(X[:, 1:3])\nX[:, 1:3]= imputer.transform(X[:,1:3])\nprint(X)"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.Imputer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
anonyma2020/dagnn | [
"3191b2cc4f923d523ece3962c96a0e3dd54f1a0b",
"3191b2cc4f923d523ece3962c96a0e3dd54f1a0b"
] | [
"ogbg-code/model/asap.py",
"ogb/io/read_graph_pyg.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.nn import (ASAPooling,\n GraphConv, global_mean_pool,\n JumpingKnowledge)\n\n\nclass ASAP(torch.nn.Module):\n def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers, hidden, ratio=0.8, dropout=0, num_class=0):\n super(ASAP, self).__init__()\n\n self.num_class = num_class\n self.max_seq_len = max_seq_len\n self.node_encoder = node_encoder\n\n self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')\n self.convs = torch.nn.ModuleList()\n self.pools = torch.nn.ModuleList()\n self.convs.extend([\n GraphConv(hidden, hidden, aggr='mean')\n for i in range(num_layers - 1)\n ])\n self.pools.extend([\n ASAPooling(hidden, ratio, dropout=dropout)\n for i in range((num_layers) // 2)\n ])\n self.jump = JumpingKnowledge(mode='cat')\n self.lin1 = Linear(num_layers * hidden, hidden)\n # self.lin2 = Linear(hidden, dataset.num_classes)\n\n if self.num_class > 0: # classification\n self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)\n else:\n self.graph_pred_linear_list = torch.nn.ModuleList()\n for i in range(max_seq_len):\n self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n for pool in self.pools:\n pool.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, node_depth, batch = data.x, data.edge_index, data.node_depth, data.batch\n\n x = self.node_encoder(x, node_depth.view(-1, ))\n\n edge_weight = None\n x = F.relu(self.conv1(x, edge_index))\n xs = [global_mean_pool(x, batch)]\n for i, conv in enumerate(self.convs):\n x = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)\n x = F.relu(x)\n xs += [global_mean_pool(x, batch)]\n if i % 2 == 0 and i < len(self.convs) - 1:\n pool = self.pools[i // 2]\n x, edge_index, edge_weight, batch, _ = pool(\n x=x, edge_index=edge_index, edge_weight=edge_weight,\n batch=batch)\n x = self.jump(xs)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n # x = self.lin2(x)\n # return F.log_softmax(x, dim=-1)\n\n if self.num_class > 0:\n return self.graph_pred_linear(x)\n\n pred_list = []\n for i in range(self.max_seq_len):\n pred_list.append(self.graph_pred_linear_list[i](x))\n return pred_list\n\n def __repr__(self):\n return self.__class__.__name__\n\n",
"import pandas as pd\nimport torch\nfrom torch_geometric.data import Data\nimport os.path as osp\nimport numpy as np\nfrom ogb.io.read_graph_raw import read_csv_graph_raw, read_csv_heterograph_raw, read_binary_graph_raw, read_binary_heterograph_raw\nfrom tqdm import tqdm\n\nfrom src.utils_dag import add_order_info_01 # VT\n\n\ndef read_graph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = [], binary = False):\n\n if binary:\n # npz\n graph_list = read_binary_graph_raw(raw_dir, add_inverse_edge)\n else:\n # csv\n graph_list = read_csv_graph_raw(raw_dir, add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)\n \n pyg_graph_list = []\n\n print('Converting graphs into PyG objects...')\n\n for graph in tqdm(graph_list):\n g = Data()\n g.__num_nodes__ = graph['num_nodes']\n g.edge_index = torch.from_numpy(graph['edge_index'])\n\n del graph['num_nodes']\n del graph['edge_index']\n\n if graph['edge_feat'] is not None:\n g.edge_attr = torch.from_numpy(graph['edge_feat'])\n del graph['edge_feat']\n\n if graph['node_feat'] is not None:\n g.x = torch.from_numpy(graph['node_feat'])\n del graph['node_feat']\n\n for key in additional_node_files:\n g[key] = torch.from_numpy(graph[key])\n del graph[key]\n\n for key in additional_edge_files:\n g[key] = torch.from_numpy(graph[key])\n del graph[key]\n\n pyg_graph_list.append(g)\n\n add_order_info_01(g) # DAGNN\n # length of longest path\n # layer ids start with 0 so max, gives actual path length and -1 is not necessary\n g.len_longest_path = float(torch.max(g._bi_layer_idx0).item())\n\n return pyg_graph_list\n\n\ndef read_heterograph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = [], binary = False):\n\n if binary:\n # npz\n graph_list = read_binary_heterograph_raw(raw_dir, add_inverse_edge)\n else:\n # csv\n graph_list = read_csv_heterograph_raw(raw_dir, add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)\n\n pyg_graph_list = []\n\n print('Converting graphs into PyG objects...')\n\n for graph in tqdm(graph_list):\n g = Data()\n \n g.__num_nodes__ = graph['num_nodes_dict']\n g.num_nodes_dict = graph['num_nodes_dict']\n\n # add edge connectivity\n g.edge_index_dict = {}\n for triplet, edge_index in graph['edge_index_dict'].items():\n g.edge_index_dict[triplet] = torch.from_numpy(edge_index)\n\n del graph['edge_index_dict']\n\n if graph['edge_feat_dict'] is not None:\n g.edge_attr_dict = {}\n for triplet in graph['edge_feat_dict'].keys():\n g.edge_attr_dict[triplet] = torch.from_numpy(graph['edge_feat_dict'][triplet])\n\n del graph['edge_feat_dict']\n\n if graph['node_feat_dict'] is not None:\n g.x_dict = {}\n for nodetype in graph['node_feat_dict'].keys():\n g.x_dict[nodetype] = torch.from_numpy(graph['node_feat_dict'][nodetype])\n\n del graph['node_feat_dict']\n\n for key in additional_node_files:\n g[key] = {}\n for nodetype in graph[key].keys():\n g[key][nodetype] = torch.from_numpy(graph[key][nodetype])\n\n del graph[key]\n\n for key in additional_edge_files:\n g[key] = {}\n for triplet in graph[key].keys():\n g[key][triplet] = torch.from_numpy(graph[key][triplet])\n\n del graph[key]\n\n pyg_graph_list.append(g)\n\n return pyg_graph_list\n\nif __name__ == '__main__':\n pass"
] | [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.functional.relu",
"torch.nn.functional.dropout"
],
[
"torch.max",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xgmiao/AutoDL-Projects | [
"0dbbc286c9f56136291590136fffd513af881c36"
] | [
"exps/LFNA/basic-same.py"
] | [
"#####################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #\n#####################################################\n# python exps/LFNA/basic-same.py --srange 1-999 --env_version v1 --hidden_dim 16\n# python exps/LFNA/basic-same.py --srange 1-999 --env_version v2 --hidden_dim\n#####################################################\nimport sys, time, copy, torch, random, argparse\nfrom tqdm import tqdm\nfrom copy import deepcopy\nfrom pathlib import Path\n\nlib_dir = (Path(__file__).parent / \"..\" / \"..\" / \"lib\").resolve()\nif str(lib_dir) not in sys.path:\n sys.path.insert(0, str(lib_dir))\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint\nfrom log_utils import time_string\nfrom log_utils import AverageMeter, convert_secs2time\n\nfrom utils import split_str2indexes\n\nfrom procedures.advanced_main import basic_train_fn, basic_eval_fn\nfrom procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric\nfrom datasets.synthetic_core import get_synthetic_env\nfrom models.xcore import get_model\n\nfrom lfna_utils import lfna_setup\n\n\ndef subsample(historical_x, historical_y, maxn=10000):\n total = historical_x.size(0)\n if total <= maxn:\n return historical_x, historical_y\n else:\n indexes = torch.randint(low=0, high=total, size=[maxn])\n return historical_x[indexes], historical_y[indexes]\n\n\ndef main(args):\n logger, env_info, model_kwargs = lfna_setup(args)\n\n # check indexes to be evaluated\n to_evaluate_indexes = split_str2indexes(args.srange, env_info[\"total\"], None)\n logger.log(\n \"Evaluate {:}, which has {:} timestamps in total.\".format(\n args.srange, len(to_evaluate_indexes)\n )\n )\n\n w_container_per_epoch = dict()\n\n per_timestamp_time, start_time = AverageMeter(), time.time()\n for i, idx in enumerate(to_evaluate_indexes):\n\n need_time = \"Time Left: {:}\".format(\n convert_secs2time(\n per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True\n )\n )\n logger.log(\n \"[{:}]\".format(time_string())\n + \" [{:04d}/{:04d}][{:04d}]\".format(i, len(to_evaluate_indexes), idx)\n + \" \"\n + need_time\n )\n # train the same data\n historical_x = env_info[\"{:}-x\".format(idx)]\n historical_y = env_info[\"{:}-y\".format(idx)]\n # build model\n model = get_model(dict(model_type=\"simple_mlp\"), **model_kwargs)\n # build optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)\n criterion = torch.nn.MSELoss()\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer,\n milestones=[\n int(args.epochs * 0.25),\n int(args.epochs * 0.5),\n int(args.epochs * 0.75),\n ],\n gamma=0.3,\n )\n train_metric = MSEMetric()\n best_loss, best_param = None, None\n for _iepoch in range(args.epochs):\n preds = model(historical_x)\n optimizer.zero_grad()\n loss = criterion(preds, historical_y)\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n # save best\n if best_loss is None or best_loss > loss.item():\n best_loss = loss.item()\n best_param = copy.deepcopy(model.state_dict())\n model.load_state_dict(best_param)\n with torch.no_grad():\n train_metric(preds, historical_y)\n train_results = train_metric.get_info()\n\n metric = ComposeMetric(MSEMetric(), SaveMetric())\n eval_dataset = torch.utils.data.TensorDataset(\n env_info[\"{:}-x\".format(idx)], env_info[\"{:}-y\".format(idx)]\n )\n eval_loader = torch.utils.data.DataLoader(\n eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0\n )\n results = basic_eval_fn(eval_loader, 
model, metric, logger)\n log_str = (\n \"[{:}]\".format(time_string())\n + \" [{:04d}/{:04d}]\".format(idx, env_info[\"total\"])\n + \" train-mse: {:.5f}, eval-mse: {:.5f}\".format(\n train_results[\"mse\"], results[\"mse\"]\n )\n )\n logger.log(log_str)\n\n save_path = logger.path(None) / \"{:04d}-{:04d}.pth\".format(\n idx, env_info[\"total\"]\n )\n w_container_per_epoch[idx] = model.get_w_container().no_grad_clone()\n save_checkpoint(\n {\n \"model_state_dict\": model.state_dict(),\n \"model\": model,\n \"index\": idx,\n \"timestamp\": env_info[\"{:}-timestamp\".format(idx)],\n },\n save_path,\n logger,\n )\n logger.log(\"\")\n per_timestamp_time.update(time.time() - start_time)\n start_time = time.time()\n\n save_checkpoint(\n {\"w_container_per_epoch\": w_container_per_epoch},\n logger.path(None) / \"final-ckp.pth\",\n logger,\n )\n\n logger.log(\"-\" * 200 + \"\\n\")\n logger.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Use the data in the past.\")\n parser.add_argument(\n \"--save_dir\",\n type=str,\n default=\"./outputs/lfna-synthetic/use-same-timestamp\",\n help=\"The checkpoint directory.\",\n )\n parser.add_argument(\n \"--env_version\",\n type=str,\n required=True,\n help=\"The synthetic enviornment version.\",\n )\n parser.add_argument(\n \"--hidden_dim\",\n type=int,\n required=True,\n help=\"The hidden dimension.\",\n )\n parser.add_argument(\n \"--init_lr\",\n type=float,\n default=0.1,\n help=\"The initial learning rate for the optimizer (default is Adam)\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=512,\n help=\"The batch size\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=1000,\n help=\"The total number of epochs.\",\n )\n parser.add_argument(\n \"--srange\", type=str, required=True, help=\"The range of models to be evaluated\"\n )\n parser.add_argument(\n \"--workers\",\n type=int,\n default=4,\n help=\"The number of data loading workers (default: 4)\",\n )\n # Random Seed\n parser.add_argument(\"--rand_seed\", type=int, default=-1, help=\"manual seed\")\n args = parser.parse_args()\n if args.rand_seed is None or args.rand_seed < 0:\n args.rand_seed = random.randint(1, 100000)\n assert args.save_dir is not None, \"The save dir argument can not be None\"\n args.save_dir = \"{:}-{:}-d{:}\".format(\n args.save_dir, args.env_version, args.hidden_dim\n )\n main(args)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.randint",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
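As a hedged illustration of the record above (exps/LFNA/basic-same.py): a minimal, self-contained sketch of its per-timestamp training pattern, i.e. Adam with amsgrad, a MultiStepLR schedule, and best-epoch checkpointing under an MSE loss. The toy data, the two-layer MLP, and every hyper-parameter value below are assumptions for illustration, not the repository's code.

# Sketch only: fit a small MLP on one timestamp's data and restore the
# parameters of the lowest-loss epoch, mirroring the loop in basic-same.py.
import copy
import torch

x = torch.randn(256, 1)                     # assumed toy inputs
y = 2.0 * x + 0.1 * torch.randn(256, 1)     # assumed toy targets
model = torch.nn.Sequential(
    torch.nn.Linear(1, 16), torch.nn.ReLU(), torch.nn.Linear(16, 1)
)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1, amsgrad=True)
criterion = torch.nn.MSELoss()
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[25, 50, 75], gamma=0.3
)

best_loss, best_param = None, None
for _epoch in range(100):
    preds = model(x)
    optimizer.zero_grad()
    loss = criterion(preds, y)
    loss.backward()
    optimizer.step()
    lr_scheduler.step()
    if best_loss is None or loss.item() < best_loss:   # track the best epoch
        best_loss = loss.item()
        best_param = copy.deepcopy(model.state_dict())
model.load_state_dict(best_param)                      # restore best weights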
brenov/ip-usp | [
"06f9f16229a4587e38a3ae89fbe3394d5f1572fd",
"06f9f16229a4587e38a3ae89fbe3394d5f1572fd"
] | [
"05-Image-Descriptors/solution.py",
"other-exercises/fourier/fft_manual.py"
] | [
"# Name: Breno Maurício de Freitas Viana\n# NUSP: 11920060\n# Course Code: SCC5830\n# Year/Semester: 2021/1\n# Assignment 5: Image Descriptors\n\n\nimport math\nimport numpy as np\nimport imageio\nfrom scipy import ndimage\n\n\nnp.seterr(divide='ignore', invalid='ignore')\n\nLEVELS = 256\n\n# ----- (1) Read Parameters\n\n# Get the location of the object image `f`\nf = input().rstrip()\n# Get the location of the large image `g`\ng = input().rstrip()\n# Get the quantisation parameter `b`\nb = int(input())\n\n\n# --- Load images\n\n# Object image `f`\nf = imageio.imread(f)\n# Large image `g`\ng = imageio.imread(g)\n\n\n\n# ----- (2) Preprocessing and Quantisation\n\ndef luminance(img):\n \"\"\"\n Get a RGB image as input and return a black&white image.\n \"\"\"\n N, M, _ = img.shape\n out = np.empty(img.shape)\n out = 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]\n return out.astype(np.uint8)\n\n\n# --- Convert the images to black&white\nf = luminance(f)\ng = luminance(g)\n\n\n# --- Quantise the images to `b` bits\nB = 8 - b\nf = f >> B\ng = g >> B\n\n\n# ----- (3) Image Descriptors\n\ndef nh_descriptor(f):\n \"\"\"\n Return the normalized histogram descriptor.\n \"\"\"\n hist, _ = np.histogram(f, bins=[i for i in range(2 ** b + 1)])\n hist = hist / hist.sum()\n dc = hist / np.linalg.norm(hist)\n return dc\n\ndef ht_descriptor(f):\n \"\"\"\n Return the Haralick texture descriptors (intensity-level co-ocurrence matrix).\n \"\"\"\n # Calculate the co-occurence matrix\n N, M = f.shape\n C = np.zeros((LEVELS, LEVELS))\n for x in range(N - 1):\n for y in range(M - 1):\n i = f[x, y]\n j = f[x + 1, y + 1]\n C[i][j] += 1\n C = C / C.sum()\n #\n # Computing the descriptors\n N, M = C.shape\n #\n energy = np.power(C, 2).sum()\n #\n epsilon = 0.001\n entropy = - (C * np.log(C + epsilon)).sum()\n #\n A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)\n contrast = (1 / math.pow(N, 2)) * (C * A).sum()\n #\n mu_i, si_i = 0, 0\n mu_j, si_j = 0, 0\n for k in range(N):\n a1 = C[k,:].sum()\n mu_i += k * a1\n si_i += math.pow(k - mu_i, 2) * a1\n #\n a2 = C[:,k].sum()\n mu_j += k * a2\n si_j += math.pow(k - mu_j, 2) * a2\n #\n A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)\n correlation = (A * C).sum() - mu_i * mu_j\n correlation /= (si_i * si_j)\n #\n homogeneity = 0\n #\n A = np.fromfunction(lambda i, j: (1 + abs(i - j)), (N, M), dtype=int)\n homogeneity = (C * A).sum()\n #\n # Return the Haralick texture descriptors\n dt = np.array([energy, entropy, contrast, correlation, homogeneity])\n dt = dt / np.linalg.norm(dt)\n return dt\n\ndef hg_descriptor(f):\n \"\"\"\n Return the histogram of oriented gradients descriptor.\n \"\"\"\n wsx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n wsy = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n #\n f = f.astype(np.float64)\n fx = ndimage.convolve(f, wsx)\n fy = ndimage.convolve(f, wsy)\n #\n N, M = f.shape\n #\n div = np.sqrt(np.power(fx, 2) + np.power(fy, 2)).sum()\n Mg = np.sqrt(np.power(fx, 2) + np.power(fy, 2)) / div\n #\n sigma = np.zeros(f.shape)\n sigma = np.arctan(fy / fx) + np.pi / 2\n sigma = np.degrees(sigma)\n sigma = np.digitize(sigma, np.arange(0, 180, 20))\n sigma = sigma.astype(np.uint8)\n #\n dg = np.zeros(9)\n for x in range(N):\n for y in range(M):\n dg[sigma[x][y] - 1] += Mg[x][y]\n #\n dg = dg / np.linalg.norm(dg)\n return dg\n\n\n# --- Compute the image descriptors\n\n# Calculate the object image descriptors\ndc = nh_descriptor(f)\ndt = ht_descriptor(f)\ndg = hg_descriptor(f)\n\nd = 
np.concatenate((dc, dt, dg))\n\n\n\n# ----- (4) Finding Our Object\n\ndef distance(d, di):\n \"\"\"\n Calculate the distance of two descriptors.\n \"\"\"\n return math.sqrt(np.power(d - di, 2).sum())\n\n\n# --- Search for the object image location in the original image\n\nsize = f.shape[0]\nstep = size // 2\nN, M = g.shape\nN = N // step\nM = M // step\n\ndist = np.iinfo(np.uint8).max\n\npos_x = None\npos_y = None\n\nfor i in range(N - 1):\n for j in range(M - 1):\n # Calculate the window\n window = g[i*step:i*step+size, j*step:j*step+size]\n # Calculate the descriptors of the window\n window_dc = nh_descriptor(window)\n window_dt = ht_descriptor(window)\n window_dg = hg_descriptor(window)\n window_d = np.concatenate((window_dc, window_dt, window_dg))\n # Calculate the distance between the window and the object image\n ndist = distance(d, window_d)\n if dist > ndist:\n dist = ndist\n pos_x, pos_y = i, j\n\n\n# --- Print the found location\n\nprint(pos_x, pos_y)\n",
"import numpy as np\n\nN = 4\nf = [0, 100, 200, 300]\nprint('f =', f)\nprint()\n\nf_even = f[0::2]\nprint('f_even =', f_even)\n\nf_even_even = f_even[0::2]\nf_even_odd = f_even[1::2]\nprint('f_even_even =', f_even_even)\nprint('f_even_odd =', f_even_odd)\n\nreseven0 = f_even_even[0] + np.exp(-2j * np.pi * 0 / N) * f_even_odd[0]\nreseven1 = f_even_even[0] - np.exp(-2j * np.pi * 0 / N) * f_even_odd[0]\nreseven = [reseven0, reseven1]\nprint('reseven =', reseven)\nprint()\n\nf_odd = f[1::2]\nprint('f_odd =', f_odd)\n\nf_odd_even = f_odd[0::2]\nf_odd_odd = f_odd[1::2]\nprint('f_odd_even =', f_odd_even)\nprint('f_odd_odd =', f_odd_odd)\n\nresodd0 = f_odd_even[0] + np.exp(-2j * np.pi * 0 / N) * f_odd_odd[0]\nresodd1 = f_odd_even[0] - np.exp(-2j * np.pi * 0 / N) * f_odd_odd[0]\nresodd = [resodd0, resodd1]\nprint('resodd =', resodd)\n\nres0 = reseven[0] + np.exp(-2j * np.pi * 0 / N) * resodd[0]\nres2 = reseven[0] - np.exp(-2j * np.pi * 0 / N) * resodd[0]\n\nres1 = reseven[1] + np.exp(-2j * np.pi * 1 / N) * resodd[1]\nres3 = reseven[1] - np.exp(-2j * np.pi * 1 / N) * resodd[1]\n\nf_manual = np.array([res0, res1, res2, res3]).astype(np.complex64)\nprint(f_manual)\n"
] | [
[
"numpy.log",
"numpy.arctan",
"numpy.power",
"numpy.arange",
"numpy.degrees",
"scipy.ndimage.convolve",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.seterr",
"numpy.fromfunction",
"numpy.iinfo",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.array",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
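As a hedged illustration of the record above (05-Image-Descriptors/solution.py): a minimal sketch of the gray-level co-occurrence step behind its Haralick descriptors — count lower-right neighbours, normalise, then derive energy and entropy. The 4x4 test image and the 0.001 epsilon are assumptions, not the assignment's exact inputs.

# Sketch only: co-occurrence matrix plus energy/entropy descriptors.
import numpy as np

levels = 4
img = np.array([[0, 1, 1, 2],
                [0, 1, 2, 3],
                [1, 2, 3, 3],
                [2, 3, 3, 0]])

C = np.zeros((levels, levels))
for x in range(img.shape[0] - 1):
    for y in range(img.shape[1] - 1):
        C[img[x, y], img[x + 1, y + 1]] += 1   # pair with the lower-right neighbour
C = C / C.sum()

energy = np.power(C, 2).sum()
entropy = -(C * np.log(C + 0.001)).sum()       # epsilon avoids log(0)
print(energy, entropy)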
CityU-AIM-Group/SFPolypDA | [
"3902577cf9549a65be7ba89e2c11a7115158b531"
] | [
"fcos_core/solver/build.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport logging\nfrom .lr_scheduler import WarmupMultiStepLR\n\n\ndef make_optimizer(cfg, model):\n logger = logging.getLogger(\"fcos_core.trainer\")\n params = []\n for key, value in model.named_parameters():\n if not value.requires_grad:\n continue\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if \"bias\" in key:\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n if key.endswith(\".offset.weight\") or key.endswith(\".offset.bias\"):\n logger.info(\"set lr factor of {} as {}\".format(\n key, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR\n ))\n lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)\n if cfg.SOLVER.ADAM:\n optimizer = torch.optim.Adam(params)\n return optimizer\n\n\ndef make_lr_scheduler(cfg, optimizer):\n return WarmupMultiStepLR(\n optimizer,\n cfg.SOLVER.STEPS,\n cfg.SOLVER.GAMMA,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n"
] | [
[
"torch.optim.Adam",
"torch.optim.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
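As a hedged illustration of the record above (fcos_core/solver/build.py): a minimal sketch of its per-parameter-group optimizer construction, where biases receive a scaled learning rate and their own weight decay. The tiny linear model and the hyper-parameter values are assumptions standing in for the cfg.SOLVER entries.

# Sketch only: build torch.optim.SGD from per-parameter groups.
import torch

model = torch.nn.Linear(8, 2)
base_lr, bias_lr_factor = 0.01, 2.0
weight_decay, weight_decay_bias = 1e-4, 0.0

params = []
for name, value in model.named_parameters():
    if not value.requires_grad:
        continue
    lr, wd = base_lr, weight_decay
    if "bias" in name:
        lr, wd = base_lr * bias_lr_factor, weight_decay_bias
    params.append({"params": [value], "lr": lr, "weight_decay": wd})

optimizer = torch.optim.SGD(params, base_lr, momentum=0.9)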
stevezheng23/fewshot_nlp_pt | [
"aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2",
"aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2"
] | [
"src/transformersX/models/cutoffbert/modeling_cutoffbert.py",
"src/transformersX/models/promptbert/modeling_promptbert.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch CUTOFFBERT model. \"\"\"\n\n\nimport math\nimport os\nimport warnings\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss\nfrom torch.distributions.beta import Beta\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n DualPassageEncoderModelOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_cutoffbert import CutoffBertConfig\nfrom ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings\nfrom ..bert.modeling_bert import BertEncoder as CutoffBertEncoder\nfrom ..bert.modeling_bert import BertPooler as CutoffBertPooler\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"CutoffBertConfig\"\n_TOKENIZER_FOR_DOC = \"CutoffBertTokenizer\"\n\nCUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass CutoffBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = CutoffBertConfig\n load_tf_weights = load_tf_weights_in_cutoffbert\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nCUTOFFBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nCUTOFFBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.\",\n CUTOFFBERT_START_DOCSTRING,\n)\nclass CutoffBertModel(CutoffBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = CutoffBertEmbeddings(config)\n self.encoder = CutoffBertEncoder(config)\n\n self.pooler = CutoffBertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we 
keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled\n output) + Cut-off data augmentation support.\n \"\"\",\n CUTOFFBERT_START_DOCSTRING,\n)\nclass CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.cls_token_id = config.cls_token_id\n self.sep_token_id = config.sep_token_id\n self.mask_token_id = config.mask_token_id\n self.masking_prob = config.cutoff_masking_prob\n self.temperature = config.cutoff_temperature\n self.mask_loss_wgt = config.cutoff_mask_loss_wgt\n self.js_loss_wgt = config.cutoff_js_loss_wgt\n self.config = config\n\n self.bert = CutoffBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n \n def _apply_cutoff(self, inputs):\n masked_inputs = inputs.clone()\n valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)\n random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()\n masking_indices = random_masking_indices & valid_masking_indices\n masked_inputs[masking_indices] = self.mask_token_id\n return masked_inputs\n\n @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, 
`optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is None: \n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = self.dropout(outputs[1])\n logits = self.classifier(pooled_output)\n\n if not return_dict:\n return (logits,) + outputs[2:]\n\n return SequenceClassifierOutput(\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n b, l = input_ids.size()\n masked_input_ids = self._apply_cutoff(input_ids.clone())\n flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)\n flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None\n flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None\n flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None\n flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None\n\n flatten_outputs = self.bert(\n flatten_input_ids,\n attention_mask=flatten_attention_mask,\n token_type_ids=flatten_token_type_ids,\n position_ids=flatten_position_ids,\n head_mask=head_mask,\n inputs_embeds=flatten_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n flatten_pooled_output = self.dropout(flatten_outputs[1])\n flatten_logits = self.classifier(flatten_pooled_output)\n\n logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)\n logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()\n\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:\n mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))\n loss += mask_loss * self.mask_loss_wgt\n\n if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:\n kl_loss_fct = KLDivLoss(reduction=\"batchmean\")\n src_logits, trg_logits = logits, masked_logits\n mean_logits = (src_logits + trg_logits) * 0.5\n src_loss = kl_loss_fct(\n F.log_softmax(src_logits / self.temperature, dim=-1),\n F.softmax(mean_logits / self.temperature, dim=-1)\n ) * (self.temperature ** 2)\n trg_loss = kl_loss_fct(\n F.log_softmax(trg_logits / self.temperature, dim=-1),\n F.softmax(mean_logits / self.temperature, dim=-1)\n ) * (self.temperature ** 2)\n js_loss = (src_loss + trg_loss) * 0.5\n loss += js_loss * self.js_loss_wgt\n\n if not return_dict:\n return (loss, logits)\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n )\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch PROMPTBERT model. \"\"\"\n\n\nimport math\nimport os\nimport warnings\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss\nfrom torch.distributions.beta import Beta\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n DualPassageEncoderModelOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_promptbert import PromptBertConfig\nfrom ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings\nfrom ..bert.modeling_bert import BertEncoder as PromptBertEncoder\nfrom ..bert.modeling_bert import BertPooler as PromptBertPooler\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"PromptBertConfig\"\n_TOKENIZER_FOR_DOC = \"PromptBertTokenizer\"\n\nPROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass PromptBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = PromptBertConfig\n load_tf_weights = load_tf_weights_in_promptbert\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nPROMPTBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nPROMPTBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertModel(PromptBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = PromptBertEmbeddings(config)\n self.encoder = PromptBertEncoder(config)\n\n self.pooler = PromptBertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we 
keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).\n \"\"\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertForSequenceClassification(PromptBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.bert = PromptBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output\n for computing source-target similarity).\n \"\"\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):\n def __init__(self, config, cls_loss_wgt=None):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.cls_loss_wgt = cls_loss_wgt\n\n self.bert = PromptBertModel(config)\n self.pooler = PromptBertPooler(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, 2, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=DualPassageEncoderModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. 
Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is None or len(input_ids.size()) < 3:\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = self.pooler(outputs[0])\n pooled_output = self.dropout(pooled_output)\n\n if not return_dict:\n return (pooled_output,) + outputs[2:]\n\n return DualPassageEncoderModelOutput(\n pooled_output=pooled_output,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n b, _, l = input_ids.size()\n flatten_input_ids = input_ids.reshape(-1, l)\n flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None\n flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None\n flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None\n flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None\n\n flatten_outputs = self.bert(\n flatten_input_ids,\n attention_mask=flatten_attention_mask,\n token_type_ids=flatten_token_type_ids,\n position_ids=flatten_position_ids,\n head_mask=head_mask,\n inputs_embeds=flatten_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n flatten_pooled_output = self.pooler(flatten_outputs[0])\n src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)\n src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()\n\n mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()\n cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))\n cl_labels = torch.arange(b).to(labels.device)\n \n loss_fct = CrossEntropyLoss()\n cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))\n\n if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:\n flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))\n src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)\n src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()\n src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))\n trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))\n cls_loss = src_loss + trg_loss\n cls_logits = src_logits + trg_logits\n loss = cl_loss + cls_loss * self.cls_loss_wgt\n logits = cls_logits\n else:\n loss = cl_loss\n logits = cl_logits\n\n if not return_dict:\n return (loss, logits,)\n\n return DualPassageEncoderModelOutput(\n loss=loss,\n logits=logits,\n )\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.KLDivLoss",
"torch.full",
"torch.zeros",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.from_numpy",
"numpy.transpose",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"tensorflow.train.list_variables",
"torch.stack"
],
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.einsum",
"torch.eye",
"torch.from_numpy",
"numpy.transpose",
"torch.arange",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"tensorflow.train.list_variables",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
grassofsky/modules | [
"fe51de837fed6887228f2d3f8a455d5f4602d786",
"fe51de837fed6887228f2d3f8a455d5f4602d786"
] | [
"misc/pythontools/processors/VolumeExtractChannel.py",
"misc/vasp/python/processors/chgcarsource.py"
] | [
"# Name: VolumeExtractChannel \n\nimport inviwopy as ivw\nimport numpy as np\n\nclass VolumeExtractChannel(ivw.Processor):\n def __init__(self, id, name):\n ivw.Processor.__init__(self, id, name)\n self.inport = ivw.data.VolumeInport(\"inport\")\n self.addInport(self.inport, owner=False)\n self.outport = ivw.data.VolumeOutport(\"outport\")\n self.addOutport(self.outport, owner=False)\n\n self.channel = ivw.properties.IntProperty(\"channel\", \"channel\", 0, 0, 4, 1)\n self.addProperty(self.channel, owner=False)\n\n @staticmethod\n def processorInfo():\n return ivw.ProcessorInfo(\n \t\tclassIdentifier = \"org.inviwo.VolumeExtractChannel\", \n \t\tdisplayName = \"Volume Extract Channel\",\n \t\tcategory = \"Volume Operation\",\n \t\tcodeState = ivw.CodeState.Stable,\n \t\ttags = ivw.Tags.PY\n )\n\n def getProcessorInfo(self):\n return VolumeExtractChannel.processorInfo()\n\n def process(self):\n volume = self.inport.getData()\n if len(volume.data.shape) <= 3:\n self.outport.setData(volume)\n return\n\n channels = volume.data.shape[3]\n\n volumeSlice = volume.data[:,:,:, np.clip(self.channel.value, 0, channels-1)]\n newVolume = ivw.data.Volume(volumeSlice)\n newVolume.dataMap = volume.dataMap\n newVolume.modelMatrix = volume.modelMatrix\n newVolume.worldMatrix = volume.worldMatrix\n newVolume.copyMetaDataFrom(volume)\n newVolume.swizzlemask = volume.swizzlemask\n newVolume.interpolation = volume.interpolation\n newVolume.wrapping = volume.wrapping\n\n self.outport.setData(newVolume)\n",
"# Name: ChgcarSource\n\nimport inviwopy as ivw\nimport ivwdataframe as df\nimport atomdata\nimport vasputil\n\nimport functools\nimport math\nimport numpy\nfrom pathlib import Path\n\n# Descriotion found at https://cms.mpi.univie.ac.at/wiki/index.php/CHGCAR\nclass ChgcarSource(ivw.Processor):\n def __init__(self, id, name):\n ivw.Processor.__init__(self, id, name)\n\n self.volumeOutport = ivw.data.VolumeOutport(\"chargedensity\")\n self.addOutport(self.volumeOutport, owner=False)\n\n self.meshOutport = ivw.data.MeshOutport(\"atoms\")\n self.addOutport(self.meshOutport)\n\n self.dataframeOutport = df.DataFrameOutport(\"atomInformation\")\n self.addOutport(self.dataframeOutport)\n\n self.chgcar = ivw.properties.FileProperty(\"chgcar\", \"CHGCAR\")\n self.addProperty(self.chgcar)\n\n self.dataRange = ivw.properties.DoubleMinMaxProperty(\n \"dataRange\", \"Data Range\", 0.0, 1.0, -1.70e308, 1.79e308)\n self.addProperty(self.dataRange)\n self.dataRange.semantics = ivw.properties.PropertySemantics(\"Text\")\n self.dataRange.readOnly = True\n\n self.useCustomRange = ivw.properties.BoolProperty(\n \"useCustomRange\", \"Overwrite Data Range\", False)\n self.addProperty(self.useCustomRange)\n\n self.customDataRange = ivw.properties.DoubleMinMaxProperty(\n \"customDataRange\", \"Custom Data Range\", 0.0, 1.0, -1.70e308, 1.79e308)\n self.addProperty(self.customDataRange)\n self.properties.customDataRange.semantics = ivw.properties.PropertySemantics(\n \"Text\")\n\n self.margin = ivw.properties.FloatProperty(\n \"margin\", \"Border Repetition Margin\", 0.05, 0.0, 0.5, 0.01)\n self.addProperty(self.margin)\n\n self.pm = inviwopy.PickingMapper(self, 1, lambda x: self.callback(x))\n\n @staticmethod\n def processorInfo():\n return ivw.ProcessorInfo(\n classIdentifier=\"org.inviwo.vasp.ChgcarSource\",\n displayName=\"Chgcar Source\",\n category=\"Source\",\n codeState=ivw.CodeState.Stable,\n tags=ivw.Tags([ivw.Tag.PY, ivw.Tag(\"VASP\"),\n ivw.Tag(\"Volume\"), ivw.Tag(\"Mesh\")])\n )\n\n def getProcessorInfo(self):\n return ChgcarSource.processorInfo()\n\n def initializeResources(self):\n pass\n\n def process(self):\n if len(self.chgcar.value) == 0 or not Path(self.chgcar.value).exists():\n return\n\n self.volume, self.atomPos, self.elem, self.nelem, self.elemtype = vasputil.parseFile(\n self.chgcar.value)\n self.volumeDataRange = self.volume.dataMap.dataRange\n\n self.volume.dataMap.dataRange = self.customDataRange.value if self.useCustomRange.value else self.volumeDataRange\n self.volume.dataMap.valueRange = self.customDataRange.value if self.useCustomRange.value else self.volumeDataRange\n\n self.mesh = vasputil.createMesh(self.atomPos, self.elemtype,\n self.volume.basis, self.volume.offset, self.pm, self.margin.value)\n\n self.dataframe = vasputil.createDataFrame(self.atomPos, self.elemtype,\n self.volume.modelMatrix)\n\n print(\"Loaded CHGCAR: {}\\nDims: {}\\nElem: {}\\nNElem {}\\nRange: {}\".format(\n self.chgcar.value, self.volume.dimensions, self.elem, self.nelem, self.volume.dataMap.dataRange))\n\n self.volumeOutport.setData(self.volume)\n self.meshOutport.setData(self.mesh)\n self.dataframeOutport.setData(self.dataframe)\n\n def callback(self, pickevent):\n if (pickevent.state == inviwopy.PickingState.Updated):\n i = pickevent.pickedId\n pos = numpy.dot(numpy.array(self.volume.basis), self.atomPos[i])\n pickevent.setToolTip(\"Atom id: {}\\nType: {}\\nPosition: {}\\nFractional: {}\".format(\n i, self.elemtype[i], pos, self.atomPos[i]))\n else:\n pickevent.setToolTip(\"\")\n"
] | [
[
"numpy.clip"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dertilo/speech-recognition | [
"32dfd0a05480ecb3a4ea3eb9e28628da976e7065"
] | [
"data_related/data_augmentation/signal_augment.py"
] | [
"import os\nimport subprocess\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom typing import Dict\n\nMAX_FREQ = 7999\n\n\ndef to_str(v):\n if isinstance(v, tuple):\n s = \" \".join(str(x) for x in v)\n elif isinstance(v, float) or isinstance(v, int):\n s = str(v)\n else:\n assert False\n\n return s\n\n\ndef build_sox_distortions(audio_file, params):\n param_str = \" \".join([k + \" \" + to_str(v) for k, v in params.items()])\n sox_params = \"sox {} -p {} \".format(audio_file, param_str)\n return sox_params\n\n\ndef build_sox_noise(\n audio_file,\n amod_lowpass_cutoff=0.1,\n lowpass_cutoff=MAX_FREQ,\n highpass_cutoff=1,\n noise_gain=-4,\n):\n \"\"\"\n play original.wav synth whitenoise lowpass 0.1 synth whitenoise amod gain -n 0 lowpass 100 highpass 1\n \"\"\"\n\n sox_params = \"sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}\".format(\n audio_file=audio_file,\n amod_lowpass_cutoff=amod_lowpass_cutoff,\n lowpass_cutoff=lowpass_cutoff,\n highpass_cutoff=highpass_cutoff,\n noise_gain=noise_gain,\n )\n return sox_params\n\n\ndef build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9):\n ac = \"sox {} -p synth whitenoise lowpass {} gain -n {}\".format(\n audio_file, lowpass_cutoff, ac_gain\n )\n dc = \"sox {} -p gain -90 dcshift 0.5\".format(audio_file)\n return \"sox -m <({}) <({}) -p\".format(ac, dc)\n\n\ndef multiply_signals(signal_a, signal_b):\n return (\"sox -T <({signal_a}) <({signal_b}) -p\").format(\n signal_a=signal_a, signal_b=signal_b,\n )\n\n\ndef build_sox_interference(\n interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6\n):\n factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain)\n return multiply_signals(factor, interfere_signal)\n\n\ndef add_signals_trim_to_len(original, signals, augmented):\n signals_to_add = \" \".join([\"<(%s)\" % s for s in signals])\n sox_cmd = \"sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})\".format(\n signals=signals_to_add, original=original, augmented=augmented\n )\n return sox_cmd\n\n\ndef build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict:\n d = {}\n max_high_cutoff = MAX_FREQ\n if np.random.choice([True, False], p=[0.5, 0.5]):\n lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ)))\n d[\"lowpass\"] = lowpass\n max_high_cutoff = lowpass - min_band_width\n\n if np.random.choice([True, False], p=[0.5, 0.5]):\n highpass = int(\n round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff)))\n )\n d[\"highpass\"] = highpass\n\n return d\n\n\ndef augment_with_sox(original_file, audio_files, augmented_file):\n interfere_file = np.random.choice(audio_files)\n min_SNR = 20 # normal:20, less:30, evenless:40\n min_SIR = 5 # normal:10, less:20, evenless:30\n\n signal_gain = round(np.random.uniform(low=-10, high=0), 2)\n signal_params = {\n \"tempo\": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2),\n \"pitch\": int(\n round(np.random.triangular(left=-200, mode=0, right=200))\n ), # normal 100, less: 50, evenless: 30\n \"reverb\": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,),\n \"gain -n\": signal_gain,\n }\n signal_params.update(build_random_bandpass(1000, 1000, 100))\n\n interfere_params = {\n \"tempo\": round(np.random.uniform(low=0.6, high=1.4), 2),\n \"pitch\": int(round(np.random.uniform(low=-500, high=500))),\n \"reverb\": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 
0, 0),\n \"gain -n\": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2),\n }\n interfere_params.update(build_random_bandpass(50, 100, 1000))\n\n # params = {'signal_params':signal_params,'interfere_params':interfere_params,'noise_power':noise_power}\n # pprint(params)\n\n signal = build_sox_distortions(original_file, signal_params)\n interfere_signal = build_sox_distortions(interfere_file, interfere_params)\n\n noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2)\n lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ)))\n highpass = int(round(np.random.uniform(low=1, high=lowpass)))\n noise = build_sox_noise(\n original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power\n )\n\n interf = build_sox_interference(\n interfere_file,\n interfere_signal,\n lowpass_cutoff=np.random.uniform(0.5, 2),\n ac_gain=int(round(np.random.uniform(-9, -3))),\n )\n\n sox_cmd = add_signals_trim_to_len(\n original_file, [signal, noise, interf], augmented_file\n )\n FNULL = open(os.devnull, \"w\")\n subprocess.call([\"bash\", \"-c\", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT)\n # subprocess.call([\"bash\", \"-c\", sox_cmd])\n # output = subprocess.check_output([\"bash\", \"-c\", sox_cmd])\n # if len(output)>0 and 'FAIL' in output:\n # print(output)\n # return 1 if len(output)>0 else 0\n\n\ndef augment_with_specific_params():\n signal_gain = 0\n signal_params = dict(tempo=1.0, pitch=0, reverb=0)\n signal_params[\"gain -n\"] = 0\n signal = build_sox_distortions(original, signal_params)\n interfere_signal = build_sox_distortions(\n interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50)\n )\n noise = build_sox_noise(\n original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10\n )\n interf = build_sox_interference(interfering, interfere_signal)\n sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented)\n subprocess.call([\"bash\", \"-c\", sox_cmd])\n\n\nif __name__ == \"__main__\":\n import librosa\n original = \"../../original.wav\"\n augmented = \"/tmp/augmented.wav\"\n interfering = \"../../interference.wav\"\n\n # augment_with_specific_params()\n\n for k in range(9):\n augment_with_sox(original, [interfering], \"/tmp/augmented_%d.wav\" % k)\n # assert False\n # path = os.environ['HOME']+\"/data/asr_data/SPANISH\"\n # audio_files = librosa.util.find_files(path)\n\n #\n # with open('spanish_train_manifest.csv') as f:\n # audio_text_files = f.readlines()\n # audio_files = [x.strip().split(\",\")[0] for x in audio_text_files]\n #\n # for k in tqdm(range(100000)):\n # original = np.random.choice(audio_files)\n # random_augmentation(original, audio_files, augmented)\n"
] | [
[
"numpy.random.triangular",
"numpy.random.uniform",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marbre/mlir-npcomp | [
"30adf9e6b0c1e94db38050a9e143f20a5a461d17"
] | [
"frontends/pytorch/test/acap_regression/test_jit_add2.py"
] | [
"# -*- Python -*-\n# This file is licensed under a pytorch-style license\n# See frontends/pytorch/LICENSE for license information.\n\nimport torch\nimport npcomp.frontends.pytorch as torch_mlir\nimport npcomp.frontends.pytorch.test as test\n\n# RUN: %PYTHON %s | FileCheck %s\n\ndev = torch_mlir.mlir_device()\nt0 = torch.randn((4,4), device=dev)\nt1 = torch.randn((4,4), device=dev)\n\nt2 = t0 + t1\n\n#\n# Check the result tensor against the CPU\n#\nt0_cpu = t0.to('cpu')\nt1_cpu = t1.to('cpu')\nt2_cpu = t2.to('cpu')\n\nprint (t0_cpu, \" +\\n\", t1_cpu, \" =\\n\", t2_cpu)\n\n# CHECK: PASS! add2 check\ntest.compare(t2, t0_cpu + t1_cpu, \"add2\")\n"
] | [
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GuoSuiming/mindspore | [
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff",
"48afc4cfa53d970c0b20eedfb46e039db2a133d5",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"48afc4cfa53d970c0b20eedfb46e039db2a133d5",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"48afc4cfa53d970c0b20eedfb46e039db2a133d5",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"48afc4cfa53d970c0b20eedfb46e039db2a133d5",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"59a277756eb4faad9ac9afcc7fd526e8277d4994"
] | [
"tests/st/ops/gpu/test_tanh_grad_grad_op.py",
"mindspore/ops/_grad/grad_nn_ops.py",
"tests/ut/python/parallel/test_dropout_do_mask.py",
"tests/st/ops/cpu/test_smooth_l1_loss_grad_op.py",
"tests/ut/python/dataset/test_random_vertical_flip_with_bbox.py",
"tests/ut/python/dataset/test_shuffle.py",
"tests/st/ops/gpu/test_layer_norm_grad_op.py",
"tests/st/ops/cpu/test_layer_norm_op.py",
"tests/ut/python/metrics/test_cosine_similarity.py",
"mindspore/nn/probability/bijector/softplus.py",
"tests/ut/python/pipeline/parse/test_operator.py",
"tests/st/ops/cpu/test_softmax_cross_entropy_with_logits_op.py",
"tests/ut/python/nn/test_image_gradients.py",
"model_zoo/official/cv/resnext50/eval.py",
"tests/st/ops/graph_kernel/test_logsoftmax.py",
"model_zoo/official/cv/centerface/src/convert_weight_mobilenetv2.py",
"model_zoo/official/nlp/mass/src/dataset/base.py",
"model_zoo/research/cv/FaceAttribute/src/FaceAttribute/cross_entropy.py",
"tests/ut/python/train/test_amp.py",
"tests/st/ops/gpu/test_div_op.py",
"tests/st/ops/cpu/test_relu_op.py",
"model_zoo/official/cv/faster_rcnn/src/FasterRcnn/bbox_assign_sample_stage2.py",
"tests/ut/python/parallel/test_loss_and_optimizer.py",
"tests/ut/python/parallel/test_broadcast_dict.py",
"tests/ut/python/parallel/test_l2normalize.py",
"tests/st/ops/cpu/test_apply_adagrad_op.py",
"tests/ut/python/dataset/test_random_resize_with_bbox.py",
"tests/st/model_zoo_tests/transformer/test_transformer.py",
"tests/st/networks/models/bert/src/bert_model.py",
"model_zoo/official/recommend/wide_and_deep/src/metrics.py",
"tests/ut/python/parallel/test_auto_parallel_parameter_cast.py",
"tests/ut/python/dataset/test_rename.py",
"tests/st/ops/ascend/test_tbe_ops/test_conv.py",
"tests/st/pynative/test_pynative_embeddinglookup.py",
"tests/ut/python/parallel/test_train_and_eval.py",
"tests/st/pynative/dynamic_shape/test_pynative_unique.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore.ops import composite as C\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n\nclass NetTanhGrad(nn.Cell):\n def __init__(self):\n super(NetTanhGrad, self).__init__()\n self.tanh_grad = G.TanhGrad()\n\n def construct(self, y, grad):\n return self.tanh_grad(y, grad)\n\n\nclass NetTanhGradGrad(nn.Cell):\n def __init__(self, forward_net):\n super(NetTanhGradGrad, self).__init__()\n self.forward_net = forward_net\n self.gradOps = C.GradOperation(get_all=True, sens_param=True)\n\n def construct(self, y, grad, dout):\n backward_net = self.gradOps(self.forward_net)\n return backward_net(y, grad, dout)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef tanh_grad_grad_base(dtype, loss):\n np.random.seed(1)\n shape = (4, 2)\n y_np = (np.random.rand(*shape) * 2 - 1).astype(dtype)\n grad_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)\n dout_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)\n\n y_np_32 = y_np.astype(np.float32)\n grad_np_32 = grad_np.astype(np.float32)\n dout_np_32 = dout_np.astype(np.float32)\n dy_np = (dout_np_32 * grad_np_32 * (-2.0) * y_np_32).astype(dtype)\n dgrad_np = (dout_np_32 * (1 - y_np_32 * y_np_32)).astype(dtype)\n\n y_ms = Tensor(y_np)\n grad_ms = Tensor(grad_np)\n dout_ms = Tensor(dout_np)\n forward_net = NetTanhGrad()\n net = NetTanhGradGrad(forward_net)\n dy_ms, dgrad_ms = net(y_ms, grad_ms, dout_ms)\n\n assert np.allclose(dy_ms.asnumpy(), dy_np, loss, loss)\n assert np.allclose(dgrad_ms.asnumpy(), dgrad_np, loss, loss)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tanh_grad_grad_float16():\n tanh_grad_grad_base(np.float16, 1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tanh_grad_grad_float32():\n tanh_grad_grad_base(np.float32, 1e-4)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Define the grad rules of neural network related operations.\"\"\"\nimport os\nimport numpy as np\nfrom mindspore.ops import _selected_grad_ops as SG\nfrom mindspore.ops.primitive import constexpr\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops.operations import nn_ops as nps\nfrom .grad_base import bprop_getters\nfrom .. import functional as F\nfrom .. import operations as P\nfrom ...common import dtype as mstype\nfrom ..composite.multitype_ops.zeros_like_impl import zeros_like\nfrom ..operations import _grad_ops as G\nfrom ..operations import _inner_ops as inner\nfrom ... import context\n\nenv_force_bprop_seq = os.getenv(\"ENV_FORCE_BPROP_SEQ\")\n\n@bprop_getters.register(P.BiasAdd)\ndef get_bprop_bias_add(self):\n \"\"\"Grad definition for `BiasAdd` operation.\"\"\"\n bias_grad = SG.BiasAddGrad(self.data_format)\n\n def bprop(x, w, out, dout):\n return dout, bias_grad(dout)\n\n return bprop\n\n\n@bprop_getters.register(P.Conv2D)\ndef get_bprop_conv2d(self):\n \"\"\"Grad definition for `Conv2D` operation.\"\"\"\n input_grad = P.Conv2DBackpropInput(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n filter_grad = G.Conv2DBackpropFilter(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(dout, w, get_shape(x))\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(dout, x, get_shape(w))\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(nps.Conv3D)\ndef get_bprop_conv3d(self):\n \"\"\"Grad definition for `Conv3D` operation.\"\"\"\n input_grad = nps.Conv3DBackpropInput(\n self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n filter_grad = G.Conv3DBackpropFilter(\n self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(w, dout, get_shape(x))\n dw = filter_grad(x, dout, get_shape(w))\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(nps.Conv3DTranspose)\ndef get_bprop_conv3d_transpose(self):\n \"\"\"Grad definition for `Conv3DTranspose` operation.\"\"\"\n input_grad = nps.Conv3D(\n out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode=\"pad\",\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n 
filter_grad = G.Conv3DBackpropFilter(\n out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode=\"pad\",\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n input_size = self.input_size\n\n def bprop(x, w, out, dout):\n dx = input_grad(dout, w)\n dw = filter_grad(dout, x, F.shape(w))\n return dx, dw, zeros_like(input_size)\n\n return bprop\n\n\n@bprop_getters.register(inner.ExtractImagePatches)\ndef get_bprop_extract_image_patches(self):\n \"\"\"Grad definition for `ExtractImagePatches` operation.\"\"\"\n get_shape = P.Shape()\n reshape = P.Reshape()\n extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,\n strides=self.strides,\n rates=self.rates,\n padding=self.padding)\n concat = P.Concat(axis=-1)\n expand_dims = P.ExpandDims()\n scatter_nd = P.ScatterNd()\n dtype = P.DType()\n fill = P.Fill()\n slice_op = P.Slice()\n transpose = P.Transpose()\n cast = P.Cast()\n matmul = P.MatMul()\n\n _, _, ksizes_row, ksizes_col = self.ksizes\n\n def bprop(x, out, dout):\n x_shape = get_shape(x)\n x_batch, x_depth, x_row, x_col = x_shape\n x_indices_num = x_row * x_col + 1\n x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)\n x_idx = reshape(x_idx, (1, 1, x_row, x_col))\n x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)\n x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))\n\n out_shape = get_shape(out)\n _, _, out_row, out_col = out_shape\n out_indices_num = out_row * out_col * ksizes_row * ksizes_col\n out_idx = F.tuple_to_array(range(out_indices_num))\n out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))\n\n idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))\n idx_tensor = reshape(idx_tensor, (-1, 2))\n sp_shape = (x_indices_num, out_indices_num)\n sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)\n sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))\n\n grad = transpose(dout, (0, 2, 3, 1))\n grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))\n grad = transpose(grad, (1, 2, 3, 4, 0, 5))\n grad = reshape(grad, (-1, x_batch * x_depth))\n\n jac = matmul(sp_tensor, grad)\n dx = reshape(jac, (x_row, x_col, x_batch, x_depth))\n dx = transpose(dx, (2, 3, 0, 1))\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.DepthwiseConv2dNative)\ndef get_bprop_depthwise_conv2d_native(self):\n \"\"\"Grad definition for `DepthwiseConv2dNative` operation.\"\"\"\n input_grad = G.DepthwiseConv2dNativeBackpropInput(\n self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,\n self.dilation, self.group\n )\n filter_grad = G.DepthwiseConv2dNativeBackpropFilter(\n self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,\n self.dilation, self.group\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(get_shape(x), w, dout)\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(x, get_shape(w), dout)\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(P.MaxPoolWithArgmax)\ndef get_bprop_max_pool_with_argmax(self):\n \"\"\"Grad definition for `MaxPoolWithArgmax` operation.\"\"\"\n maxpool_grad = G.MaxPoolGradWithArgmax(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x, out, dout):\n dx = maxpool_grad(x, dout[0], out[1])\n return 
(dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.MaxPoolGrad)\ndef get_bprop_max_pool_grad_grad(self):\n \"\"\"Grad definition for `MaxPoolGrad` operation.\"\"\"\n maxpool_grad_grad = G.MaxPoolGradGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x1, x2, grad, out, dout):\n dx1 = zeros_like(x1)\n dx2 = zeros_like(x2)\n dgrad = maxpool_grad_grad(x1, x2, dout)\n return (dx1, dx2, dgrad)\n\n return bprop\n\n\n@bprop_getters.register(G.MaxPoolGradGrad)\ndef get_bprop_max_pool_grad_grad_grad(self):\n \"\"\"Grad definition for `MaxPoolGradGrad` operation.\"\"\"\n maxpool_grad = G.MaxPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x1, x2, grad, out, dout):\n dx1 = zeros_like(x1)\n dx2 = zeros_like(x2)\n dgrad = maxpool_grad(x1, x2, dout)\n return (dx1, dx2, dgrad)\n\n return bprop\n\n\n@bprop_getters.register(P.MaxPool)\ndef get_bprop_max_pool_grad(self):\n \"\"\"Grad definition for `MaxPool` operation.\"\"\"\n maxpool_grad = G.MaxPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop(x, out, dout):\n dx = maxpool_grad(x, out, dout)\n return (dx,)\n\n return bprop\n\n\ndef _windowed_output_size(input_size, ksize, stride, pad_mode):\n \"\"\"\n helper func for AvgPoolGrad\n \"\"\"\n\n tmp_output = 0\n tmp_pad_need = 0\n tmp_pad_before = 0\n tmp_pad_after = 0\n if pad_mode == 'VALID':\n tmp_output = (input_size - ksize + stride) // stride\n tmp_pad_before = 0\n tmp_pad_after = 0\n elif pad_mode == 'SAME':\n tmp_output = (input_size + stride - 1) // stride\n tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)\n tmp_pad_before = tmp_pad_need // 2\n tmp_pad_after = tmp_pad_need - tmp_pad_before\n return tmp_output, tmp_pad_before, tmp_pad_after\n\n\n@constexpr\ndef _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):\n \"\"\"\n helper func for AvgPoolGrad.\n\n `assist_input_matrix` is a 2d matrix with input_shape after padding,\n the value of element which is padded is 0, else are 1.\n For each element of output, it is mapped for slide window: `[h*h_stride : h*h_stride + h_ksize,\n w*w_stride : w*w_stride + w_ksize]` of `assist_input_matrix`, so the sum of slide window is the\n number of input that associate with output element.\n \"\"\"\n\n n_input, c_input, h_input, w_input = x_shape\n h_ksize, w_ksize = ksize[2], ksize[3]\n h_stride, w_stride = stride[2], stride[3]\n n_output = n_input\n c_output = c_input\n h_output, w_output = 0, 0\n pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0\n h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,\n h_stride, pad_mode)\n w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,\n w_stride, pad_mode)\n\n output_size = n_output * c_output * h_output * w_output\n output_shape = (n_output, c_output, h_output, w_output)\n output = np.array([0.0] * output_size)\n output = np.reshape(output, output_shape)\n\n in_shape_after_padding_2d = (h_input + pad_top + pad_bottom, w_input + pad_left + pad_right)\n assist_input_matrix = np.ones(in_shape_after_padding_2d).astype(np.float32)\n if pad_top > 0:\n assist_input_matrix[:pad_top, :] = 0\n if pad_bottom > 0:\n assist_input_matrix[-pad_bottom:, :] = 0\n if pad_left > 0:\n assist_input_matrix[:, :pad_left] = 0\n if pad_right > 0:\n assist_input_matrix[:, -pad_right:] = 0\n\n for h in range(h_output):\n for w in range(w_output):\n curr_input = 
assist_input_matrix[h*h_stride : h*h_stride + h_ksize, w*w_stride : w*w_stride + w_ksize]\n curr_sum = np.sum(curr_input)\n if curr_sum > 0:\n output[:, :, h, w] = 1. / curr_sum\n return Tensor(output, x_dtype)\n\n\n@constexpr\ndef _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):\n kernel_matrix = np.ones(kernel_matrix_shape)\n return Tensor(kernel_matrix, x_dtype)\n\n\n@bprop_getters.register(P.AvgPool)\ndef get_bprop_avg_pool_grad(self):\n \"\"\"Grad definition for `AvgPool` operation.\"\"\"\n\n # the parameter of AvgPoolGrad in GPU and TBE/CPU is not same\n if self.target == \"GPU\":\n avgpool_grad_gpu = G.AvgPoolGradGpu(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop_gpu(x, out, dout):\n dx = avgpool_grad_gpu(x, out, dout)\n return (dx,)\n\n bprop_fn = bprop_gpu\n\n elif self.target == \"CPU\":\n avgpool_grad_cpu = G.AvgPoolGradCpu(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop_cpu(x, out, dout):\n dx = avgpool_grad_cpu(x, out, dout)\n return (dx,)\n\n bprop_fn = bprop_cpu\n\n elif self.target == \"GE\":\n avgpool_grad_ge = G.AvgPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n shape_op = P.Shape()\n\n def bprop_ge(x, out, dout):\n dx = avgpool_grad_ge(shape_op(x), dout)\n return (dx,)\n\n bprop_fn = bprop_ge\n\n else:\n avgpool_grad_vm = G.AvgPoolGradVm(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n k_size_nchw = avgpool_grad_vm.kernel_size\n stride_nchw = avgpool_grad_vm.strides\n pad_mode = self.pad_mode\n\n def bprop_vm(x, out, dout):\n x_shape_nchw = F.shape(x)\n x_dtype = F.dtype(x)\n kernel_matrix_shape = (1, x_shape_nchw[1],\n k_size_nchw[2],\n k_size_nchw[3])\n mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)\n kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)\n dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)\n return (dx,)\n\n bprop_fn = bprop_vm\n\n return bprop_fn\n\n\n@bprop_getters.register(P.DropoutGenMask)\ndef get_bprop_dropout_gen_mask(self):\n \"\"\"Grad definition for `DropoutGenMask` operation.\"\"\"\n\n def bprop(shape, keep_prob, out, dout):\n return (zeros_like(shape), zeros_like(keep_prob))\n\n return bprop\n\n\n@bprop_getters.register(P.DropoutDoMask)\ndef get_bprop_dropout_do_mask(self):\n \"\"\"Grad definition for `DropoutDoMask` operation.\"\"\"\n do_mask = P.DropoutDoMask()\n\n def bprop(x, y, keep_prob, out, dout):\n return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))\n\n return bprop\n\n\n@bprop_getters.register(P.Mish)\ndef get_bprop_mish(self):\n \"\"\"Grad definition for `Mish` operation.\"\"\"\n tanh = P.Tanh()\n tanh_grad = SG.TanhGrad()\n softplus = P.Softplus()\n softplus_grad = G.SoftplusGrad()\n\n def bprop(x, out, dout):\n dx1 = tanh(softplus(x))\n dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)\n dx = (dx1 * dout + dx2)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.SeLU)\ndef get_bprop_selu(self):\n \"\"\"Grad definition for `SeLU` operation.\"\"\"\n scale = 1.0507009873554804934193349852946\n elu_grad = G.EluGrad()\n\n def bprop(x, out, dout):\n dx = elu_grad(dout, out) * scale\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.MulNoNan)\ndef get_bprop_mul_no_nan(self):\n \"\"\"Grad definition for `MulNoNan` operation.\"\"\"\n 
mul_no_nan = P.MulNoNan()\n reduce_sum = P.ReduceSum()\n reshape = P.Reshape()\n\n def bprop(x, y, out, dout):\n x_shape = F.shape(x)\n y_shape = F.shape(y)\n dx = mul_no_nan(dout, y)\n dy = mul_no_nan(x, dout)\n broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)\n if broadcast_x != ():\n dx = reshape(reduce_sum(dx, broadcast_x), x_shape)\n if broadcast_y != ():\n dy = reshape(reduce_sum(dy, broadcast_y), y_shape)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.ReLU)\ndef get_bprop_relu(self):\n \"\"\"Grad definition for `ReLU` operation.\"\"\"\n input_grad = G.ReluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.ReluGrad)\ndef get_bprop_relu_grad(self):\n \"\"\"Grad definition for `ReLUGrad` operation.\"\"\"\n input_grad = G.ReluGrad()\n\n def bprop(grad, y, out, dout):\n dgrad = input_grad(dout, y)\n return dgrad, zeros_like(y)\n\n return bprop\n\n\n@bprop_getters.register(P.ReLU6)\ndef get_bprop_relu6(self):\n \"\"\"Grad definition for `ReLU6` operation.\"\"\"\n input_grad = G.ReLU6Grad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.ReLUV2)\ndef get_bprop_relu_v2(self):\n \"\"\"Grad definition for `ReLUV2` operation.\"\"\"\n input_grad = G.ReluGradV2()\n\n def bprop(x, out, dout):\n mask = out[1]\n dx = input_grad(dout[0], mask)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.HSwish)\ndef get_bprop_hswish(self):\n \"\"\"Grad definition for `HSwish` operation.\"\"\"\n input_grad = G.HSwishGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.HSigmoid)\ndef get_bprop_hsigmoid(self):\n \"\"\"Grad definition for `HSigmoid` operation.\"\"\"\n input_grad = G.HSigmoidGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Elu)\ndef get_bprop_elu(self):\n \"\"\"Grad definition for `Elu` operation.\"\"\"\n input_grad = G.EluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Sigmoid)\ndef get_bprop_sigmoid(self):\n \"\"\"Grad definition for `Sigmoid` operation.\"\"\"\n input_grad = G.SigmoidGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.SigmoidGrad)\ndef get_bprop_sigmoid_grad(self):\n \"\"\"Grad definition for `SigmoidGrad` operation.\"\"\"\n sigmoid_grad = G.SigmoidGrad()\n\n def bprop(y, grad, out, dout):\n dy = dout * grad * (1. 
- 2 * y)\n dgrad = sigmoid_grad(y, dout)\n return dy, dgrad\n\n return bprop\n\n\n@constexpr\ndef _get_transpose_axis(x_shp, axis):\n rank = len(x_shp)\n if axis < 0:\n axis += rank\n reverse_axis = [i for i in range(rank)]\n reverse_axis[axis] = rank - 1\n reverse_axis[rank - 1] = axis\n return tuple(reverse_axis)\n\n\n@bprop_getters.register(P.Softmax)\ndef get_bprop_softmax(self):\n \"\"\"Grad definition for `Softmax` operation.\"\"\"\n sum_func = P.ReduceSum(keep_dims=True)\n sub = P.Sub()\n mul = P.Mul()\n get_shape = P.Shape()\n transpose = P.Transpose()\n axis = self.axis\n if not isinstance(axis, int):\n axis = axis[0]\n\n def bprop(x, out, dout):\n # dx = (dout - sum(dout * out)) * out\n # This formula is correct only when the `axis` is the last dimension.\n # In order to support the scenario where the `axis` is other values,\n # we transpose the data of the `axis` dimension to the last dimension for calculation,\n # and then transpose it back after the calculation.\n reverse_axis = _get_transpose_axis(get_shape(x), axis)\n out = transpose(out, reverse_axis)\n dout = transpose(dout, reverse_axis)\n dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))\n dx = transpose(dx, reverse_axis)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.LogSoftmax)\ndef get_bprop_log_softmax(self):\n \"\"\"Grad definition for `LogSoftmax` operation.\"\"\"\n logsoftmax_grad = G.LogSoftmaxGrad(self.axis)\n\n def bprop(x, out, dout):\n dx = logsoftmax_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Softplus)\ndef get_bprop_softplus(self):\n \"\"\"Grad definition for `Softplus` operation.\"\"\"\n softplus_grad = G.SoftplusGrad()\n\n def bprop(x, out, dout):\n dx = softplus_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Softsign)\ndef get_bprop_softsign(self):\n \"\"\"Grad definition for `Softsign` operation.\"\"\"\n mul = P.Mul()\n absolute = P.Abs()\n div = P.Div()\n square = P.Square()\n\n def bprop(x, out, dout):\n dx = mul(dout, div(1, square(1 + absolute(x))))\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Tanh)\ndef get_bprop_tanh(self):\n \"\"\"Grad definition for `Tanh` operation.\"\"\"\n tanh_grad = SG.TanhGrad()\n\n def bprop(x, out, dout):\n dx = tanh_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.TanhGrad)\ndef get_bprop_tanh_grad(self):\n \"\"\"Grad definition for `TanhGrad` operation.\"\"\"\n tanh_grad = G.TanhGrad()\n\n def bprop(y, grad, out, dout):\n dy = dout * -2.0 * grad * y\n dgrad = tanh_grad(y, dout)\n return dy, dgrad\n\n return bprop\n\n\n@bprop_getters.register(P.Gelu)\ndef get_bprop_gelu(self):\n \"\"\"Grad definition for `Gelu` operation.\"\"\"\n input_grad = G.GeluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.FastGelu)\ndef get_bprop_fast_gelu(self):\n \"\"\"Grad definition for `FastGelu` operation.\"\"\"\n input_grad = G.FastGeluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.FusedBatchNorm)\ndef get_bprop_fused_batch_norm(self):\n \"\"\"Grad definition for `FusedBatchNorm` operation.\"\"\"\n input_grad = G.FusedBatchNormGrad(self.epsilon, self.momentum)\n target_cpu = False\n if self.target == \"CPU\":\n input_grad = G.FusedBatchNormGradCPU(self.epsilon, self.momentum)\n target_cpu = True\n def bprop(x, scale, b, mean, variance, out, dout):\n saved_mean = out[3]\n saved_variance = out[4]\n if 
target_cpu:\n out = input_grad(dout[0], x, scale, b, saved_mean, saved_variance)\n else:\n out = input_grad(dout[0], x, scale, saved_mean, saved_variance)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.FusedBatchNormEx)\ndef get_bprop_fused_batch_norm_ex(self):\n \"\"\"Grad definition for `FusedBatchNormEx` operation.\"\"\"\n input_grad = G.FusedBatchNormGradEx(self.epsilon, self.momentum, self.format)\n\n def bprop(x, scale, b, mean, variance, out, dout):\n saved_mean = out[3]\n saved_variance = out[4]\n reserve = out[5]\n out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.InstanceNorm)\ndef get_bprop_instance_norm(self):\n \"\"\"Grad definition for `InstanceNorm` operation.\"\"\"\n is_training = self.is_training\n input_grad = G.InstanceNormGrad(is_training, self.epsilon, self.momentum)\n\n def bprop(x, gamma, beta, mean, variance, out, dout):\n saved_mean = out[1]\n saved_variance = out[2]\n out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)\n dx = out[0]\n dgamma = out[1]\n dbeta = out[2]\n return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.BatchNorm)\ndef get_bprop_batch_norm(self):\n \"\"\"Grad definition for `BatchNorm` operation.\"\"\"\n is_training = self.is_training\n input_grad = G.BatchNormGrad(is_training, self.epsilon)\n\n def bprop(x, scale, b, mean, variance, out, dout):\n if is_training:\n saved_reserve_1 = out[3]\n saved_reserve_2 = out[4]\n else:\n saved_reserve_1 = mean\n saved_reserve_2 = variance\n out = input_grad(dout[0], x, scale, saved_reserve_1, saved_reserve_2)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.LayerNorm)\ndef get_bprop_layer_norm(self):\n \"\"\"Grad definition for `LayerNorm` operation.\"\"\"\n layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)\n\n def bprop(x, gamma, beta, out, dout):\n dx, d_gamma, d_beta = layer_norm_grad(\n x, dout[0], out[2], out[1], gamma)\n return dx, d_gamma, d_beta\n\n return bprop\n\n\n@bprop_getters.register(G.LayerNormGrad)\ndef get_bprop_layer_norm_grad(self):\n \"\"\"Grad definition for `LayerNormGrad` operation.\"\"\"\n layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)\n\n def bprop(x, dy, variance, mean, gamma, out, dout):\n d_x, d_dy, d_gamma = layer_norm_grad_grad(\n x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])\n return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma\n\n return bprop\n\n\n@bprop_getters.register(P.L2Normalize)\ndef get_bprop_l2normalize(self):\n \"\"\"Grad definition for `L2Normalize` operation.\"\"\"\n input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)\n\n def bprop(x, out, dout):\n dx = input_grad(x, out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)\ndef get_bprop_softmax_cross_entropy_with_logits(self):\n \"\"\"Grad definition for `SoftmaxCrossEntropyWithLogits` operation.\"\"\"\n expand = P.ExpandDims()\n\n def bprop(logits, labels, out, dout):\n grad = out[1]\n grad = grad * expand(dout[0], -1)\n return grad, zeros_like(labels)\n\n return 
bprop\n\n\n@bprop_getters.register(P.NLLLoss)\ndef get_bprop_nll_loss(self):\n \"\"\"Grad definition for `NLLLoss` operation.\"\"\"\n nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)\n\n def bprop(x, target, weight, out, dout):\n total_weight = out[1]\n dout_x = dout[0]\n dx = nll_loss_grad(x, dout_x, target, weight, total_weight)\n return dx, zeros_like(target), zeros_like(weight)\n\n return bprop\n\n\n@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)\ndef get_bprop_sparse_softmax_cross_entropy_with_logits(self):\n \"\"\"Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation.\"\"\"\n is_grad = self.is_grad\n grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)\n\n def bprop(logits, labels, out, dout):\n grad = out[0]\n if not is_grad:\n # if construct use loss\n grad = grad_op(logits, labels)\n grad = F.depend(grad, out)\n grad = grad * dout\n return grad, zeros_like(labels)\n\n return bprop\n\n\n@bprop_getters.register(P.ResizeBilinear)\ndef get_bprop_resize_bilinear(self):\n \"\"\"Grad definition for `ResizeBilinear` operation.\"\"\"\n resize_grad = G.ResizeBilinearGrad(self.align_corners)\n\n def bprop(x, out, dout):\n dx = resize_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.OneHot)\ndef get_bprop_onehot(self):\n \"\"\"Grad definition for `OneHot` operation.\"\"\"\n\n def bprop(indices, depth, on_value, off_value, out, dout):\n return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)\n\n return bprop\n\n\n@constexpr\ndef _range_op(start, limit, delta, dtype):\n \"\"\"helper function for Grad TopK\"\"\"\n output_tensor = Tensor(list(range(start, limit, delta)), dtype)\n return output_tensor\n\n@constexpr\ndef _get_1d_shape(in_shape):\n \"\"\"helper function for Grad TopK\"\"\"\n out_shape = 1\n for i in in_shape:\n out_shape *= i\n return (out_shape,)\n\n@bprop_getters.register(P.TopK)\ndef get_bprop_top_kv2(self):\n \"\"\"Grad definition for `TopK` operation.\"\"\"\n scatter = P.ScatterNd()\n expand_dims = P.ExpandDims()\n shape_op = P.Shape()\n reshape_op = P.Reshape()\n dtype = P.DType()\n\n def bprop(input_x, k, out, dout):\n\n in_shape = shape_op(input_x)\n in_lastdim = in_shape[-1]\n\n indices = out[1]\n ind_shape = shape_op(indices)\n ind_lastdim = ind_shape[-1]\n\n ind_2d = reshape_op(indices, (-1, ind_lastdim))\n outerdim = shape_op(ind_2d)[0]\n\n # [0, outterdim, 2*outerdim, ..., (k-1)*outerdim]\n indices_dtype = dtype(indices)\n range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)\n\n # expand_dims to (k, 1), then broadcast\n ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))\n in_shape_1d = _get_1d_shape(in_shape)\n\n out_grad = reshape_op(\n scatter(\n expand_dims(ind, -1),\n reshape_op(dout[0], (-1,)),\n in_shape_1d),\n in_shape)\n return out_grad, zeros_like(k)\n\n return bprop\n\n\n@bprop_getters.register(P.SmoothL1Loss)\ndef get_bprop_smooth_l1_loss(self):\n \"\"\"Grad definition for `SmoothL1Loss` operation.\"\"\"\n grad = G.SmoothL1LossGrad(self.beta)\n\n def bprop(prediction, target, out, dout):\n dx = grad(prediction, target, dout)\n dy = grad(target, prediction, dout)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.L2Loss)\ndef get_bprop_l2_loss(self):\n \"\"\"Grad definition for `L2Loss` operation.\"\"\"\n\n def bprop(x, out, dout):\n dx = x * dout\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.RNNTLoss)\ndef get_bprop_rnnt_loss(self):\n \"\"\"Grad definition for `RNNTLoss` 
operation.\"\"\"\n\n def bprop(acts, labels, act_lens, label_lens, out, dout):\n grad = out[1]\n return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)\n return bprop\n\n\n@bprop_getters.register(P.PReLU)\ndef get_bprop_prelu(self):\n \"\"\"Grad definition for `PReLU` operation.\"\"\"\n grad = G.PReLUGrad()\n\n def bprop(x, w, out, dout):\n dx, dw = grad(dout, x, w)\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(P.LSTM)\ndef get_bprop_lstm(self):\n \"\"\"Grad definition for `LSTM` operation.\"\"\"\n lstm_grad_data = G.LSTMGradData(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n\n lstm_grad_weight = G.LSTMGradWeight(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n lstm_grad = G.LSTMGrad(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n\n def bprop(x, hx, cx, w, out, dout):\n y, _, _, reserve, state = out\n dy, dhy, dcy, _, _ = dout\n dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)\n dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)\n return dx, dhx, dcx, dw\n\n #\n def bprop_cpu(x, hx, cx, w, out, dout):\n y, hy, cy, reserve, _ = out\n dy, dhy, dcy, _, _ = dout\n dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)\n return dx, dhx, dcx, dw\n\n if context.get_context('device_target') == \"CPU\":\n return bprop_cpu\n\n return bprop\n\n\n@bprop_getters.register(P.DynamicRNN)\ndef get_bprop_dynamic_rnn(self):\n \"\"\"Grad definition for `DynamicRNN` operation.\"\"\"\n dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,\n direction=self.direction,\n cell_depth=self.cell_depth,\n use_peephole=self.use_peephole,\n keep_prob=self.keep_prob,\n cell_clip=self.cell_clip,\n num_proj=self.num_proj,\n time_major=self.time_major,\n forget_bias=self.forget_bias)\n expand_dims = P.ExpandDims()\n\n def bprop(x, w, b, seq_length, init_h, init_c, out, dout):\n dy, dh, dc, _, _, _, _, _, = dout\n dh = dh[-1]\n dc = dc[-1]\n y, h, c, i, j, f, o, tanhct = out\n dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,\n c, dy, dh, dc, i, j, f, o, tanhct)\n dh_prev = expand_dims(dh_prev, 0)\n dc_prev = expand_dims(dc_prev, 0)\n return dx, dw, db, (0), dh_prev, dc_prev\n return bprop\n\n\n@bprop_getters.register(P.DynamicGRUV2)\ndef get_bprop_dynamic_gru_v2(self):\n \"\"\"Grad definition for `DynamicGRUV2` operation.\"\"\"\n dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,\n self.num_proj, self.time_major, self.gate_order,\n self.reset_after)\n\n def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):\n y, out_h, update, reset, new, hidden_new = out\n dy, dout_h, _, _, _, _ = dout\n\n dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,\n out_h, dy, dout_h[-1], update,\n reset, new, hidden_new, None, None)\n return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev\n return bprop\n\n\n@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)\ndef get_bprop_sigmoid_crossentropy_with_logits(self):\n \"\"\"Grad definition for 
`SigmoidCrossEntropyWithLogits` operation.\"\"\"\n op = G.SigmoidCrossEntropyWithLogitsGrad()\n\n def bprop(x, y, out, dout):\n dx = op(x, y, dout)\n return (dx, zeros_like(y))\n\n return bprop\n\n\n@bprop_getters.register(P.Pad)\ndef get_bprop_pad(self):\n \"\"\"Grad definition for `Pad` operation.\"\"\"\n shape_op = P.Shape()\n paddings = self.paddings\n\n def bprop(x, out, dout):\n begin = ()\n for item in paddings:\n begin += (item[0],)\n shp = shape_op(x)\n dx = P.Slice()(dout, begin, shp)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.MirrorPad)\ndef get_bprop_mirror_pad(self):\n \"\"\"Grad definition for `MirrorPad` operation.\"\"\"\n mirror_pad_grad = G.MirrorPadGrad(self.mode)\n\n def bprop(x, paddings, out, dout):\n dx = mirror_pad_grad(dout, paddings)\n return (dx, zeros_like(paddings))\n\n return bprop\n\n\n@bprop_getters.register(P.ROIAlign)\ndef get_bprop_roi_align(self):\n \"\"\"Grad definition for `ROIAlign` operation.\"\"\"\n shape_op = P.Shape()\n pooled_height = self.pooled_height\n pooled_width = self.pooled_width\n spatial_scale = self.spatial_scale\n sample_num = self.sample_num\n\n def bprop(inputs, rois, out, dout):\n inputs_shape = shape_op(inputs)\n dx = G.ROIAlignGrad(inputs_shape,\n pooled_height,\n pooled_width,\n spatial_scale,\n sample_num,\n )(dout, rois)\n return dx, zeros_like(rois)\n\n return bprop\n\n\n@bprop_getters.register(P.Conv2DBackpropInput)\ndef get_bprop_conv2d_backprop_input(self):\n \"\"\"Grad definition for `Conv2DBackpropInput` operation.\"\"\"\n filter_grad = G.Conv2DBackpropFilter(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n input_grad = P.Conv2D(\n self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n\n def bprop(x, w, f_sizes, out, dout):\n dx = input_grad(dout, w)\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(x, dout, F.shape(w))\n return dx, dw, zeros_like(f_sizes)\n\n return bprop\n\n\n@bprop_getters.register(P.BinaryCrossEntropy)\ndef get_bprop_binary_cross_entropy(self):\n \"\"\"Grad definition for `BinaryCrossEntropy` operation.\"\"\"\n grad = G.BinaryCrossEntropyGrad(self.reduction)\n\n def bprop(x, y, weight, out, dout):\n dx = grad(x, y, dout, weight)\n return dx, zeros_like(y), zeros_like(weight)\n\n return bprop\n\n@bprop_getters.register(P.KLDivLoss)\ndef get_bprop_kl_div_loss(self):\n \"\"\"Grad definition for `KLDivLoss` operation.\"\"\"\n grad = G.KLDivLossGrad(self.reduction)\n\n def bprop(x, y, out, dout):\n dx, dy = grad(x, y, dout)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.Dropout)\ndef get_bprop_dropout(self):\n \"\"\"Grad definition for `Dropout` operation.\"\"\"\n grad = G.DropoutGrad(self.keep_prob)\n\n def bprop(x, out, dout):\n _, mask = out\n dy, _ = dout\n dx = grad(dy, mask)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.CTCLoss)\ndef get_bprop_ctc_loss(self):\n \"\"\"Grad definition for `CTCLoss` operation\"\"\"\n expand = P.ExpandDims()\n\n def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):\n grad_loss = out[1]\n grad = grad_loss * expand(dout[0], -1)\n return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)\n\n return bprop\n\n\n@bprop_getters.register(P.BasicLSTMCell)\ndef 
get_bprop_basic_lstm_cell(self):\n \"\"\"Grad definition for `BasicLSTMCell` operation.\"\"\"\n basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(\n forget_bias=self.forget_bias,\n activation=self.activation\n )\n\n basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()\n\n basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)\n\n def bprop(x, h, c, w, b, out, dout):\n _, _, it, jt, ft, ot, tanhct = out\n dct, dht, _, _, _, _, _ = dout\n dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)\n dxt, dht = basic_lstm_cell_input_grad(dgate, w)\n dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)\n return dxt, dht, dct_1, dw, db\n return bprop\n\n\n@bprop_getters.register(P.LRN)\ndef get_bprop_lrn(self):\n \"\"\"Grad definition for `LRN` operation.\"\"\"\n grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)\n\n def bprop(x, out, dout):\n dx = grad(dout, x, out)\n return (dx,)\n\n return bprop\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import Cell, TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\n\nclass Net(Cell):\n def __init__(self, mul_weight, strategy1=None, strategy2=None):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.mul2 = P.Mul().shard(strategy1)\n self.dropout_do_mask = P.DropoutDoMask().shard(strategy2)\n self.dropout_gen_mask = P.DropoutGenMask()\n self.get_shape = P.Shape()\n self.cast = P.Cast()\n self.mul_weight = Parameter(mul_weight, \"w1\")\n self.mul_weight2 = Parameter(mul_weight, \"w2\")\n self.keep_prob = Tensor(0.9)\n\n def construct(self, x, b):\n out = self.mul(x, self.mul_weight)\n shape = self.get_shape(out)\n dtype = P.DType()(out)\n keep_prob = self.cast(self.keep_prob, dtype)\n mask = self.dropout_gen_mask(shape, keep_prob)\n out = self.dropout_do_mask(out, mask, keep_prob)\n out = self.mul2(out, self.mul_weight2)\n return out\n\n\n_x = Tensor(np.ones([128, 64]), dtype=ms.float32)\n_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32)\n_b = Tensor(np.ones([128, 64]), dtype=ms.float32)\n\n\ndef compile_net(net):\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _executor.compile(train_net, _x, _b)\n context.reset_auto_parallel_context()\n\n\ndef test_dropout_do_mask_data_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((16, 1), (16, 1))\n strategy2 = ((16, 1),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_model_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((1, 16), (1, 16))\n strategy2 = ((1, 16),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_hybrid_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((4, 4), (4, 4))\n strategy2 = ((4, 4),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_auto_parallel():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=16, global_rank=0)\n net = Net(_w1)\n compile_net(net)\n\n\ndef test_dropout_do_mask_repeat_calc():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((4, 4), (4, 4))\n strategy2 = ((2, 4),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.ops.composite import GradOperation\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n\n\nclass Net(nn.Cell):\n def __init__(self, sigma=1.0):\n super(Net, self).__init__()\n self.SmoothL1Loss = P.SmoothL1Loss(sigma)\n\n def construct(self, pred, gt):\n return self.SmoothL1Loss(pred, gt)\n\n\nclass Grad(nn.Cell):\n def __init__(self, network):\n super(Grad, self).__init__()\n self.grad = GradOperation(get_all=True, sens_param=True)\n self.network = network\n\n def construct(self, pred, gt, dout):\n return self.grad(self.network)(pred, gt, dout)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_net():\n pred = np.random.randn(2, 4).astype(np.float32)\n gt = np.random.randn(2, 4).astype(np.float32)\n dout = np.random.randn(2, 4).astype(np.float32)\n smooth_l1_loss_grad = Grad(Net())\n output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout))\n print(\"------------- input ---------------\")\n print(\"predict:\\n\", pred)\n print(\"grount truth:\\n\", gt)\n print(\"dout:\\n\", dout)\n print(\"------------- output ---------------\")\n print(\"predict grad:\\n\", output[0].asnumpy())\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting RandomVerticalFlipWithBBox op in DE\n\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as c_vision\n\nfrom mindspore import log as logger\nfrom util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \\\n config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5\n\nGENERATE_GOLDEN = False\n\n# Updated VOC dataset with correct annotations - DATA_DIR\nDATA_DIR_VOC = \"../data/dataset/testVOC2012_2\"\n# COCO dataset - DATA_DIR, ANNOTATION_DIR\nDATA_DIR_COCO = [\"../data/dataset/testCOCO/train/\", \"../data/dataset/testCOCO/annotations/train.json\"]\n\n\ndef test_random_vertical_flip_with_bbox_op_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_c\")\n # Load dataset\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n\ndef test_random_vertical_flip_with_bbox_op_coco_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n Testing with Coco dataset\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_coco_c\")\n # load dataset\n dataCoco1 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp, \"bbox\")\n\n\ndef 
test_random_vertical_flip_with_bbox_op_rand_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n tests with MD5 check, expected to pass\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_rand_c\")\n original_seed = config_get_set_seed(29847)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n # Load dataset\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(0.8)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n filename = \"random_vertical_flip_with_bbox_01_c_result.npz\"\n save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_random_vertical_flip_with_bbox_op_edge_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n applied on dynamically generated edge case, expected to pass\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_edge_c\")\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n # maps to convert data into valid edge case data\n dataVoc1 = dataVoc1.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype))],\n input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n # Test Op added to list of Operations here\n dataVoc2 = dataVoc2.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype)),\n test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n\ndef test_random_vertical_flip_with_bbox_op_invalid_c():\n \"\"\"\n Test RandomVerticalFlipWithBBox Op on invalid constructor parameters, expected to raise ValueError\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_invalid_c\")\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n try:\n test_op = c_vision.RandomVerticalFlipWithBBox(2)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", 
\"bbox\"])\n\n for _ in dataVoc2.create_dict_iterator(num_epochs=1):\n break\n\n except ValueError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Input prob is not within the required interval of (0.0 to 1.0).\" in str(err)\n\n\ndef test_random_vertical_flip_with_bbox_op_bad_c():\n \"\"\"\n Tests RandomVerticalFlipWithBBox Op with invalid bounding boxes, expected to catch multiple errors\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_bad_c\")\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, \"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow, \"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.NegativeXY, \"negative value\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, \"4 features\")\n\n\nif __name__ == \"__main__\":\n test_random_vertical_flip_with_bbox_op_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_coco_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_rand_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_edge_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_invalid_c()\n test_random_vertical_flip_with_bbox_op_bad_c()\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\nfrom util import save_and_check_dict\n\n# Note: Number of rows in test.data dataset: 12\nDATA_DIR = [\"../data/dataset/testTFTestAllTypes/test.data\"]\nGENERATE_GOLDEN = False\n\n\ndef test_shuffle_01():\n \"\"\"\n Test shuffle: buffer_size < number-of-rows-in-dataset\n \"\"\"\n logger.info(\"test_shuffle_01\")\n # define parameters\n buffer_size = 5\n seed = 1\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size=buffer_size)\n\n filename = \"shuffle_01_result.npz\"\n save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)\n\n\ndef test_shuffle_02():\n \"\"\"\n Test shuffle: buffer_size = number-of-rows-in-dataset\n \"\"\"\n logger.info(\"test_shuffle_02\")\n # define parameters\n buffer_size = 12\n seed = 1\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size=buffer_size)\n\n filename = \"shuffle_02_result.npz\"\n save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)\n\n\ndef test_shuffle_03():\n \"\"\"\n Test shuffle: buffer_size=2 (minimum size), number-of-rows-in-dataset > 2\n \"\"\"\n logger.info(\"test_shuffle_03\")\n # define parameters\n buffer_size = 2\n seed = 1\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size)\n\n filename = \"shuffle_03_result.npz\"\n save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)\n\n\ndef test_shuffle_04():\n \"\"\"\n Test shuffle: buffer_size=2 (minimum size), number-of-rows-in-dataset = 2\n \"\"\"\n logger.info(\"test_shuffle_04\")\n # define parameters\n buffer_size = 2\n seed = 1\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, num_samples=2)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size=buffer_size)\n\n filename = \"shuffle_04_result.npz\"\n save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)\n\n\ndef test_shuffle_05():\n \"\"\"\n Test shuffle: buffer_size > number-of-rows-in-dataset\n \"\"\"\n logger.info(\"test_shuffle_05\")\n # define parameters\n buffer_size = 13\n seed = 1\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size=buffer_size)\n\n filename = \"shuffle_05_result.npz\"\n save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)\n\n\ndef test_shuffle_06():\n \"\"\"\n Test shuffle: with set seed, both datasets\n \"\"\"\n logger.info(\"test_shuffle_06\")\n # define parameters\n buffer_size = 13\n seed = 1\n\n # apply dataset operations\n data1 
= ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n ds.config.set_seed(seed)\n data1 = data1.shuffle(buffer_size=buffer_size)\n\n data2 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)\n data2 = data2.shuffle(buffer_size=buffer_size)\n\n for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),\n data2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n np.testing.assert_equal(item1, item2)\n\n\ndef test_shuffle_exception_01():\n \"\"\"\n Test shuffle exception: buffer_size<0\n \"\"\"\n logger.info(\"test_shuffle_exception_01\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle(buffer_size=-1)\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Input buffer_size is not within the required interval of (2 to 2147483647)\" in str(e)\n\n\ndef test_shuffle_exception_02():\n \"\"\"\n Test shuffle exception: buffer_size=0\n \"\"\"\n logger.info(\"test_shuffle_exception_02\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle(buffer_size=0)\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Input buffer_size is not within the required interval of (2 to 2147483647)\" in str(e)\n\n\ndef test_shuffle_exception_03():\n \"\"\"\n Test shuffle exception: buffer_size=1\n \"\"\"\n logger.info(\"test_shuffle_exception_03\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle(buffer_size=1)\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Input buffer_size is not within the required interval of (2 to 2147483647)\" in str(e)\n\n\ndef test_shuffle_exception_05():\n \"\"\"\n Test shuffle exception: Missing mandatory buffer_size input parameter\n \"\"\"\n logger.info(\"test_shuffle_exception_05\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle()\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"buffer_size\" in str(e)\n\n\ndef test_shuffle_exception_06():\n \"\"\"\n Test shuffle exception: buffer_size wrong type, boolean value False\n \"\"\"\n logger.info(\"test_shuffle_exception_06\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle(buffer_size=False)\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"buffer_size\" in str(e)\n\n\ndef test_shuffle_exception_07():\n \"\"\"\n Test shuffle exception: buffer_size wrong type, boolean value True\n \"\"\"\n logger.info(\"test_shuffle_exception_07\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR)\n ds.config.set_seed(1)\n try:\n data1 = data1.shuffle(buffer_size=True)\n sum([1 for _ in data1])\n\n except Exception as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"buffer_size\" in str(e)\n\n\nif __name__ == '__main__':\n test_shuffle_01()\n test_shuffle_02()\n test_shuffle_03()\n test_shuffle_04()\n test_shuffle_05()\n test_shuffle_06()\n test_shuffle_exception_01()\n test_shuffle_exception_02()\n test_shuffle_exception_03()\n 
test_shuffle_exception_05()\n test_shuffle_exception_06()\n test_shuffle_exception_07()\n logger.info('\\n')\n",
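The shuffle cases above pin down the buffer_size contract: anything below 2 is rejected, buffer_size equal to the row count gives a full shuffle, and a shared seed makes two pipelines agree row for row. A small pure-Python sketch of the buffered-shuffle idea those bounds come from (an illustration of the concept, not the dataset engine's implementation):

import random

def buffer_shuffle(iterable, buffer_size, seed=1):
    # Streaming shuffle with a bounded buffer: fill the buffer, then repeatedly
    # emit a randomly chosen element and refill from the stream.
    # buffer_size == len(dataset) degenerates to a full shuffle, while
    # buffer_size <= 1 cannot reorder anything, matching the rejected values above.
    rng = random.Random(seed)
    buf = []
    for item in iterable:
        buf.append(item)
        if len(buf) >= buffer_size:
            yield buf.pop(rng.randrange(len(buf)))
    while buf:
        yield buf.pop(rng.randrange(len(buf)))

print(list(buffer_shuffle(range(12), buffer_size=5)))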
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _grad_ops as G\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n\n\nclass LayerNormGradNet(nn.Cell):\n def __init__(self, begin_norm_axis, begin_params_axis):\n super(LayerNormGradNet, self).__init__()\n self.norm = G.LayerNormGrad(begin_norm_axis, begin_params_axis)\n\n def construct(self, dy, x, var, mean, gamma):\n return self.norm(dy, x, var, mean, gamma)\n\n\ndef LayerNormGradReference(x, dy, gamma, epsilon, begin_norm_axis, begin_params_axis):\n begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape)\n begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape)\n\n norm_axis = [i for i in range(begin_norm_axis, len(x.shape))]\n param_axis = [i for i in range(0, begin_params_axis)]\n num = 1\n for i in range(begin_norm_axis, len(x.shape)):\n num *= x.shape[i]\n\n mean = np.mean(x, axis=tuple(norm_axis), keepdims=True)\n var = np.var(x, axis=tuple(norm_axis), keepdims=True)\n\n gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))\n dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean), axis=tuple(param_axis), keepdims=True)\n db = np.sum(dy, axis=tuple(param_axis), keepdims=True)\n\n sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis),\n keepdims=True)\n sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True)\n sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True)\n\n dx1 = dy * gamma * np.power(var + epsilon, -0.5)\n dx2 = sum1 * 2.0 / num * (x - mean)\n dx3 = ((-1.0) * np.power(var + epsilon, -0.5) * sum2 + (1.0 / num) * sum1 * sum3) * (1.0 / num)\n dx = dx1 + dx2 + dx3\n return dx, dg, db, mean, var\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad0():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(4096, 3072).astype(np.float32)\n dy_np = np.random.randn(4096, 3072).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(db_ms.asnumpy(), db_np, 
rtol=1e-6, atol=1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad1():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(640, 768).astype(np.float32)\n dy_np = np.random.randn(640, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad2():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 128, 768).astype(np.float32)\n dy_np = np.random.randn(32, 128, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad3():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad4():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = 
Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgrad5():\n begin_norm_axis = 2\n begin_params_axis = 1\n x_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n dy_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n epsilon = 10e-12\n dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n dy_ms = Tensor(dy_np)\n x_ms = Tensor(x_np)\n var_ms = Tensor(var_np)\n mean_ms = Tensor(mean_np)\n gamma_ms = Tensor(gamma_np)\n\n net = LayerNormGradNet(begin_norm_axis, begin_params_axis)\n dx_ms, dg_ms, db_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms)\n assert np.allclose(dx_ms.asnumpy(), dx_np, rtol=1e-6, atol=1e-6)\n assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3)\n assert np.allclose(dg_ms.asnumpy(), dg_np, rtol=1e-6, atol=1e-3)\n",
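LayerNormGradReference above is an analytic derivation; when tolerances such as atol=1e-3 start to look suspicious, it helps to cross-check dx against brute-force finite differences of a forward layer norm on a tiny case. A self-contained NumPy probe of that kind (generic gradient checking, not part of the MindSpore test):

import numpy as np

def layer_norm(x, gamma, beta, norm_axis=-1, eps=1e-12):
    # Plain forward layer norm used only as a differentiable reference.
    mean = x.mean(axis=norm_axis, keepdims=True)
    var = x.var(axis=norm_axis, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 4))
dy = rng.standard_normal((3, 4))
gamma = rng.standard_normal(4)
beta = rng.standard_normal(4)

def loss(x_):
    # Scalar surrogate whose gradient w.r.t. x is exactly the dx a LayerNormGrad produces.
    return float(np.sum(dy * layer_norm(x_, gamma, beta)))

eps = 1e-6
num_grad = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += eps
        xm[i, j] -= eps
        num_grad[i, j] = (loss(xp) - loss(xm)) / (2 * eps)
print(num_grad)  # should agree with an analytic dx such as the one from LayerNormGradReference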
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n\n\nclass LayerNormNet(nn.Cell):\n def __init__(self, begin_norm_axis, begin_params_axis):\n super(LayerNormNet, self).__init__()\n self.norm = P.LayerNorm(begin_norm_axis, begin_params_axis)\n\n def construct(self, x, gamma, beta):\n return self.norm(x, gamma, beta)\n\n\ndef LayerNormReference(begin_norm_axis, begin_params_axis, x, gamma, beta):\n begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape)\n begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape)\n\n axis = [i for i in range(begin_norm_axis, len(x.shape))]\n mean = np.mean(x, axis=tuple(axis), keepdims=True)\n var = np.var(x, axis=tuple(axis), keepdims=True)\n\n gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))\n beta = beta.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))\n y = np.subtract(x, mean) / np.sqrt(var + 1e-12) * gamma + beta\n return y, mean, var\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm0():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(4096, 3072).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n\n assert np.allclose(y_ms.asnumpy(), y_np, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, atol=1e-4)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm1():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(640, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert 
np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm3d_1():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 128, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm3d_2():\n begin_norm_axis = -1\n begin_params_axis = 1\n x_np = np.random.randn(32, 128, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm2d_2():\n begin_norm_axis = -1\n begin_params_axis = 1\n x_np = np.random.randn(64, 32).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm2d_3():\n begin_norm_axis = -1\n begin_params_axis = 1\n x_np = np.random.randn(128, 128).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n\n\[email 
protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_layernorm2d_4():\n begin_norm_axis = 2\n begin_params_axis = 1\n np.random.seed(42)\n x_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)\n\n x_ms = Tensor(x_np)\n gamma_ms = Tensor(gamma_np)\n beta_ms = Tensor(beta_np)\n net = LayerNormNet(begin_norm_axis, begin_params_axis)\n y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)\n assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)\n assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test cosine_similarity\"\"\"\nimport pytest\nimport numpy as np\nfrom sklearn.metrics import pairwise\nfrom mindspore.nn.metrics import CosineSimilarity\n\n\ndef test_cosine_similarity():\n \"\"\"test_cosine_similarity\"\"\"\n test_data = np.array([[5, 8, 3, 2], [5, 8, 3, 2], [4, 2, 3, 4]])\n metric = CosineSimilarity()\n metric.clear()\n metric.update(test_data)\n square_matrix = metric.eval()\n\n assert np.allclose(square_matrix, np.array([[0, 1, 0.78229315], [1, 0, 0.78229315], [0.78229315, 0.78229315, 0]]))\n\n\ndef test_cosine_similarity_compare():\n \"\"\"test_cosine_similarity_compare\"\"\"\n test_data = np.array([[5, 8, 3, 2], [5, 8, 3, 2], [4, 2, 3, 4]])\n metric = CosineSimilarity(similarity='cosine', reduction='none', zero_diagonal=False)\n metric.clear()\n metric.update(test_data)\n ms_square_matrix = metric.eval()\n\n def sklearn_cosine_similarity(test_data, similarity, reduction):\n \"\"\"sklearn_cosine_similarity\"\"\"\n metric_func = {'cosine': pairwise.cosine_similarity,\n 'dot': pairwise.linear_kernel}[similarity]\n\n square_matrix = metric_func(test_data, test_data)\n if reduction == 'mean':\n return square_matrix.mean(axis=-1)\n if reduction == 'sum':\n return square_matrix.sum(axis=-1)\n return square_matrix\n\n sk_square_matrix = sklearn_cosine_similarity(test_data, similarity='cosine', reduction='none')\n\n assert np.allclose(sk_square_matrix, ms_square_matrix)\n\n\ndef test_cosine_similarity_init1():\n \"\"\"test_cosine_similarity_init1\"\"\"\n with pytest.raises(ValueError):\n CosineSimilarity(similarity=\"4\")\n\n\ndef test_cosine_similarity_init2():\n \"\"\"test_cosine_similarity_init2\"\"\"\n with pytest.raises(TypeError):\n CosineSimilarity(similarity=4)\n\n\ndef test_cosine_similarity_init3():\n \"\"\"test_cosine_similarity_init3\"\"\"\n with pytest.raises(TypeError):\n CosineSimilarity(reduction=2)\n\n\ndef test_cosine_similarity_init4():\n \"\"\"test_cosine_similarity_init4\"\"\"\n with pytest.raises(ValueError):\n CosineSimilarity(reduction=\"1\")\n\n\n\ndef test_cosine_similarity_init5():\n \"\"\"test_cosine_similarity_init5\"\"\"\n with pytest.raises(TypeError):\n CosineSimilarity(zero_diagonal=3)\n\n\ndef test_cosine_similarity_runtime():\n \"\"\"test_cosine_similarity_runtime\"\"\"\n metric = CosineSimilarity()\n metric.clear()\n\n with pytest.raises(RuntimeError):\n metric.eval()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Softplus Bijector\"\"\"\nimport numpy as np\nfrom mindspore.ops import operations as P\nfrom mindspore.nn.layer.activation import LogSigmoid\nfrom ..distribution._utils.custom_ops import exp_generic, log_generic\nfrom .bijector import Bijector\n\n\nclass Softplus(Bijector):\n r\"\"\"\n Softplus Bijector.\n This Bijector performs the operation:\n\n .. math::\n Y = \\frac{\\log(1 + e ^ {kX})}{k}\n\n where k is the sharpness factor.\n\n Args:\n sharpness (float, list, numpy.ndarray, Tensor): The scale factor. Default: 1.0.\n name (str): The name of the Bijector. Default: 'Softplus'.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Note:\n The dtype of `sharpness` must be float.\n\n Raises:\n TypeError: When the dtype of the sharpness is not float.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import mindspore.nn.probability.bijector as msb\n >>> from mindspore import Tensor\n >>>\n >>> # To initialize a Softplus bijector of sharpness 2.0.\n >>> softplus = msb.Softplus(2.0)\n >>> # To use a ScalarAffine bijector in a network.\n >>> value = Tensor([1, 2, 3], dtype=mindspore.float32)\n >>> ans1 = softplus.forward(value)\n >>> print(ans1.shape)\n (3,)\n >>> ans2 = softplus.inverse(value)\n >>> print(ans2.shape)\n (3,)\n >>> ans3 = softplus.forward_log_jacobian(value)\n >>> print(ans3.shape)\n (3,)\n >>> ans4 = softplus.inverse_log_jacobian(value)\n >>> print(ans4.shape)\n (3,)\n \"\"\"\n\n def __init__(self,\n sharpness=1.0,\n name='Softplus'):\n \"\"\"\n Constructor of Softplus Bijector.\n \"\"\"\n param = dict(locals())\n param['param_dict'] = {'sharpness': sharpness}\n super(Softplus, self).__init__(name=name, dtype=None, param=param)\n self._sharpness = self._add_parameter(sharpness, 'sharpness')\n\n self.exp = exp_generic\n self.log = log_generic\n self.expm1 = P.Expm1()\n self.abs = P.Abs()\n self.dtypeop = P.DType()\n self.cast = P.Cast()\n self.fill = P.Fill()\n self.greater = P.Greater()\n self.less = P.Less()\n self.log_sigmoid = LogSigmoid()\n self.logicalor = P.LogicalOr()\n self.select = P.Select()\n self.shape = P.Shape()\n self.sigmoid = P.Sigmoid()\n self.softplus = self._softplus\n self.inverse_softplus = self._inverse_softplus\n\n self.threshold = np.log(np.finfo(np.float32).eps) + 1\n self.tiny = np.exp(self.threshold)\n\n def _softplus(self, x):\n too_small = self.less(x, self.threshold)\n too_large = self.greater(x, -self.threshold)\n too_small_value = self.exp(x)\n too_large_value = x\n ones = self.fill(self.dtypeop(x), self.shape(x), 1.0)\n too_small_or_too_large = self.logicalor(too_small, too_large)\n x = self.select(too_small_or_too_large, ones, x)\n y = self.log(self.exp(x) + 1.0)\n return self.select(too_small, too_small_value, self.select(too_large, too_large_value, y))\n\n def _inverse_softplus(self, x):\n r\"\"\"\n .. 
math::\n            f(x) = \\log(1 + e^{x})\n            f^{-1}(y) = \\log(e^{y} - 1)\n        \"\"\"\n        too_small = self.less(x, self.tiny)\n        too_large = self.greater(x, -self.threshold)\n        too_small_value = self.log(x)\n        too_large_value = x\n        ones = self.fill(self.dtypeop(x), self.shape(x), 1.0)\n        too_small_or_too_large = self.logicalor(too_small, too_large)\n        x = self.select(too_small_or_too_large, ones, x)\n        y = x + self.log(self.abs(self.expm1(-x)))\n        return self.select(too_small, too_small_value, self.select(too_large, too_large_value, y))\n\n    @property\n    def sharpness(self):\n        return self._sharpness\n\n    def extend_repr(self):\n        if self.is_scalar_batch:\n            str_info = f'sharpness = {self.sharpness}'\n        else:\n            str_info = f'batch_shape = {self.batch_shape}'\n        return str_info\n\n    def _forward(self, x):\n        x = self._check_value_dtype(x)\n        sharpness_local = self.cast_param_by_value(x, self.sharpness)\n        scaled_value = sharpness_local * x\n        forward_v = self.softplus(scaled_value) / sharpness_local\n        return forward_v\n\n    def _inverse(self, y):\n        r\"\"\"\n        .. math::\n            f(x) = \\frac{\\log(1 + e^{kx})}{k}\n            f^{-1}(y) = \\frac{\\log(e^{ky} - 1)}{k}\n        \"\"\"\n        y = self._check_value_dtype(y)\n        sharpness_local = self.cast_param_by_value(y, self.sharpness)\n        scaled_value = sharpness_local * y\n        inverse_v = self.inverse_softplus(scaled_value) / sharpness_local\n        return inverse_v\n\n    def _forward_log_jacobian(self, x):\n        r\"\"\"\n        .. math::\n            f(x) = \\log(1 + e^{kx}) / k\n            f'(x) = \\frac{e^{kx}}{1 + e^{kx}}\n            \\log(f'(x)) = kx - \\log(1 + e^{kx}) = kx - f(kx)\n        \"\"\"\n        x = self._check_value_dtype(x)\n        sharpness_local = self.cast_param_by_value(x, self.sharpness)\n        scaled_value = sharpness_local * x\n        forward_log_j = self.log_sigmoid(scaled_value)\n        return forward_log_j\n\n    def _inverse_log_jacobian(self, y):\n        r\"\"\"\n        .. math::\n            f(y) = \\frac{\\log(e^{ky} - 1)}{k}\n            f'(y) = \\frac{e^{ky}}{e^{ky} - 1}\n            \\log(f'(y)) = ky - \\log(e^{ky} - 1) = ky - f(ky)\n        \"\"\"\n        y = self._check_value_dtype(y)\n        sharpness_local = self.cast_param_by_value(y, self.sharpness)\n        scaled_value = sharpness_local * y\n        inverse_log_j = scaled_value - self.inverse_softplus(scaled_value)\n        return inverse_log_j\n",
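Stripped of the select/fill guards that protect the overflow and underflow regimes, the Softplus bijector above reduces to a handful of one-liners. A pure-NumPy restatement of the same math with the sharpness k folded in the same way (a sketch of the formulas, not the MindSpore implementation):

import numpy as np

def softplus(z):
    # Numerically stable log(1 + exp(z)).
    return np.logaddexp(0.0, z)

def inverse_softplus(z):
    # Inverse of softplus: log(exp(z) - 1), rewritten as z + log(1 - exp(-z)) for stability.
    return z + np.log(-np.expm1(-z))

k = 2.0
x = np.array([1.0, 2.0, 3.0])

forward = softplus(k * x) / k                               # y = log(1 + e^{kx}) / k
inverse = inverse_softplus(k * forward) / k                 # recovers x
fwd_log_jac = -softplus(-k * x)                             # log sigmoid(kx) = log |dy/dx|
inv_log_jac = k * forward - inverse_softplus(k * forward)   # log |dx/dy| evaluated at y = forward

print(forward, inverse, fwd_log_jac, inv_log_jac)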
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_operator \"\"\"\nimport numpy as np\n\nfrom mindspore import Tensor, Model, context\nfrom mindspore.nn import Cell\nfrom mindspore.nn import ReLU\nfrom mindspore.ops import operations as P\nfrom ...ut_filter import non_graph_engine\n\n\nclass arithmetic_Net(Cell):\n \"\"\" arithmetic_Net definition \"\"\"\n\n def __init__(self, symbol, loop_count=(1, 3)):\n super().__init__()\n self.symbol = symbol\n self.loop_count = loop_count\n self.relu = ReLU()\n\n def construct(self, x):\n a, b = self.loop_count\n y = self.symbol\n if y == 1:\n a += b\n for _ in (b, a):\n x = self.relu(x)\n elif y == 2:\n b -= a\n for _ in (a, b):\n x = self.relu(x)\n elif y == 3:\n z = a + b\n for _ in (b, z):\n x = self.relu(x)\n elif y == 4:\n z = b - a\n for _ in (z, b):\n x = self.relu(x)\n elif y == 5:\n z = a * b\n for _ in (a, z):\n x = self.relu(x)\n elif y == 6:\n z = b / a\n for _ in (a, z):\n x = self.relu(x)\n elif y == 7:\n z = b % a + 1\n for _ in (a, z):\n x = self.relu(x)\n else:\n if not a:\n x = self.relu(x)\n return x\n\n\nclass logical_Net(Cell):\n \"\"\" logical_Net definition \"\"\"\n\n def __init__(self, symbol, loop_count=(1, 3)):\n super().__init__()\n self.symbol = symbol\n self.loop_count = loop_count\n self.fla = P.Flatten()\n self.relu = ReLU()\n\n def construct(self, x):\n a, b = self.loop_count\n y = self.symbol\n if y == 1:\n if b and a:\n x = self.relu(x)\n else:\n x = self.fla(x)\n else:\n if b or a:\n x = self.relu(x)\n else:\n x = self.fla(x)\n return x\n\n\ndef arithmetic_operator_base(symbol):\n \"\"\" arithmetic_operator_base \"\"\"\n input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)\n input_me = Tensor(input_np)\n logical_operator = {\"++\": 1, \"--\": 2, \"+\": 3, \"-\": 4, \"*\": 5, \"/\": 6, \"%\": 7, \"not\": 8}\n x = logical_operator[symbol]\n net = arithmetic_Net(x)\n context.set_context(mode=context.GRAPH_MODE)\n model = Model(net)\n model.predict(input_me)\n\n\ndef logical_operator_base(symbol):\n \"\"\" logical_operator_base \"\"\"\n input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)\n input_me = Tensor(input_np)\n logical_operator = {\"and\": 1, \"or\": 2}\n x = logical_operator[symbol]\n net = logical_Net(x)\n context.set_context(mode=context.GRAPH_MODE)\n model = Model(net)\n model.predict(input_me)\n\n\n@non_graph_engine\ndef test_ME_arithmetic_operator_0080():\n \"\"\" test_ME_arithmetic_operator_0080 \"\"\"\n arithmetic_operator_base('not')\n\n\n@non_graph_engine\ndef test_ME_arithmetic_operator_0070():\n \"\"\" test_ME_arithmetic_operator_0070 \"\"\"\n logical_operator_base('and')\n\n\n@non_graph_engine\ndef test_ME_logical_operator_0020():\n \"\"\" test_ME_logical_operator_0020 \"\"\"\n logical_operator_base('or')\n\n\ndef test_ops():\n class OpsNet(Cell):\n \"\"\" OpsNet definition \"\"\"\n\n def __init__(self, x, y):\n super(OpsNet, self).__init__()\n self.x 
= x\n self.y = y\n self.int = 4\n self.float = 3.2\n self.str_a = \"hello\"\n self.str_b = \"world\"\n\n def construct(self, x, y):\n h = x // y\n m = x ** y\n n = x % y\n r = self.x // self.y\n s = self.x ** self.y\n t = self.x % self.y\n p = h + m + n\n q = r + s + t\n ret_pow = p ** q + q ** p\n ret_mod = p % q + q % p\n ret_floor = p // q + q // p\n ret = ret_pow + ret_mod + ret_floor\n if self.int > self.float:\n if [1, 2, 3] is not None:\n if self.str_a + self.str_b == \"helloworld\":\n if q == 86:\n print(\"hello world\")\n return ret\n return x\n\n net = OpsNet(9, 2)\n x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32))\n y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32))\n context.set_context(mode=context.GRAPH_MODE)\n net(x, y)\n\n\ndef test_in_dict():\n class InDictNet(Cell):\n \"\"\" InDictNet definition \"\"\"\n\n def __init__(self, key_in, key_not_in):\n super(InDictNet, self).__init__()\n self.key_in = key_in\n self.key_not_in = key_not_in\n\n def construct(self, x, y, z):\n d = {\"a\": x, \"b\": y}\n ret_in = 1\n ret_not_in = 2\n if self.key_in in d:\n ret_in = d[self.key_in]\n if self.key_not_in not in d:\n ret_not_in = z\n ret = ret_in + ret_not_in\n return ret\n\n net = InDictNet(\"a\", \"c\")\n x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32))\n y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32))\n z = Tensor(np.random.randint(low=20, high=30, size=(2, 3, 4), dtype=np.int32))\n context.set_context(mode=context.GRAPH_MODE)\n net(x, y, z)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport mindspore.context as context\r\nimport mindspore.nn as nn\r\nfrom mindspore import Tensor\r\n\r\n\r\nclass NetSoftmaxCrossEntropyWithLogits(nn.Cell):\r\n def __init__(self):\r\n super(NetSoftmaxCrossEntropyWithLogits, self).__init__()\r\n self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)\r\n\r\n def construct(self, logits, labels):\r\n return self.loss(logits, labels)\r\n\r\n\r\[email protected]\r\[email protected]_x86_cpu\r\[email protected]_onecard\r\ndef test_softmax_cross_entropy_with_logits():\r\n logits = Tensor(np.array([[1, 1, 10],\r\n [1, 10, 1],\r\n [10, 1, 1]]).astype(np.float32))\r\n labels = Tensor(np.array([[0, 0, 1],\r\n [0, 1, 0],\r\n [1, 0, 0]]).astype(np.float32))\r\n expect_loss = [0.00024673, 0.00024673, 0.00024673]\r\n\r\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\r\n softmax_cross_entropy_with_logits = NetSoftmaxCrossEntropyWithLogits()\r\n output = softmax_cross_entropy_with_logits(logits, labels)\r\n error0 = 1.0e-6\r\n diff0 = output.asnumpy() - expect_loss\r\n assert np.all(abs(diff0) < error0)\r\n\r\ntest_softmax_cross_entropy_with_logits()\r\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test image gradients \"\"\"\nimport numpy as np\nimport pytest\n\nimport mindspore.common.dtype as mstype\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import _executor\nfrom mindspore.common.api import ms_function\n\ncontext.set_context(device_target=\"Ascend\")\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.image_gradients = nn.ImageGradients()\n\n @ms_function\n def construct(self, x):\n return self.image_gradients(x)\n\n\ndef test_compile():\n # input shape 1 x 1 x 2 x 2\n image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32)\n net = Net()\n _executor.compile(net, image)\n\n\ndef test_compile_multi_channel():\n # input shape 4 x 2 x 2 x 2\n dtype = mstype.int32\n image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[3, 5], [7, 9]], [[11, 13], [15, 17]]],\n [[[5, 10], [15, 20]], [[25, 30], [35, 40]]],\n [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype)\n net = Net()\n _executor.compile(net, image)\n\n\ndef test_invalid_5d_input():\n dtype = mstype.float32\n image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype)\n net = Net()\n with pytest.raises(ValueError):\n _executor.compile(net, image)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Eval\"\"\"\nimport os\nimport time\nimport argparse\nimport datetime\nimport glob\nimport numpy as np\nimport mindspore.nn as nn\n\nfrom mindspore import Tensor, context\nfrom mindspore.context import ParallelMode\nfrom mindspore.communication.management import init, get_rank, get_group_size, release\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore.common import dtype as mstype\n\nfrom src.utils.logging import get_logger\nfrom src.utils.auto_mixed_precision import auto_mixed_precision\nfrom src.utils.var_init import load_pretrain_model\nfrom src.image_classification import get_network\nfrom src.dataset import classification_dataset\nfrom src.config import config\n\n\nclass ParameterReduce(nn.Cell):\n \"\"\"ParameterReduce\"\"\"\n def __init__(self):\n super(ParameterReduce, self).__init__()\n self.cast = P.Cast()\n self.reduce = P.AllReduce()\n\n def construct(self, x):\n one = self.cast(F.scalar_to_array(1.0), mstype.float32)\n out = x * one\n ret = self.reduce(out)\n return ret\n\n\ndef parse_args(cloud_args=None):\n \"\"\"parse_args\"\"\"\n parser = argparse.ArgumentParser('mindspore classification test')\n parser.add_argument('--platform', type=str, default='Ascend', choices=('Ascend', 'GPU'), help='run platform')\n\n # dataset related\n parser.add_argument('--data_dir', type=str, default='/opt/npu/datasets/classification/val', help='eval data dir')\n parser.add_argument('--per_batch_size', default=32, type=int, help='batch size for per npu')\n # network related\n parser.add_argument('--graph_ckpt', type=int, default=1, help='graph ckpt or feed ckpt')\n parser.add_argument('--pretrained', default='', type=str, help='fully path of pretrained model to load. 
'\n 'If it is a direction, it will test all ckpt')\n\n # logging related\n parser.add_argument('--log_path', type=str, default='outputs/', help='path to save log')\n parser.add_argument('--is_distributed', type=int, default=0, help='if multi device')\n\n # roma obs\n parser.add_argument('--train_url', type=str, default=\"\", help='train url')\n\n args, _ = parser.parse_known_args()\n args = merge_args(args, cloud_args)\n args.image_size = config.image_size\n args.num_classes = config.num_classes\n args.rank = config.rank\n args.group_size = config.group_size\n\n args.image_size = list(map(int, args.image_size.split(',')))\n\n # init distributed\n if args.is_distributed:\n if args.platform == \"Ascend\":\n init()\n elif args.platform == \"GPU\":\n init(\"nccl\")\n args.rank = get_rank()\n args.group_size = get_group_size()\n else:\n args.rank = 0\n args.group_size = 1\n\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n\n args.logger = get_logger(args.outputs_dir, args.rank)\n return args\n\n\ndef get_top5_acc(top5_arg, gt_class):\n sub_count = 0\n for top5, gt in zip(top5_arg, gt_class):\n if gt in top5:\n sub_count += 1\n return sub_count\n\ndef merge_args(args, cloud_args):\n \"\"\"merge_args\"\"\"\n args_dict = vars(args)\n if isinstance(cloud_args, dict):\n for key in cloud_args.keys():\n val = cloud_args[key]\n if key in args_dict and val:\n arg_type = type(args_dict[key])\n if arg_type is not type(None):\n val = arg_type(val)\n args_dict[key] = val\n return args\n\n\ndef get_result(args, model, top1_correct, top5_correct, img_tot):\n \"\"\"calculate top1 and top5 value.\"\"\"\n results = [[top1_correct], [top5_correct], [img_tot]]\n args.logger.info('before results={}'.format(results))\n if args.is_distributed:\n model_md5 = model.replace('/', '')\n tmp_dir = '/cache'\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n top1_correct_npy = '/cache/top1_rank_{}_{}.npy'.format(args.rank, model_md5)\n top5_correct_npy = '/cache/top5_rank_{}_{}.npy'.format(args.rank, model_md5)\n img_tot_npy = '/cache/img_tot_rank_{}_{}.npy'.format(args.rank, model_md5)\n np.save(top1_correct_npy, top1_correct)\n np.save(top5_correct_npy, top5_correct)\n np.save(img_tot_npy, img_tot)\n while True:\n rank_ok = True\n for other_rank in range(args.group_size):\n top1_correct_npy = '/cache/top1_rank_{}_{}.npy'.format(other_rank, model_md5)\n top5_correct_npy = '/cache/top5_rank_{}_{}.npy'.format(other_rank, model_md5)\n img_tot_npy = '/cache/img_tot_rank_{}_{}.npy'.format(other_rank, model_md5)\n if not os.path.exists(top1_correct_npy) or not os.path.exists(top5_correct_npy) or \\\n not os.path.exists(img_tot_npy):\n rank_ok = False\n if rank_ok:\n break\n\n top1_correct_all = 0\n top5_correct_all = 0\n img_tot_all = 0\n for other_rank in range(args.group_size):\n top1_correct_npy = '/cache/top1_rank_{}_{}.npy'.format(other_rank, model_md5)\n top5_correct_npy = '/cache/top5_rank_{}_{}.npy'.format(other_rank, model_md5)\n img_tot_npy = '/cache/img_tot_rank_{}_{}.npy'.format(other_rank, model_md5)\n top1_correct_all += np.load(top1_correct_npy)\n top5_correct_all += np.load(top5_correct_npy)\n img_tot_all += np.load(img_tot_npy)\n results = [[top1_correct_all], [top5_correct_all], [img_tot_all]]\n results = np.array(results)\n else:\n results = np.array(results)\n\n args.logger.info('after results={}'.format(results))\n return results\n\n\ndef test(cloud_args=None):\n \"\"\"test\"\"\"\n args = parse_args(cloud_args)\n 
context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=True,\n device_target=args.platform, save_graphs=False)\n if os.getenv('DEVICE_ID', \"not_set\").isdigit():\n context.set_context(device_id=int(os.getenv('DEVICE_ID')))\n\n # init distributed\n if args.is_distributed:\n parallel_mode = ParallelMode.DATA_PARALLEL\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=args.group_size,\n gradients_mean=True)\n\n args.logger.save_args(args)\n\n # network\n args.logger.important_info('start create network')\n if os.path.isdir(args.pretrained):\n models = list(glob.glob(os.path.join(args.pretrained, '*.ckpt')))\n print(models)\n if args.graph_ckpt:\n f = lambda x: -1 * int(os.path.splitext(os.path.split(x)[-1])[0].split('-')[-1].split('_')[0])\n else:\n f = lambda x: -1 * int(os.path.splitext(os.path.split(x)[-1])[0].split('_')[-1])\n args.models = sorted(models, key=f)\n else:\n args.models = [args.pretrained,]\n\n for model in args.models:\n de_dataset = classification_dataset(args.data_dir, image_size=args.image_size,\n per_batch_size=args.per_batch_size,\n max_epoch=1, rank=args.rank, group_size=args.group_size,\n mode='eval')\n eval_dataloader = de_dataset.create_tuple_iterator(output_numpy=True, num_epochs=1)\n network = get_network(num_classes=args.num_classes, platform=args.platform)\n\n load_pretrain_model(model, network, args)\n\n img_tot = 0\n top1_correct = 0\n top5_correct = 0\n if args.platform == \"Ascend\":\n network.to_float(mstype.float16)\n else:\n auto_mixed_precision(network)\n network.set_train(False)\n t_end = time.time()\n it = 0\n for data, gt_classes in eval_dataloader:\n output = network(Tensor(data, mstype.float32))\n output = output.asnumpy()\n\n top1_output = np.argmax(output, (-1))\n top5_output = np.argsort(output)[:, -5:]\n\n t1_correct = np.equal(top1_output, gt_classes).sum()\n top1_correct += t1_correct\n top5_correct += get_top5_acc(top5_output, gt_classes)\n img_tot += args.per_batch_size\n\n if args.rank == 0 and it == 0:\n t_end = time.time()\n it = 1\n if args.rank == 0:\n time_used = time.time() - t_end\n fps = (img_tot - args.per_batch_size) * args.group_size / time_used\n args.logger.info('Inference Performance: {:.2f} img/sec'.format(fps))\n results = get_result(args, model, top1_correct, top5_correct, img_tot)\n top1_correct = results[0, 0]\n top5_correct = results[1, 0]\n img_tot = results[2, 0]\n acc1 = 100.0 * top1_correct / img_tot\n acc5 = 100.0 * top5_correct / img_tot\n args.logger.info('after allreduce eval: top1_correct={}, tot={},'\n 'acc={:.2f}%(TOP1)'.format(top1_correct, img_tot, acc1))\n args.logger.info('after allreduce eval: top5_correct={}, tot={},'\n 'acc={:.2f}%(TOP5)'.format(top5_correct, img_tot, acc5))\n if args.is_distributed:\n release()\n\n\nif __name__ == \"__main__\":\n test()\n",
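The eval loop in the entry above counts top-1 hits with np.argmax and top-5 hits with np.argsort plus get_top5_acc. As a reading aid, here is a minimal standalone numpy sketch of that counting; the helper name topk_correct and the random inputs are illustrative, not part of the repository.

```python
# Minimal numpy sketch (not from the repo) of the top-1/top-5 counting done
# in the eval loop above: top-1 via argmax, top-5 via the five highest-scoring
# class indices per row.
import numpy as np

def topk_correct(logits, labels):
    top1 = np.argmax(logits, axis=-1)              # best class per sample
    top5 = np.argsort(logits, axis=-1)[:, -5:]     # five best classes per sample
    top1_correct = int(np.equal(top1, labels).sum())
    top5_correct = sum(int(gt in row) for row, gt in zip(top5, labels))
    return top1_correct, top5_correct

logits = np.random.rand(8, 10).astype(np.float32)  # batch of 8, 10 classes
labels = np.random.randint(0, 10, size=8)
print(topk_correct(logits, labels))
```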
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import operations as P\n\n\nclass LogSoftmax(nn.Cell):\n def __init__(self, axis=1):\n super(LogSoftmax, self).__init__()\n self.logsoftmax = P.LogSoftmax(axis)\n\n def construct(self, x):\n return self.logsoftmax(x)\n\n\nclass Grad(nn.Cell):\n def __init__(self, network):\n super(Grad, self).__init__()\n self.grad = C.GradOperation(get_all=True, sens_param=True)\n self.network = network\n\n def construct(self, input_data, sens):\n gout = self.grad(self.network)(input_data, sens)\n return gout\n\n\ndef test_logsoftmax():\n x = np.array([[-0.08082921, -0.13706027, -0.4711177, -0.05606057],\n [-0.46082982, 1.1761844, -1.016654, -1.743829],\n [-1.5062045, 0.6910976, 0.4839723, 1.1502692]]).astype(np.float32)\n expect = np.array([[-1.2939762, -1.3502073, -1.6842647, -1.2692076],\n [-1.9445671, -0.3075528, -2.5003912, -3.2275662],\n [-3.452001, -1.2546989, -1.4618242, -0.79552734]]).astype(np.float32)\n logSoftmax = LogSoftmax()\n output = logSoftmax(Tensor(x))\n assert np.allclose(output.asnumpy(), expect)\n\n\ndef test_logsoftmaxgrad():\n x = np.array([[-0.47705367, 0.48267725, -1.0453935, 1.574488, 0.20362134, 0.4435456, -0.23984082, -0.43684655,\n -0.7725506, 1.4481013],\n [1.1012247, 1.7069651, 0.55062026, 0.3361901, -1.1082426, -0.5001939, -0.3255393, -0.7972024,\n -0.27965206, -0.702805],\n [0.19450496, 0.87596166, 0.6467245, -1.044987, 0.5248943, -2.6166635, 1.6719198, 0.06600758,\n -0.4099178, 1.1861311],\n [1.1305193, -1.97308, 2.1047623, -1.5105937, 0.93052036, 1.2467804, 0.5310002, 0.7084912, -1.3681422,\n -0.9686862],\n [1.871408, 0.14219497, -0.41050452, -0.749807, 1.4900619, -1.8172716, -0.73839617, 0.17565694,\n -0.4553867, -1.5423119]]).astype(np.float32)\n dy = np.array([[1.516363, -0.15196544, 0.598733, 0.64357865, 0.16265012, -1.3521105, 0.22621834, 0.7168259,\n -0.6709239, 0.79757756],\n [-0.32457778, 1.2831115, 1.1211495, -0.02665559, 1.9170904, -1.3397789, 1.4124829, -1.4298155,\n 0.758519, -0.25322974],\n [-0.24226122, -1.2555921, 0.6492511, -0.34847677, 0.19916506, 0.628554, -0.19658111, 0.44939864,\n -0.11677749, -1.2131723],\n [0.24267715, 0.28106326, 1.1075432, -0.29006946, 0.31335673, 0.8833154, 0.13152207, 1.5482179,\n 0.29770762, -0.16246222],\n [0.02145994, 0.80424, -0.95061, 1.5875458, -0.00308682, 0.17964548, 0.49912593, 0.46977136,\n 0.2151897, 0.30908248]]).astype(np.float32)\n expect = np.array([[1.4219905, -0.39837134, 0.5452743, -0.09062839, -0.02375537, -1.5890603, 0.10658137, 0.6185817,\n -0.7411523, 0.15054005],\n [-0.94926417, 0.13830578, 0.7609547, -0.31733334, 1.8485254, -1.4657221, 1.2625053, -1.523396,\n 0.601499, -0.35607445],\n [-0.14447737, -1.0622973, 0.80294746, -0.32016528, 
0.33523226, 0.63443416, 0.23186903,\n 0.53539133, -0.0633494, -0.9495847],\n [-0.36894822, 0.253609, -0.5127511, -0.33366728, -0.18740037, 0.19628316, -0.20430653, 1.1471655,\n 0.24743511, -0.23741922],\n [-1.2582518, 0.57718843, -1.0812542, 1.4944922, -0.8770549, 0.1476463, 0.40500447, 0.23499368,\n 0.09027944, 0.26695627]]).astype(np.float32)\n net = LogSoftmax()\n dx = Grad(net)(Tensor(x), Tensor(dy))\n assert np.allclose(dx[0].asnumpy(), expect)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_logsoftmax_gpu():\n context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=\"GPU\")\n test_logsoftmax()\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_logsoftmaxgrad_gpu():\n context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=\"GPU\")\n test_logsoftmaxgrad()\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_logsoftmax_asend():\n context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=\"Ascend\")\n test_logsoftmax()\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_logsoftmaxgrad_asend():\n context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=\"Ascend\")\n test_logsoftmaxgrad()\n",
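The hard-coded expectation arrays in test_logsoftmax and test_logsoftmaxgrad encode the usual log-softmax forward and backward formulas. A small numpy reference, assuming axis=1 as in the LogSoftmax cell above (the function names here are mine, chosen only for illustration):

```python
# Numpy reference (illustrative, not part of the test file) for the values the
# tests above check: log-softmax along axis=1 and its backward pass
#   dx = dy - softmax(x) * sum(dy, axis).
import numpy as np

def log_softmax(x, axis=1):
    x_max = x.max(axis=axis, keepdims=True)
    lse = np.log(np.exp(x - x_max).sum(axis=axis, keepdims=True)) + x_max
    return x - lse

def log_softmax_grad(x, dy, axis=1):
    softmax = np.exp(log_softmax(x, axis))
    return dy - softmax * dy.sum(axis=axis, keepdims=True)

x = np.random.randn(3, 4).astype(np.float32)
dy = np.random.randn(3, 4).astype(np.float32)
assert np.allclose(np.exp(log_softmax(x)).sum(axis=1), 1.0, atol=1e-5)
print(log_softmax_grad(x, dy))
```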
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nMobilenet model transform: torch => mindspore\n\"\"\"\nimport os\nimport argparse\nimport torch\nfrom mindspore.train.serialization import load_checkpoint, save_checkpoint\nfrom mindspore import Tensor\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('--ckpt_fn', type=str, default='/model_path/mobilenet_v2_key.ckpt',\n help='ckpt for user to get cell/module name')\nparser.add_argument('--pt_fn', type=str, default='/model_path/mobilenet_v2-b0353104.pth',\n help='checkpoint filename to convert')\nparser.add_argument('--out_ckpt_fn', type=str, default='/model_path/mobilenet_v2-b0353104.ckpt',\n help='convert output ckpt path')\n\nargs = parser.parse_args()\n\ndef load_model(model_path):\n \"\"\"\n Load model\n \"\"\"\n state_dict_ = torch.load(model_path, map_location=torch.device('cpu'))\n state_dict = {}\n\n # convert data_parallal to model\n for k in state_dict_:\n if k.find(\"num_batches_tracked\") != -1:\n continue\n elif k.startswith('module') and not k.startswith('module_list'):\n state_dict[k[7:]] = state_dict_[k]\n else:\n state_dict[k] = state_dict_[k]\n return state_dict\n\ndef load_model_ms(model_path):\n \"\"\"\n Load mindspore model\n \"\"\"\n state_dict_useless = ['global_step', 'learning_rate',\n 'beta1_power', 'beta2_power']\n if os.path.isfile(model_path):\n param_dict = load_checkpoint(model_path)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key in state_dict_useless or key.startswith('moments.') \\\n or key.startswith('moment1.') or key.startswith('moment2.'):\n continue\n elif key.startswith('centerface_network.'): #useless, since the start name is \"network.backbone.\"\n param_dict_new[key[19:]] = values\n else:\n param_dict_new[key] = values\n else:\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(model_path))\n exit(1)\n return param_dict_new\n\ndef name_map(ckpt):\n \"\"\"\n Name map\n \"\"\"\n out = {}\n for name in ckpt:\n # conv + bn\n pt_name = name\n\n pt_name = pt_name.replace('network.backbone.', '')\n # backbone\n pt_name = pt_name.replace('need_fp1', 'feature_1')\n pt_name = pt_name.replace('need_fp2', 'feature_2')\n pt_name = pt_name.replace('need_fp3', 'feature_4')\n pt_name = pt_name.replace('need_fp4', 'feature_6')\n pt_name = pt_name.replace('.features', '')\n pt_name = pt_name.replace('.moving_mean', '.running_mean')\n pt_name = pt_name.replace('.moving_variance', '.running_var')\n pt_name = pt_name.replace('.gamma', '.weight')\n pt_name = pt_name.replace('.beta', '.bias')\n # fpn\n pt_name = pt_name.replace('.up1', '.up_0')\n pt_name = pt_name.replace('.up2', '.up_1')\n pt_name = pt_name.replace('.up3', '.up_2')\n\n # heads\n pt_name = pt_name.replace('hm_head.0.', 'hm.')\n pt_name = pt_name.replace('wh_head.', 'wh.')\n pt_name = pt_name.replace('off_head.', 'hm_offset.')\n pt_name = 
pt_name.replace('kps_head.', 'landmarks.')\n\n pt_name = pt_name.replace('network.head.fc.', 'classifier.1.')\n\n out[pt_name] = name\n return out\n\ndef pt_to_ckpt(pt, ckpt, out_ckpt):\n \"\"\"\n Pt convert to ckpt file\n \"\"\"\n state_dict_torch = load_model(pt)\n state_dict_ms = load_model_ms(ckpt)\n name_relate = name_map(state_dict_ms)\n new_params_list = []\n\n for key in state_dict_torch:\n param_dict = {}\n parameter = state_dict_torch[key]\n parameter = parameter.numpy()\n\n param_dict['name'] = name_relate[key]\n param_dict['data'] = Tensor(parameter)\n new_params_list.append(param_dict)\n\n save_checkpoint(new_params_list, out_ckpt)\n return state_dict_ms\n\nif __name__ == \"__main__\":\n # beta <=> bias, gamma <=> weight\n pt_to_ckpt(args.pt_fn, args.ckpt_fn, args.out_ckpt_fn)\n",
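name_map() and pt_to_ckpt() in the entry above amount to a string-level rename of parameter keys followed by re-labelling the PyTorch tensors with the MindSpore names. A standalone sketch of that idea, using plain dicts of numpy arrays and only a subset of the substitutions so it needs neither torch nor mindspore; the example keys are invented:

```python
# Illustrative sketch of the renaming idea used by name_map()/pt_to_ckpt():
# MindSpore parameter names are mapped to their PyTorch counterparts by ordered
# string substitutions, then the PyTorch tensors are re-labelled with the
# MindSpore names.
import numpy as np

RENAMES = [(".moving_mean", ".running_mean"),
           (".moving_variance", ".running_var"),
           (".gamma", ".weight"),
           (".beta", ".bias")]

def ms_to_pt_name(ms_name):
    pt_name = ms_name.replace("network.backbone.", "")
    for old, new in RENAMES:
        pt_name = pt_name.replace(old, new)
    return pt_name

ms_keys = ["network.backbone.features.0.bn.gamma",
           "network.backbone.features.0.bn.moving_mean"]
pt_state = {"features.0.bn.weight": np.ones(8),        # invented example weights
            "features.0.bn.running_mean": np.zeros(8)}

converted = [{"name": ms, "data": pt_state[ms_to_pt_name(ms)]} for ms in ms_keys]
print([p["name"] for p in converted])
```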
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Base class of data loader.\"\"\"\nimport os\nimport collections\nimport numpy as np\n\nfrom mindspore.mindrecord import FileWriter\nfrom .schema import SCHEMA\n\n\nclass DataLoader:\n \"\"\"Data loader for dataset.\"\"\"\n _SCHEMA = SCHEMA\n\n def __init__(self, max_sen_len=66):\n self._examples = []\n self._max_sentence_len = max_sen_len\n\n def _load(self):\n raise NotImplementedError\n\n def padding(self, sen, padding_idx, dtype=np.int64):\n \"\"\"Padding <pad> to sentence.\"\"\"\n if sen.shape[0] > self._max_sentence_len:\n return None\n new_sen = np.array([padding_idx] * self._max_sentence_len,\n dtype=dtype)\n new_sen[:sen.shape[0]] = sen[:]\n return new_sen\n\n def write_to_mindrecord(self, path, shard_num=1, desc=\"\"):\n \"\"\"\n Write mindrecord file.\n\n Args:\n path (str): File path.\n shard_num (int): Shard num.\n desc (str): Description.\n \"\"\"\n if not os.path.isabs(path):\n path = os.path.abspath(path)\n\n writer = FileWriter(file_name=path, shard_num=shard_num)\n writer.add_schema(self._SCHEMA, desc)\n if not self._examples:\n self._load()\n\n writer.write_raw_data(self._examples)\n writer.commit()\n print(f\"| Wrote to {path}.\")\n\n def write_to_tfrecord(self, path, shard_num=1):\n \"\"\"\n Write to tfrecord.\n\n Args:\n path (str): Output file path.\n shard_num (int): Shard num.\n \"\"\"\n import tensorflow as tf\n if not os.path.isabs(path):\n path = os.path.abspath(path)\n output_files = []\n for i in range(shard_num):\n output_file = path + \"-%03d-of-%03d\" % (i + 1, shard_num)\n output_files.append(output_file)\n # create writers\n writers = []\n for output_file in output_files:\n writers.append(tf.io.TFRecordWriter(output_file))\n\n if not self._examples:\n self._load()\n\n # create feature\n features = collections.OrderedDict()\n for example in self._examples:\n for key in example:\n features[key] = tf.train.Feature(int64_list=tf.train.Int64List(value=example[key].tolist()))\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n for writer in writers:\n writer.write(tf_example.SerializeToString())\n for writer in writers:\n writer.close()\n for p in output_files:\n print(f\" | Write to {p}.\")\n\n def _add_example(self, example):\n self._examples.append(example)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Face attribute cross entropy.\"\"\"\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore import Tensor\nfrom mindspore.common import dtype as mstype\n\n\nclass CrossEntropyWithIgnoreIndex(nn.Cell):\n '''Cross Entropy With Ignore Index Loss.'''\n def __init__(self):\n super(CrossEntropyWithIgnoreIndex, self).__init__()\n self.onehot = P.OneHot()\n self.on_value = Tensor(1.0, dtype=mstype.float32)\n self.off_value = Tensor(0.0, dtype=mstype.float32)\n self.cast = P.Cast()\n self.ce = nn.SoftmaxCrossEntropyWithLogits()\n self.greater = P.Greater()\n self.maximum = P.Maximum()\n self.fill = P.Fill()\n self.sum = P.ReduceSum(keep_dims=False)\n self.dtype = P.DType()\n self.relu = P.ReLU()\n self.reshape = P.Reshape()\n self.const_one = Tensor(np.ones([1]), dtype=mstype.float32)\n self.const_eps = Tensor(0.00001, dtype=mstype.float32)\n\n def construct(self, x, label):\n '''Construct function.'''\n mask = self.reshape(label, (F.shape(label)[0], 1))\n mask = self.cast(mask, mstype.float32)\n mask = mask + self.const_eps\n mask = self.relu(mask)/mask\n x = x * mask\n one_hot_label = self.onehot(self.cast(label, mstype.int32), F.shape(x)[1], self.on_value, self.off_value)\n loss = self.ce(x, one_hot_label)\n positive = self.sum(self.cast(self.greater(loss, self.fill(self.dtype(loss), F.shape(loss), 0.0)),\n mstype.float32), 0)\n positive = self.maximum(positive, self.const_one)\n loss = self.sum(loss, 0) / positive\n return loss\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" auto mixed precision \"\"\"\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nfrom mindspore import Tensor\nfrom mindspore import amp\nfrom mindspore import nn\nfrom mindspore.communication.management import init\nfrom mindspore.context import ParallelMode\nfrom mindspore.train import Model\nfrom ....dataset_mock import MindData\n\n\ndef setup_module(module):\n _ = module\n context.set_context(mode=context.GRAPH_MODE)\n\n\nclass Net(nn.Cell):\n def __init__(self, in_features, out_features):\n super(Net, self).__init__()\n self.dense = nn.Dense(in_features, out_features)\n self.loss = nn.MSELoss()\n\n def construct(self, input_x, label):\n output = self.dense(input_x)\n loss = self.loss(output, label)\n return loss\n\n\nclass NetNoLoss(nn.Cell):\n def __init__(self, in_features, out_features):\n super(NetNoLoss, self).__init__()\n self.dense = nn.Dense(in_features, out_features)\n\n def construct(self, input_x):\n return self.dense(input_x)\n\n\ndef test_amp_o0():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = Net(16, 16)\n\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_network = amp.build_train_network(net, optimizer, level=\"O0\")\n _ = train_network(inputs, label)\n\n\ndef test_amp_o2():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = Net(16, 16)\n\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_network = amp.build_train_network(net, optimizer, level=\"O2\")\n _ = train_network(inputs, label)\n\n\ndef test_amp_o2_loss():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetNoLoss(16, 16)\n loss = nn.MSELoss()\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_network = amp.build_train_network(net, optimizer, loss, level=\"O2\")\n _ = train_network(inputs, label)\n\n\ndef test_amp_o0_loss():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetNoLoss(16, 16)\n loss = nn.MSELoss()\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_network = amp.build_train_network(net, optimizer, loss)\n _ = train_network(inputs, label)\n\n\nclass MindDataSet(MindData):\n def __init__(self, dataset_types, dataset_shapes):\n super(MindDataSet, self).__init__(size=2, batch_size=32,\n np_types=dataset_types,\n output_shapes=dataset_shapes,\n input_indexs=(0, 1))\n\n def __next__(self):\n if self._size < self._iter_num:\n raise StopIteration\n self._iter_num += 1\n lst = []\n for shape_, type_ in zip(self._output_shapes, self._np_types):\n 
lst.append(Tensor(np.ones(shape_).astype(type_)))\n return tuple(lst)\n\n\ndef test_compile_model_train_O0():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n\n dataset = MindDataSet(dataset_types, dataset_shapes)\n\n net = NetNoLoss(16, 16)\n loss = nn.MSELoss()\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={\"acc\"}, amp_level=\"O0\")\n model.train(2, dataset, dataset_sink_mode=False)\n with pytest.raises(ValueError):\n # not actual run, the metrics step will fail, check if compile ok.\n model.eval(dataset)\n\n\ndef test_compile_model_train_O2():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n\n dataset = MindDataSet(dataset_types, dataset_shapes)\n\n net = NetNoLoss(16, 16)\n loss = nn.MSELoss()\n optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={\"acc\"}, amp_level=\"O2\")\n model.train(2, dataset, dataset_sink_mode=False)\n with pytest.raises(ValueError):\n # not actual run, the metrics step will fail, check if compile ok.\n model.eval(dataset)\n\n\ndef test_compile_model_train_O2_parallel():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n context.set_auto_parallel_context(\n global_rank=0, device_num=8,\n gradients_mean=True, parameter_broadcast=True,\n parallel_mode=ParallelMode.DATA_PARALLEL)\n\n dataset = MindDataSet(dataset_types, dataset_shapes)\n\n net = NetNoLoss(16, 16)\n loss = nn.MSELoss()\n optimizer = nn.Momentum(net.trainable_params(), 0.1, 0.9, 0.00004, 1024.0)\n\n init()\n\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={\"acc\"}, amp_level=\"O2\")\n model.train(2, dataset, dataset_sink_mode=False)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport mindspore.context as context\r\nimport mindspore.nn as nn\r\nfrom mindspore import Tensor\r\nfrom mindspore.ops import operations as P\r\n\r\nclass NetDiv(nn.Cell):\r\n def __init__(self):\r\n super(NetDiv, self).__init__()\r\n self.div = P.Div()\r\n\r\n def construct(self, x, y):\r\n return self.div(x, y)\r\n\r\[email protected]\r\[email protected]_x86_gpu_training\r\[email protected]_onecard\r\ndef test_div():\r\n x0_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float32)\r\n y0_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float32)\r\n x1_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float32)\r\n y1_np = np.random.randint(1, 5, (2, 1, 4, 4)).astype(np.float32)\r\n x2_np = np.random.randint(1, 5, (2, 1, 1, 4)).astype(np.float32)\r\n y2_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float32)\r\n x3_np = np.random.randint(1, 5, 1).astype(np.float32)\r\n y3_np = np.random.randint(1, 5, 1).astype(np.float32)\r\n x4_np = np.array(768).astype(np.float32)\r\n y4_np = np.array(3072.5).astype(np.float32)\r\n x5_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float16)\r\n y5_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.float16)\r\n x6_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(np.int32)\r\n y6_np = np.random.randint(1, 5, (2, 1, 4, 4)).astype(np.int32)\r\n\r\n x0 = Tensor(x0_np)\r\n y0 = Tensor(y0_np)\r\n x1 = Tensor(x1_np)\r\n y1 = Tensor(y1_np)\r\n x2 = Tensor(x2_np)\r\n y2 = Tensor(y2_np)\r\n x3 = Tensor(x3_np)\r\n y3 = Tensor(y3_np)\r\n x4 = Tensor(x4_np)\r\n y4 = Tensor(y4_np)\r\n x5 = Tensor(x5_np)\r\n y5 = Tensor(y5_np)\r\n x6 = Tensor(x6_np)\r\n y6 = Tensor(y6_np)\r\n\r\n context.set_context(mode=context.GRAPH_MODE, device_target='GPU')\r\n div = NetDiv()\r\n output0 = div(x0, y0)\r\n expect0 = np.divide(x0_np, y0_np)\r\n diff0 = output0.asnumpy() - expect0\r\n error0 = np.ones(shape=expect0.shape) * 1.0e-5\r\n assert np.all(diff0 < error0)\r\n assert output0.shape == expect0.shape\r\n\r\n output1 = div(x1, y1)\r\n expect1 = np.divide(x1_np, y1_np)\r\n diff1 = output1.asnumpy() - expect1\r\n error1 = np.ones(shape=expect1.shape) * 1.0e-5\r\n assert np.all(diff1 < error1)\r\n assert output1.shape == expect1.shape\r\n\r\n output2 = div(x2, y2)\r\n expect2 = np.divide(x2_np, y2_np)\r\n diff2 = output2.asnumpy() - expect2\r\n error2 = np.ones(shape=expect2.shape) * 1.0e-5\r\n assert np.all(diff2 < error2)\r\n assert output2.shape == expect2.shape\r\n\r\n context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')\r\n output3 = div(x3, y3)\r\n expect3 = np.divide(x3_np, y3_np)\r\n diff3 = output3.asnumpy() - expect3\r\n error3 = np.ones(shape=expect3.shape) * 1.0e-5\r\n assert np.all(diff3 < error3)\r\n assert output3.shape == expect3.shape\r\n\r\n output4 = div(x4, y4)\r\n expect4 = 
np.divide(x4_np, y4_np)\r\n diff4 = output4.asnumpy() - expect4\r\n error4 = np.ones(shape=expect4.shape) * 1.0e-5\r\n assert np.all(diff4 < error4)\r\n assert output4.shape == expect4.shape\r\n\r\n output5 = div(x5, y5)\r\n expect5 = np.divide(x5_np, y5_np)\r\n diff5 = output5.asnumpy() - expect5\r\n error5 = np.ones(shape=expect5.shape) * 1.0e-5\r\n assert np.all(diff5 < error5)\r\n assert output5.shape == expect5.shape\r\n\r\n output6 = div(x6, y6)\r\n expect6 = np.divide(x6_np, y6_np)\r\n diff6 = output6.asnumpy() - expect6\r\n error6 = np.ones(shape=expect6.shape) * 1.0e-5\r\n assert np.all(diff6 < error6)\r\n assert output6.shape == expect6.shape\r\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n\nclass NetRelu(nn.Cell):\n def __init__(self):\n super(NetRelu, self).__init__()\n self.relu = P.ReLU()\n self.x = Parameter(initializer(Tensor(np.array([[[[-1, 1, 10],\n [1, -1, 1],\n [10, 1, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='x')\n\n def construct(self):\n return self.relu(self.x)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_relu():\n relu = NetRelu()\n output = relu()\n expect = np.array([[[[0, 1, 10,],\n [1, 0, 1,],\n [10, 1, 0.]]]]).astype(np.float32)\n print(output)\n assert (output.asnumpy() == expect).all()\n",
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"FasterRcnn tpositive and negative sample screening for Rcnn.\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\nfrom mindspore import context\n\n\nclass BboxAssignSampleForRcnn(nn.Cell):\n \"\"\"\n Bbox assigner and sampler definition.\n\n Args:\n config (dict): Config.\n batch_size (int): Batchsize.\n num_bboxes (int): The anchor nums.\n add_gt_as_proposals (bool): add gt bboxes as proposals flag.\n\n Returns:\n Tensor, output tensor.\n bbox_targets: bbox location, (batch_size, num_bboxes, 4)\n bbox_weights: bbox weights, (batch_size, num_bboxes, 1)\n labels: label for every bboxes, (batch_size, num_bboxes, 1)\n label_weights: label weight for every bboxes, (batch_size, num_bboxes, 1)\n\n Examples:\n BboxAssignSampleForRcnn(config, 2, 1024, True)\n \"\"\"\n\n def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):\n super(BboxAssignSampleForRcnn, self).__init__()\n cfg = config\n _mode_16 = bool(context.get_context(\"device_target\") == \"Ascend\")\n self.dtype = np.float16 if _mode_16 else np.float32\n self.ms_type = mstype.float16 if _mode_16 else mstype.float32\n self.batch_size = batch_size\n self.neg_iou_thr = cfg.neg_iou_thr_stage2\n self.pos_iou_thr = cfg.pos_iou_thr_stage2\n self.min_pos_iou = cfg.min_pos_iou_stage2\n self.num_gts = cfg.num_gts\n self.num_bboxes = num_bboxes\n self.num_expected_pos = cfg.num_expected_pos_stage2\n self.num_expected_neg = cfg.num_expected_neg_stage2\n self.num_expected_total = cfg.num_expected_total_stage2\n\n self.add_gt_as_proposals = add_gt_as_proposals\n self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))\n self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts),\n dtype=np.int32))\n\n self.concat = P.Concat(axis=0)\n self.max_gt = P.ArgMaxWithValue(axis=0)\n self.max_anchor = P.ArgMaxWithValue(axis=1)\n self.sum_inds = P.ReduceSum()\n self.iou = P.IOU()\n self.greaterequal = P.GreaterEqual()\n self.greater = P.Greater()\n self.select = P.Select()\n self.gatherND = P.GatherNd()\n self.squeeze = P.Squeeze()\n self.cast = P.Cast()\n self.logicaland = P.LogicalAnd()\n self.less = P.Less()\n self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)\n self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)\n self.reshape = P.Reshape()\n self.equal = P.Equal()\n self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2))\n self.concat_axis1 = P.Concat(axis=1)\n self.logicalnot = P.LogicalNot()\n self.tile = P.Tile()\n\n # Check\n self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=self.dtype))\n self.check_anchor_two = Tensor(np.array(-2 * 
np.ones((self.num_bboxes, 4)), dtype=self.dtype))\n\n # Init tensor\n self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))\n self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))\n self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))\n self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))\n self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))\n\n self.gt_ignores = Tensor(np.array(-1 * np.ones(self.num_gts), dtype=np.int32))\n self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(self.dtype))\n self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))\n self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=self.dtype))\n self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))\n\n self.reshape_shape_pos = (self.num_expected_pos, 1)\n self.reshape_shape_neg = (self.num_expected_neg, 1)\n\n self.scalar_zero = Tensor(0.0, dtype=self.ms_type)\n self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=self.ms_type)\n self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=self.ms_type)\n self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=self.ms_type)\n\n def construct(self, gt_bboxes_i, gt_labels_i, valid_mask, bboxes, gt_valids):\n gt_bboxes_i = self.select(self.cast(self.tile(self.reshape(self.cast(gt_valids, mstype.int32), \\\n (self.num_gts, 1)), (1, 4)), mstype.bool_), \\\n gt_bboxes_i, self.check_gt_one)\n bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \\\n (self.num_bboxes, 1)), (1, 4)), mstype.bool_), \\\n bboxes, self.check_anchor_two)\n\n overlaps = self.iou(bboxes, gt_bboxes_i)\n\n max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)\n _, max_overlaps_w_ac = self.max_anchor(overlaps)\n\n neg_sample_iou_mask = self.logicaland(self.greaterequal(max_overlaps_w_gt,\n self.scalar_zero),\n self.less(max_overlaps_w_gt,\n self.scalar_neg_iou_thr))\n\n assigned_gt_inds2 = self.select(neg_sample_iou_mask, self.assigned_gt_zeros, self.assigned_gt_inds)\n\n pos_sample_iou_mask = self.greaterequal(max_overlaps_w_gt, self.scalar_pos_iou_thr)\n assigned_gt_inds3 = self.select(pos_sample_iou_mask, \\\n max_overlaps_w_gt_index + self.assigned_gt_ones, assigned_gt_inds2)\n\n for j in range(self.num_gts):\n max_overlaps_w_ac_j = max_overlaps_w_ac[j:j+1:1]\n overlaps_w_ac_j = overlaps[j:j+1:1, ::]\n temp1 = self.greaterequal(max_overlaps_w_ac_j, self.scalar_min_pos_iou)\n temp2 = self.squeeze(self.equal(overlaps_w_ac_j, max_overlaps_w_ac_j))\n pos_mask_j = self.logicaland(temp1, temp2)\n assigned_gt_inds3 = self.select(pos_mask_j, (j+1)*self.assigned_gt_ones, assigned_gt_inds3)\n\n assigned_gt_inds5 = self.select(valid_mask, assigned_gt_inds3, self.assigned_gt_ignores)\n\n bboxes = self.concat((gt_bboxes_i, bboxes))\n label_inds_valid = self.select(gt_valids, self.label_inds, self.gt_ignores)\n label_inds_valid = label_inds_valid * self.add_gt_as_proposals_valid\n assigned_gt_inds5 = self.concat((label_inds_valid, assigned_gt_inds5))\n\n # Get pos index\n pos_index, valid_pos_index = self.random_choice_with_mask_pos(self.greater(assigned_gt_inds5, 0))\n\n pos_check_valid = self.cast(self.greater(assigned_gt_inds5, 0), self.ms_type)\n pos_check_valid = self.sum_inds(pos_check_valid, -1)\n valid_pos_index = self.less(self.range_pos_size, pos_check_valid)\n pos_index = pos_index * 
self.reshape(self.cast(valid_pos_index, mstype.int32), (self.num_expected_pos, 1))\n\n num_pos = self.sum_inds(self.cast(self.logicalnot(valid_pos_index), self.ms_type), -1)\n valid_pos_index = self.cast(valid_pos_index, mstype.int32)\n pos_index = self.reshape(pos_index, self.reshape_shape_pos)\n valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)\n pos_index = pos_index * valid_pos_index\n\n pos_assigned_gt_index = self.gatherND(assigned_gt_inds5, pos_index) - self.assigned_pos_ones\n pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)\n pos_assigned_gt_index = pos_assigned_gt_index * valid_pos_index\n\n pos_gt_labels = self.gatherND(gt_labels_i, pos_assigned_gt_index)\n\n # Get neg index\n neg_index, valid_neg_index = self.random_choice_with_mask_neg(self.equal(assigned_gt_inds5, 0))\n\n unvalid_pos_index = self.less(self.range_pos_size, num_pos)\n valid_neg_index = self.logicaland(self.concat((self.check_neg_mask, unvalid_pos_index)), valid_neg_index)\n neg_index = self.reshape(neg_index, self.reshape_shape_neg)\n\n valid_neg_index = self.cast(valid_neg_index, mstype.int32)\n valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)\n neg_index = neg_index * valid_neg_index\n\n pos_bboxes_ = self.gatherND(bboxes, pos_index)\n\n neg_bboxes_ = self.gatherND(bboxes, neg_index)\n pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)\n pos_gt_bboxes_ = self.gatherND(gt_bboxes_i, pos_assigned_gt_index)\n pos_bbox_targets_ = self.bounding_box_encode(pos_bboxes_, pos_gt_bboxes_)\n\n total_bboxes = self.concat((pos_bboxes_, neg_bboxes_))\n total_deltas = self.concat((pos_bbox_targets_, self.bboxs_neg_mask))\n total_labels = self.concat((pos_gt_labels, self.labels_neg_mask))\n\n valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)\n valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)\n total_mask = self.concat((valid_pos_index, valid_neg_index))\n\n return total_bboxes, total_deltas, total_labels, total_mask\n",
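The core of BboxAssignSampleForRcnn.construct() is the IoU-threshold assignment: anchors whose best overlap is below neg_iou_thr become background, anchors above pos_iou_thr are assigned to their best ground truth, and each ground truth additionally claims its best-overlapping anchor once that IoU reaches min_pos_iou. A standalone numpy sketch of that rule; the sampling, box encoding and masking steps are omitted, and the thresholds and the assign helper are illustrative:

```python
# Standalone numpy sketch of the assignment rule at the heart of construct():
# given an IoU matrix of shape (num_gts, num_anchors), anchors whose best IoU
# is below neg_iou_thr become background (0), anchors above pos_iou_thr are
# assigned to their best gt (1-based index), and every gt also claims its
# best-overlapping anchor when that IoU reaches min_pos_iou.
import numpy as np

def assign(overlaps, neg_iou_thr=0.5, pos_iou_thr=0.5, min_pos_iou=0.5):
    num_gts, num_anchors = overlaps.shape
    assigned = -1 * np.ones(num_anchors, dtype=np.int32)   # -1 = ignore
    best_gt = overlaps.argmax(axis=0)
    best_gt_iou = overlaps.max(axis=0)
    assigned[best_gt_iou < neg_iou_thr] = 0                 # background
    pos = best_gt_iou >= pos_iou_thr
    assigned[pos] = best_gt[pos] + 1                        # 1-based gt index
    for j in range(num_gts):                                # low-quality matches
        if overlaps[j].max() >= min_pos_iou:
            assigned[overlaps[j] == overlaps[j].max()] = j + 1
    return assigned

iou = np.array([[0.1, 0.7, 0.4],
                [0.6, 0.2, 0.45]])
print(assign(iou))   # -> [2 1 0]
```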
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore import context\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import TrainOneStepCell\nfrom mindspore.nn.optim import Momentum, LARS\nfrom mindspore.ops import operations as P\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network, strategy3):\n super(NetWithLoss, self).__init__()\n self.loss = P.SoftmaxCrossEntropyWithLogits().shard(strategy3)\n self.network = network\n\n def construct(self, x, b):\n predict = self.network(x)\n return self.loss(predict, b)[0]\n\n\ndef compile_net(net, x, b):\n net.set_auto_parallel()\n net.set_train()\n _executor.compile(net, x, b)\n\n\ndef test_momentum():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, weight):\n super().__init__()\n self.weight = Parameter(weight, \"w1\")\n self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n out = self.relu(out)\n return out\n\n context.set_auto_parallel_context(device_num=4, global_rank=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n strategy1 = ((2, 1), (2, 1))\n strategy2 = ((4, 1),)\n strategy3 = ((4, 1), (4, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([64, 32]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, strategy2, weight)\n\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n\n net_with_loss = NetWithLoss(net, strategy3)\n\n train_net = TrainOneStepCell(net_with_loss, optimizer)\n\n compile_net(train_net, x, b)\n\n\ndef test_momentum_with_loss_scale():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, weight):\n super().__init__()\n self.weight = Parameter(weight, \"w1\")\n self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n out = self.relu(out)\n return out\n\n context.set_auto_parallel_context(device_num=4, global_rank=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n strategy1 = ((2, 1), (2, 1))\n strategy2 = ((4, 1),)\n strategy3 = ((4, 1), (4, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([64, 32]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, strategy2, weight)\n\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9, loss_scale=0.5)\n\n net_with_loss = NetWithLoss(net, strategy3)\n\n train_net = TrainOneStepCell(net_with_loss, optimizer)\n\n compile_net(train_net, x, b)\n\n\ndef test_momentum_with_dynamic_lr():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, weight):\n 
super().__init__()\n self.weight = Parameter(weight, \"w1\")\n self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n out = self.relu(out)\n return out\n\n context.set_auto_parallel_context(device_num=4, global_rank=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n strategy1 = ((2, 1), (2, 1))\n strategy2 = ((4, 1),)\n strategy3 = ((4, 1), (4, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([64, 32]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, strategy2, weight)\n\n lr = Tensor(np.ones([6]), dtype=ms.float32)\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = NetWithLoss(net, strategy3)\n\n train_net = TrainOneStepCell(net_with_loss, optimizer)\n\n compile_net(train_net, x, b)\n\n\ndef test_momentum_with_loss_scale_and_dynamic_lr():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, weight):\n super().__init__()\n self.weight = Parameter(weight, \"w1\")\n self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n out = self.relu(out)\n return out\n\n context.set_auto_parallel_context(device_num=4, global_rank=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n\n strategy1 = ((2, 1), (2, 1))\n strategy2 = ((4, 1),)\n strategy3 = ((4, 1), (4, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([64, 32]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, strategy2, weight)\n\n lr = Tensor(np.ones([6]), dtype=ms.float32)\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9, loss_scale=0.5)\n\n net_with_loss = NetWithLoss(net, strategy3)\n\n train_net = TrainOneStepCell(net_with_loss, optimizer)\n\n compile_net(train_net, x, b)\n\n\ndef test_lars():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, weight):\n super().__init__()\n self.weight = Parameter(weight, \"w1\")\n self.matmul = P.MatMul(transpose_a=False, transpose_b=True).shard(strategy1)\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n out = self.relu(out)\n return out\n\n context.set_auto_parallel_context(device_num=4, global_rank=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n strategy1 = ((2, 1), (2, 1))\n strategy2 = ((4, 1),)\n strategy3 = ((4, 1), (4, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([64, 32]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, strategy2, weight)\n\n lr = Tensor(np.ones([6]), dtype=ms.float32)\n sgd = Momentum(net.trainable_params(), lr, 0.9)\n optimizer = LARS(sgd, epsilon=1e-08, coefficient=0.02,\n lars_filter=lambda x: 'bn' not in x.name)\n net_with_loss = NetWithLoss(net, strategy3)\n train_net = TrainOneStepCell(net_with_loss, optimizer)\n\n compile_net(train_net, x, b)\n",
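test_lars above wraps Momentum in the LARS optimizer with epsilon and coefficient arguments. For orientation, a minimal numpy sketch of the layer-wise trust ratio LARS is generally described as applying; this restates the published formula rather than MindSpore's exact implementation:

```python
# Minimal numpy sketch (illustrative) of the LARS layer-wise trust ratio the
# wrapped optimizer above applies on top of Momentum:
#   local_lr = coefficient * ||w|| / (||g|| + weight_decay * ||w|| + epsilon)
import numpy as np

def lars_scaled_grad(w, g, coefficient=0.02, weight_decay=0.0, epsilon=1e-8):
    w_norm = np.linalg.norm(w)
    g_norm = np.linalg.norm(g)
    trust = coefficient * w_norm / (g_norm + weight_decay * w_norm + epsilon)
    return trust * g

w = np.random.randn(64, 32).astype(np.float32)
g = np.random.randn(64, 32).astype(np.float32)
print(lars_scaled_grad(w, g).shape)
```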
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.communication.management import init\nfrom mindspore.ops import operations as P\n\n\nclass DataParallelNet(nn.Cell):\n def __init__(self):\n super(DataParallelNet, self).__init__()\n weight_init = np.random.rand(512, 64).astype(np.float32)\n self.weight = Parameter(Tensor(weight_init), name=\"weight\", layerwise_parallel=False)\n self.fc = P.MatMul()\n\n def construct(self, x):\n x = self.fc(x, self.weight)\n return x\n\n\nclass ModelParallelNet(nn.Cell):\n def __init__(self):\n super(ModelParallelNet, self).__init__()\n weight_init = np.random.rand(512, 64).astype(np.float32)\n self.weight = Parameter(Tensor(weight_init), name=\"weight\", layerwise_parallel=True)\n self.fc = P.MatMul()\n\n def construct(self, x):\n x = self.fc(x, self.weight)\n return x\n\n\ndef test_param_broadcast():\n context.set_context(mode=context.GRAPH_MODE)\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=\"data_parallel\", parameter_broadcast=True)\n init()\n network = DataParallelNet()\n network.set_train()\n\n predict = Tensor(np.ones([64, 512]).astype(np.float32) * 0.01)\n _ = network(predict)\n context.reset_auto_parallel_context()\n\n\ndef test_param_not_broadcast():\n context.set_context(mode=context.GRAPH_MODE)\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=\"data_parallel\", parameter_broadcast=False)\n init()\n network = ModelParallelNet()\n network.set_train()\n\n predict = Tensor(np.ones([64, 512]).astype(np.float32) * 0.01)\n _ = network(predict)\n context.reset_auto_parallel_context()\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore.common.api import _executor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import operations as P\nfrom tests.ut.python.ops.test_math_ops import VirtualLoss\n\n\ngrad_all = C.GradOperation(get_all=True)\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network):\n super(NetWithLoss, self).__init__()\n self.loss = VirtualLoss()\n self.network = network\n\n def construct(self, x, y, b):\n predict = self.network(x, y, b)\n return self.loss(predict)\n\n\nclass GradWrap(nn.Cell):\n def __init__(self, network):\n super(GradWrap, self).__init__()\n self.network = network\n\n def construct(self, x, y, b):\n return grad_all(self.network)(x, y, b)\n\n\n# model_parallel test\ndef test_l2normalize_matmul():\n class Net(nn.Cell):\n def __init__(self, strategy1, strategy2, strategy3):\n super().__init__()\n self.norm1 = P.L2Normalize(axis=0).shard(strategy1)\n self.norm2 = P.L2Normalize(axis=0).shard(strategy1)\n self.mul1 = P.Mul().shard(strategy2)\n self.mul2 = P.Mul().shard(strategy3)\n\n def construct(self, x, y, b):\n y = self.norm1(y)\n x = self.norm2(x)\n out = self.mul1(x, y)\n out = self.mul2(out, b)\n return out\n\n context.set_auto_parallel_context(device_num=8, global_rank=0)\n strategy1 = ((1, 1, 4),)\n strategy2 = ((1, 1, 4), (1, 1, 4))\n strategy3 = ((1, 1, 8), (1, 1, 8))\n net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n net.set_auto_parallel()\n\n x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)\n y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)\n b = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)\n net.set_train()\n _executor.compile(net, x, y, b)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.ops import operations as P\nimport mindspore.common.dtype as mstype\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n\nvar_np = np.random.rand(3, 3).astype(np.float32)\naccum_np = np.random.rand(3, 3).astype(np.float32)\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.apply_adagrad = P.ApplyAdagrad()\n self.var = Parameter(Tensor(var_np), name=\"var\")\n self.accum = Parameter(Tensor(accum_np), name=\"accum\")\n\n def construct(self, lr, grad):\n self.apply_adagrad(self.var, self.accum, lr, grad)\n return self.var, self.accum\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_apply_adagrad():\n # numpy op\n grident_np = np.random.rand(3, 3).astype(np.float32)\n expect_accum_np = accum_np + grident_np * grident_np\n expect_var_np = var_np - (0.001 * grident_np * (1 / np.sqrt(expect_accum_np + 1e-6)))\n\n net = Net()\n lr = Tensor(0.001, mstype.float32)\n grad = Tensor(grident_np)\n out = net(lr, grad)\n res_var_mindspore = out[0].asnumpy()\n res_accum_mindspore = out[1].asnumpy()\n eps = np.array([1e-6 for i in range(9)]).reshape(3, 3)\n\n assert np.all(expect_var_np - res_var_mindspore < eps)\n assert np.all(expect_accum_np - res_accum_mindspore < eps)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting the random resize with bounding boxes op in DE\n\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as c_vision\n\nfrom mindspore import log as logger\nfrom util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \\\n config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5\n\nGENERATE_GOLDEN = False\n\nDATA_DIR = \"../data/dataset/testVOC2012_2\"\nDATA_DIR_2 = [\"../data/dataset/testCOCO/train/\",\n \"../data/dataset/testCOCO/annotations/train.json\"] # DATA_DIR, ANNOTATION_DIR\n\n\ndef test_random_resize_with_bbox_op_voc_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomResizeWithBBox Op applied\n testing with VOC dataset\n \"\"\"\n logger.info(\"test_random_resize_with_bbox_op_voc_c\")\n original_seed = config_get_set_seed(123)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n # Load dataset\n dataVoc1 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomResizeWithBBox(100)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n filename = \"random_resize_with_bbox_op_01_c_voc_result.npz\"\n save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_random_resize_with_bbox_op_rand_coco_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomResizeWithBBox Op applied,\n tests with MD5 check, expected to pass\n testing with COCO dataset\n \"\"\"\n logger.info(\"test_random_resize_with_bbox_op_rand_coco_c\")\n original_seed = config_get_set_seed(231)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n # Load dataset\n dataCoco1 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n test_op = c_vision.RandomResizeWithBBox(200)\n\n # map to apply ops\n\n dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n 
output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n filename = \"random_resize_with_bbox_op_01_c_coco_result.npz\"\n save_and_check_md5(dataCoco2, filename, generate_golden=GENERATE_GOLDEN)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp, annot_name=\"bbox\")\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_random_resize_with_bbox_op_edge_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomresizeWithBBox Op applied,\n applied on dynamically generated edge case, expected to pass. edge case is when bounding\n box has dimensions as the image itself.\n \"\"\"\n logger.info(\"test_random_resize_with_bbox_op_edge_c\")\n dataVoc1 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomResizeWithBBox(500)\n\n # maps to convert data into valid edge case data\n dataVoc1 = dataVoc1.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype))],\n input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n dataVoc2 = dataVoc2.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype)),\n test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n\ndef test_random_resize_with_bbox_op_invalid_c():\n \"\"\"\n Test RandomResizeWithBBox Op on invalid constructor parameters, expected to raise ValueError\n \"\"\"\n logger.info(\"test_random_resize_with_bbox_op_invalid_c\")\n\n try:\n # zero value for resize\n c_vision.RandomResizeWithBBox(0)\n\n except ValueError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Input is not within the required interval of (1 to 16777216).\" in str(err)\n\n try:\n # one of the size values is zero\n c_vision.RandomResizeWithBBox((0, 100))\n\n except ValueError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Input size at dim 0 is not within the required interval of (1 to 2147483647).\" in str(err)\n\n try:\n # negative value for resize\n c_vision.RandomResizeWithBBox(-10)\n\n except ValueError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Input is not within the required interval of (1 to 16777216).\" in str(err)\n\n try:\n # invalid input shape\n c_vision.RandomResizeWithBBox((100, 100, 100))\n\n except TypeError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Size should be\" in str(err)\n\n\ndef test_random_resize_with_bbox_op_bad_c():\n \"\"\"\n Tests RandomResizeWithBBox Op with invalid bounding boxes, expected to catch 
multiple errors\n \"\"\"\n logger.info(\"test_random_resize_with_bbox_op_bad_c\")\n test_op = c_vision.RandomResizeWithBBox((400, 300))\n\n data_voc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, \"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow, \"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.NegativeXY, \"negative value\")\n data_voc2 = ds.VOCDataset(DATA_DIR, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, \"4 features\")\n\n\nif __name__ == \"__main__\":\n test_random_resize_with_bbox_op_voc_c(plot_vis=False)\n test_random_resize_with_bbox_op_rand_coco_c(plot_vis=False)\n test_random_resize_with_bbox_op_edge_c(plot_vis=False)\n test_random_resize_with_bbox_op_invalid_c()\n test_random_resize_with_bbox_op_bad_c()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Transformer testing script.\"\"\"\n\nimport time\nimport os\nimport pytest\nimport numpy as np\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.nn.optim import Adam\nfrom mindspore.train.model import Model\nfrom mindspore.train.loss_scale_manager import DynamicLossScaleManager\nfrom mindspore.train.callback import Callback\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.c_transforms as deC\nfrom mindspore import context\nfrom model_zoo.official.nlp.transformer.src.transformer_model import TransformerConfig\nfrom model_zoo.official.nlp.transformer.src.transformer_for_train import TransformerNetworkWithLoss, \\\n TransformerTrainOneStepWithLossScaleCell\nfrom model_zoo.official.nlp.transformer.src.config import cfg, transformer_net_cfg\nfrom model_zoo.official.nlp.transformer.src.lr_schedule import create_dynamic_lr\n\nDATA_DIR = [\"/home/workspace/mindspore_dataset/transformer/test-mindrecord\"]\n\n\ndef get_config(version='base', batch_size=1):\n \"\"\"get config\"\"\"\n if version == 'large':\n transformer_cfg = TransformerConfig(\n batch_size=96,\n seq_length=128,\n vocab_size=36560,\n hidden_size=1024,\n num_hidden_layers=6,\n num_attention_heads=16,\n intermediate_size=4096,\n hidden_act=\"relu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n max_position_embeddings=128,\n initializer_range=0.02,\n label_smoothing=0.1,\n dtype=mstype.float32,\n compute_type=mstype.float16)\n elif version == 'base':\n transformer_cfg = TransformerConfig(\n batch_size=96,\n seq_length=128,\n vocab_size=36560,\n hidden_size=512,\n num_hidden_layers=6,\n num_attention_heads=8,\n intermediate_size=2048,\n hidden_act=\"relu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n max_position_embeddings=128,\n initializer_range=0.02,\n label_smoothing=0.1,\n dtype=mstype.float32,\n compute_type=mstype.float16)\n else:\n transformer_cfg = TransformerConfig(batch_size=batch_size)\n return transformer_cfg\n\n\ndef load_test_data(batch_size=1, data_file=None):\n \"\"\"Load test dataset.\"\"\"\n data_set = ds.MindDataset(data_file,\n columns_list=[\"source_eos_ids\", \"source_eos_mask\",\n \"target_sos_ids\", \"target_sos_mask\",\n \"target_eos_ids\", \"target_eos_mask\"],\n shuffle=False)\n type_cast_op = deC.TypeCast(mstype.int32)\n data_set = data_set.map(operations=type_cast_op, input_columns=\"source_eos_ids\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"source_eos_mask\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"target_sos_ids\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"target_sos_mask\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"target_eos_ids\")\n data_set = data_set.map(operations=type_cast_op, 
input_columns=\"target_eos_mask\")\n # apply batch operations\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set\n\n\nclass ModelCallback(Callback):\n def __init__(self):\n super(ModelCallback, self).__init__()\n self.loss_list = []\n self.overflow_list = []\n self.lossscale_list = []\n\n def step_end(self, run_context):\n cb_params = run_context.original_args()\n self.loss_list.append(cb_params.net_outputs[0].asnumpy()[0])\n self.overflow_list.append(cb_params.net_outputs[1].asnumpy())\n self.lossscale_list.append(cb_params.net_outputs[2].asnumpy())\n print(\"epoch: {}, outputs are: {}\".format(cb_params.cur_epoch_num, str(cb_params.net_outputs)))\n\n\nclass TimeMonitor(Callback):\n \"\"\"Time Monitor.\"\"\"\n\n def __init__(self, data_size):\n super(TimeMonitor, self).__init__()\n self.data_size = data_size\n self.epoch_mseconds_list = []\n self.per_step_mseconds_list = []\n\n def epoch_begin(self, run_context):\n self.epoch_time = time.time()\n\n def epoch_end(self, run_context):\n epoch_mseconds = (time.time() - self.epoch_time) * 1000\n self.epoch_mseconds_list.append(epoch_mseconds)\n self.per_step_mseconds_list.append(epoch_mseconds / self.data_size)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_transformer():\n \"\"\"\n Transformer training.\n \"\"\"\n np.random.seed(1)\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n context.set_context(reserve_class_name_in_scope=False, enable_auto_mixed_precision=False)\n version = os.getenv('VERSION', 'large')\n batch_size = 96\n epoch_size = 3\n config = get_config(version=version, batch_size=batch_size)\n dataset = load_test_data(batch_size=transformer_net_cfg.batch_size, data_file=DATA_DIR)\n\n netwithloss = TransformerNetworkWithLoss(config, True)\n\n lr = Tensor(create_dynamic_lr(schedule=\"constant*rsqrt_hidden*linear_warmup*rsqrt_decay\",\n training_steps=dataset.get_dataset_size() * epoch_size,\n learning_rate=cfg.lr_schedule.learning_rate,\n warmup_steps=cfg.lr_schedule.warmup_steps,\n hidden_size=config.hidden_size), mstype.float32)\n optimizer = Adam(netwithloss.trainable_params(), lr)\n\n callback = ModelCallback()\n\n scale_manager = DynamicLossScaleManager(init_loss_scale=4194304,\n scale_factor=cfg.scale_factor,\n scale_window=3)\n update_cell = scale_manager.get_update_cell()\n netwithgrads = TransformerTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer,\n scale_update_cell=update_cell)\n\n netwithgrads.set_train(True)\n time_monitor_callback = TimeMonitor(dataset.get_dataset_size())\n model = Model(netwithgrads)\n model.train(epoch_size, dataset, callbacks=[time_monitor_callback, callback], dataset_sink_mode=False)\n\n # assertion occurs while the loss value, overflow state or loss_scale value is wrong\n loss_value = np.array(callback.loss_list)\n assert np.allclose(loss_value[0], 11.241624, 0, 0.000005)\n\n expect_loss_value = [11.241624, 11.243232, 11.217465, 11.204196, 11.2138195,\n 11.215386, 11.19053, 11.150403, 11.191858, 11.160057]\n\n print(\"loss value: {}\".format(loss_value))\n assert np.allclose(loss_value[0:10], expect_loss_value, 0, 0.0005)\n\n overflow = np.array(callback.overflow_list)\n expect_overflow = [False, False, False, True, False, False, False, True, False, False]\n print(\"overflow: {}\".format(overflow))\n assert (overflow[0:10] == expect_overflow).all()\n\n loss_scale = np.array(callback.lossscale_list)\n expect_loss_scale = [4194304.0, 4194304.0, 
8388608.0, 4194304.0, 4194304.0,\n 4194304.0, 8388608.0, 4194304.0, 4194304.0, 4194304.0]\n print(\"loss scale: {}\".format(loss_scale))\n assert np.allclose(loss_scale[0:10], expect_loss_scale, 0, 0)\n\n epoch_mseconds = np.array(time_monitor_callback.epoch_mseconds_list)[2]\n expect_epoch_mseconds = 3180\n print(\"epoch mseconds: {}\".format(epoch_mseconds))\n assert epoch_mseconds <= expect_epoch_mseconds + 20\n\n per_step_mseconds = np.array(time_monitor_callback.per_step_mseconds_list)[2]\n expect_per_step_mseconds = 318\n print(\"per step mseconds: {}\".format(per_step_mseconds))\n assert per_step_mseconds <= expect_per_step_mseconds + 2\n\n\nif __name__ == '__main__':\n test_transformer()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Bert model.\"\"\"\n\nimport math\nimport copy\nimport numpy as np\nimport mindspore.common.dtype as mstype\nimport mindspore.nn as nn\nimport mindspore.ops.functional as F\nfrom mindspore.common.initializer import TruncatedNormal, initializer\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import composite as C\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common.parameter import Parameter\n\n\nclass BertConfig:\n \"\"\"\n Configuration for `BertModel`.\n\n Args:\n batch_size (int): Batch size of input dataset.\n seq_length (int): Length of input sequence. Default: 128.\n vocab_size (int): The shape of each embedding vector. Default: 32000.\n hidden_size (int): Size of the bert encoder layers. Default: 768.\n num_hidden_layers (int): Number of hidden layers in the BertTransformer encoder\n cell. Default: 12.\n num_attention_heads (int): Number of attention heads in the BertTransformer\n encoder cell. Default: 12.\n intermediate_size (int): Size of intermediate layer in the BertTransformer\n encoder cell. Default: 3072.\n hidden_act (str): Activation function used in the BertTransformer encoder\n cell. Default: \"gelu\".\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n max_position_embeddings (int): Maximum length of sequences used in this\n model. Default: 512.\n type_vocab_size (int): Size of token type vocab. Default: 16.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n input_mask_from_dataset (bool): Specifies whether to use the input mask that loaded from\n dataset. Default: True.\n token_type_ids_from_dataset (bool): Specifies whether to use the token type ids that loaded\n from dataset. Default: True.\n dtype (:class:`mindspore.dtype`): Data type of the input. Default: mstype.float32.\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. 
Default: mstype.float32.\n \"\"\"\n def __init__(self,\n batch_size,\n seq_length=128,\n vocab_size=32000,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02,\n use_relative_positions=False,\n input_mask_from_dataset=True,\n token_type_ids_from_dataset=True,\n dtype=mstype.float32,\n compute_type=mstype.float32,\n enable_fused_layernorm=False):\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.input_mask_from_dataset = input_mask_from_dataset\n self.token_type_ids_from_dataset = token_type_ids_from_dataset\n self.use_relative_positions = use_relative_positions\n self.dtype = dtype\n self.compute_type = compute_type\n self.enable_fused_layernorm = enable_fused_layernorm\n\n\nclass EmbeddingLookup(nn.Cell):\n \"\"\"\n A embeddings lookup table with a fixed dictionary and size.\n\n Args:\n vocab_size (int): Size of the dictionary of embeddings.\n embedding_size (int): The size of each embedding vector.\n embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of\n each embedding vector.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n \"\"\"\n def __init__(self,\n vocab_size,\n embedding_size,\n embedding_shape,\n use_one_hot_embeddings=False,\n initializer_range=0.02):\n super(EmbeddingLookup, self).__init__()\n self.vocab_size = vocab_size\n self.use_one_hot_embeddings = use_one_hot_embeddings\n self.embedding_table = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [vocab_size, embedding_size]),\n name='embedding_table')\n self.expand = P.ExpandDims()\n self.shape_flat = (-1,)\n self.gather = P.GatherV2()\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n self.array_mul = P.MatMul()\n self.reshape = P.Reshape()\n self.shape = tuple(embedding_shape)\n\n def construct(self, input_ids):\n extended_ids = self.expand(input_ids, -1)\n flat_ids = self.reshape(extended_ids, self.shape_flat)\n if self.use_one_hot_embeddings:\n one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value)\n output_for_reshape = self.array_mul(\n one_hot_ids, self.embedding_table)\n else:\n output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)\n output = self.reshape(output_for_reshape, self.shape)\n return output, self.embedding_table\n\n\nclass EmbeddingPostprocessor(nn.Cell):\n \"\"\"\n Postprocessors apply positional and token type embeddings to word embeddings.\n\n Args:\n embedding_size (int): The size of each embedding vector.\n embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of\n each embedding vector.\n use_token_type (bool): Specifies whether to use token type embeddings. 
Default: False.\n token_type_vocab_size (int): Size of token type vocab. Default: 16.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n max_position_embeddings (int): Maximum length of sequences used in this\n model. Default: 512.\n dropout_prob (float): The dropout probability. Default: 0.1.\n \"\"\"\n def __init__(self,\n embedding_size,\n embedding_shape,\n use_relative_positions=False,\n use_token_type=False,\n token_type_vocab_size=16,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n super(EmbeddingPostprocessor, self).__init__()\n self.use_token_type = use_token_type\n self.token_type_vocab_size = token_type_vocab_size\n self.use_one_hot_embeddings = use_one_hot_embeddings\n self.max_position_embeddings = max_position_embeddings\n self.embedding_table = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [token_type_vocab_size,\n embedding_size]),\n name='embedding_table')\n\n self.shape_flat = (-1,)\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.1, mstype.float32)\n self.array_mul = P.MatMul()\n self.reshape = P.Reshape()\n self.shape = tuple(embedding_shape)\n self.layernorm = nn.LayerNorm((embedding_size,))\n self.dropout = nn.Dropout(1 - dropout_prob)\n self.gather = P.GatherV2()\n self.use_relative_positions = use_relative_positions\n self.slice = P.StridedSlice()\n self.full_position_embeddings = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [max_position_embeddings,\n embedding_size]),\n name='full_position_embeddings')\n\n def construct(self, token_type_ids, word_embeddings):\n output = word_embeddings\n if self.use_token_type:\n flat_ids = self.reshape(token_type_ids, self.shape_flat)\n if self.use_one_hot_embeddings:\n one_hot_ids = self.one_hot(flat_ids,\n self.token_type_vocab_size, self.on_value, self.off_value)\n token_type_embeddings = self.array_mul(one_hot_ids,\n self.embedding_table)\n else:\n token_type_embeddings = self.gather(self.embedding_table, flat_ids, 0)\n token_type_embeddings = self.reshape(token_type_embeddings, self.shape)\n output += token_type_embeddings\n if not self.use_relative_positions:\n _, seq, width = self.shape\n position_embeddings = self.slice(self.full_position_embeddings, (0, 0), (seq, width), (1, 1))\n position_embeddings = self.reshape(position_embeddings, (1, seq, width))\n output += position_embeddings\n output = self.layernorm(output)\n output = self.dropout(output)\n return output\n\n\nclass BertOutput(nn.Cell):\n \"\"\"\n Apply a linear computation to hidden status and a residual computation to input.\n\n Args:\n in_channels (int): Input channels.\n out_channels (int): Output channels.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n dropout_prob (float): The dropout probability. Default: 0.1.\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. 
Default: mstype.float32.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n initializer_range=0.02,\n dropout_prob=0.1,\n compute_type=mstype.float32,\n enable_fused_layernorm=False):\n super(BertOutput, self).__init__()\n self.dense = nn.Dense(in_channels, out_channels,\n weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)\n self.dropout = nn.Dropout(1 - dropout_prob)\n self.dropout_prob = dropout_prob\n self.add = P.TensorAdd()\n self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type)\n self.cast = P.Cast()\n\n def construct(self, hidden_status, input_tensor):\n output = self.dense(hidden_status)\n output = self.dropout(output)\n output = self.add(output, input_tensor)\n output = self.layernorm(output)\n return output\n\n\nclass RelaPosMatrixGenerator(nn.Cell):\n \"\"\"\n Generates matrix of relative positions between inputs.\n\n Args:\n length (int): Length of one dim for the matrix to be generated.\n max_relative_position (int): Max value of relative position.\n \"\"\"\n def __init__(self, length, max_relative_position):\n super(RelaPosMatrixGenerator, self).__init__()\n self._length = length\n self._max_relative_position = max_relative_position\n self._min_relative_position = -max_relative_position\n self.range_length = -length + 1\n\n self.tile = P.Tile()\n self.range_mat = P.Reshape()\n self.sub = P.Sub()\n self.expanddims = P.ExpandDims()\n self.cast = P.Cast()\n\n def construct(self):\n range_vec_row_out = self.cast(F.tuple_to_array(F.make_range(self._length)), mstype.int32)\n range_vec_col_out = self.range_mat(range_vec_row_out, (self._length, -1))\n tile_row_out = self.tile(range_vec_row_out, (self._length,))\n tile_col_out = self.tile(range_vec_col_out, (1, self._length))\n range_mat_out = self.range_mat(tile_row_out, (self._length, self._length))\n transpose_out = self.range_mat(tile_col_out, (self._length, self._length))\n distance_mat = self.sub(range_mat_out, transpose_out)\n\n distance_mat_clipped = C.clip_by_value(distance_mat,\n self._min_relative_position,\n self._max_relative_position)\n\n # Shift values to be >=0. Each integer still uniquely identifies a\n # relative position difference.\n final_mat = distance_mat_clipped + self._max_relative_position\n return final_mat\n\n\nclass RelaPosEmbeddingsGenerator(nn.Cell):\n \"\"\"\n Generates tensor of size [length, length, depth].\n\n Args:\n length (int): Length of one dim for the matrix to be generated.\n depth (int): Size of each attention head.\n max_relative_position (int): Maxmum value of relative position.\n initializer_range (float): Initialization value of TruncatedNormal.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. 
Default: False.\n \"\"\"\n def __init__(self,\n length,\n depth,\n max_relative_position,\n initializer_range,\n use_one_hot_embeddings=False):\n super(RelaPosEmbeddingsGenerator, self).__init__()\n self.depth = depth\n self.vocab_size = max_relative_position * 2 + 1\n self.use_one_hot_embeddings = use_one_hot_embeddings\n\n self.embeddings_table = Parameter(\n initializer(TruncatedNormal(initializer_range),\n [self.vocab_size, self.depth]),\n name='embeddings_for_position')\n\n self.relative_positions_matrix = RelaPosMatrixGenerator(length=length,\n max_relative_position=max_relative_position)\n self.reshape = P.Reshape()\n self.one_hot = nn.OneHot(depth=self.vocab_size)\n self.shape = P.Shape()\n self.gather = P.GatherV2() # index_select\n self.matmul = P.BatchMatMul()\n\n def construct(self):\n relative_positions_matrix_out = self.relative_positions_matrix()\n\n # Generate embedding for each relative position of dimension depth.\n if self.use_one_hot_embeddings:\n flat_relative_positions_matrix = self.reshape(relative_positions_matrix_out, (-1,))\n one_hot_relative_positions_matrix = self.one_hot(\n flat_relative_positions_matrix)\n embeddings = self.matmul(one_hot_relative_positions_matrix, self.embeddings_table)\n my_shape = self.shape(relative_positions_matrix_out) + (self.depth,)\n embeddings = self.reshape(embeddings, my_shape)\n else:\n embeddings = self.gather(self.embeddings_table,\n relative_positions_matrix_out, 0)\n return embeddings\n\n\nclass SaturateCast(nn.Cell):\n \"\"\"\n Performs a safe saturating cast. This operation applies proper clamping before casting to prevent\n the danger that the value will overflow or underflow.\n\n Args:\n src_type (:class:`mindspore.dtype`): The type of the elements of the input tensor. Default: mstype.float32.\n dst_type (:class:`mindspore.dtype`): The type of the elements of the output tensor. Default: mstype.float32.\n \"\"\"\n def __init__(self, src_type=mstype.float32, dst_type=mstype.float32):\n super(SaturateCast, self).__init__()\n np_type = mstype.dtype_to_nptype(dst_type)\n min_type = float(np.finfo(np_type).min)\n max_type = float(np.finfo(np_type).max)\n\n self.tensor_min_type = min_type\n self.tensor_max_type = max_type\n\n self.min_op = P.Minimum()\n self.max_op = P.Maximum()\n self.cast = P.Cast()\n self.dst_type = dst_type\n\n def construct(self, x):\n out = self.max_op(x, self.tensor_min_type)\n out = self.min_op(out, self.tensor_max_type)\n return self.cast(out, self.dst_type)\n\n\nclass BertAttention(nn.Cell):\n \"\"\"\n Apply multi-headed attention from \"from_tensor\" to \"to_tensor\".\n\n Args:\n batch_size (int): Batch size of input datasets.\n from_tensor_width (int): Size of last dim of from_tensor.\n to_tensor_width (int): Size of last dim of to_tensor.\n from_seq_length (int): Length of from_tensor sequence.\n to_seq_length (int): Length of to_tensor sequence.\n num_attention_heads (int): Number of attention heads. Default: 1.\n size_per_head (int): Size of each attention head. Default: 512.\n query_act (str): Activation function for the query transform. Default: None.\n key_act (str): Activation function for the key transform. Default: None.\n value_act (str): Activation function for the value transform. Default: None.\n has_attention_mask (bool): Specifies whether to use attention mask. Default: False.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.0.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. 
Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n do_return_2d_tensor (bool): True for return 2d tensor. False for return 3d\n tensor. Default: False.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n compute_type (:class:`mindspore.dtype`): Compute type in BertAttention. Default: mstype.float32.\n \"\"\"\n def __init__(self,\n batch_size,\n from_tensor_width,\n to_tensor_width,\n from_seq_length,\n to_seq_length,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n has_attention_mask=False,\n attention_probs_dropout_prob=0.0,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n use_relative_positions=False,\n compute_type=mstype.float32):\n\n super(BertAttention, self).__init__()\n self.batch_size = batch_size\n self.from_seq_length = from_seq_length\n self.to_seq_length = to_seq_length\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.has_attention_mask = has_attention_mask\n self.use_relative_positions = use_relative_positions\n\n self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head))\n self.reshape = P.Reshape()\n self.shape_from_2d = (-1, from_tensor_width)\n self.shape_to_2d = (-1, to_tensor_width)\n weight = TruncatedNormal(initializer_range)\n units = num_attention_heads * size_per_head\n self.query_layer = nn.Dense(from_tensor_width,\n units,\n activation=query_act,\n weight_init=weight).to_float(compute_type)\n self.key_layer = nn.Dense(to_tensor_width,\n units,\n activation=key_act,\n weight_init=weight).to_float(compute_type)\n self.value_layer = nn.Dense(to_tensor_width,\n units,\n activation=value_act,\n weight_init=weight).to_float(compute_type)\n\n self.shape_from = (batch_size, from_seq_length, num_attention_heads, size_per_head)\n self.shape_to = (\n batch_size, to_seq_length, num_attention_heads, size_per_head)\n\n self.matmul_trans_b = P.BatchMatMul(transpose_b=True)\n self.multiply = P.Mul()\n self.transpose = P.Transpose()\n self.trans_shape = (0, 2, 1, 3)\n self.trans_shape_relative = (2, 0, 1, 3)\n self.trans_shape_position = (1, 2, 0, 3)\n self.multiply_data = -10000.0\n self.batch_num = batch_size * num_attention_heads\n self.matmul = P.BatchMatMul()\n\n self.softmax = nn.Softmax()\n self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)\n\n if self.has_attention_mask:\n self.expand_dims = P.ExpandDims()\n self.sub = P.Sub()\n self.add = P.TensorAdd()\n self.cast = P.Cast()\n self.get_dtype = P.DType()\n if do_return_2d_tensor:\n self.shape_return = (batch_size * from_seq_length, num_attention_heads * size_per_head)\n else:\n self.shape_return = (batch_size, from_seq_length, num_attention_heads * size_per_head)\n\n self.cast_compute_type = SaturateCast(dst_type=compute_type)\n if self.use_relative_positions:\n self._generate_relative_positions_embeddings = \\\n RelaPosEmbeddingsGenerator(length=to_seq_length,\n depth=size_per_head,\n max_relative_position=16,\n initializer_range=initializer_range,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n def construct(self, from_tensor, to_tensor, attention_mask):\n # reshape 2d/3d input tensors to 2d\n from_tensor_2d = self.reshape(from_tensor, self.shape_from_2d)\n to_tensor_2d = self.reshape(to_tensor, self.shape_to_2d)\n query_out = self.query_layer(from_tensor_2d)\n key_out = self.key_layer(to_tensor_2d)\n value_out = self.value_layer(to_tensor_2d)\n\n query_layer = 
self.reshape(query_out, self.shape_from)\n query_layer = self.transpose(query_layer, self.trans_shape)\n key_layer = self.reshape(key_out, self.shape_to)\n key_layer = self.transpose(key_layer, self.trans_shape)\n\n attention_scores = self.matmul_trans_b(query_layer, key_layer)\n\n # use_relative_position, supplementary logic\n if self.use_relative_positions:\n # 'relations_keys' = [F|T, F|T, H]\n relations_keys = self._generate_relative_positions_embeddings()\n relations_keys = self.cast_compute_type(relations_keys)\n # query_layer_t is [F, B, N, H]\n query_layer_t = self.transpose(query_layer, self.trans_shape_relative)\n # query_layer_r is [F, B * N, H]\n query_layer_r = self.reshape(query_layer_t,\n (self.from_seq_length,\n self.batch_num,\n self.size_per_head))\n # key_position_scores is [F, B * N, F|T]\n key_position_scores = self.matmul_trans_b(query_layer_r,\n relations_keys)\n # key_position_scores_r is [F, B, N, F|T]\n key_position_scores_r = self.reshape(key_position_scores,\n (self.from_seq_length,\n self.batch_size,\n self.num_attention_heads,\n self.from_seq_length))\n # key_position_scores_r_t is [B, N, F, F|T]\n key_position_scores_r_t = self.transpose(key_position_scores_r,\n self.trans_shape_position)\n attention_scores = attention_scores + key_position_scores_r_t\n\n attention_scores = self.multiply(self.scores_mul, attention_scores)\n\n if self.has_attention_mask:\n attention_mask = self.expand_dims(attention_mask, 1)\n multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), self.get_dtype(attention_scores)),\n self.cast(attention_mask, self.get_dtype(attention_scores)))\n\n adder = self.multiply(multiply_out, self.multiply_data)\n attention_scores = self.add(adder, attention_scores)\n\n attention_probs = self.softmax(attention_scores)\n attention_probs = self.dropout(attention_probs)\n\n value_layer = self.reshape(value_out, self.shape_to)\n value_layer = self.transpose(value_layer, self.trans_shape)\n context_layer = self.matmul(attention_probs, value_layer)\n\n # use_relative_position, supplementary logic\n if self.use_relative_positions:\n # 'relations_values' = [F|T, F|T, H]\n relations_values = self._generate_relative_positions_embeddings()\n relations_values = self.cast_compute_type(relations_values)\n # attention_probs_t is [F, B, N, T]\n attention_probs_t = self.transpose(attention_probs, self.trans_shape_relative)\n # attention_probs_r is [F, B * N, T]\n attention_probs_r = self.reshape(\n attention_probs_t,\n (self.from_seq_length,\n self.batch_num,\n self.to_seq_length))\n # value_position_scores is [F, B * N, H]\n value_position_scores = self.matmul(attention_probs_r,\n relations_values)\n # value_position_scores_r is [F, B, N, H]\n value_position_scores_r = self.reshape(value_position_scores,\n (self.from_seq_length,\n self.batch_size,\n self.num_attention_heads,\n self.size_per_head))\n # value_position_scores_r_t is [B, N, F, H]\n value_position_scores_r_t = self.transpose(value_position_scores_r,\n self.trans_shape_position)\n context_layer = context_layer + value_position_scores_r_t\n\n context_layer = self.transpose(context_layer, self.trans_shape)\n context_layer = self.reshape(context_layer, self.shape_return)\n\n return context_layer\n\n\nclass BertSelfAttention(nn.Cell):\n \"\"\"\n Apply self-attention.\n\n Args:\n batch_size (int): Batch size of input dataset.\n seq_length (int): Length of input sequence.\n hidden_size (int): Size of the bert encoder layers.\n num_attention_heads (int): Number of attention heads. 
Default: 12.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n use_one_hot_embeddings (bool): Specifies whether to use one_hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n compute_type (:class:`mindspore.dtype`): Compute type in BertSelfAttention. Default: mstype.float32.\n \"\"\"\n def __init__(self,\n batch_size,\n seq_length,\n hidden_size,\n num_attention_heads=12,\n attention_probs_dropout_prob=0.1,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n compute_type=mstype.float32,\n enable_fused_layernorm=False):\n super(BertSelfAttention, self).__init__()\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\"The hidden size (%d) is not a multiple of the number \"\n \"of attention heads (%d)\" % (hidden_size, num_attention_heads))\n\n self.size_per_head = int(hidden_size / num_attention_heads)\n\n self.attention = BertAttention(\n batch_size=batch_size,\n from_tensor_width=hidden_size,\n to_tensor_width=hidden_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n size_per_head=self.size_per_head,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n use_relative_positions=use_relative_positions,\n has_attention_mask=True,\n do_return_2d_tensor=True,\n compute_type=compute_type)\n\n self.output = BertOutput(in_channels=hidden_size,\n out_channels=hidden_size,\n initializer_range=initializer_range,\n dropout_prob=hidden_dropout_prob,\n compute_type=compute_type,\n enable_fused_layernorm=enable_fused_layernorm)\n self.reshape = P.Reshape()\n self.shape = (-1, hidden_size)\n\n def construct(self, input_tensor, attention_mask):\n input_tensor = self.reshape(input_tensor, self.shape)\n attention_output = self.attention(input_tensor, input_tensor, attention_mask)\n output = self.output(attention_output, input_tensor)\n return output\n\n\nclass BertEncoderCell(nn.Cell):\n \"\"\"\n Encoder cells used in BertTransformer.\n\n Args:\n batch_size (int): Batch size of input dataset.\n hidden_size (int): Size of the bert encoder layers. Default: 768.\n seq_length (int): Length of input sequence. Default: 512.\n num_attention_heads (int): Number of attention heads. Default: 12.\n intermediate_size (int): Size of intermediate layer. Default: 3072.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.02.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n hidden_act (str): Activation function. Default: \"gelu\".\n compute_type (:class:`mindspore.dtype`): Compute type in attention. 
Default: mstype.float32.\n \"\"\"\n def __init__(self,\n batch_size,\n hidden_size=768,\n seq_length=512,\n num_attention_heads=12,\n intermediate_size=3072,\n attention_probs_dropout_prob=0.02,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n hidden_act=\"gelu\",\n compute_type=mstype.float32,\n enable_fused_layernorm=False):\n super(BertEncoderCell, self).__init__()\n self.attention = BertSelfAttention(\n batch_size=batch_size,\n hidden_size=hidden_size,\n seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n hidden_dropout_prob=hidden_dropout_prob,\n use_relative_positions=use_relative_positions,\n compute_type=compute_type,\n enable_fused_layernorm=enable_fused_layernorm)\n self.intermediate = nn.Dense(in_channels=hidden_size,\n out_channels=intermediate_size,\n activation=hidden_act,\n weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)\n self.output = BertOutput(in_channels=intermediate_size,\n out_channels=hidden_size,\n initializer_range=initializer_range,\n dropout_prob=hidden_dropout_prob,\n compute_type=compute_type,\n enable_fused_layernorm=enable_fused_layernorm)\n\n def construct(self, hidden_states, attention_mask):\n # self-attention\n attention_output = self.attention(hidden_states, attention_mask)\n # feed construct\n intermediate_output = self.intermediate(attention_output)\n # add and normalize\n output = self.output(intermediate_output, attention_output)\n return output\n\n\nclass BertTransformer(nn.Cell):\n \"\"\"\n Multi-layer bert transformer.\n\n Args:\n batch_size (int): Batch size of input dataset.\n hidden_size (int): Size of the encoder layers.\n seq_length (int): Length of input sequence.\n num_hidden_layers (int): Number of hidden layers in encoder cells.\n num_attention_heads (int): Number of attention heads in encoder cells. Default: 12.\n intermediate_size (int): Size of intermediate layer in encoder cells. Default: 3072.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n hidden_act (str): Activation function used in the encoder cells. Default: \"gelu\".\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. Default: mstype.float32.\n return_all_encoders (bool): Specifies whether to return all encoders. 
Default: False.\n \"\"\"\n def __init__(self,\n batch_size,\n hidden_size,\n seq_length,\n num_hidden_layers,\n num_attention_heads=12,\n intermediate_size=3072,\n attention_probs_dropout_prob=0.1,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n hidden_act=\"gelu\",\n compute_type=mstype.float32,\n return_all_encoders=False,\n enable_fused_layernorm=False):\n super(BertTransformer, self).__init__()\n self.return_all_encoders = return_all_encoders\n\n layers = []\n for _ in range(num_hidden_layers):\n layer = BertEncoderCell(batch_size=batch_size,\n hidden_size=hidden_size,\n seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n hidden_dropout_prob=hidden_dropout_prob,\n use_relative_positions=use_relative_positions,\n hidden_act=hidden_act,\n compute_type=compute_type,\n enable_fused_layernorm=enable_fused_layernorm)\n layers.append(layer)\n\n self.layers = nn.CellList(layers)\n\n self.reshape = P.Reshape()\n self.shape = (-1, hidden_size)\n self.out_shape = (batch_size, seq_length, hidden_size)\n\n def construct(self, input_tensor, attention_mask):\n prev_output = self.reshape(input_tensor, self.shape)\n\n all_encoder_layers = ()\n for layer_module in self.layers:\n layer_output = layer_module(prev_output, attention_mask)\n prev_output = layer_output\n\n if self.return_all_encoders:\n layer_output = self.reshape(layer_output, self.out_shape)\n all_encoder_layers = all_encoder_layers + (layer_output,)\n\n if not self.return_all_encoders:\n prev_output = self.reshape(prev_output, self.out_shape)\n all_encoder_layers = all_encoder_layers + (prev_output,)\n return all_encoder_layers\n\n\nclass CreateAttentionMaskFromInputMask(nn.Cell):\n \"\"\"\n Create attention mask according to input mask.\n\n Args:\n config (Class): Configuration for BertModel.\n \"\"\"\n def __init__(self, config):\n super(CreateAttentionMaskFromInputMask, self).__init__()\n self.input_mask_from_dataset = config.input_mask_from_dataset\n self.input_mask = None\n\n if not self.input_mask_from_dataset:\n self.input_mask = initializer(\n \"ones\", [config.batch_size, config.seq_length], mstype.int32).init_data()\n\n self.cast = P.Cast()\n self.reshape = P.Reshape()\n self.shape = (config.batch_size, 1, config.seq_length)\n self.broadcast_ones = initializer(\n \"ones\", [config.batch_size, config.seq_length, 1], mstype.float32).init_data()\n self.batch_matmul = P.BatchMatMul()\n\n def construct(self, input_mask):\n if not self.input_mask_from_dataset:\n input_mask = self.input_mask\n\n input_mask = self.cast(self.reshape(input_mask, self.shape), mstype.float32)\n attention_mask = self.batch_matmul(self.broadcast_ones, input_mask)\n return attention_mask\n\n\nclass BertModel(nn.Cell):\n \"\"\"\n Bidirectional Encoder Representations from Transformers.\n\n Args:\n config (Class): Configuration for BertModel.\n is_training (bool): True for training mode. False for eval mode.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. 
Default: False.\n \"\"\"\n def __init__(self,\n config,\n is_training,\n use_one_hot_embeddings=False):\n super(BertModel, self).__init__()\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n self.input_mask_from_dataset = config.input_mask_from_dataset\n self.token_type_ids_from_dataset = config.token_type_ids_from_dataset\n self.batch_size = config.batch_size\n self.seq_length = config.seq_length\n self.hidden_size = config.hidden_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embedding_size = config.hidden_size\n self.token_type_ids = None\n\n self.last_idx = self.num_hidden_layers - 1\n output_embedding_shape = [self.batch_size, self.seq_length,\n self.embedding_size]\n\n if not self.token_type_ids_from_dataset:\n self.token_type_ids = initializer(\n \"zeros\", [self.batch_size, self.seq_length], mstype.int32).init_data()\n\n self.bert_embedding_lookup = EmbeddingLookup(\n vocab_size=config.vocab_size,\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range)\n\n self.bert_embedding_postprocessor = EmbeddingPostprocessor(\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_relative_positions=config.use_relative_positions,\n use_token_type=True,\n token_type_vocab_size=config.type_vocab_size,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=0.02,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n self.bert_encoder = BertTransformer(\n batch_size=self.batch_size,\n hidden_size=self.hidden_size,\n seq_length=self.seq_length,\n num_attention_heads=config.num_attention_heads,\n num_hidden_layers=self.num_hidden_layers,\n intermediate_size=config.intermediate_size,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range,\n hidden_dropout_prob=config.hidden_dropout_prob,\n use_relative_positions=config.use_relative_positions,\n hidden_act=config.hidden_act,\n compute_type=config.compute_type,\n return_all_encoders=True,\n enable_fused_layernorm=config.enable_fused_layernorm)\n\n self.cast = P.Cast()\n self.dtype = config.dtype\n self.cast_compute_type = SaturateCast(dst_type=config.compute_type)\n self.slice = P.StridedSlice()\n\n self.squeeze_1 = P.Squeeze(axis=1)\n self.dense = nn.Dense(self.hidden_size, self.hidden_size,\n activation=\"tanh\",\n weight_init=TruncatedNormal(config.initializer_range)).to_float(config.compute_type)\n self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)\n\n def construct(self, input_ids, token_type_ids, input_mask):\n\n # embedding\n if not self.token_type_ids_from_dataset:\n token_type_ids = self.token_type_ids\n word_embeddings, embedding_tables = self.bert_embedding_lookup(input_ids)\n embedding_output = self.bert_embedding_postprocessor(token_type_ids,\n word_embeddings)\n\n # attention mask [batch_size, seq_length, seq_length]\n attention_mask = self._create_attention_mask_from_input_mask(input_mask)\n\n # bert encoder\n encoder_output = self.bert_encoder(self.cast_compute_type(embedding_output),\n attention_mask)\n\n sequence_output = self.cast(encoder_output[self.last_idx], self.dtype)\n\n # pooler\n sequence_slice = self.slice(sequence_output,\n (0, 0, 0),\n (self.batch_size, 1, 
self.hidden_size),\n (1, 1, 1))\n first_token = self.squeeze_1(sequence_slice)\n pooled_output = self.dense(first_token)\n pooled_output = self.cast(pooled_output, self.dtype)\n\n return sequence_output, pooled_output, embedding_tables\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"\nArea under cure metric\n\"\"\"\n\nfrom sklearn.metrics import roc_auc_score\nfrom mindspore.nn.metrics import Metric\n\nclass AUCMetric(Metric):\n \"\"\"\n Area under cure metric\n \"\"\"\n\n def __init__(self):\n super(AUCMetric, self).__init__()\n self.clear()\n\n def clear(self):\n \"\"\"Clear the internal evaluation result.\"\"\"\n self.true_labels = []\n self.pred_probs = []\n\n def update(self, *inputs): # inputs\n \"\"\"Update list of predicts and labels.\"\"\"\n all_predict = inputs[1].asnumpy().flatten().tolist() # predict\n all_label = inputs[2].asnumpy().flatten().tolist() # label\n self.pred_probs.extend(all_predict)\n self.true_labels.extend(all_label)\n\n def eval(self):\n if len(self.true_labels) != len(self.pred_probs):\n raise RuntimeError(\n 'true_labels.size is not equal to pred_probs.size()')\n\n auc = roc_auc_score(self.true_labels, self.pred_probs)\n print(\"====\" * 20 + \" auc_metric end\")\n print(\"====\" * 20 + \" auc: {}\".format(auc))\n return auc\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport numpy as np\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore import context\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.api import _executor\nfrom mindspore.ops import operations as P\nfrom mindspore.parallel import set_algo_parameters\nfrom mindspore.parallel._utils import _reset_op_id as reset_op_id\nfrom tests.ut.python.ops.test_math_ops import VirtualLoss\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network):\n super(NetWithLoss, self).__init__()\n self.loss = VirtualLoss()\n self.network = network\n\n def construct(self, x, y):\n predict = self.network(x, y)\n return self.loss(predict)\n\n\ndef test_common_parameter():\n class Net(nn.Cell):\n def __init__(self):\n super().__init__()\n self.matmul1 = P.MatMul()\n self.matmul2 = P.MatMul()\n self.matmul3 = P.MatMul()\n self.weight1 = Parameter(Tensor(np.ones([64, 64]).astype(np.float16) * 0.01), \"w\", requires_grad=True)\n self.cast1 = P.Cast()\n self.cast2 = P.Cast()\n\n def construct(self, x, y):\n m1_result = self.matmul1(x, self.cast1(self.weight1, mstype.float32))\n m2_result = self.matmul2(y, self.cast2(self.weight1, mstype.float32))\n m3_result = self.matmul3(m2_result, m1_result)\n\n return m3_result\n\n size = 8\n context.set_auto_parallel_context(device_num=size, global_rank=0)\n\n set_algo_parameters(elementwise_op_strategy_follow=True)\n x = Tensor(np.ones([64, 64]), dtype=ms.float32)\n y = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = NetWithLoss(Net())\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\")\n net.set_auto_parallel()\n reset_op_id()\n\n net.set_train()\n _executor.compile(net, x, y, phase='train')\n strategies = _executor._get_shard_strategy(net)\n for (k, v) in strategies.items():\n if re.search('MatMul-op', k) is not None:\n assert v == [[8, 1], [1, 1]]\n elif re.search('Cast-op', k) is not None:\n assert v == [[1, 1]]\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\n\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\n\nDATA_DIR = [\"../data/dataset/testTFBert5Rows1/5TFDatas.data\"]\nDATA_DIR_2 = [\"../data/dataset/testTFBert5Rows2/5TFDatas.data\"]\nSCHEMA_DIR = \"../data/dataset/testTFBert5Rows1/datasetSchema.json\"\nSCHEMA_DIR_2 = \"../data/dataset/testTFBert5Rows2/datasetSchema.json\"\n\n\ndef test_rename():\n data1 = ds.TFRecordDataset(DATA_DIR_2, SCHEMA_DIR_2, shuffle=False)\n data2 = ds.TFRecordDataset(DATA_DIR_2, SCHEMA_DIR_2, shuffle=False)\n\n data2 = data2.rename(input_columns=[\"input_ids\", \"segment_ids\"], output_columns=[\"masks\", \"seg_ids\"])\n\n data = ds.zip((data1, data2))\n data = data.repeat(3)\n\n num_iter = 0\n\n for _, item in enumerate(data.create_dict_iterator(num_epochs=1, output_numpy=True)):\n logger.info(\"item[mask] is {}\".format(item[\"masks\"]))\n np.testing.assert_equal(item[\"masks\"], item[\"input_ids\"])\n logger.info(\"item[seg_ids] is {}\".format(item[\"seg_ids\"]))\n np.testing.assert_equal(item[\"segment_ids\"], item[\"seg_ids\"])\n # need to consume the data in the buffer\n num_iter += 1\n logger.info(\"Number of data in data: {}\".format(num_iter))\n assert num_iter == 15\n\n\nif __name__ == '__main__':\n logger.info('===========test Rename Repeat===========')\n test_rename()\n logger.info('\\n')\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import ms_function\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.ops import operations as P\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n out_channel = 64\n kernel_size = 7\n self.conv = P.Conv2D(out_channel,\n kernel_size,\n mode=1,\n pad_mode=\"valid\",\n pad=0,\n stride=1,\n dilation=1,\n group=1)\n self.w = Parameter(initializer(\n 'normal', [64, 3, 7, 7]), name='w')\n\n @ms_function\n def construct(self, x):\n return self.conv(x, self.w)\n\n\ndef test_net():\n x = np.random.randn(32, 3, 224, 224).astype(np.float32)\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n conv = Net()\n output = conv(Tensor(x))\n print(output.asnumpy())\n\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\n conv = Net()\n output = conv(Tensor(x))\n print(output.asnumpy())\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_pynative_embeddinglookup \"\"\"\nimport pytest\nimport numpy as np\nimport mindspore.ops.operations as op\nfrom mindspore import Tensor, context\nfrom mindspore.nn import Cell\n\ndef setup_module():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\n\nclass MetaFactory:\n def __init__(self):\n self.device_target = context.get_context('device_target')\n self.rank_size = None\n self.device_id = None\n self.global_rank_id = None\n\nclass OpsFactory(MetaFactory):\n def __init__(self, dtype=np.float16):\n super().__init__()\n self.dtype = dtype\n if self.dtype == np.float16:\n self.loss = 1e-3\n elif self.dtype == np.float32:\n self.loss = 1e-4\n elif self.dtype == np.float64:\n self.loss = 1e-5\n else:\n self.loss = 0\n\nclass EmbeddingLookup(Cell):\n def __init__(self, offset):\n super().__init__()\n self.op = op.EmbeddingLookup()\n self.offset = offset\n\n def construct(self, params, indices):\n x = self.op(params, indices, self.offset)\n return x\n\nclass EmbeddingLookupFactory(OpsFactory):\n def __init__(self, params_shape, indices_shape, offset=0, low=0, high=2, dtype=np.float32, ids_type=np.int32):\n super().__init__(dtype=dtype)\n self.input_np = np.random.randn(*params_shape).astype(dtype)\n self.indices_np = np.random.randint(low, high, size=indices_shape).astype(ids_type)\n self.offset = offset\n self.output_grad_np = None\n\n def forward_mindspore_impl(self):\n net = EmbeddingLookup(self.offset)\n out = net(Tensor(self.input_np), Tensor(self.indices_np))\n return out.asnumpy()\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_embeddinglookup_indices_outrange():\n fact = EmbeddingLookupFactory(params_shape=(2, 4), indices_shape=(2, 3), low=1, high=3, offset=10, dtype=np.int8)\n out = fact.forward_mindspore_impl()\n out_expect = np.zeros((2, 3, 4))\n np.allclose(out_expect, out)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import Cell\nfrom mindspore.ops import operations as P\n\n\nclass Net(Cell):\n def __init__(self, mul_weight, strategy1=None, strategy2=None):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.neg = P.Neg().shard(strategy2)\n self.mul_weight = Parameter(mul_weight, \"w1\")\n\n def construct(self, x, b):\n out = self.mul(x, self.mul_weight)\n out = self.neg(out)\n return out\n\n\nclass EvalNet(Cell):\n def __init__(self, network, strategy2=None):\n super().__init__()\n self.network = network\n self.relu = P.ReLU().shard(strategy2)\n\n def construct(self, x, b):\n out = self.network(x, b)\n out = self.relu(out)\n return out\n\n\n_x = Tensor(np.ones([64, 64]), dtype=ms.float32)\n_w1 = Tensor(np.ones([64, 64]), dtype=ms.float32)\n_b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n\ndef test_train_and_eval():\n context.set_context(save_graphs=True, mode=0)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16)\n strategy1 = ((4, 4), (4, 4))\n strategy2 = ((4, 4),)\n net = Net(_w1, strategy1, strategy2)\n eval_net = EvalNet(net, strategy2=strategy2)\n net.set_auto_parallel()\n net.set_train()\n _executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)\n\n eval_net.set_train(mode=False)\n eval_net.set_auto_parallel()\n _executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)\n\n context.reset_auto_parallel_context()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nimport mindspore.common.dtype as mstype\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.unique = P.Unique()\n\n def construct(self, x):\n return self.unique(x)\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_pynative_unqiue():\n x = Tensor(np.array([1, 1, 2, 2, 3, 3]), mstype.int32)\n unique = Net()\n output = unique(x)\n expect1 = np.array([1, 2, 3])\n expect2 = np.array([0, 0, 1, 1, 2, 2])\n assert (output[0].asnumpy() == expect1).all()\n assert (output[1].asnumpy() == expect2).all()\n"
] | [
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.reshape",
"numpy.array",
"numpy.sum",
"numpy.ones"
],
[
"numpy.ones"
],
[
"numpy.random.randn"
],
[
"numpy.array"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.random.randn",
"numpy.power"
],
[
"numpy.subtract",
"numpy.random.randn",
"numpy.sqrt",
"numpy.random.seed"
],
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.exp",
"numpy.finfo"
],
[
"numpy.random.randn",
"numpy.random.randint"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.random.random"
],
[
"numpy.load",
"numpy.save",
"numpy.argmax",
"numpy.equal",
"numpy.argsort",
"numpy.array"
],
[
"numpy.array"
],
[
"torch.device"
],
[
"tensorflow.io.TFRecordWriter",
"numpy.array",
"tensorflow.train.Features"
],
[
"numpy.ones"
],
[
"numpy.zeros",
"numpy.ones"
],
[
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.divide",
"numpy.random.randint"
],
[
"numpy.array"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.ones"
],
[
"numpy.random.rand",
"numpy.ones"
],
[
"numpy.ones"
],
[
"numpy.all",
"numpy.sqrt",
"numpy.random.rand"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.finfo"
],
[
"sklearn.metrics.roc_auc_score"
],
[
"numpy.ones"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.random.randn"
],
[
"numpy.random.randn",
"numpy.zeros",
"numpy.allclose",
"numpy.random.randint"
],
[
"numpy.ones"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abishekganesh72/koalas | [
"40c2e209384d078ee75d08c7681d2e6a276ab834"
] | [
"databricks/koalas/frame.py"
] | [
"#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper class for Spark DataFrame to behave similar to pandas DataFrame.\n\"\"\"\nimport re\nimport warnings\nfrom functools import partial, reduce\nfrom typing import Any, Optional, List, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \\\n is_dict_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Column\nfrom pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,\n IntegerType, LongType, ShortType, StructField, StructType,\n to_arrow_type)\nfrom pyspark.sql.utils import AnalysisException\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas.utils import default_session, validate_arguments_and_invoke_function\nfrom databricks.koalas.generic import _Frame, max_display_count\nfrom databricks.koalas.metadata import Metadata\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.ml import corr\nfrom databricks.koalas.typedef import infer_pd_series_spark_type\n\n\n# These regular expression patterns are complied and defined here to avoid to compile the same\n# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.\n# Two patterns basically seek the footer string from Pandas'\nREPR_PATTERN = re.compile(r\"\\n\\n\\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\\]$\")\nREPR_HTML_PATTERN = re.compile(\n r\"\\n\\<p\\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\\<\\/p\\>\\n\\<\\/div\\>$\")\n\n\nclass DataFrame(_Frame):\n \"\"\"\n Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame\n internally.\n\n :ivar _sdf: Spark Column instance\n :ivar _metadata: Metadata related to column names and index information.\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame\n Dict can contain Series, arrays, constants, or list-like objects\n If data is a dict, argument order is maintained for Python 3.6\n and later.\n Note that if `data` is a Pandas DataFrame, other arguments should not be used.\n If `data` is a Spark DataFrame, all other arguments except `index` should not be used.\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided\n If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer\n copy : boolean, default False\n Copy data from inputs. 
Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Constructing DataFrame from Pandas DataFrame\n\n >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = ks.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df2 # doctest: +SKIP\n a b c d e\n 0 3 1 4 9 8\n 1 4 8 4 8 4\n 2 7 6 5 6 7\n 3 8 7 9 1 0\n 4 2 5 4 3 9\n \"\"\"\n def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):\n if isinstance(data, pd.DataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n self._init_from_pandas(data)\n elif isinstance(data, spark.DataFrame):\n assert columns is None\n assert dtype is None\n assert not copy\n self._init_from_spark(data, index)\n else:\n pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)\n self._init_from_pandas(pdf)\n\n def _init_from_pandas(self, pdf):\n metadata = Metadata.from_pandas(pdf)\n reset_index = pdf.reset_index()\n reset_index.columns = metadata.columns\n schema = StructType([StructField(name, infer_pd_series_spark_type(col),\n nullable=bool(col.isnull().any()))\n for name, col in reset_index.iteritems()])\n for name, col in reset_index.iteritems():\n dt = col.dtype\n if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):\n continue\n reset_index[name] = col.replace({np.nan: None})\n self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),\n metadata)\n\n def _init_from_spark(self, sdf, metadata=None):\n self._sdf = sdf\n if metadata is None:\n self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames())\n else:\n self._metadata = metadata\n\n @property\n def _index_columns(self):\n return [self._sdf.__getitem__(field)\n for field in self._metadata.index_columns]\n\n def _reduce_for_stat_function(self, sfun):\n \"\"\"\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n :param sfun: either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n \"\"\"\n from inspect import signature\n exprs = []\n num_args = len(signature(sfun).parameters)\n for col in self.columns:\n col_sdf = self._sdf[col]\n col_type = self._sdf.schema[col].dataType\n if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):\n # Stat functions cannot be used with boolean values by default\n # Thus, cast to integer (true to 1 and false to 0)\n # Exclude the min and max methods though since those work with booleans\n col_sdf = col_sdf.cast('integer')\n if num_args == 1:\n # Only pass in the column if sfun accepts only one arg\n col_sdf = sfun(col_sdf)\n else: # must be 2\n assert num_args == 2\n # Pass in both the column and its data type if sfun accepts two args\n col_sdf = sfun(col_sdf, col_type)\n exprs.append(col_sdf.alias(col))\n\n sdf = self._sdf.select(*exprs)\n pdf = sdf.toPandas()\n assert 
len(pdf) == 1, (sdf, pdf)\n row = pdf.iloc[0]\n row.name = None\n return row # Return first row as a Series\n\n def corr(self, method='pearson'):\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : pandas.DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between Koalas and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. Koalas will return an error.\n * Koalas doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n \"\"\"\n return corr(self, method)\n\n def iteritems(self):\n \"\"\"\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n \"\"\"\n cols = list(self.columns)\n return list((col_name, self[col_name]) for col_name in cols)\n\n def to_clipboard(self, excel=True, sep=None, **kwargs):\n \"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... 
# A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... # 6, 7\n \"\"\"\n\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)\n\n def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,\n na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,\n justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',\n bold_rows=True, classes=None, escape=True, notebook=False, border=None,\n table_id=None, render_links=False):\n \"\"\"\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. 
Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with Pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n kdf = self.head(max_rows)\n else:\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)\n\n def to_string(self, buf=None, columns=None, col_space=None, header=True,\n index=True, na_rep='NaN', formatters=None, float_format=None,\n sparsify=None, index_names=True, justify=None,\n max_rows=None, max_cols=None, show_dimensions=False,\n decimal='.', line_width=None):\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. 
',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n kdf = self.head(max_rows)\n else:\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)\n\n def to_dict(self, orient='dict', into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting Pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... 
columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \\\n('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(<class 'list'>, {'col..., 'col...}), \\\ndefaultdict(<class 'list'>, {'col..., 'col...})]\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)\n\n def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,\n na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,\n bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,\n decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. 
List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE\n '\\\\begin{tabular}{lll}\\n\\\\toprule\\n name & mask & weapon\n \\\\\\\\\\n\\\\midrule\\n Raphael & red & sai \\\\\\\\\\n Donatello &\n purple & bo staff \\\\\\\\\\n\\\\bottomrule\\n\\\\end{tabular}\\n'\n \"\"\"\n\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)\n\n @property\n def index(self):\n \"\"\"The index (row labels) Column of the DataFrame.\n\n Currently supported only when the DataFrame has a single index.\n \"\"\"\n from databricks.koalas.series import Series\n if len(self._metadata.index_map) != 1:\n raise KeyError('Currently supported only when the DataFrame has a single index.')\n return Series(self._index_columns[0], anchor=self, index=[])\n\n def set_index(self, keys, drop=True, append=False, inplace=False):\n \"\"\"Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). 
The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. Here, \"array\"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ks.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]},\n ... columns=['month', 'year', 'sale'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n \"\"\"\n if isinstance(keys, str):\n keys = [keys]\n else:\n keys = list(keys)\n for key in keys:\n if key not in self.columns:\n raise KeyError(key)\n\n if drop:\n data_columns = [column for column in self._metadata.data_columns if column not in keys]\n else:\n data_columns = self._metadata.data_columns\n if append:\n index_map = self._metadata.index_map + [(column, column) for column in keys]\n else:\n index_map = [(column, column) for column in keys]\n\n metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)\n\n # Sync Spark's columns as well.\n sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])\n\n if inplace:\n self._metadata = metadata\n self._sdf = sdf\n else:\n kdf = self.copy()\n kdf._metadata = metadata\n kdf._sdf = sdf\n return kdf\n\n def reset_index(self, level=None, drop=False, inplace=False):\n \"\"\"Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ks.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... 
columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, Koalas\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n \"\"\"\n # TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301\n if len(self._metadata.index_map) == 0:\n raise NotImplementedError('Can\\'t reset index because there is no index.')\n\n multi_index = len(self._metadata.index_map) > 1\n\n def rename(index):\n if multi_index:\n return 'level_{}'.format(index)\n else:\n if 'index' not in self._metadata.data_columns:\n return 'index'\n else:\n return 'level_{}'.format(index)\n\n if level is None:\n new_index_map = [(column, name if name is not None else rename(i))\n for i, (column, name) in enumerate(self._metadata.index_map)]\n index_map = []\n else:\n if isinstance(level, (int, str)):\n level = [level]\n level = list(level)\n\n if all(isinstance(l, int) for l in level):\n for lev in level:\n if lev >= len(self._metadata.index_map):\n raise IndexError('Too many levels: Index has only {} level, not {}'\n .format(len(self._metadata.index_map), lev + 1))\n idx = level\n elif all(isinstance(lev, str) for lev in level):\n idx = []\n for l in level:\n try:\n i = self._metadata.index_columns.index(l)\n idx.append(i)\n except ValueError:\n if multi_index:\n raise KeyError('Level unknown not found')\n else:\n raise KeyError('Level unknown must be same as name ({})'\n .format(self._metadata.index_columns[0]))\n else:\n raise ValueError('Level should be all int or all string.')\n idx.sort()\n\n new_index_map = []\n index_map = self._metadata.index_map.copy()\n for i in idx:\n info = self._metadata.index_map[i]\n index_column, index_name = info\n new_index_map.append(\n (index_column,\n index_name if index_name is not None else rename(index_name)))\n index_map.remove(info)\n\n if drop:\n new_index_map = []\n\n metadata = self._metadata.copy(\n data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,\n index_map=index_map)\n columns = [name for _, name in new_index_map] + self._metadata.data_columns\n if inplace:\n self._metadata = metadata\n self.columns = columns\n else:\n kdf = self.copy()\n kdf._metadata = metadata\n kdf.columns = columns\n return kdf\n\n def isnull(self):\n \"\"\"\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n Dataframe.notnull\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n \"\"\"\n kdf = self.copy()\n for name, ks in kdf.iteritems():\n kdf[name] = ks.isnull()\n return kdf\n\n isna = isnull\n\n def 
notnull(self):\n \"\"\"\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n Dataframe.isnull\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n \"\"\"\n kdf = self.copy()\n for name, ks in kdf.iteritems():\n kdf[name] = ks.notnull()\n return kdf\n\n notna = notnull\n\n def to_koalas(self):\n \"\"\"\n Converts the existing DataFrame into a Koalas DataFrame.\n\n This method is monkey-patched into Spark's DataFrame and can be used\n to convert a Spark DataFrame into a Koalas DataFrame. If running on\n an existing Koalas DataFrame, the method returns itself.\n\n If a Koalas DataFrame is converted to a Spark DataFrame and then back\n to Koalas, it will lose the index information and the original index\n will be turned into a normal column.\n\n See Also\n --------\n DataFrame.to_spark\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> spark_df = df.to_spark()\n >>> spark_df\n DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]\n\n >>> kdf = spark_df.to_koalas()\n >>> kdf\n __index_level_0__ col1 col2\n 0 0 1 3\n 1 1 2 4\n\n Calling to_koalas on a Koalas DataFrame simply returns itself.\n\n >>> df.to_koalas()\n col1 col2\n 0 1 3\n 1 2 4\n \"\"\"\n if isinstance(self, DataFrame):\n return self\n else:\n return DataFrame(self)\n\n def to_spark(self):\n \"\"\"\n Return the current DataFrame as a Spark DataFrame.\n\n See Also\n --------\n DataFrame.to_koalas\n \"\"\"\n return self._sdf\n\n def to_pandas(self):\n \"\"\"\n Return a Pandas DataFrame.\n\n .. note:: This method should only be used if the resulting Pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... 
columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n \"\"\"\n sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])\n pdf = sdf.toPandas()\n if len(pdf) == 0 and len(sdf.schema) > 0:\n # TODO: push to OSS\n pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()\n for field in sdf.schema})\n\n index_columns = self._metadata.index_columns\n if len(index_columns) > 0:\n append = False\n for index_field in index_columns:\n drop = index_field not in self._metadata.data_columns\n pdf = pdf.set_index(index_field, drop=drop, append=append)\n append = True\n pdf = pdf[self._metadata.data_columns]\n\n index_names = self._metadata.index_names\n if len(index_names) > 0:\n if isinstance(pdf.index, pd.MultiIndex):\n pdf.index.names = index_names\n else:\n pdf.index.name = index_names[0]\n return pdf\n\n # Alias to maintain backward compatibility with Spark\n toPandas = to_pandas\n\n def assign(self, **kwargs):\n \"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though Koalas doesn't check it).\n If the values are not callable, (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15)\n >>> assigned[['temp_c', 'temp_f', 'temp_k']]\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n Koalas. 
In Koalas, all items are computed first, and then assigned.\n \"\"\"\n from databricks.koalas.series import Series\n for k, v in kwargs.items():\n if not (isinstance(v, (Series, spark.Column)) or\n callable(v) or pd.api.types.is_scalar(v)):\n raise TypeError(\"Column assignment doesn't support type \"\n \"{0}\".format(type(v).__name__))\n if callable(v):\n kwargs[k] = v(self)\n\n pairs = list(kwargs.items())\n sdf = self._sdf\n for (name, c) in pairs:\n if isinstance(c, Series):\n sdf = sdf.withColumn(name, c._scol)\n elif isinstance(c, Column):\n sdf = sdf.withColumn(name, c)\n else:\n sdf = sdf.withColumn(name, F.lit(c))\n\n data_columns = self._metadata.data_columns\n metadata = self._metadata.copy(\n data_columns=(data_columns +\n [name for name, _ in pairs if name not in data_columns]))\n return DataFrame(sdf, metadata)\n\n def to_records(self, index=True, convert_datetime64=None,\n column_dtypes=None, index_dtypes=None):\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n convert_datetime64 : bool, default None\n Whether to convert the index to datetime.datetime if it is a\n DatetimeIndex.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... 
index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Specification of dtype for columns is new in Pandas 0.24.0.\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"}) # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n Specification of dtype for index is new in Pandas 0.24.0.\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\") # doctest: +SKIP\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n args = locals()\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)\n\n def copy(self) -> 'DataFrame':\n \"\"\"\n Make a copy of this object's indices and data.\n\n Returns\n -------\n copy : DataFrame\n \"\"\"\n return DataFrame(self._sdf, self._metadata.copy())\n\n def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):\n \"\"\"\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ks.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [None, 'Batmobile', 'Bullwhip'],\n ... \"born\": [None, \"1940-04-25\", None]},\n ... 
columns=['name', 'toy', 'born'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'born'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n if axis == 0 or axis == 'index':\n if subset is not None:\n if isinstance(subset, str):\n columns = [subset]\n else:\n columns = list(subset)\n invalids = [column for column in columns\n if column not in self._metadata.data_columns]\n if len(invalids) > 0:\n raise KeyError(invalids)\n else:\n columns = list(self.columns)\n\n cnt = reduce(lambda x, y: x + y,\n [F.when(self[column].notna()._scol, 1).otherwise(0)\n for column in columns],\n F.lit(0))\n if thresh is not None:\n pred = cnt >= F.lit(int(thresh))\n elif how == 'any':\n pred = cnt == F.lit(len(columns))\n elif how == 'all':\n pred = cnt > F.lit(0)\n else:\n if how is not None:\n raise ValueError('invalid how option: {h}'.format(h=how))\n else:\n raise TypeError('must specify how or thresh')\n\n sdf = self._sdf.filter(pred)\n if inplace:\n self._sdf = sdf\n else:\n return DataFrame(sdf, self._metadata.copy())\n\n else:\n raise NotImplementedError(\"dropna currently only works for axis=0 or axis='index'\")\n\n def fillna(self, value=None, axis=None, inplace=False):\n \"\"\"Fill NA/NaN values.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ks.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n \"\"\"\n if axis is None:\n axis = 0\n if not (axis == 0 or axis == \"index\"):\n raise NotImplementedError(\"fillna currently only works for axis=0 or axis='index'\")\n\n if value is None:\n raise ValueError('Currently must specify value')\n if not isinstance(value, (float, int, str, bool, dict, pd.Series)):\n raise TypeError(\"Unsupported type %s\" % type(value))\n if isinstance(value, pd.Series):\n value = value.to_dict()\n if isinstance(value, dict):\n for v in value.values():\n if not isinstance(v, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(v))\n\n sdf = self._sdf.fillna(value)\n if inplace:\n self._sdf = sdf\n else:\n return DataFrame(sdf, self._metadata.copy())\n\n def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \\\n -> 'DataFrame':\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with \"TypeError: '<=' not supported\n between instances of 'str' and 'int'\" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n \"\"\"\n if is_list_like(lower) or is_list_like(upper):\n raise ValueError(\"List-like value are not supported for 'lower' and 'upper' at the \" +\n \"moment\")\n\n if lower is None and upper is None:\n return self\n\n sdf = self._sdf\n\n numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,\n ShortType)\n numeric_columns = [c for c in self.columns\n if isinstance(sdf.schema[c].dataType, numeric_types)]\n nonnumeric_columns = [c for c in self.columns\n if not isinstance(sdf.schema[c].dataType, numeric_types)]\n\n if lower is not None:\n sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)\n for c in numeric_columns] + nonnumeric_columns)\n if upper is not None:\n sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)\n for c in numeric_columns] + nonnumeric_columns)\n\n # Restore initial column order\n sdf = sdf.select(list(self.columns))\n\n return ks.DataFrame(sdf)\n\n def head(self, n=5):\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. 
It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n\n return DataFrame(self._sdf.limit(n), self._metadata.copy())\n\n @property\n def columns(self):\n \"\"\"The column labels of the DataFrame.\"\"\"\n return pd.Index(self._metadata.data_columns)\n\n @columns.setter\n def columns(self, names):\n old_names = self._metadata.data_columns\n if len(old_names) != len(names):\n raise ValueError(\n \"Length mismatch: Expected axis has %d elements, new values have %d elements\"\n % (len(old_names), len(names)))\n sdf = self._sdf.select(self._metadata.index_columns +\n [self[old_name]._scol.alias(new_name)\n for (old_name, new_name) in zip(old_names, names)])\n self._sdf = sdf\n self._metadata = self._metadata.copy(data_columns=names)\n\n @property\n def dtypes(self):\n \"\"\"Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ks.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n \"\"\"\n return pd.Series([self[col].dtype for col in self._metadata.data_columns],\n index=self._metadata.data_columns)\n\n def count(self):\n \"\"\"\n Count non-NA cells for each column.\n\n The values `None`, `NaN` are considered NA.\n\n Returns\n -------\n pandas.Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = ks.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... \"Single\": [False, True, True, True, False]},\n ... 
columns=[\"Person\", \"Age\", \"Single\"])\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n \"\"\"\n return self._reduce_for_stat_function(_Frame._count_expr)\n\n def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):\n \"\"\"\n Drop specified labels from columns.\n\n Remove columns by specifying label names and axis=1 or columns.\n When specifying both labels and columns, only labels will be dropped.\n Removing rows is yet to be implemented.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {1 or 'columns'}, default 1\n .. dropna currently only works for axis=1 'columns'\n axis=0 is yet to be implemented.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n\n >>> df.drop('x', axis=1)\n y z w\n 0 3 5 7\n 1 4 6 8\n\n >>> df.drop(['y', 'z'], axis=1)\n x w\n 0 1 7\n 1 2 8\n\n >>> df.drop(columns=['y', 'z'])\n x w\n 0 1 7\n 1 2 8\n\n Notes\n -----\n Currently only axis = 1 is supported in this function,\n axis = 0 is yet to be implemented.\n \"\"\"\n if labels is not None:\n axis = self._validate_axis(axis)\n if axis == 1:\n return self.drop(columns=labels)\n raise NotImplementedError(\"Drop currently only works for axis=1\")\n elif columns is not None:\n if isinstance(columns, str):\n columns = [columns]\n sdf = self._sdf.drop(*columns)\n metadata = self._metadata.copy(\n data_columns=[column for column in self.columns if column not in columns]\n )\n return DataFrame(sdf, metadata)\n else:\n raise ValueError(\"Need to specify at least one of 'labels' or 'columns'\")\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n\n Examples\n --------\n >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},\n ... columns=['x', 'y', 'z'])\n >>> df\n x y z\n 0 0 a a\n 1 1 b b\n 2 2 b b\n\n >>> df.get('x')\n 0 0\n 1 1\n 2 2\n Name: x, dtype: int64\n\n >>> df.get(['x', 'y'])\n x y\n 0 0 a\n 1 1 b\n 2 2 b\n \"\"\"\n try:\n return self._pd_getitem(key)\n except (KeyError, ValueError, IndexError):\n return default\n\n def sort_values(self, by, ascending=True, inplace=False, na_position='last'):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ks.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... 
columns=['col1', 'col2', 'col3'])\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 None 8 4\n 3 D 7 2\n 4 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 4 C 4 3\n 3 D 7 2\n 2 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 3 D 7 2\n 4 C 4 3\n 1 B 9 9\n 0 A 2 0\n 2 None 8 4\n\n Sort by multiple columns\n\n >>> df = ks.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n \"\"\"\n if isinstance(by, str):\n by = [by]\n if isinstance(ascending, bool):\n ascending = [ascending] * len(by)\n if len(ascending) != len(by):\n raise ValueError('Length of ascending ({}) != length of by ({})'\n .format(len(ascending), len(by)))\n if na_position not in ('first', 'last'):\n raise ValueError(\"invalid na_position: '{}'\".format(na_position))\n\n # Mapper: Get a spark column function for (ascending, na_position) combination\n # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.\n mapper = {\n (True, 'first'): lambda x: Column(getattr(x._jc, \"asc_nulls_first\")()),\n (True, 'last'): lambda x: Column(getattr(x._jc, \"asc_nulls_last\")()),\n (False, 'first'): lambda x: Column(getattr(x._jc, \"desc_nulls_first\")()),\n (False, 'last'): lambda x: Column(getattr(x._jc, \"desc_nulls_last\")()),\n }\n by = [mapper[(asc, na_position)](self[colname]._scol)\n for colname, asc in zip(by, ascending)]\n kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())\n if inplace:\n self._sdf = kdf._sdf\n self._metadata = kdf._metadata\n else:\n return kdf\n\n # TODO: add keep = First\n def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in Pandas.\n In Koalas, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 
'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(n=3, columns='X')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n >>> df.nlargest(n=3, columns=['Y', 'X'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n \"\"\"\n return self.sort_values(by=columns, ascending=False).head(n=n)\n\n # TODO: add keep = First\n def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n In Koalas, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"a\".\n\n >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the largest values in column \"a\" and then \"c\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n \"\"\"\n return self.sort_values(by=columns, ascending=True).head(n=n)\n\n def isin(self, values):\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... 
columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n \"\"\"\n if isinstance(values, (pd.DataFrame, pd.Series)):\n raise NotImplementedError(\"DataFrame and Series are not supported\")\n if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):\n raise AttributeError(\n \"'DataFrame' object has no attribute %s\"\n % (set(values.keys()).difference(self.columns)))\n\n _select_columns = self._metadata.index_columns\n if isinstance(values, dict):\n for col in self.columns:\n if col in values:\n _select_columns.append(self[col]._scol.isin(values[col]).alias(col))\n else:\n _select_columns.append(F.lit(False).alias(col))\n elif is_list_like(values):\n _select_columns += [\n self[col]._scol.isin(list(values)).alias(col) for col in self.columns]\n else:\n raise TypeError('Values should be iterable, Series, DataFrame or dict.')\n\n return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())\n\n def pipe(self, func, *args, **kwargs):\n r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the DataFrame.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the DataFrames.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. For example, given\n\n >>> df = ks.DataFrame({'category': ['A', 'A', 'B'],\n ... 'col1': [1, 2, 3],\n ... 'col2': [4, 5, 6]},\n ... columns=['category', 'col1', 'col2'])\n >>> def keep_category_a(df):\n ... return df[df['category'] == 'A']\n >>> def add_one(df, column):\n ... return df.assign(col3=df[column] + 1)\n >>> def multiply(df, column1, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n instead of writing\n\n >>> multiply(add_one(keep_category_a(df), column=\"col1\"), column1=\"col2\", column2=\"col3\")\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n You can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe(multiply, column1=\"col2\", column2=\"col3\")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``df``:\n\n >>> def multiply_2(column1, df, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n Then you can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe((multiply_2, 'df'), column1=\"col2\", column2=\"col3\")\n ... 
)\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n \"\"\"\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError('%s is both the pipe target and a keyword '\n 'argument' % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self), len(self.columns)\n\n def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None,\n left_index: bool = False, right_index: bool = False,\n suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':\n \"\"\"\n Merge DataFrame objects with a database-style join.\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’\n\n left: use only keys from left frame, similar to a SQL left outer join; preserve key\n order.\n right: use only keys from right frame, similar to a SQL right outer join; preserve key\n order.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n preserve the order of the left keys.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. 
Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n Examples\n --------\n >>> left_kdf = ks.DataFrame({'A': [1, 2]})\n >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True)\n A B\n 0 2 x\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')\n A B\n 0 1 None\n 1 2 x\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')\n A B\n 0 2.0 x\n 1 NaN y\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n \"\"\"\n if on is None and not left_index and not right_index:\n raise ValueError(\"At least 'on' or 'left_index' and 'right_index' have to be set\")\n if on is not None and (left_index or right_index):\n raise ValueError(\"Only 'on' or 'left_index' and 'right_index' can be set\")\n\n if how == 'full':\n warnings.warn(\"Warning: While Koalas will accept 'full', you should use 'outer' \" +\n \"instead to be compatible with the pandas merge API\", UserWarning)\n if how == 'outer':\n # 'outer' in pandas equals 'full' in Spark\n how = 'full'\n if how not in ('inner', 'left', 'right', 'full'):\n raise ValueError(\"The 'how' parameter has to be amongst the following values: \",\n \"['inner', 'left', 'right', 'outer']\")\n\n if on is None:\n # FIXME Move index string to constant?\n on = '__index_level_0__'\n\n left_table = self._sdf.alias('left_table')\n right_table = right._sdf.alias('right_table')\n\n # Unpack suffixes tuple for convenience\n left_suffix = suffixes[0]\n right_suffix = suffixes[1]\n\n # Append suffixes to columns with the same name to avoid conflicts later\n duplicate_columns = list(self.columns & right.columns)\n if duplicate_columns:\n for duplicate_column_name in duplicate_columns:\n left_table = left_table.withColumnRenamed(duplicate_column_name,\n duplicate_column_name + left_suffix)\n right_table = right_table.withColumnRenamed(duplicate_column_name,\n duplicate_column_name + right_suffix)\n\n join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns\n else left_table[on + left_suffix] == right_table[on + right_suffix])\n joined_table = left_table.join(right_table, join_condition, how=how)\n\n if on in duplicate_columns:\n # Merge duplicate key columns\n joined_table = joined_table.withColumnRenamed(on + left_suffix, on)\n joined_table = joined_table.drop(on + right_suffix)\n\n # Remove auxiliary index\n # FIXME Move index string to constant?\n joined_table = joined_table.drop('__index_level_0__')\n\n kdf = DataFrame(joined_table)\n return kdf\n\n def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,\n random_state: Optional[int] = None) -> 'DataFrame':\n \"\"\"\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifing the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. 
The\n result set depends on not only the seed, but also how the data is distributed across\n machines and to some extent network randomness when shuffle operations are involved. Even\n in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n \"\"\"\n # Note: we don't run any of the doctests because the result can change depending on the\n # system's core count.\n if n is not None:\n raise NotImplementedError(\"Function sample currently does not support specifying \"\n \"exact number of items to return. Use frac instead.\")\n\n if frac is None:\n raise ValueError(\"frac must be specified.\")\n\n sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)\n return DataFrame(sdf, self._metadata.copy())\n\n def astype(self, dtype) -> 'DataFrame':\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. 
Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n \"\"\"\n results = []\n if is_dict_like(dtype):\n for col_name in dtype.keys():\n if col_name not in self.columns:\n raise KeyError('Only a column name can be used for the '\n 'key in a dtype mappings argument.')\n for col_name, col in self.iteritems():\n if col_name in dtype:\n results.append(col.astype(dtype=dtype[col_name]))\n else:\n results.append(col)\n else:\n for col_name, col in self.iteritems():\n results.append(col.astype(dtype=dtype))\n sdf = self._sdf.select(\n self._metadata.index_columns + list(map(lambda ser: ser._scol, results)))\n return DataFrame(sdf, self._metadata.copy())\n\n def _pd_getitem(self, key):\n from databricks.koalas.series import Series\n if key is None:\n raise KeyError(\"none key\")\n if isinstance(key, str):\n try:\n return Series(self._sdf.__getitem__(key), anchor=self,\n index=self._metadata.index_map)\n except AnalysisException:\n raise KeyError(key)\n if np.isscalar(key) or isinstance(key, (tuple, str)):\n raise NotImplementedError(key)\n elif isinstance(key, slice):\n return self.loc[key]\n\n if isinstance(key, (pd.Series, np.ndarray, pd.Index)):\n raise NotImplementedError(key)\n if isinstance(key, list):\n return self.loc[:, key]\n if isinstance(key, DataFrame):\n # TODO Should not implement alignment, too dangerous?\n return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)\n if isinstance(key, Series):\n # TODO Should not implement alignment, too dangerous?\n # It is assumed to be only a filter, otherwise .loc should be used.\n bcol = key._scol.cast(\"boolean\")\n return DataFrame(self._sdf.filter(bcol), self._metadata.copy())\n raise NotImplementedError(key)\n\n def __repr__(self):\n pdf = self.head(max_display_count + 1).to_pandas()\n pdf_length = len(pdf)\n repr_string = repr(pdf.iloc[:max_display_count])\n if pdf_length > max_display_count:\n match = REPR_PATTERN.search(repr_string)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n footer = (\"\\n\\n[Showing only the first {nrows} rows x {ncols} columns]\"\n .format(nrows=nrows, ncols=ncols))\n return REPR_PATTERN.sub(footer, repr_string)\n return repr_string\n\n def _repr_html_(self):\n pdf = self.head(max_display_count + 1).to_pandas()\n pdf_length = len(pdf)\n repr_html = pdf[:max_display_count]._repr_html_()\n if pdf_length > max_display_count:\n match = REPR_HTML_PATTERN.search(repr_html)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n by = chr(215)\n footer = ('\\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\\n</div>'\n .format(rows=nrows,\n by=by,\n cols=ncols))\n return REPR_HTML_PATTERN.sub(footer, repr_html)\n return repr_html\n\n def __getitem__(self, key):\n return self._pd_getitem(key)\n\n def __setitem__(self, key, 
value):\n from databricks.koalas.series import Series\n # For now, we don't support realignment against different dataframes.\n # This is too expensive in Spark.\n # Are we assigning against a column?\n if isinstance(value, Series):\n assert value._kdf is self, \\\n \"Cannot combine column argument because it comes from a different dataframe\"\n if isinstance(key, (tuple, list)):\n assert isinstance(value.schema, StructType)\n field_names = value.schema.fieldNames()\n kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})\n else:\n kdf = self.assign(**{key: value})\n\n self._sdf = kdf._sdf\n self._metadata = kdf._metadata\n\n def __getattr__(self, key: str) -> Any:\n from databricks.koalas.series import Series\n if key.startswith(\"__\") or key.startswith(\"_pandas_\") or key.startswith(\"_spark_\"):\n raise AttributeError(key)\n if hasattr(_MissingPandasLikeDataFrame, key):\n property_or_func = getattr(_MissingPandasLikeDataFrame, key)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self) # type: ignore\n else:\n return partial(property_or_func, self)\n return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)\n\n def __len__(self):\n return self._sdf.count()\n\n def __dir__(self):\n fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]\n return super(DataFrame, self).__dir__() + fields\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, 'index', 'columns', None):\n raise ValueError('No axis named {0}'.format(axis))\n # convert to numeric axis\n return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)\n\n\ndef _reduce_spark_multi(sdf, aggs):\n \"\"\"\n Performs a reduction on a dataframe, the functions being known sql aggregate functions.\n \"\"\"\n assert isinstance(sdf, spark.DataFrame)\n sdf0 = sdf.agg(*aggs)\n l = sdf0.head(2)\n assert len(l) == 1, (sdf, l)\n row = l[0]\n l2 = list(row)\n assert len(l2) == len(aggs), (row, l2)\n return l2\n"
] | [
[
"pandas.api.types.is_datetime64_dtype",
"pandas.Series",
"pandas.api.types.is_scalar",
"pandas.Index",
"pandas.DataFrame",
"pandas.api.types.is_dict_like",
"pandas.api.types.is_list_like",
"pandas.api.types.is_datetime64tz_dtype",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24"
],
"scipy": [],
"tensorflow": []
}
] |
paalge/scikit-image | [
"f3c4b88b0610242b033449fd38c1118475f96a73"
] | [
"doc/examples/transform/plot_pyramid.py"
] | [
"\"\"\"\n====================\nBuild image pyramids\n====================\n\nThe ``pyramid_gaussian`` function takes an image and yields successive images\nshrunk by a constant scale factor. Image pyramids are often used, e.g., to\nimplement algorithms for denoising, texture discrimination, and scale-\ninvariant detection.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.transform import pyramid_gaussian\n\n\nimage = data.astronaut()\nrows, cols, dim = image.shape\npyramid = tuple(pyramid_gaussian(image, downscale=2))\n\ncomposite_image = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)\n\ncomposite_image[:rows, :cols, :] = pyramid[0]\n\ni_row = 0\nfor p in pyramid[1:]:\n n_rows, n_cols = p.shape[:2]\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\n i_row += n_rows\n\nfig, ax = plt.subplots()\nax.imshow(composite_image)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RajArPatra/Improvement-semantic-segmentation-using-clustring-and-class-voating | [
"1e4b5fa5ccc462d88a68f3c88c8af31fa3f14b8b"
] | [
"deeplabv3/datahandler.py"
] | [
"from torch.utils.data import Dataset, DataLoader\nimport glob\nimport os\nimport numpy as np\nimport cv2\nimport torch\nfrom torchvision import transforms, utils\nfrom skimage.transform import resize\n\n\nclass SegDataset(Dataset):\n \"\"\"Segmentation Dataset\"\"\"\n\n def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'):\n \"\"\"\n Args:\n root_dir (string): Directory with all the images and should have the following structure.\n root\n --Images\n -----Img 1\n -----Img N\n --Mask\n -----Mask 1\n -----Mask N\n imageFolder (string) = 'Images' : Name of the folder which contains the Images.\n maskFolder (string) = 'Masks : Name of the folder which contains the Masks.\n transform (callable, optional): Optional transform to be applied on a sample.\n seed: Specify a seed for the train and test split\n fraction: A float value from 0 to 1 which specifies the validation split fraction\n subset: 'Train' or 'Test' to select the appropriate set.\n imagecolormode: 'rgb' or 'grayscale'\n maskcolormode: 'rgb' or 'grayscale'\n \"\"\"\n self.color_dict = {'rgb': 1, 'grayscale': 0}\n assert(imagecolormode in ['rgb', 'grayscale'])\n assert(maskcolormode in ['rgb', 'grayscale'])\n\n self.imagecolorflag = self.color_dict[imagecolormode]\n self.maskcolorflag = self.color_dict[maskcolormode]\n self.root_dir = root_dir\n self.transform = transform\n if not fraction:\n self.image_names = sorted(\n glob.glob(os.path.join(self.root_dir, imageFolder, '*')))\n self.mask_names = sorted(\n glob.glob(os.path.join(self.root_dir, maskFolder, '*')))\n else:\n assert(subset in ['Train', 'Test'])\n self.fraction = fraction\n self.image_list = np.array(\n sorted(glob.glob(os.path.join(self.root_dir, imageFolder, '*'))))\n self.mask_list = np.array(\n sorted(glob.glob(os.path.join(self.root_dir, maskFolder, '*'))))\n if seed:\n np.random.seed(seed)\n indices = np.arange(len(self.image_list))\n np.random.shuffle(indices)\n self.image_list = self.image_list[indices]\n self.mask_list = self.mask_list[indices]\n if subset == 'Train':\n self.image_names = self.image_list[:int(\n np.ceil(len(self.image_list)*(1-self.fraction)))]\n self.mask_names = self.mask_list[:int(\n np.ceil(len(self.mask_list)*(1-self.fraction)))]\n else:\n self.image_names = self.image_list[int(\n np.ceil(len(self.image_list)*(1-self.fraction))):]\n self.mask_names = self.mask_list[int(\n np.ceil(len(self.mask_list)*(1-self.fraction))):]\n\n def __len__(self):\n return len(self.image_names)\n\n def __getitem__(self, idx):\n img_name = self.image_names[idx]\n if self.imagecolorflag:\n image = cv2.imread(\n img_name, self.imagecolorflag).transpose(2, 0, 1)\n else:\n image = cv2.imread(img_name, self.imagecolorflag)\n msk_name = self.mask_names[idx]\n if self.maskcolorflag:\n mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1)\n else:\n mask = cv2.imread(msk_name, self.maskcolorflag)\n sample = {'image': image, 'mask': mask}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n# Define few transformations for the Segmentation Dataloader\n\n\nclass Resize(object):\n \"\"\"Resize image and/or masks.\"\"\"\n\n def __init__(self, imageresize, maskresize):\n self.imageresize = imageresize\n self.maskresize = maskresize\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n if len(image.shape) == 3:\n image = image.transpose(1, 2, 0)\n if len(mask.shape) == 3:\n mask = mask.transpose(1, 2, 0)\n 
mask = cv2.resize(mask, self.maskresize, cv2.INTER_AREA)\n #mask = 256 * resize(mask, (256, 256), anti_aliasing = True)\n image = cv2.resize(image, self.imageresize, cv2.INTER_AREA)\n #image = 256 * resize(image, (256, 256), anti_aliasing = True)\n if len(image.shape) == 3:\n image = image.transpose(2, 0, 1)\n if len(mask.shape) == 3:\n mask = mask.transpose(2, 0, 1)\n\n return {'image': image,\n 'mask': mask}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample, maskresize=None, imageresize=None):\n image, mask = sample['image'], sample['mask']\n if len(mask.shape) == 2:\n mask = mask.reshape((1,)+mask.shape)\n if len(image.shape) == 2:\n image = image.reshape((1,)+image.shape)\n return {'image': torch.from_numpy(image),\n 'mask': torch.from_numpy(mask)}\n\n\nclass Normalize(object):\n '''Normalize image'''\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n return {'image': image.type(torch.FloatTensor)/255,\n 'mask': mask.type(torch.FloatTensor)/255}\n\n\n\n\n\ndef get_dataloader_single_folder(data_dir, imageFolder='Images', maskFolder='Masks', fraction=0.2, batch_size=4):\n \"\"\"\n Create training and testing dataloaders from a single folder.\n \"\"\"\n data_transforms = {\n 'Train': transforms.Compose([Resize((256, 256), (256, 256)), ToTensor(), Normalize()]),\n 'Test': transforms.Compose([Resize((256,256), (256, 256)), ToTensor(), Normalize()]),\n }\n\n image_datasets = {x: SegDataset(data_dir, imageFolder=imageFolder, maskFolder=maskFolder, seed=100, fraction=fraction, subset=x, transform=data_transforms[x])\n for x in ['Train', 'Test']}\n dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size,\n shuffle=True, num_workers=8)\n for x in ['Train', 'Test']}\n return dataloaders\n"
] | [
[
"numpy.random.shuffle",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UCL/scikit-surgeryvtk | [
"75a2cb15f976348b844fea165bddf187efa722f0"
] | [
"tests/utils/test_polydata_utils.py"
] | [
"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport vtk\nimport numpy as np\nimport sksurgeryvtk.utils.polydata_utils as pdu\nimport sksurgeryvtk.models.vtk_surface_model as vbs\n\ndef test_overlapping_bounds():\n radius_0=10.0\n radius_1=7.0\n centre_1=5.0\n radius_2=4.0\n centre_2=15.0\n radius_3=4.0\n centre_3=0.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(12)\n sphere_0.SetThetaResolution(12)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(12)\n sphere_1.SetThetaResolution(21)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n \n sphere_2 = vtk.vtkSphereSource()\n sphere_2.SetRadius(radius_2)\n sphere_2.SetPhiResolution(12)\n sphere_2.SetThetaResolution(21)\n sphere_2.SetCenter(centre_2, 0.0, 0.0)\n sphere_2.Update()\n vtk_model_2 = sphere_2.GetOutput()\n\n sphere_3 = vtk.vtkSphereSource()\n sphere_3.SetRadius(radius_3)\n sphere_3.SetPhiResolution(12)\n sphere_3.SetThetaResolution(21)\n sphere_3.SetCenter(centre_3, 0.0, 0.0)\n sphere_3.Update()\n vtk_model_3 = sphere_3.GetOutput()\n \n assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_1))\n assert (pdu.check_overlapping_bounds( vtk_model_1, vtk_model_0))\n assert (not pdu.check_overlapping_bounds( vtk_model_0, vtk_model_2))\n assert (not pdu.check_overlapping_bounds( vtk_model_2, vtk_model_0))\n assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_3))\n assert (pdu.check_overlapping_bounds( vtk_model_3, vtk_model_0))\n\ndef test_dice_overlap():\n\n radius_0=10.0\n radius_1=7.0\n centre_1=5.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(60)\n sphere_0.SetThetaResolution(60)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(60)\n sphere_1.SetThetaResolution(60)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n\n dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)\n\n np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)\n np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)\n\n #from http://mathworld.wolfram.com/Sphere-SphereIntersection.html\n cap_height_0 = ( radius_1 - radius_0 + centre_1) * ( radius_1 + radius_0 - centre_1) / (2 * centre_1)\n cap_height_1 = ( radius_0 - radius_1 + centre_1) * ( radius_0 + radius_1 - centre_1) / (2 * centre_1)\n cap_vol_0 = np.pi * cap_height_0**2 * ( 3 * radius_0 - cap_height_0) / 3\n cap_vol_1 = np.pi * cap_height_1**2 * ( 3 * radius_1 - cap_height_1) / 3\n\n analytic = cap_vol_0 + cap_vol_1\n np.testing.assert_approx_equal(volume_01, analytic, significant=2)\n\n np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)\n\ndef test_dice_no_overlap():\n\n radius_0=5.5\n radius_1=4.3\n centre_1=12.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(60)\n sphere_0.SetThetaResolution(60)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(60)\n 
sphere_1.SetThetaResolution(60)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n\n dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)\n\n np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)\n np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)\n\n analytic = 0.0\n np.testing.assert_approx_equal(volume_01, analytic, significant=2)\n\n np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)\n"
] | [
[
"numpy.testing.assert_approx_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YuHsin1998/EllSeg | [
"ff56b255f8e650856aec9af23792e105897eba5c",
"ff56b255f8e650856aec9af23792e105897eba5c",
"ff56b255f8e650856aec9af23792e105897eba5c"
] | [
"dataset_generation/ExtractSantini.py",
"extern/locating-objects-without-bboxes/object-locator/models/utils.py",
"dataset_generation/ExtractOpenEDS_seg.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 16:16:57 2019\n\n@author: rakshit\n\"\"\"\nimport os\nimport cv2\nimport argparse\nimport matplotlib\nimport numpy as np\nimport deepdish as dd\nimport scipy.io as scio\n\nprint('Extracting Santini')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int)\nparser.add_argument('--path2ds', help='Path to dataset', type=str)\nargs = parser.parse_args()\nif args.noDisp:\n noDisp = True\n print('No graphics')\nelse:\n noDisp = False\n print('Showing figures')\n\ngui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg']\nfor gui in gui_env:\n try:\n print(\"testing: {}\".format(gui))\n matplotlib.use(gui,warn=False, force=True)\n from matplotlib import pyplot as plt\n break\n except:\n continue\n\nprint(\"Using: {}\".format(matplotlib.get_backend()))\nplt.ion()\n\nargs.path2ds = '/media/rakshit/tank/Dataset'\nPATH_DIR = os.path.join(args.path2ds, 'Santini')\nPATH_DS = os.path.join(args.path2ds, 'All')\nPATH_MASTER = os.path.join(args.path2ds, 'MasterKey')\nlist_ds = ['1', '2', '3', '4', '5', '6']\n\nsc = (640.0/384.0)\nImage_counter = 0.0\nds_num = 24\n\ndef mypause(interval):\n backend = plt.rcParams['backend']\n if backend in matplotlib.rcsetup.interactive_bk:\n figManager = matplotlib._pylab_helpers.Gcf.get_active()\n if figManager is not None:\n canvas = figManager.canvas\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return\n\ndef fix_pupil_loc(p, res):\n # res: [H, W]\n p[0] = 0.5*p[0]\n p[1] = res[0] - 0.5*p[1]\n return p\n\ndef readFormattedText(path2file, ignoreLines):\n data = []\n count = 0\n f = open(path2file, 'r')\n for line in f:\n d = [int(d) for d in line.split() if d.isdigit()]\n count = count + 1\n if d and count > ignoreLines:\n data.append(d)\n f.close()\n return data\n\nfor name in list_ds:\n # Ignore the first row and column.\n # Columns: [index, p_x, p_y]\n opts = os.listdir(os.path.join(PATH_DIR, name))\n for subdir in opts:\n PATH_DATA = os.path.join(PATH_DIR, name, subdir)\n\n # Read pupil data\n Path2text = os.path.join(PATH_DATA, 'journal-{:04d}.txt'.format(int(subdir)-1))\n Path2vid = os.path.join(PATH_DATA, 'eye-{:04d}-0000.avi'.format(int(subdir)-1))\n PupilData = np.array(readFormattedText(Path2text, 2))\n VidObj = cv2.VideoCapture(Path2vid)\n\n keydict = {k:[] for k in ['pupil_loc', 'archive', 'data_type', 'resolution', 'dataset', 'subset']}\n\n # Generate empty dictionaries\n keydict['data_type'] = 0 # Only pupil center available\n keydict['resolution'] = []\n keydict['dataset'] = 'Santini'\n keydict['subset'] = '{}-{}'.format(name, subdir)\n\n # Create an empty dictionary as per agreed structure\n Data = {k:[] for k in ['Images', 'Info', 'Masks', 'Masks_noSkin', 'Fits', 'pupil_loc']}\n Data['Fits'] = {k:[] for k in ['pupil', 'pupil_norm', 'pupil_phi', 'iris', 'iris_norm', 'iris_phi']}\n\n if not noDisp:\n fig, plts = plt.subplots(1,1)\n fr_num = 0\n while(VidObj.isOpened()):\n ret, I = VidObj.read()\n if ret == True:\n\n I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n I = cv2.resize(I, (640, 480), cv2.INTER_LANCZOS4)\n\n Data['Images'].append(I)\n keydict['resolution'].append(I.shape)\n keydict['archive'].append(ds_num)\n\n pupil_loc = fix_pupil_loc(PupilData[fr_num, 10:12]*sc, I.shape)\n\n keydict['pupil_loc'].append(pupil_loc)\n Data['pupil_loc'].append(pupil_loc)\n Data['Info'].append(str(fr_num))\n fr_num+=1\n Image_counter+=1\n if not noDisp:\n if fr_num == 1:\n cI = plts.imshow(I)\n cX = 
plts.scatter(pupil_loc[0], pupil_loc[1])\n plt.show()\n plt.pause(.01)\n else:\n newLoc = np.array([pupil_loc[0], pupil_loc[1]])\n cI.set_data(I)\n cX.set_offsets(newLoc)\n mypause(0.01)\n else: # No more frames to load\n break\n\n Data['Images'] = np.stack(Data['Images'], axis=0)\n Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0)\n keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0)\n keydict['resolution'] = np.stack(keydict['resolution'], axis=0)\n keydict['archive'] = np.stack(keydict['archive'], axis=0)\n\n # Save out data\n dd.io.save(os.path.join(PATH_DS, str(ds_num)+'.h5'), Data)\n scio.savemat(os.path.join(PATH_MASTER, str(ds_num)), keydict, appendmat=True)\n ds_num=ds_num+1",
"__copyright__ = \\\n\"\"\"\nCopyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.\nAll rights reserved.\n\nThis software is covered by US patents and copyright.\nThis source code is to be used for academic research purposes only, and no commercial use is allowed.\n\nFor any questions, please contact Edward J. Delp ([email protected]) at Purdue University.\n\nLast Modified: 10/02/2019 \n\"\"\"\n__license__ = \"CC BY-NC-SA 4.0\"\n__authors__ = \"Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp\"\n__version__ = \"1.6.0\"\n\n\nimport h5py\nimport torch\nimport shutil\n\ndef save_net(fname, net):\n with h5py.File(fname, 'w') as h5f:\n for k, v in net.state_dict().items():\n h5f.create_dataset(k, data=v.cpu().numpy())\ndef load_net(fname, net):\n with h5py.File(fname, 'r') as h5f:\n for k, v in net.state_dict().items(): \n param = torch.from_numpy(np.asarray(h5f[k])) \n v.copy_(param)\n \ndef save_checkpoint(state, is_best,task_id, filename='checkpoint.pth.tar'):\n torch.save(state, task_id+filename)\n if is_best:\n shutil.copyfile(task_id+filename, task_id+'model_best.pth.tar') \n\n\n\"\"\"\nCopyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.\nAll rights reserved.\n\nThis software is covered by US patents and copyright.\nThis source code is to be used for academic research purposes only, and no commercial use is allowed.\n\nFor any questions, please contact Edward J. Delp ([email protected]) at Purdue University.\n\nLast Modified: 10/02/2019 \n\"\"\"\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 21 15:21:36 2019\n\n@author: rakshit\n\"\"\"\n# Confirmed code works perfectly. Do not display.\nimport os\nimport cv2\nimport sys\nimport json\nimport argparse\nimport matplotlib\nimport numpy as np\nimport deepdish as dd\nimport scipy.io as scio\nfrom matplotlib.patches import Ellipse\nfrom skimage.draw import ellipse as drawEllipse\n\nsys.path.append('..')\n\nfrom helperfunctions import ransac, ElliFit, my_ellipse\nfrom helperfunctions import generateEmptyStorage, getValidPoints\n\ndef mypause(interval):\n backend = plt.rcParams['backend']\n if backend in matplotlib.rcsetup.interactive_bk:\n figManager = matplotlib._pylab_helpers.Gcf.get_active()\n if figManager is not None:\n canvas = figManager.canvas\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int, default=1)\nparser.add_argument('--path2ds',\n help='Path to dataset',\n type=str,\n default='/media/rakshit/Monster/Datasets')\n\nargs = parser.parse_args()\nif args.noDisp:\n noDisp = True\n print('No graphics')\nelse:\n noDisp = False\n print('Showing figures')\n\ngui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg']\nfor gui in gui_env:\n try:\n print(\"testing: {}\".format(gui))\n matplotlib.use(gui,warn=False, force=True)\n from matplotlib import pyplot as plt\n break\n except:\n continue\n\nprint(\"Using: {}\".format(matplotlib.get_backend()))\nplt.ion()\n\nds_num = 0\nPATH_OPENEDS = os.path.join(args.path2ds, 'OpenEDS')\nPATH_DIR = os.path.join(args.path2ds, 'OpenEDS', 'Semantic_Segmentation_Dataset')\nPATH_DS = os.path.join(args.path2ds, 'All')\nPATH_MASTER = os.path.join(args.path2ds, 'MasterKey')\n\nprint('Extracting OpenEDS')\n\n# Don't append the test set.\nlistDir = os.listdir(PATH_DIR)\nlistDir.remove('test')\nfor dirCond in listDir:\n ds_name = 'OpenEDS_{}_{}'.format(dirCond, ds_num)\n\n print('Opening the {} folder'.format(dirCond))\n\n # Read UID\n path2json = os.path.join(PATH_OPENEDS, 'OpenEDS_{}_userID_mapping_to_images.json'.format(dirCond))\n im2UID = json.load(open(path2json, 'r'))\n\n PATH_IMAGES = os.path.join(PATH_DIR, dirCond, 'images')\n PATH_LABELS = os.path.join(PATH_DIR, dirCond, 'labels')\n PATH_FITS = os.path.join(PATH_DIR, dirCond, 'fits')\n listIm = os.listdir(PATH_IMAGES)\n\n Data, keydict = generateEmptyStorage(name='OpenEDS', subset=dirCond)\n\n i = 0\n if not noDisp:\n fig, plts = plt.subplots(1,1)\n\n for pData in im2UID:\n # Image number and UID for each person\n listIm = pData['semantic_segmenation_images']\n pid = int(pData['id'].replace('U', '')) - 111\n for imName_full in listIm:\n imName, _ = os.path.splitext(imName_full)\n\n # Do not save images without a proper ellipse and iris fit\n # Load image, label map and fits\n I = cv2.imread(os.path.join(PATH_IMAGES, imName_full), 0)\n LabelMat = np.load(os.path.join(PATH_LABELS, imName+'.npy'))\n\n #%% Make sure images are 640x480\n r = np.where(LabelMat)[0]\n c = int(0.5*(np.max(r) + np.min(r)))\n top, bot = (0, c+150-(c-150)) if c-150<0 else (c-150, c+150)\n\n I = I[top:bot, :]\n LabelMat = LabelMat[top:bot, :]\n I = cv2.resize(I, (640, 480), interpolation=cv2.INTER_LANCZOS4)\n LabelMat = cv2.resize(LabelMat, (640, 480), interpolation=cv2.INTER_NEAREST)\n #%%\n\n pupilPts, irisPts = getValidPoints(LabelMat)\n if np.sum(LabelMat == 3) > 150 and type(pupilPts) is not list:\n model_pupil = ransac(pupilPts, ElliFit, 15, 
40, 5e-3, 15).loop()\n pupil_fit_error = my_ellipse(model_pupil.model).verify(pupilPts)\n else:\n print('Not enough pupil points')\n model_pupil = type('model', (object, ), {})\n model_pupil.model = np.array([-1, -1, -1, -1, -1])\n pupil_fit_error = np.inf\n\n if np.sum(LabelMat == 2) > 200 and type(irisPts) is not list:\n model_iris = ransac(irisPts, ElliFit, 15, 40, 5e-3, 15).loop()\n iris_fit_error = my_ellipse(model_iris.model).verify(irisPts)\n else:\n print('Not enough iris points')\n model_iris = type('model', (object, ), {})\n model_iris.model = np.array([-1, -1, -1, -1, -1])\n model_iris.Phi = np.array([-1, -1, -1, -1, -1])\n iris_fit_error = np.inf\n\n if pupil_fit_error >= 0.1:\n print('Not recording pupil. Unacceptable fit.')\n print('Pupil fit error: {}'.format(pupil_fit_error))\n model_pupil.model = np.array([-1, -1, -1, -1, -1])\n\n if iris_fit_error >= 0.1:\n print('Not recording iris. Unacceptable fit.')\n print('Iris fit error: {}'.format(iris_fit_error))\n model_iris.model = np.array([-1, -1, -1, -1, -1])\n\n pupil_loc = model_pupil.model[:2]\n\n # Draw mask no skin\n rr, cc = drawEllipse(pupil_loc[1],\n pupil_loc[0],\n model_pupil.model[3],\n model_pupil.model[2],\n rotation=-model_pupil.model[-1])\n pupMask = np.zeros_like(I)\n pupMask[rr.clip(0, I.shape[0]-1), cc.clip(0, I.shape[1]-1)] = 1\n rr, cc = drawEllipse(model_iris.model[1],\n model_iris.model[0],\n model_iris.model[3],\n model_iris.model[2],\n rotation=-model_iris.model[-1])\n iriMask = np.zeros_like(I)\n iriMask[rr.clip(0, I.shape[0]-1), cc.clip(0, I.shape[1]-1)] = 1\n\n if (np.any(pupMask) and np.any(iriMask)) and ((pupil_fit_error<0.1) and (iris_fit_error<0.1)):\n mask_woSkin = 2*iriMask + pupMask # Iris = 2, Pupil = 3\n else:\n # Neither fit exists, mask should be -1s.\n print('Found bad mask: {}'.format(imName))\n mask_woSkin = -np.ones(I.shape)\n continue\n\n # Add model information\n keydict['archive'].append(ds_name)\n keydict['resolution'].append(I.shape)\n keydict['pupil_loc'].append(pupil_loc)\n\n # Append images and label map\n Data['Images'].append(I)\n Data['Info'].append(imName_full) # Train or valid\n Data['Masks'].append(LabelMat)\n Data['Masks_noSkin'].append(mask_woSkin)\n Data['pupil_loc'].append(pupil_loc)\n\n # Append fits\n Data['Fits']['pupil'].append(model_pupil.model)\n Data['Fits']['iris'].append(model_iris.model)\n\n keydict['Fits']['pupil'].append(model_pupil.model)\n keydict['Fits']['iris'].append(model_iris.model)\n\n if not noDisp:\n if i == 0:\n cE = Ellipse(tuple(pupil_loc),\n 2*model_pupil.model[2],\n 2*model_pupil.model[3],\n angle=np.rad2deg(model_pupil.model[4]))\n cL = Ellipse(tuple(model_iris.model[0:2]),\n 2*model_iris.model[2],\n 2*model_iris.model[3],\n np.rad2deg(model_iris.model[4]))\n cE.set_facecolor('None')\n cE.set_edgecolor((1.0, 0.0, 0.0))\n cL.set_facecolor('None')\n cL.set_edgecolor((0.0, 1.0, 0.0))\n cI = plts.imshow(I)\n cM = plts.imshow(mask_woSkin, alpha=0.5)\n plts.add_patch(cE)\n plts.add_patch(cL)\n plt.show()\n plt.pause(.01)\n else:\n cE.center = tuple(pupil_loc)\n cE.angle = np.rad2deg(model_pupil.model[4])\n cE.width = 2*model_pupil.model[2]\n cE.height = 2*model_pupil.model[3]\n cL.center = tuple(model_iris.model[0:2])\n cL.width = 2*model_iris.model[2]\n cL.height = 2*model_iris.model[3]\n cL.angle = np.rad2deg(model_iris.model[-1])\n cI.set_data(I)\n cM.set_data(mask_woSkin)\n mypause(0.01)\n i = i + 1\n print('{} images: {}'.format(dirCond, i))\n\n # Stack data\n Data['Images'] = np.stack(Data['Images'], axis=0)\n Data['Masks'] = 
np.stack(Data['Masks'], axis=0)\n Data['Masks_noSkin'] = np.stack(Data['Masks_noSkin'], axis=0)\n Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0)\n Data['Fits']['pupil'] = np.stack(Data['Fits']['pupil'], axis=0)\n Data['Fits']['iris'] = np.stack(Data['Fits']['iris'], axis=0)\n\n keydict['resolution'] = np.stack(keydict['resolution'], axis=0)\n keydict['archive'] = np.stack(keydict['archive'], axis=0)\n keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0)\n\n # Save data\n dd.io.save(os.path.join(PATH_DS, ds_name+'.h5'), Data)\n scio.savemat(os.path.join(PATH_MASTER, str(ds_name)+'.mat'), keydict, appendmat=True)\n ds_num=ds_num+1"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matplotlib.get_backend",
"matplotlib._pylab_helpers.Gcf.get_active",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
],
[
"torch.save"
],
[
"matplotlib.pyplot.pause",
"numpy.min",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.ones",
"matplotlib.get_backend",
"numpy.rad2deg",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"matplotlib._pylab_helpers.Gcf.get_active",
"numpy.array",
"matplotlib.pyplot.ion",
"numpy.where",
"numpy.sum",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anaheino/Ufo-sightings-map | [
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc",
"64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc"
] | [
"app/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py",
"app/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py",
"app/venv/lib/python2.7/site-packages/pandas/io/tests/test_parsers.py",
"app/venv/lib/python2.7/site-packages/pandas/tests/test_series.py",
"app/venv/lib/python2.7/site-packages/folium/folium.py",
"app/venv/lib/python2.7/site-packages/pandas/io/tests/test_common.py",
"app/venv/lib/python2.7/site-packages/pandas/core/frame.py",
"app/venv/lib/python2.7/site-packages/pandas/core/panel.py"
] | [
"\"\"\"\nThe arraypad module contains a group of functions to pad values onto the edges\nof an n-dimensional array.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n\n__all__ = ['pad']\n\n\n###############################################################################\n# Private utility functions.\n\n\ndef _arange_ndarray(arr, shape, axis, reverse=False):\n \"\"\"\n Create an ndarray of `shape` with increments along specified `axis`\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n shape : tuple of ints\n Shape of desired array. Should be equivalent to `arr.shape` except\n `shape[axis]` which may have any positive value.\n axis : int\n Axis to increment along.\n reverse : bool\n If False, increment in a positive fashion from 1 to `shape[axis]`,\n inclusive. If True, the bounds are the same but the order reversed.\n\n Returns\n -------\n padarr : ndarray\n Output array sized to pad `arr` along `axis`, with linear range from\n 1 to `shape[axis]` along specified `axis`.\n\n Notes\n -----\n The range is deliberately 1-indexed for this specific use case. Think of\n this algorithm as broadcasting `np.arange` to a single `axis` of an\n arbitrarily shaped ndarray.\n\n \"\"\"\n initshape = tuple(1 if i != axis else shape[axis]\n for (i, x) in enumerate(arr.shape))\n if not reverse:\n padarr = np.arange(1, shape[axis] + 1)\n else:\n padarr = np.arange(shape[axis], 0, -1)\n padarr = padarr.reshape(initshape)\n for i, dim in enumerate(shape):\n if padarr.shape[i] != dim:\n padarr = padarr.repeat(dim, axis=i)\n return padarr\n\n\ndef _round_ifneeded(arr, dtype):\n \"\"\"\n Rounds arr inplace if destination dtype is integer.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dtype : dtype\n The dtype of the destination array.\n\n \"\"\"\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)\n\n\ndef _prepend_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Prepend constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n val : scalar\n Constant value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` prepended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n if val == 0:\n return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),\n axis=axis)\n else:\n return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),\n arr), axis=axis)\n\n\ndef _append_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Append constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n val : scalar\n Constant value to use. 
For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` appended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n if val == 0:\n return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),\n axis=axis)\n else:\n return np.concatenate(\n (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Prepend `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values appended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n edge_arr = arr[edge_slice].reshape(pad_singleton)\n return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Append `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values prepended along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n edge_arr = arr[edge_slice].reshape(pad_singleton)\n return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _prepend_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Prepend linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n end : scalar\n Constal value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Generate shape for final concatenated array\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n\n # Generate an n-dimensional array incrementing along `axis`\n ramp_arr = _arange_ndarray(arr, padshape, axis,\n reverse=True).astype(np.float64)\n\n # Appropriate slicing to extract n-dimensional edge along `axis`\n edge_slice = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract edge, reshape to original rank, and extend along `axis`\n edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)\n\n # Linear ramp\n slope = (end - edge_pad) / float(pad_amt)\n ramp_arr = ramp_arr * slope\n ramp_arr += edge_pad\n _round_ifneeded(ramp_arr, arr.dtype)\n\n # Ramp values will most likely be float, cast them to the same type as arr\n return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)\n\n\ndef _append_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Append linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n end : scalar\n Constal value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Generate shape for final concatenated array\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n\n # Generate an n-dimensional array incrementing along `axis`\n ramp_arr = _arange_ndarray(arr, padshape, axis,\n reverse=False).astype(np.float64)\n\n # Slice a chunk from the edge to calculate stats on\n edge_slice = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract edge, reshape to original rank, and extend along `axis`\n edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)\n\n # Linear ramp\n slope = (end - edge_pad) / float(pad_amt)\n ramp_arr = ramp_arr * slope\n ramp_arr += edge_pad\n _round_ifneeded(ramp_arr, arr.dtype)\n\n # Ramp values will most likely be float, cast them to the same type as arr\n return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` maximum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
The\n prepended region is the maximum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n max_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate max, reshape to add singleton dimension back\n max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Pad one `axis` of `arr` with the maximum of the last `num` elements.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the maximum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n max_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n max_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate max, reshape to add singleton dimension back\n max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _prepend_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the mean of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n mean_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate mean, reshape to add singleton dimension back\n mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),\n arr), axis=axis)\n\n\ndef _append_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the maximum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n mean_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n mean_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate mean, reshape to add singleton dimension back\n mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the median of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n med_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate median, reshape to add singleton dimension back\n med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)\n\n\ndef _append_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the median of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n med_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n med_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate median, reshape to add singleton dimension back\n med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` minimum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the minimum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n min_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate min, reshape to add singleton dimension back\n min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the minimum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n min_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n min_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate min, reshape to add singleton dimension back\n min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _pad_ref(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by reflection.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of reflection; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with reflected\n values from the original array.\n\n Notes\n -----\n This algorithm does not pad with repetition, i.e. the edges are not\n repeated in the reflection. 
For that behavior, use `mode='symmetric'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)\n for (i, x) in enumerate(arr.shape))\n\n ref_chunk1 = arr[ref_slice]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n ref_chunk1 = ref_chunk1.reshape(pad_singleton)\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice1].reshape(pad_singleton)\n ref_chunk1 = 2 * edge_chunk - ref_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n start = arr.shape[axis] - pad_amt[1] - 1\n end = arr.shape[axis] - 1\n ref_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)\n for (i, x) in enumerate(arr.shape))\n ref_chunk2 = arr[ref_slice][rev_idx]\n\n if pad_amt[1] == 1:\n ref_chunk2 = ref_chunk2.reshape(pad_singleton)\n\n if 'odd' in method:\n edge_slice2 = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice2].reshape(pad_singleton)\n ref_chunk2 = 2 * edge_chunk - ref_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)\n\n\ndef _pad_sym(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by symmetry.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of symmetry; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with symmetric\n values from the original array.\n\n Notes\n -----\n This algorithm DOES pad with repetition, i.e. 
the edges are repeated.\n For padding without repeated edges, use `mode='reflect'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])\n for (i, x) in enumerate(arr.shape))\n rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)\n for (i, x) in enumerate(arr.shape))\n sym_chunk1 = arr[sym_slice][rev_idx]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n sym_chunk1 = sym_chunk1.reshape(pad_singleton)\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice1].reshape(pad_singleton)\n sym_chunk1 = 2 * edge_chunk - sym_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n start = arr.shape[axis] - pad_amt[1]\n end = arr.shape[axis]\n sym_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n sym_chunk2 = arr[sym_slice][rev_idx]\n\n if pad_amt[1] == 1:\n sym_chunk2 = sym_chunk2.reshape(pad_singleton)\n\n if 'odd' in method:\n edge_slice2 = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice2].reshape(pad_singleton)\n sym_chunk2 = 2 * edge_chunk - sym_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)\n\n\ndef _pad_wrap(arr, pad_amt, axis=-1):\n \"\"\"\n Pad `axis` of `arr` via wrapping.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. 
Both regions are padded wrapped values\n from the opposite end of `axis`.\n\n Notes\n -----\n This method of padding is also known as 'tile' or 'tiling'.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n start = arr.shape[axis] - pad_amt[0]\n end = arr.shape[axis]\n wrap_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n wrap_chunk1 = arr[wrap_slice]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])\n for (i, x) in enumerate(arr.shape))\n wrap_chunk2 = arr[wrap_slice]\n\n if pad_amt[1] == 1:\n wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)\n\n\ndef _normalize_shape(ndarray, shape, cast_to_int=True):\n \"\"\"\n Private function which does some checks and normalizes the possibly\n much simpler representations of 'pad_width', 'stat_length',\n 'constant_values', 'end_values'.\n\n Parameters\n ----------\n narray : ndarray\n Input ndarray\n shape : {sequence, array_like, float, int}, optional\n The width of padding (pad_width), the number of elements on the\n edge of the narray used for statistics (stat_length), the constant\n value(s) to use when filling padded regions (constant_values), or the\n endpoint target(s) for linear ramps (end_values).\n ((before_1, after_1), ... (before_N, after_N)) unique number of\n elements for each axis where `N` is rank of `narray`.\n ((before, after),) yields same before and after constants for each\n axis.\n (constant,) or val is a shortcut for before = after = constant for\n all axes.\n cast_to_int : bool, optional\n Controls if values in ``shape`` will be rounded and cast to int\n before being returned.\n\n Returns\n -------\n normalized_shape : tuple of tuples\n val => ((val, val), (val, val), ...)\n [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)\n ((val1, val2), (val3, val4), ...) 
=> no change\n [[val1, val2], ] => ((val1, val2), (val1, val2), ...)\n ((val1, val2), ) => ((val1, val2), (val1, val2), ...)\n [[val , ], ] => ((val, val), (val, val), ...)\n ((val , ), ) => ((val, val), (val, val), ...)\n\n \"\"\"\n ndims = ndarray.ndim\n\n # Shortcut shape=None\n if shape is None:\n return ((None, None), ) * ndims\n\n # Convert any input `info` to a NumPy array\n arr = np.asarray(shape)\n\n # Switch based on what input looks like\n if arr.ndim <= 1:\n if arr.shape == () or arr.shape == (1,):\n # Single scalar input\n # Create new array of ones, multiply by the scalar\n arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr\n elif arr.shape == (2,):\n # Apply padding (before, after) each axis\n # Create new axis 0, repeat along it for every axis\n arr = arr[np.newaxis, :].repeat(ndims, axis=0)\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n elif arr.ndim == 2:\n if arr.shape[1] == 1 and arr.shape[0] == ndims:\n # Padded before and after by the same amount\n arr = arr.repeat(2, axis=1)\n elif arr.shape[0] == ndims:\n # Input correctly formatted, pass it on as `arr`\n arr = shape\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n # Cast if necessary\n if cast_to_int is True:\n arr = np.round(arr).astype(int)\n\n # Convert list of lists to tuple of tuples\n return tuple(tuple(axis) for axis in arr.tolist())\n\n\ndef _validate_lengths(narray, number_elements):\n \"\"\"\n Private function which does some checks and reformats pad_width and\n stat_length using _normalize_shape.\n\n Parameters\n ----------\n narray : ndarray\n Input ndarray\n number_elements : {sequence, int}, optional\n The width of padding (pad_width) or the number of elements on the edge\n of the narray used for statistics (stat_length).\n ((before_1, after_1), ... (before_N, after_N)) unique number of\n elements for each axis.\n ((before, after),) yields same before and after constants for each\n axis.\n (constant,) or int is a shortcut for before = after = constant for all\n axes.\n\n Returns\n -------\n _validate_lengths : tuple of tuples\n int => ((int, int), (int, int), ...)\n [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)\n ((int1, int2), (int3, int4), ...) => no change\n [[int1, int2], ] => ((int1, int2), (int1, int2), ...)\n ((int1, int2), ) => ((int1, int2), (int1, int2), ...)\n [[int , ], ] => ((int, int), (int, int), ...)\n ((int , ), ) => ((int, int), (int, int), ...)\n\n \"\"\"\n normshp = _normalize_shape(narray, number_elements)\n for i in normshp:\n chk = [1 if x is None else x for x in i]\n chk = [1 if x >= 0 else -1 for x in chk]\n if (chk[0] < 0) or (chk[1] < 0):\n fmt = \"%s cannot contain negative values.\"\n raise ValueError(fmt % (number_elements,))\n return normshp\n\n\n###############################################################################\n# Public functions\n\n\ndef pad(array, pad_width, mode=None, **kwargs):\n \"\"\"\n Pads an array.\n\n Parameters\n ----------\n array : array_like of rank N\n Input array\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ((before_1, after_1), ... 
(before_N, after_N)) unique pad widths\n for each axis.\n ((before, after),) yields same before and after pad for each axis.\n (pad,) or int is a shortcut for before = after = pad width for all\n axes.\n mode : str or function\n One of the following string values or a user supplied function.\n\n 'constant'\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n <function>\n Padding function, see Notes.\n stat_length : sequence or int, optional\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n\n ((before_1, after_1), ... (before_N, after_N)) unique statistic\n lengths for each axis.\n\n ((before, after),) yields same before and after statistic lengths\n for each axis.\n\n (stat_length,) or int is a shortcut for before = after = statistic\n length for all axes.\n\n Default is ``None``, to use the entire axis.\n constant_values : sequence or int, optional\n Used in 'constant'. The values to set the padded values for each\n axis.\n\n ((before_1, after_1), ... (before_N, after_N)) unique pad constants\n for each axis.\n\n ((before, after),) yields same before and after constants for each\n axis.\n\n (constant,) or int is a shortcut for before = after = constant for\n all axes.\n\n Default is 0.\n end_values : sequence or int, optional\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n\n ((before_1, after_1), ... (before_N, after_N)) unique end values\n for each axis.\n\n ((before, after),) yields same before and after end values for each\n axis.\n\n (constant,) or int is a shortcut for before = after = end value for\n all axes.\n\n Default is 0.\n reflect_type : {'even', 'odd'}, optional\n Used in 'reflect', and 'symmetric'. The 'even' style is the\n default with an unaltered reflection around the edge value. For\n the 'odd' style, the extented part of the array is created by\n subtracting the reflected values from two times the edge value.\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n For an array with rank greater than 1, some of the padding of later\n axes is calculated from padding of previous axes. This is easiest to\n think about with a rank 2 array where the corners of the padded array\n are calculated by using padded values from the first axis.\n\n The padding function, if used, should return a rank 1 array equal in\n length to the vector argument with padded values replaced. 
It has the\n following signature::\n\n padding_func(vector, iaxis_pad_width, iaxis, **kwargs)\n\n where\n\n vector : ndarray\n A rank 1 array already padded with zeros. Padded values are\n vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].\n iaxis_pad_width : tuple\n A 2-tuple of ints, iaxis_pad_width[0] represents the number of\n values padded at the beginning of vector where\n iaxis_pad_width[1] represents the number of values padded at\n the end of vector.\n iaxis : int\n The axis currently being calculated.\n kwargs : misc\n Any keyword arguments the function requires.\n\n Examples\n --------\n >>> a = [1, 2, 3, 4, 5]\n >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))\n array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])\n\n >>> np.lib.pad(a, (2, 3), 'edge')\n array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])\n\n >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))\n array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])\n\n >>> np.lib.pad(a, (2,), 'maximum')\n array([5, 5, 1, 2, 3, 4, 5, 5, 5])\n\n >>> np.lib.pad(a, (2,), 'mean')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> np.lib.pad(a, (2,), 'median')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> a = [[1, 2], [3, 4]]\n >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')\n array([[1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [3, 3, 3, 4, 3, 3, 3],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1]])\n\n >>> a = [1, 2, 3, 4, 5]\n >>> np.lib.pad(a, (2, 3), 'reflect')\n array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])\n\n >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')\n array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n\n >>> np.lib.pad(a, (2, 3), 'symmetric')\n array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])\n\n >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')\n array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])\n\n >>> np.lib.pad(a, (2, 3), 'wrap')\n array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])\n\n >>> def padwithtens(vector, pad_width, iaxis, kwargs):\n ... vector[:pad_width[0]] = 10\n ... vector[-pad_width[1]:] = 10\n ... 
return vector\n\n >>> a = np.arange(6)\n >>> a = a.reshape((2, 3))\n\n >>> np.lib.pad(a, 2, padwithtens)\n array([[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]])\n \"\"\"\n if not np.asarray(pad_width).dtype.kind == 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n narray = np.array(array)\n pad_width = _validate_lengths(narray, pad_width)\n\n allowedkwargs = {\n 'constant': ['constant_values'],\n 'edge': [],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n 'wrap': [],\n }\n\n kwdefaults = {\n 'stat_length': None,\n 'constant_values': 0,\n 'end_values': 0,\n 'reflect_type': 'even',\n }\n\n if isinstance(mode, str):\n # Make sure have allowed kwargs appropriate for mode\n for key in kwargs:\n if key not in allowedkwargs[mode]:\n raise ValueError('%s keyword not in allowed keywords %s' %\n (key, allowedkwargs[mode]))\n\n # Set kwarg defaults\n for kw in allowedkwargs[mode]:\n kwargs.setdefault(kw, kwdefaults[kw])\n\n # Need to only normalize particular keywords.\n for i in kwargs:\n if i == 'stat_length':\n kwargs[i] = _validate_lengths(narray, kwargs[i])\n if i in ['end_values', 'constant_values']:\n kwargs[i] = _normalize_shape(narray, kwargs[i],\n cast_to_int=False)\n elif mode is None:\n raise ValueError('Keyword \"mode\" must be a function or one of %s.' %\n (list(allowedkwargs.keys()),))\n else:\n # Drop back to old, slower np.apply_along_axis mode for user-supplied\n # vector function\n function = mode\n\n # Create a new padded array\n rank = list(range(len(narray.shape)))\n total_dim_increase = [np.sum(pad_width[i]) for i in rank]\n offset_slices = [slice(pad_width[i][0],\n pad_width[i][0] + narray.shape[i])\n for i in rank]\n new_shape = np.array(narray.shape) + total_dim_increase\n newmat = np.zeros(new_shape, narray.dtype)\n\n # Insert the original array into the padded array\n newmat[offset_slices] = narray\n\n # This is the core of pad ...\n for iaxis in rank:\n np.apply_along_axis(function,\n iaxis,\n newmat,\n pad_width[iaxis],\n iaxis,\n kwargs)\n return newmat\n\n # If we get here, use new padding method\n newmat = narray.copy()\n\n # API preserved, but completely new algorithm which pads by building the\n # entire block to pad before/after `arr` with in one step, for each axis.\n if mode == 'constant':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['constant_values'])):\n newmat = _prepend_const(newmat, pad_before, before_val, axis)\n newmat = _append_const(newmat, pad_after, after_val, axis)\n\n elif mode == 'edge':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n\n elif mode == 'linear_ramp':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['end_values'])):\n newmat = _prepend_ramp(newmat, pad_before, before_val, axis)\n newmat = _append_ramp(newmat, pad_after, after_val, axis)\n\n elif mode == 'maximum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_max(newmat, pad_before, chunk_before, axis)\n newmat = _append_max(newmat, pad_after, 
chunk_after, axis)\n\n elif mode == 'mean':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)\n newmat = _append_mean(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'median':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_med(newmat, pad_before, chunk_before, axis)\n newmat = _append_med(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'minimum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_min(newmat, pad_before, chunk_before, axis)\n newmat = _append_min(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'reflect':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n if ((pad_before > 0) or\n (pad_after > 0)) and newmat.shape[axis] == 1:\n # Extending singleton dimension for 'reflect' is legacy\n # behavior; it really should raise an error.\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n continue\n\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis] - 1\n while ((pad_before > safe_pad) or (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_ref(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'symmetric':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_sym(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'wrap':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)\n\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)\n\n return newmat\n",
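The arraypad module embedded above backs numpy's public pad function; a minimal usage sketch of the padding modes it implements, with the expected outputs taken from the docstring examples in that source, looks like this:

    # Minimal sketch of the padding modes implemented by the arraypad module above.
    # Mode names and expected outputs follow the np.lib.pad docstring examples.
    import numpy as np

    a = np.array([1, 2, 3, 4, 5])

    # 'constant' fills with the given values before/after the axis.
    print(np.pad(a, (2, 3), 'constant', constant_values=(4, 6)))
    # -> [4 4 1 2 3 4 5 6 6 6]

    # 'reflect' mirrors around the edge value without repeating it (_pad_ref),
    # while 'symmetric' repeats the edge (_pad_sym).
    print(np.pad(a, (2, 3), 'reflect'))    # -> [3 2 1 2 3 4 5 4 3 2]
    print(np.pad(a, (2, 3), 'symmetric'))  # -> [2 1 1 2 3 4 5 5 4 3]

    # 'wrap' tiles values taken from the opposite end of the axis (_pad_wrap).
    print(np.pad(a, (2, 3), 'wrap'))       # -> [4 5 1 2 3 4 5 1 2 3]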
"\"\"\"\nSupport code for building Python extensions on Windows.\n\n # NT stuff\n # 1. Make sure libpython<version>.a exists for gcc. If not, build it.\n # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)\n # 3. Force windows to use g77\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport re\n\n# Overwrite certain distutils.ccompiler functions:\nimport numpy.distutils.ccompiler\n\nif sys.version_info[0] < 3:\n from . import log\nelse:\n from numpy.distutils import log\n# NT stuff\n# 1. Make sure libpython<version>.a exists for gcc. If not, build it.\n# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)\n# --> this is done in numpy/distutils/ccompiler.py\n# 3. Force windows to use g77\n\nimport distutils.cygwinccompiler\nfrom distutils.version import StrictVersion\nfrom numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options\nfrom distutils.unixccompiler import UnixCCompiler\nfrom distutils.msvccompiler import get_build_version as get_build_msvc_version\nfrom distutils.errors import (DistutilsExecError, CompileError,\n UnknownFileError)\nfrom numpy.distutils.misc_util import (msvc_runtime_library,\n get_build_architecture)\n\n# Useful to generate table of symbols from a dll\n_START = re.compile(r'\\[Ordinal/Name Pointer\\] Table')\n_TABLE = re.compile(r'^\\s+\\[([\\s*[0-9]*)\\] ([a-zA-Z0-9_]*)')\n\n# the same as cygwin plus some additional parameters\nclass Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):\n \"\"\" A modified MingW32 compiler compatible with an MSVC built Python.\n\n \"\"\"\n\n compiler_type = 'mingw32'\n\n def __init__ (self,\n verbose=0,\n dry_run=0,\n force=0):\n\n distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,\n dry_run, force)\n\n # we need to support 3.2 which doesn't match the standard\n # get_versions methods regex\n if self.gcc_version is None:\n import re\n p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,\n stdout=subprocess.PIPE)\n out_string = p.stdout.read()\n p.stdout.close()\n result = re.search('(\\d+\\.\\d+)', out_string)\n if result:\n self.gcc_version = StrictVersion(result.group(1))\n\n # A real mingw32 doesn't need to specify a different entry point,\n # but cygwin 2.91.57 in no-cygwin-mode needs it.\n if self.gcc_version <= \"2.91.57\":\n entry_point = '--entry _DllMain@12'\n else:\n entry_point = ''\n\n if self.linker_dll == 'dllwrap':\n # Commented out '--driver-name g++' part that fixes weird\n # g++.exe: g++: No such file or directory\n # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).\n # If the --driver-name part is required for some environment\n # then make the inclusion of this part specific to that\n # environment.\n self.linker = 'dllwrap' # --driver-name g++'\n elif self.linker_dll == 'gcc':\n self.linker = 'g++'\n\n p = subprocess.Popen(['gcc', '--version'], shell=True,\n stdout=subprocess.PIPE)\n out_string = p.stdout.read()\n p.stdout.close()\n\n # Before build with MinGW-W64 generate the python import library\n # with gendef and dlltool according to the MingW-W64 FAQ.\n # Use the MinGW-W64 provided msvc runtime import libraries.\n # Don't call build_import_library() and build_msvcr_library.\n\n if 'MinGW-W64' not in str(out_string):\n\n # **changes: eric jones 4/11/01\n # 1. Check for import library on Windows. Build if it doesn't\n # exist.\n build_import_library()\n\n # Check for custom msvc runtime library on Windows. 
Build if it\n # doesn't exist.\n msvcr_success = build_msvcr_library()\n msvcr_dbg_success = build_msvcr_library(debug=True)\n if msvcr_success or msvcr_dbg_success:\n # add preprocessor statement for using customized msvcr lib\n self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')\n\n # Define the MSVC version as hint for MinGW\n msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))\n self.define_macro('__MSVCRT_VERSION__', msvcr_version)\n\n # MS_WIN64 should be defined when building for amd64 on windows,\n # but python headers define it only for MS compilers, which has all\n # kind of bad consequences, like using Py_ModuleInit4 instead of\n # Py_ModuleInit4_64, etc... So we add it here\n if get_build_architecture() == 'AMD64':\n if self.gcc_version < \"4.0\":\n self.set_executables(\n compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',\n compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'\n ' -Wall -Wstrict-prototypes',\n linker_exe='gcc -g -mno-cygwin',\n linker_so='gcc -g -mno-cygwin -shared')\n else:\n # gcc-4 series releases do not support -mno-cygwin option\n self.set_executables(\n compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64'\n ' -O2 -msse2 -Wall',\n compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64'\n ' -O2 -msse2 -Wall -Wstrict-prototypes',\n linker_exe='gcc',\n linker_so='gcc -shared -Wl,-gc-sections -Wl,-s')\n else:\n if self.gcc_version <= \"3.0.0\":\n self.set_executables(\n compiler='gcc -mno-cygwin -O2 -w',\n compiler_so='gcc -mno-cygwin -mdll -O2 -w'\n ' -Wstrict-prototypes',\n linker_exe='g++ -mno-cygwin',\n linker_so='%s -mno-cygwin -mdll -static %s' %\n (self.linker, entry_point))\n elif self.gcc_version < \"4.0\":\n self.set_executables(\n compiler='gcc -mno-cygwin -O2 -Wall',\n compiler_so='gcc -mno-cygwin -O2 -Wall'\n ' -Wstrict-prototypes',\n linker_exe='g++ -mno-cygwin',\n linker_so='g++ -mno-cygwin -shared')\n else:\n # gcc-4 series releases do not support -mno-cygwin option i686\n # build needs '-mincoming-stack-boundary=2' due to ABI\n # incompatibility to Win32 ABI\n self.set_executables(\n compiler='gcc -O2 -march=core2 -mtune=generic'\n ' -mfpmath=sse -msse2'\n ' -mincoming-stack-boundary=2 -Wall',\n compiler_so='gcc -O2 -march=core2 -mtune=generic'\n ' -mfpmath=sse -msse2'\n ' -mincoming-stack-boundary=2 -Wall'\n ' -Wstrict-prototypes',\n linker_exe='g++ ',\n linker_so='g++ -shared -Wl,-gc-sections -Wl,-s')\n # added for python2.3 support we can't pass it through set_executables\n # because pre 2.2 would fail\n self.compiler_cxx = ['g++']\n\n # Maybe we should also append -mthreads, but then the finished dlls\n # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support\n # thread-safe exception handling on `Mingw32')\n\n # no additional libraries needed\n #self.dll_libraries=[]\n return\n\n # __init__ ()\n\n def link(self,\n target_desc,\n objects,\n output_filename,\n output_dir,\n libraries,\n library_dirs,\n runtime_library_dirs,\n export_symbols = None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n build_temp=None,\n target_lang=None):\n # Include the appropiate MSVC runtime library if Python was built\n # with MSVC >= 7.0 (MinGW standard is msvcrt)\n runtime_library = msvc_runtime_library()\n if runtime_library:\n if not libraries:\n libraries = []\n libraries.append(runtime_library)\n args = (self,\n target_desc,\n objects,\n output_filename,\n output_dir,\n libraries,\n library_dirs,\n runtime_library_dirs,\n None, #export_symbols, we do this in our def-file\n debug,\n extra_preargs,\n 
extra_postargs,\n build_temp,\n target_lang)\n if self.gcc_version < \"3.0.0\":\n func = distutils.cygwinccompiler.CygwinCCompiler.link\n else:\n func = UnixCCompiler.link\n func(*args[:func.__code__.co_argcount])\n return\n\n def object_filenames (self,\n source_filenames,\n strip_dir=0,\n output_dir=''):\n if output_dir is None: output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n # use normcase to make sure '.rc' is really '.rc' and not '.RC'\n (base, ext) = os.path.splitext (os.path.normcase(src_name))\n\n # added these lines to strip off windows drive letters\n # without it, .o files are placed next to .c files\n # instead of the build directory\n drv, base = os.path.splitdrive(base)\n if drv:\n base = base[1:]\n\n if ext not in (self.src_extensions + ['.rc', '.res']):\n raise UnknownFileError(\n \"unknown file type '%s' (from '%s')\" % \\\n (ext, src_name))\n if strip_dir:\n base = os.path.basename (base)\n if ext == '.res' or ext == '.rc':\n # these need to be compiled to object files\n obj_names.append (os.path.join (output_dir,\n base + ext + self.obj_extension))\n else:\n obj_names.append (os.path.join (output_dir,\n base + self.obj_extension))\n return obj_names\n\n # object_filenames ()\n\n\ndef find_python_dll():\n maj, min, micro = [int(i) for i in sys.version_info[:3]]\n dllname = 'python%d%d.dll' % (maj, min)\n print(\"Looking for %s\" % dllname)\n\n # We can't do much here:\n # - find it in python main dir\n # - in system32,\n # - ortherwise (Sxs), I don't know how to get it.\n lib_dirs = []\n lib_dirs.append(sys.prefix)\n lib_dirs.append(os.path.join(sys.prefix, 'lib'))\n try:\n lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))\n except KeyError:\n pass\n\n for d in lib_dirs:\n dll = os.path.join(d, dllname)\n if os.path.exists(dll):\n return dll\n\n raise ValueError(\"%s not found in %s\" % (dllname, lib_dirs))\n\ndef dump_table(dll):\n st = subprocess.Popen([\"objdump.exe\", \"-p\", dll], stdout=subprocess.PIPE)\n return st.stdout.readlines()\n\ndef generate_def(dll, dfile):\n \"\"\"Given a dll file location, get all its exported symbols and dump them\n into the given def file.\n\n The .def file will be overwritten\"\"\"\n dump = dump_table(dll)\n for i in range(len(dump)):\n if _START.match(dump[i].decode()):\n break\n else:\n raise ValueError(\"Symbol table not found\")\n\n syms = []\n for j in range(i+1, len(dump)):\n m = _TABLE.match(dump[j].decode())\n if m:\n syms.append((int(m.group(1).strip()), m.group(2)))\n else:\n break\n\n if len(syms) == 0:\n log.warn('No symbols found in %s' % dll)\n\n d = open(dfile, 'w')\n d.write('LIBRARY %s\\n' % os.path.basename(dll))\n d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\\n')\n d.write(';DATA PRELOAD SINGLE\\n')\n d.write('\\nEXPORTS\\n')\n for s in syms:\n #d.write('@%d %s\\n' % (s[0], s[1]))\n d.write('%s\\n' % s[1])\n d.close()\n\ndef find_dll(dll_name):\n\n arch = {'AMD64' : 'amd64',\n 'Intel' : 'x86'}[get_build_architecture()]\n\n def _find_dll_in_winsxs(dll_name):\n # Walk through the WinSxS directory to find the dll.\n winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs')\n if not os.path.exists(winsxs_path):\n return None\n for root, dirs, files in os.walk(winsxs_path):\n if dll_name in files and arch in root:\n return os.path.join(root, dll_name)\n return None\n\n def _find_dll_in_path(dll_name):\n # First, look in the Python directory, then scan PATH for\n # the given dll name.\n for path in [sys.prefix] + os.environ['PATH'].split(';'):\n filepath = os.path.join(path, 
dll_name)\n if os.path.exists(filepath):\n return os.path.abspath(filepath)\n\n return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)\n\ndef build_msvcr_library(debug=False):\n if os.name != 'nt':\n return False\n\n msvcr_name = msvc_runtime_library()\n\n # Skip using a custom library for versions < MSVC 8.0\n if int(msvcr_name.lstrip('msvcr')) < 80:\n log.debug('Skip building msvcr library:'\n ' custom functionality not present')\n return False\n\n if debug:\n msvcr_name += 'd'\n\n # Skip if custom library already exists\n out_name = \"lib%s.a\" % msvcr_name\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if os.path.isfile(out_file):\n log.debug('Skip building msvcr library: \"%s\" exists' %\n (out_file,))\n return True\n\n # Find the msvcr dll\n msvcr_dll_name = msvcr_name + '.dll'\n dll_file = find_dll(msvcr_dll_name)\n if not dll_file:\n log.warn('Cannot build msvcr library: \"%s\" not found' %\n msvcr_dll_name)\n return False\n\n def_name = \"lib%s.def\" % msvcr_name\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n\n log.info('Building msvcr library: \"%s\" (from %s)' \\\n % (out_file, dll_file))\n\n # Generate a symbol definition file from the msvcr dll\n generate_def(dll_file, def_file)\n\n # Create a custom mingw library for the given symbol definitions\n cmd = ['dlltool', '-d', def_file, '-l', out_file]\n retcode = subprocess.call(cmd)\n\n # Clean up symbol definitions\n os.remove(def_file)\n\n return (not retcode)\n\ndef build_import_library():\n if os.name != 'nt':\n return\n\n arch = get_build_architecture()\n if arch == 'AMD64':\n return _build_import_library_amd64()\n elif arch == 'Intel':\n return _build_import_library_x86()\n else:\n raise ValueError(\"Unhandled arch %s\" % arch)\n\ndef _build_import_library_amd64():\n dll_file = find_python_dll()\n\n out_name = \"libpython%d%d.a\" % tuple(sys.version_info[:2])\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if os.path.isfile(out_file):\n log.debug('Skip building import library: \"%s\" exists' %\n (out_file))\n return\n\n def_name = \"python%d%d.def\" % tuple(sys.version_info[:2])\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n\n log.info('Building import library (arch=AMD64): \"%s\" (from %s)' %\n (out_file, dll_file))\n\n generate_def(dll_file, def_file)\n\n cmd = ['dlltool', '-d', def_file, '-l', out_file]\n subprocess.Popen(cmd)\n\ndef _build_import_library_x86():\n \"\"\" Build the import libraries for Mingw32-gcc on Windows\n \"\"\"\n lib_name = \"python%d%d.lib\" % tuple(sys.version_info[:2])\n lib_file = os.path.join(sys.prefix, 'libs', lib_name)\n out_name = \"libpython%d%d.a\" % tuple(sys.version_info[:2])\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if not os.path.isfile(lib_file):\n log.warn('Cannot build import library: \"%s\" not found' % (lib_file))\n return\n if os.path.isfile(out_file):\n log.debug('Skip building import library: \"%s\" exists' % (out_file))\n return\n log.info('Building import library (ARCH=x86): \"%s\"' % (out_file))\n\n from numpy.distutils import lib2def\n\n def_name = \"python%d%d.def\" % tuple(sys.version_info[:2])\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)\n nm_output = lib2def.getnm(nm_cmd)\n dlist, flist = lib2def.parse_nm(nm_output)\n lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))\n\n dll_name = \"python%d%d.dll\" % tuple(sys.version_info[:2])\n args = (dll_name, def_file, out_file)\n cmd = 'dlltool --dllname %s --def %s 
--output-lib %s' % args\n status = os.system(cmd)\n # for now, fail silently\n if status:\n log.warn('Failed to build import library for gcc. Linking will fail.')\n return\n\n#=====================================\n# Dealing with Visual Studio MANIFESTS\n#=====================================\n\n# Functions to deal with visual studio manifests. Manifest are a mechanism to\n# enforce strong DLL versioning on windows, and has nothing to do with\n# distutils MANIFEST. manifests are XML files with version info, and used by\n# the OS loader; they are necessary when linking against a DLL not in the\n# system path; in particular, official python 2.6 binary is built against the\n# MS runtime 9 (the one from VS 2008), which is not available on most windows\n# systems; python 2.6 installer does install it in the Win SxS (Side by side)\n# directory, but this requires the manifest for this to work. This is a big\n# mess, thanks MS for a wonderful system.\n\n# XXX: ideally, we should use exactly the same version as used by python. I\n# submitted a patch to get this version, but it was only included for python\n# 2.6.1 and above. So for versions below, we use a \"best guess\".\n_MSVCRVER_TO_FULLVER = {}\nif sys.platform == 'win32':\n try:\n import msvcrt\n # I took one version in my SxS directory: no idea if it is the good\n # one, and we can't retrieve it from python\n _MSVCRVER_TO_FULLVER['80'] = \"8.0.50727.42\"\n _MSVCRVER_TO_FULLVER['90'] = \"9.0.21022.8\"\n # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0\n # on Windows XP:\n _MSVCRVER_TO_FULLVER['100'] = \"10.0.30319.460\"\n if hasattr(msvcrt, \"CRT_ASSEMBLY_VERSION\"):\n major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(\".\", 2)\n _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION\n del major, minor, rest\n except ImportError:\n # If we are here, means python was not built with MSVC. Not sure what\n # to do in that case: manifest building will fail, but it should not be\n # used in that case anyway\n log.warn('Cannot import msvcrt: using manifest will not be possible')\n\ndef msvc_manifest_xml(maj, min):\n \"\"\"Given a major and minor version of the MSVCR, returns the\n corresponding XML file.\"\"\"\n try:\n fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]\n except KeyError:\n raise ValueError(\"Version %d,%d of MSVCRT not supported yet\" %\n (maj, min))\n # Don't be fooled, it looks like an XML, but it is not. 
In particular, it\n # should not have any space before starting, and its size should be\n # divisible by 4, most likely for alignement constraints when the xml is\n # embedded in the binary...\n # This template was copied directly from the python 2.6 binary (using\n # strings.exe from mingw on python.exe).\n template = \"\"\"\\\n<assembly xmlns=\"urn:schemas-microsoft-com:asm.v1\" manifestVersion=\"1.0\">\n <trustInfo xmlns=\"urn:schemas-microsoft-com:asm.v3\">\n <security>\n <requestedPrivileges>\n <requestedExecutionLevel level=\"asInvoker\" uiAccess=\"false\"></requestedExecutionLevel>\n </requestedPrivileges>\n </security>\n </trustInfo>\n <dependency>\n <dependentAssembly>\n <assemblyIdentity type=\"win32\" name=\"Microsoft.VC%(maj)d%(min)d.CRT\" version=\"%(fullver)s\" processorArchitecture=\"*\" publicKeyToken=\"1fc8b3b9a1e18e3b\"></assemblyIdentity>\n </dependentAssembly>\n </dependency>\n</assembly>\"\"\"\n\n return template % {'fullver': fullver, 'maj': maj, 'min': min}\n\ndef manifest_rc(name, type='dll'):\n \"\"\"Return the rc file used to generate the res file which will be embedded\n as manifest for given manifest file name, of given type ('dll' or\n 'exe').\n\n Parameters\n ----------\n name : str\n name of the manifest file to embed\n type : str {'dll', 'exe'}\n type of the binary which will embed the manifest\n\n \"\"\"\n if type == 'dll':\n rctype = 2\n elif type == 'exe':\n rctype = 1\n else:\n raise ValueError(\"Type %s not supported\" % type)\n\n return \"\"\"\\\n#include \"winuser.h\"\n%d RT_MANIFEST %s\"\"\" % (rctype, name)\n\ndef check_embedded_msvcr_match_linked(msver):\n \"\"\"msver is the ms runtime version used for the MANIFEST.\"\"\"\n # check msvcr major version are the same for linking and\n # embedding\n msvcv = msvc_runtime_library()\n if msvcv:\n assert msvcv.startswith(\"msvcr\"), msvcv\n # Dealing with something like \"mscvr90\" or \"mscvr100\", the last\n # last digit is the minor release, want int(\"9\") or int(\"10\"):\n maj = int(msvcv[5:-1])\n if not maj == int(msver):\n raise ValueError(\n \"Discrepancy between linked msvcr \" \\\n \"(%d) and the one about to be embedded \" \\\n \"(%d)\" % (int(msver), maj))\n\ndef configtest_name(config):\n base = os.path.basename(config._gen_temp_sourcefile(\"yo\", [], \"c\"))\n return os.path.splitext(base)[0]\n\ndef manifest_name(config):\n # Get configest name (including suffix)\n root = configtest_name(config)\n exext = config.compiler.exe_extension\n return root + exext + \".manifest\"\n\ndef rc_name(config):\n # Get configtest name (including suffix)\n root = configtest_name(config)\n return root + \".rc\"\n\ndef generate_manifest(config):\n msver = get_build_msvc_version()\n if msver is not None:\n if msver >= 8:\n check_embedded_msvcr_match_linked(msver)\n ma = int(msver)\n mi = int((msver - ma) * 10)\n # Write the manifest file\n manxml = msvc_manifest_xml(ma, mi)\n man = open(manifest_name(config), \"w\")\n config.temp_files.append(manifest_name(config))\n man.write(manxml)\n man.close()\n",
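(The row above ends with numpy.distutils' Visual Studio manifest helpers. As a quick illustration of how those pieces fit together, here is a minimal sketch only — Windows-specific, not part of the dataset row, and assuming the msvc_manifest_xml and manifest_rc functions shown in that file are importable from numpy.distutils.mingw32ccompiler:)

    # Minimal sketch (illustration only; assumes the helpers quoted in the row above).
    # Windows-only: the MSVCR version table (_MSVCRVER_TO_FULLVER) is populated only on win32.
    from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, manifest_rc

    # Build the manifest XML for MSVCR 9.0 (the VS 2008 runtime); this raises
    # ValueError if that runtime version is not present in the version table.
    manifest = msvc_manifest_xml(9, 0)
    with open("python_exe.manifest", "w") as f:
        f.write(manifest)

    # Emit the resource script that embeds the manifest into an executable
    # (resource type 1); passing type='dll' would use resource type 2 instead.
    with open("python_exe.rc", "w") as f:
        f.write(manifest_rc("python_exe.manifest", type="exe"))

(The .rc file would then be compiled and linked so the OS loader can resolve the side-by-side MSVCR dependency described in the comments above.)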
"# -*- coding: utf-8 -*-\n# pylint: disable=E1101\n\nfrom datetime import datetime\nimport csv\nimport os\nimport sys\nimport re\nimport nose\nimport platform\n\nfrom numpy import nan\nimport numpy as np\nfrom pandas.io.common import DtypeWarning\n\nfrom pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex\nfrom pandas.compat import(\n StringIO, BytesIO, PY3, range, long, lrange, lmap, u\n)\nfrom pandas.io.common import URLError\nimport pandas.io.parsers as parsers\nfrom pandas.io.parsers import (read_csv, read_table, read_fwf,\n TextFileReader, TextParser)\n\nimport pandas.util.testing as tm\nimport pandas as pd\n\nfrom pandas.compat import parse_date\nimport pandas.lib as lib\nfrom pandas import compat\nfrom pandas.lib import Timestamp\nfrom pandas.tseries.index import date_range\nimport pandas.tseries.tools as tools\n\nfrom numpy.testing.decorators import slow\n\nimport pandas.parser\n\n\nclass ParserTests(object):\n \"\"\"\n Want to be able to test either C+Cython or Python+Cython parsers\n \"\"\"\n data1 = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n\n def read_csv(self, *args, **kwargs):\n raise NotImplementedError\n\n def read_table(self, *args, **kwargs):\n raise NotImplementedError\n\n def setUp(self):\n import warnings\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.dirpath = tm.get_data_path()\n self.csv1 = os.path.join(self.dirpath, 'test1.csv')\n self.csv2 = os.path.join(self.dirpath, 'test2.csv')\n self.xls1 = os.path.join(self.dirpath, 'test.xls')\n\n def test_converters_type_must_be_dict(self):\n with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):\n self.read_csv(StringIO(self.data1), converters=0)\n\n def test_multi_character_decimal_marker(self):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n self.assertRaises(ValueError, read_csv, StringIO(data), decimal=',,')\n\n def test_empty_decimal_marker(self):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')\n\n def test_empty_thousands_marker(self):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')\n\n\n def test_multi_character_decimal_marker(self):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')\n\n def test_empty_string(self):\n data = \"\"\"\\\nOne,Two,Three\na,1,one\nb,2,two\n,3,three\nd,4,nan\ne,5,five\nnan,6,\ng,7,seven\n\"\"\"\n df = self.read_csv(StringIO(data))\n xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],\n 'Two': [1, 2, 3, 4, 5, 6, 7],\n 'Three': ['one', 'two', 'three', np.nan, 'five',\n np.nan, 'seven']})\n tm.assert_frame_equal(xp.reindex(columns=df.columns), df)\n\n df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},\n keep_default_na=False)\n xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],\n 'Two': [1, 2, 3, 4, 5, 6, 7],\n 'Three': ['one', 'two', 'three', 'nan', 'five',\n '', 'seven']})\n tm.assert_frame_equal(xp.reindex(columns=df.columns), df)\n\n df = self.read_csv(\n StringIO(data), na_values=['a'], keep_default_na=False)\n xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],\n 'Two': [1, 2, 3, 4, 5, 6, 7],\n 'Three': ['one', 'two', 'three', 'nan', 'five', '',\n 'seven']})\n tm.assert_frame_equal(xp.reindex(columns=df.columns), df)\n\n df = self.read_csv(StringIO(data), na_values={'One': [], 
'Three': []})\n xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],\n 'Two': [1, 2, 3, 4, 5, 6, 7],\n 'Three': ['one', 'two', 'three', np.nan, 'five',\n np.nan, 'seven']})\n tm.assert_frame_equal(xp.reindex(columns=df.columns), df)\n\n\n # GH4318, passing na_values=None and keep_default_na=False yields 'None' as a na_value\n data = \"\"\"\\\nOne,Two,Three\na,1,None\nb,2,two\n,3,None\nd,4,nan\ne,5,five\nnan,6,\ng,7,seven\n\"\"\"\n df = self.read_csv(\n StringIO(data), keep_default_na=False)\n xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],\n 'Two': [1, 2, 3, 4, 5, 6, 7],\n 'Three': ['None', 'two', 'None', 'nan', 'five', '',\n 'seven']})\n tm.assert_frame_equal(xp.reindex(columns=df.columns), df)\n\n\n def test_read_csv(self):\n if not compat.PY3:\n if compat.is_platform_windows():\n prefix = u(\"file:///\")\n else:\n prefix = u(\"file://\")\n fname = prefix + compat.text_type(self.csv1)\n # it works!\n df1 = read_csv(fname, index_col=0, parse_dates=True)\n\n def test_dialect(self):\n data = \"\"\"\\\nlabel1,label2,label3\nindex1,\"a,c,e\nindex2,b,d,f\n\"\"\"\n\n dia = csv.excel()\n dia.quoting = csv.QUOTE_NONE\n df = self.read_csv(StringIO(data), dialect=dia)\n\n data = '''\\\nlabel1,label2,label3\nindex1,a,c,e\nindex2,b,d,f\n'''\n exp = self.read_csv(StringIO(data))\n exp.replace('a', '\"a', inplace=True)\n tm.assert_frame_equal(df, exp)\n\n def test_dialect_str(self):\n data = \"\"\"\\\nfruit:vegetable\napple:brocolli\npear:tomato\n\"\"\"\n exp = DataFrame({\n 'fruit': ['apple', 'pear'],\n 'vegetable': ['brocolli', 'tomato']\n })\n dia = csv.register_dialect('mydialect', delimiter=':')\n df = self.read_csv(StringIO(data), dialect='mydialect')\n tm.assert_frame_equal(df, exp)\n csv.unregister_dialect('mydialect')\n\n def test_1000_sep(self):\n data = \"\"\"A|B|C\n1|2,334|5\n10|13|10.\n\"\"\"\n expected = DataFrame({\n 'A': [1, 10],\n 'B': [2334, 13],\n 'C': [5, 10.]\n })\n\n df = self.read_csv(StringIO(data), sep='|', thousands=',')\n tm.assert_frame_equal(df, expected)\n\n df = self.read_table(StringIO(data), sep='|', thousands=',')\n tm.assert_frame_equal(df, expected)\n\n def test_1000_sep_with_decimal(self):\n data = \"\"\"A|B|C\n1|2,334.01|5\n10|13|10.\n\"\"\"\n expected = DataFrame({\n 'A': [1, 10],\n 'B': [2334.01, 13],\n 'C': [5, 10.]\n })\n\n tm.assert_equal(expected.A.dtype, 'int64')\n tm.assert_equal(expected.B.dtype, 'float')\n tm.assert_equal(expected.C.dtype, 'float')\n\n df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')\n tm.assert_frame_equal(df, expected)\n\n df = self.read_table(StringIO(data), sep='|', thousands=',', decimal='.')\n tm.assert_frame_equal(df, expected)\n\n data_with_odd_sep = \"\"\"A|B|C\n1|2.334,01|5\n10|13|10,\n\"\"\"\n df = self.read_csv(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',')\n tm.assert_frame_equal(df, expected)\n\n df = self.read_table(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',')\n tm.assert_frame_equal(df, expected)\n\n def test_separator_date_conflict(self):\n # Regression test for issue #4678: make sure thousands separator and\n # date parsing do not conflict.\n data = '06-02-2013;13:00;1-000.215'\n expected = DataFrame(\n [[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],\n columns=['Date', 2]\n )\n\n df = self.read_csv(StringIO(data), sep=';', thousands='-', parse_dates={'Date': [0, 1]}, header=None)\n tm.assert_frame_equal(df, expected)\n\n def test_squeeze(self):\n data = \"\"\"\\\na,1\nb,2\nc,3\n\"\"\"\n idx = Index(['a', 'b', 'c'], name=0)\n expected = 
Series([1, 2, 3], name=1, index=idx)\n result = self.read_table(StringIO(data), sep=',', index_col=0,\n header=None, squeeze=True)\n tm.assertIsInstance(result, Series)\n tm.assert_series_equal(result, expected)\n\n def test_squeeze_no_view(self):\n\n # GH 8217\n # series should not be a view\n\n data = \"\"\"time,data\\n0,10\\n1,11\\n2,12\\n4,14\\n5,15\\n3,13\"\"\"\n result = self.read_csv(StringIO(data), index_col='time', squeeze=True)\n self.assertFalse(result._is_view)\n\n def test_inf_parsing(self):\n data = \"\"\"\\\n,A\na,inf\nb,-inf\nc,Inf\nd,-Inf\ne,INF\nf,-INF\ng,INf\nh,-INf\ni,inF\nj,-inF\"\"\"\n inf = float('inf')\n expected = Series([inf, -inf] * 5)\n df = read_csv(StringIO(data), index_col=0)\n tm.assert_almost_equal(df['A'].values, expected.values)\n df = read_csv(StringIO(data), index_col=0, na_filter=False)\n tm.assert_almost_equal(df['A'].values, expected.values)\n\n def test_multiple_date_col(self):\n # Can use multiple date parsers\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n def func(*date_cols):\n return lib.try_parse_dates(parsers._concat_date_cols(date_cols))\n\n df = self.read_csv(StringIO(data), header=None,\n date_parser=func,\n prefix='X',\n parse_dates={'nominal': [1, 2],\n 'actual': [1, 3]})\n self.assertIn('nominal', df)\n self.assertIn('actual', df)\n self.assertNotIn('X1', df)\n self.assertNotIn('X2', df)\n self.assertNotIn('X3', df)\n\n d = datetime(1999, 1, 27, 19, 0)\n self.assertEqual(df.ix[0, 'nominal'], d)\n\n df = self.read_csv(StringIO(data), header=None,\n date_parser=func,\n parse_dates={'nominal': [1, 2],\n 'actual': [1, 3]},\n keep_date_col=True)\n self.assertIn('nominal', df)\n self.assertIn('actual', df)\n\n self.assertIn(1, df)\n self.assertIn(2, df)\n self.assertIn(3, df)\n\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n df = read_csv(StringIO(data), header=None,\n prefix='X',\n parse_dates=[[1, 2], [1, 3]])\n\n self.assertIn('X1_X2', df)\n self.assertIn('X1_X3', df)\n self.assertNotIn('X1', df)\n self.assertNotIn('X2', df)\n self.assertNotIn('X3', df)\n\n d = datetime(1999, 1, 27, 19, 0)\n self.assertEqual(df.ix[0, 'X1_X2'], d)\n\n df = read_csv(StringIO(data), header=None,\n parse_dates=[[1, 2], [1, 3]], keep_date_col=True)\n\n self.assertIn('1_2', df)\n self.assertIn('1_3', df)\n self.assertIn(1, df)\n self.assertIn(2, df)\n self.assertIn(3, df)\n\n data = '''\\\nKORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 
270.0000\nKORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n'''\n df = self.read_csv(StringIO(data), sep=',', header=None,\n parse_dates=[1], index_col=1)\n d = datetime(1999, 1, 27, 19, 0)\n self.assertEqual(df.index[0], d)\n\n def test_multiple_date_cols_int_cast(self):\n data = (\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n \"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\")\n date_spec = {'nominal': [1, 2], 'actual': [1, 3]}\n import pandas.io.date_converters as conv\n\n # it works!\n df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,\n date_parser=conv.parse_date_time)\n self.assertIn('nominal', df)\n\n def test_multiple_date_col_timestamp_parse(self):\n data = \"\"\"05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25\n05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25\"\"\"\n result = self.read_csv(StringIO(data), sep=',', header=None,\n parse_dates=[[0,1]], date_parser=Timestamp)\n\n ex_val = Timestamp('05/31/2012 15:30:00.029')\n self.assertEqual(result['0_1'][0], ex_val)\n\n def test_single_line(self):\n # GH 6607\n # Test currently only valid with python engine because sep=None and\n # delim_whitespace=False. Temporarily copied to TestPythonParser.\n # Test for ValueError with other engines:\n\n with tm.assertRaisesRegexp(ValueError,\n 'sep=None with delim_whitespace=False'):\n # sniff separator\n buf = StringIO()\n sys.stdout = buf\n\n # printing warning message when engine == 'c' for now\n\n try:\n # it works!\n df = self.read_csv(StringIO('1,2'), names=['a', 'b'],\n header=None, sep=None)\n tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)\n finally:\n sys.stdout = sys.__stdout__\n\n def test_multiple_date_cols_with_header(self):\n data = \"\"\"\\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})\n self.assertNotIsInstance(df.nominal[0], compat.string_types)\n\n ts_data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n def test_multiple_date_col_name_collision(self):\n self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),\n parse_dates={'ID': [1, 2]})\n\n data = \"\"\"\\\ndate_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 
0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n self.assertRaises(ValueError, self.read_csv, StringIO(data),\n parse_dates=[[1, 2]])\n\n def test_index_col_named(self):\n no_header = \"\"\"\\\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n h = \"ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\\n\"\n data = h + no_header\n rs = self.read_csv(StringIO(data), index_col='ID')\n xp = self.read_csv(StringIO(data), header=0).set_index('ID')\n tm.assert_frame_equal(rs, xp)\n\n self.assertRaises(ValueError, self.read_csv, StringIO(no_header),\n index_col='ID')\n\n data = \"\"\"\\\n1,2,3,4,hello\n5,6,7,8,world\n9,10,11,12,foo\n\"\"\"\n names = ['a', 'b', 'c', 'd', 'message']\n xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],\n 'd': [4, 8, 12]},\n index=Index(['hello', 'world', 'foo'], name='message'))\n rs = self.read_csv(StringIO(data), names=names, index_col=['message'])\n tm.assert_frame_equal(xp, rs)\n self.assertEqual(xp.index.name, rs.index.name)\n\n rs = self.read_csv(StringIO(data), names=names, index_col='message')\n tm.assert_frame_equal(xp, rs)\n self.assertEqual(xp.index.name, rs.index.name)\n\n def test_usecols_index_col_False(self):\n # Issue 9082\n s = \"a,b,c,d\\n1,2,3,4\\n5,6,7,8\"\n s_malformed = \"a,b,c,d\\n1,2,3,4,\\n5,6,7,8,\"\n cols = ['a','c','d']\n expected = DataFrame({'a':[1,5], 'c':[3,7], 'd':[4,8]})\n df = self.read_csv(StringIO(s), usecols=cols, index_col=False)\n tm.assert_frame_equal(expected, df)\n df = self.read_csv(StringIO(s_malformed), usecols=cols, index_col=False)\n tm.assert_frame_equal(expected, df)\n\n def test_index_col_is_True(self):\n # Issue 9798\n self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),\n index_col=True)\n\n def test_converter_index_col_bug(self):\n # 1835\n data = \"A;B\\n1;2\\n3;4\"\n\n rs = self.read_csv(StringIO(data), sep=';', index_col='A',\n converters={'A': lambda x: x})\n\n xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))\n tm.assert_frame_equal(rs, xp)\n self.assertEqual(rs.index.name, xp.index.name)\n\n def test_date_parser_int_bug(self):\n # #3071\n log_file = StringIO(\n 'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'\n 'accountid,userid,contactid,level,silo,method\\n'\n '1343103150,0.062353,0,4,6,0.01690,3,'\n '12345,1,-1,3,invoice_InvoiceResource,search\\n'\n )\n\n def f(posix_string):\n return datetime.utcfromtimestamp(int(posix_string))\n\n # it works!\n read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)\n\n def test_multiple_skts_example(self):\n data = \"year, month, a, b\\n 2001, 01, 0.0, 10.\\n 2001, 02, 1.1, 11.\"\n pass\n\n def test_malformed(self):\n # all\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n\n try:\n df = self.read_table(\n StringIO(data), sep=',', header=1, comment='#')\n 
self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))\n\n # skip_footer\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\nfooter\n\"\"\"\n\n # GH 6607\n # Test currently only valid with python engine because\n # skip_footer != 0. Temporarily copied to TestPythonParser.\n # Test for ValueError with other engines:\n\n try:\n with tm.assertRaisesRegexp(ValueError, 'skip_footer'): #XXX\n df = self.read_table(\n StringIO(data), sep=',', header=1, comment='#',\n skip_footer=1)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))\n\n # first chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',',\n header=1, comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(5)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n # middle chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',', header=1,\n comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(1)\n it.read(2)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n # last chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',',\n header=1, comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(1)\n it.read()\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n def test_passing_dtype(self):\n # GH 6607\n # Passing dtype is currently only supported by the C engine.\n # Temporarily copied to TestCParser*.\n # Test for ValueError with other engines:\n\n with tm.assertRaisesRegexp(ValueError,\n \"The 'dtype' option is not supported\"):\n\n df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E'])\n\n with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:\n df.to_csv(path)\n\n # GH 3795\n # passing 'str' as the dtype\n result = self.read_csv(path, dtype=str, index_col=0)\n tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' }))\n\n # we expect all object columns, so need to convert to test for equivalence\n result = result.astype(float)\n tm.assert_frame_equal(result,df)\n\n # invalid dtype\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' },\n index_col=0)\n\n # valid but we don't support it (date)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0, parse_dates=['B'])\n\n # valid but we don't support it\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' },\n index_col=0)\n\n def test_quoting(self):\n bad_line_small = \"\"\"printer\\tresult\\tvariant_name\nKlosterdruckerei\\tKlosterdruckerei <Salem> (1611-1804)\\tMuller, Jacob\nKlosterdruckerei\\tKlosterdruckerei <Salem> (1611-1804)\\tMuller, Jakob\nKlosterdruckerei\\tKlosterdruckerei <Kempten> (1609-1805)\\t\"Furststiftische Hofdruckerei, <Kempten\"\"\nKlosterdruckerei\\tKlosterdruckerei <Kempten> (1609-1805)\\tGaller, 
Alois\nKlosterdruckerei\\tKlosterdruckerei <Kempten> (1609-1805)\\tHochfurstliche Buchhandlung <Kempten>\"\"\"\n self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),\n sep='\\t')\n\n good_line_small = bad_line_small + '\"'\n df = self.read_table(StringIO(good_line_small), sep='\\t')\n self.assertEqual(len(df), 3)\n\n def test_non_string_na_values(self):\n # GH3611, na_values that are not a string are an issue\n with tm.ensure_clean('__non_string_na_values__.csv') as path:\n df = DataFrame({'A' : [-999, 2, 3], 'B' : [1.2, -999, 4.5]})\n df.to_csv(path, sep=' ', index=False)\n result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])\n result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])\n result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])\n tm.assert_frame_equal(result1,result2)\n tm.assert_frame_equal(result2,result3)\n\n result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])\n result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])\n result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])\n result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])\n tm.assert_frame_equal(result4,result3)\n tm.assert_frame_equal(result5,result3)\n tm.assert_frame_equal(result6,result3)\n tm.assert_frame_equal(result7,result3)\n\n good_compare = result3\n\n # with an odd float format, so we can't match the string 999.0 exactly,\n # but need float matching\n df.to_csv(path, sep=' ', index=False, float_format = '%.3f')\n result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])\n result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])\n result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])\n tm.assert_frame_equal(result1,good_compare)\n tm.assert_frame_equal(result2,good_compare)\n tm.assert_frame_equal(result3,good_compare)\n\n result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])\n result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])\n result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])\n result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])\n tm.assert_frame_equal(result4,good_compare)\n tm.assert_frame_equal(result5,good_compare)\n tm.assert_frame_equal(result6,good_compare)\n tm.assert_frame_equal(result7,good_compare)\n\n def test_default_na_values(self):\n _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',\n '#N/A','N/A', 'NA', '#NA', 'NULL', 'NaN',\n 'nan', '-NaN', '-nan', '#N/A N/A',''])\n self.assertEqual(_NA_VALUES, parsers._NA_VALUES)\n nv = len(_NA_VALUES)\n def f(i, v):\n if i == 0:\n buf = ''\n elif i > 0:\n buf = ''.join([','] * i)\n\n buf = \"{0}{1}\".format(buf,v)\n\n if i < nv-1:\n buf = \"{0}{1}\".format(buf,''.join([','] * (nv-i-1)))\n\n return buf\n\n data = StringIO('\\n'.join([ f(i, v) for i, v in enumerate(_NA_VALUES) ]))\n expected = DataFrame(np.nan,columns=range(nv),index=range(nv))\n df = self.read_csv(data, header=None)\n tm.assert_frame_equal(df, expected)\n\n def test_custom_na_values(self):\n data = \"\"\"A,B,C\nignore,this,row\n1,NA,3\n-1.#IND,5,baz\n7,8,NaN\n\"\"\"\n expected = [[1., nan, 3],\n [nan, 5, nan],\n [7, 8, nan]]\n\n df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])\n tm.assert_almost_equal(df.values, expected)\n\n df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],\n skiprows=[1])\n tm.assert_almost_equal(df2.values, expected)\n\n df3 = self.read_table(StringIO(data), sep=',', na_values='baz',\n skiprows=[1])\n 
tm.assert_almost_equal(df3.values, expected)\n\n def test_nat_parse(self):\n\n # GH 3062\n df = DataFrame(dict({\n 'A' : np.asarray(lrange(10),dtype='float64'),\n 'B' : pd.Timestamp('20010101') }))\n df.iloc[3:6,:] = np.nan\n\n with tm.ensure_clean('__nat_parse_.csv') as path:\n df.to_csv(path)\n result = read_csv(path,index_col=0,parse_dates=['B'])\n tm.assert_frame_equal(result,df)\n\n expected = Series(dict( A = 'float64',B = 'datetime64[ns]'))\n tm.assert_series_equal(expected,result.dtypes)\n\n # test with NaT for the nan_rep\n # we don't have a method to specif the Datetime na_rep (it defaults to '')\n df.to_csv(path)\n result = read_csv(path,index_col=0,parse_dates=['B'])\n tm.assert_frame_equal(result,df)\n\n def test_skiprows_bug(self):\n # GH #505\n text = \"\"\"#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n1/1/2000,1.,2.,3.\n1/2/2000,4,5,6\n1/3/2000,7,8,9\n\"\"\"\n data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,\n index_col=0, parse_dates=True)\n\n data2 = self.read_csv(StringIO(text), skiprows=6, header=None,\n index_col=0, parse_dates=True)\n\n expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),\n columns=[1, 2, 3],\n index=[datetime(2000, 1, 1), datetime(2000, 1, 2),\n datetime(2000, 1, 3)])\n expected.index.name = 0\n tm.assert_frame_equal(data, expected)\n tm.assert_frame_equal(data, data2)\n\n def test_deep_skiprows(self):\n # GH #4382\n text = \"a,b,c\\n\" + \"\\n\".join([\",\".join([str(i), str(i+1), str(i+2)]) for i in range(10)])\n condensed_text = \"a,b,c\\n\" + \"\\n\".join([\",\".join([str(i), str(i+1), str(i+2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]])\n data = self.read_csv(StringIO(text), skiprows=[6, 8])\n condensed_data = self.read_csv(StringIO(condensed_text))\n tm.assert_frame_equal(data, condensed_data)\n\n def test_skiprows_blank(self):\n # GH 9832\n text = \"\"\"#foo,a,b,c\n#foo,a,b,c\n\n#foo,a,b,c\n#foo,a,b,c\n\n1/1/2000,1.,2.,3.\n1/2/2000,4,5,6\n1/3/2000,7,8,9\n\"\"\"\n data = self.read_csv(StringIO(text), skiprows=6, header=None,\n index_col=0, parse_dates=True)\n\n expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),\n columns=[1, 2, 3],\n index=[datetime(2000, 1, 1), datetime(2000, 1, 2),\n datetime(2000, 1, 3)])\n expected.index.name = 0\n tm.assert_frame_equal(data, expected)\n\n def test_detect_string_na(self):\n data = \"\"\"A,B\nfoo,bar\nNA,baz\nNaN,nan\n\"\"\"\n expected = [['foo', 'bar'],\n [nan, 'baz'],\n [nan, nan]]\n\n df = self.read_csv(StringIO(data))\n tm.assert_almost_equal(df.values, expected)\n\n def test_unnamed_columns(self):\n data = \"\"\"A,B,C,,\n1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n\"\"\"\n expected = [[1, 2, 3, 4, 5.],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15]]\n df = self.read_table(StringIO(data), sep=',')\n tm.assert_almost_equal(df.values, expected)\n self.assert_numpy_array_equal(df.columns,\n ['A', 'B', 'C', 'Unnamed: 3',\n 'Unnamed: 4'])\n\n def test_string_nas(self):\n data = \"\"\"A,B,C\na,b,c\nd,,f\n,g,h\n\"\"\"\n result = self.read_csv(StringIO(data))\n expected = DataFrame([['a', 'b', 'c'],\n ['d', np.nan, 'f'],\n [np.nan, 'g', 'h']],\n columns=['A', 'B', 'C'])\n\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_columns(self):\n for engine in ['python', 'c']:\n data = \"\"\"A,A,B,B,B\n 1,2,3,4,5\n 6,7,8,9,10\n 11,12,13,14,15\n \"\"\"\n # check default beahviour\n df = self.read_table(StringIO(data), sep=',',engine=engine)\n self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])\n\n df = self.read_table(StringIO(data), 
sep=',',engine=engine,mangle_dupe_cols=False)\n self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])\n\n df = self.read_table(StringIO(data), sep=',',engine=engine,mangle_dupe_cols=True)\n self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])\n\n def test_csv_mixed_type(self):\n data = \"\"\"A,B,C\na,1,2\nb,3,4\nc,4,5\n\"\"\"\n df = self.read_csv(StringIO(data))\n # TODO\n\n def test_csv_custom_parser(self):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n f = lambda x: datetime.strptime(x, '%Y%m%d')\n df = self.read_csv(StringIO(data), date_parser=f)\n expected = self.read_csv(StringIO(data), parse_dates=True)\n tm.assert_frame_equal(df, expected)\n\n def test_parse_dates_implicit_first_col(self):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n df = self.read_csv(StringIO(data), parse_dates=True)\n expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)\n self.assertIsInstance(df.index[0], (datetime, np.datetime64, Timestamp))\n tm.assert_frame_equal(df, expected)\n\n def test_parse_dates_string(self):\n data = \"\"\"date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n rs = self.read_csv(\n StringIO(data), index_col='date', parse_dates='date')\n idx = date_range('1/1/2009', periods=3)\n idx.name = 'date'\n xp = DataFrame({'A': ['a', 'b', 'c'],\n 'B': [1, 3, 4],\n 'C': [2, 4, 5]}, idx)\n tm.assert_frame_equal(rs, xp)\n\n def test_yy_format(self):\n data = \"\"\"date,time,B,C\n090131,0010,1,2\n090228,1020,3,4\n090331,0830,5,6\n\"\"\"\n rs = self.read_csv(StringIO(data), index_col=0,\n parse_dates=[['date', 'time']])\n idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0)],\n dtype=object, name='date_time')\n xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)\n tm.assert_frame_equal(rs, xp)\n\n rs = self.read_csv(StringIO(data), index_col=0,\n parse_dates=[[0, 1]])\n idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0)],\n dtype=object, name='date_time')\n xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)\n tm.assert_frame_equal(rs, xp)\n\n def test_parse_dates_column_list(self):\n from pandas.core.datetools import to_datetime\n\n data = '''date;destination;ventilationcode;unitcode;units;aux_date\n01/01/2010;P;P;50;1;12/1/2011\n01/01/2010;P;R;50;1;13/1/2011\n15/01/2010;P;P;50;1;14/1/2011\n01/05/2010;P;P;50;1;15/1/2011'''\n\n expected = self.read_csv(StringIO(data), sep=\";\", index_col=lrange(4))\n\n lev = expected.index.levels[0]\n levels = list(expected.index.levels)\n levels[0] = lev.to_datetime(dayfirst=True)\n # hack to get this to work - remove for final test\n levels[0].name = lev.name\n expected.index.set_levels(levels, inplace=True)\n expected['aux_date'] = to_datetime(expected['aux_date'],\n dayfirst=True)\n expected['aux_date'] = lmap(Timestamp, expected['aux_date'])\n tm.assertIsInstance(expected['aux_date'][0], datetime)\n\n df = self.read_csv(StringIO(data), sep=\";\", index_col=lrange(4),\n parse_dates=[0, 5], dayfirst=True)\n tm.assert_frame_equal(df, expected)\n\n df = self.read_csv(StringIO(data), sep=\";\", index_col=lrange(4),\n parse_dates=['date', 'aux_date'], dayfirst=True)\n tm.assert_frame_equal(df, expected)\n\n def test_no_header(self):\n data = \"\"\"1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n\"\"\"\n df = self.read_table(StringIO(data), sep=',', header=None)\n df_pref = self.read_table(StringIO(data), 
sep=',', prefix='X',\n header=None)\n\n names = ['foo', 'bar', 'baz', 'quux', 'panda']\n df2 = self.read_table(StringIO(data), sep=',', names=names)\n expected = [[1, 2, 3, 4, 5.],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15]]\n tm.assert_almost_equal(df.values, expected)\n tm.assert_almost_equal(df.values, df2.values)\n\n self.assert_numpy_array_equal(df_pref.columns,\n ['X0', 'X1', 'X2', 'X3', 'X4'])\n self.assert_numpy_array_equal(df.columns, lrange(5))\n\n self.assert_numpy_array_equal(df2.columns, names)\n\n def test_no_header_prefix(self):\n data = \"\"\"1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n\"\"\"\n df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',\n header=None)\n\n expected = [[1, 2, 3, 4, 5.],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15]]\n tm.assert_almost_equal(df_pref.values, expected)\n\n self.assert_numpy_array_equal(df_pref.columns,\n ['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])\n\n def test_header_with_index_col(self):\n data = \"\"\"foo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n names = ['A', 'B', 'C']\n df = self.read_csv(StringIO(data), names=names)\n\n self.assertEqual(names, ['A', 'B', 'C'])\n\n values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n expected = DataFrame(values, index=['foo', 'bar', 'baz'],\n columns=['A', 'B', 'C'])\n tm.assert_frame_equal(df, expected)\n\n def test_read_csv_dataframe(self):\n df = self.read_csv(self.csv1, index_col=0, parse_dates=True)\n df2 = self.read_table(self.csv1, sep=',', index_col=0,\n parse_dates=True)\n self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])\n self.assertEqual(df.index.name, 'index')\n self.assertIsInstance(df.index[0], (datetime, np.datetime64, Timestamp))\n self.assertEqual(df.values.dtype, np.float64)\n tm.assert_frame_equal(df, df2)\n\n def test_read_csv_no_index_name(self):\n df = self.read_csv(self.csv2, index_col=0, parse_dates=True)\n df2 = self.read_table(self.csv2, sep=',', index_col=0,\n parse_dates=True)\n self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])\n self.assertIsInstance(df.index[0], (datetime, np.datetime64, Timestamp))\n self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']].values.dtype, np.float64)\n tm.assert_frame_equal(df, df2)\n\n def test_read_csv_infer_compression(self):\n # GH 9770\n expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)\n\n inputs = [self.csv1, self.csv1 + '.gz',\n self.csv1 + '.bz2', open(self.csv1)]\n\n for f in inputs:\n df = self.read_csv(f, index_col=0, parse_dates=True,\n compression='infer')\n\n tm.assert_frame_equal(expected, df)\n\n inputs[3].close()\n\n def test_read_table_unicode(self):\n fin = BytesIO(u('\\u0141aski, Jan;1').encode('utf-8'))\n df1 = read_table(fin, sep=\";\", encoding=\"utf-8\", header=None)\n tm.assertIsInstance(df1[0].values[0], compat.text_type)\n\n def test_read_table_wrong_num_columns(self):\n # too few!\n data = \"\"\"A,B,C,D,E,F\n1,2,3,4,5,6\n6,7,8,9,10,11,12\n11,12,13,14,15,16\n\"\"\"\n self.assertRaises(Exception, self.read_csv, StringIO(data))\n\n def test_read_table_duplicate_index(self):\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n\"\"\"\n\n result = self.read_csv(StringIO(data), index_col=0)\n expected = self.read_csv(StringIO(data)).set_index('index',\n verify_integrity=False)\n tm.assert_frame_equal(result, expected)\n\n def test_read_table_duplicate_index_implicit(self):\n data = \"\"\"A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n\"\"\"\n\n 
# it works!\n result = self.read_csv(StringIO(data))\n\n def test_parse_bools(self):\n data = \"\"\"A,B\nTrue,1\nFalse,2\nTrue,3\n\"\"\"\n data = self.read_csv(StringIO(data))\n self.assertEqual(data['A'].dtype, np.bool_)\n\n data = \"\"\"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3\n\"\"\"\n data = self.read_csv(StringIO(data),\n true_values=['yes', 'Yes', 'YES'],\n false_values=['no', 'NO', 'No'])\n self.assertEqual(data['A'].dtype, np.bool_)\n\n data = \"\"\"A,B\nTRUE,1\nFALSE,2\nTRUE,3\n\"\"\"\n data = self.read_csv(StringIO(data))\n self.assertEqual(data['A'].dtype, np.bool_)\n\n data = \"\"\"A,B\nfoo,bar\nbar,foo\"\"\"\n result = self.read_csv(StringIO(data), true_values=['foo'],\n false_values=['bar'])\n expected = DataFrame({'A': [True, False], 'B': [False, True]})\n tm.assert_frame_equal(result, expected)\n\n def test_int_conversion(self):\n data = \"\"\"A,B\n1.0,1\n2.0,2\n3.0,3\n\"\"\"\n data = self.read_csv(StringIO(data))\n self.assertEqual(data['A'].dtype, np.float64)\n self.assertEqual(data['B'].dtype, np.int64)\n\n def test_infer_index_col(self):\n data = \"\"\"A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n data = self.read_csv(StringIO(data))\n self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))\n\n def test_read_nrows(self):\n df = self.read_csv(StringIO(self.data1), nrows=3)\n expected = self.read_csv(StringIO(self.data1))[:3]\n tm.assert_frame_equal(df, expected)\n\n def test_read_chunksize(self):\n reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)\n df = self.read_csv(StringIO(self.data1), index_col=0)\n\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], df[:2])\n tm.assert_frame_equal(chunks[1], df[2:4])\n tm.assert_frame_equal(chunks[2], df[4:])\n\n def test_read_chunksize_named(self):\n reader = self.read_csv(\n StringIO(self.data1), index_col='index', chunksize=2)\n df = self.read_csv(StringIO(self.data1), index_col='index')\n\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], df[:2])\n tm.assert_frame_equal(chunks[1], df[2:4])\n tm.assert_frame_equal(chunks[2], df[4:])\n\n def test_get_chunk_passed_chunksize(self):\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\n1,2,3\"\"\"\n result = self.read_csv(StringIO(data), chunksize=2)\n\n piece = result.get_chunk()\n self.assertEqual(len(piece), 2)\n\n def test_read_text_list(self):\n data = \"\"\"A,B,C\\nfoo,1,2,3\\nbar,4,5,6\"\"\"\n as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',\n '4', '5', '6']]\n df = self.read_csv(StringIO(data), index_col=0)\n\n parser = TextParser(as_list, index_col=0, chunksize=2)\n chunk = parser.read(None)\n\n tm.assert_frame_equal(chunk, df)\n\n def test_iterator(self):\n # GH 6607\n # Test currently only valid with python engine because\n # skip_footer != 0. 
Temporarily copied to TestPythonParser.\n # Test for ValueError with other engines:\n\n with tm.assertRaisesRegexp(ValueError, 'skip_footer'):\n reader = self.read_csv(StringIO(self.data1), index_col=0,\n iterator=True)\n df = self.read_csv(StringIO(self.data1), index_col=0)\n\n chunk = reader.read(3)\n tm.assert_frame_equal(chunk, df[:3])\n\n last_chunk = reader.read(5)\n tm.assert_frame_equal(last_chunk, df[3:])\n\n # pass list\n lines = list(csv.reader(StringIO(self.data1)))\n parser = TextParser(lines, index_col=0, chunksize=2)\n\n df = self.read_csv(StringIO(self.data1), index_col=0)\n\n chunks = list(parser)\n tm.assert_frame_equal(chunks[0], df[:2])\n tm.assert_frame_equal(chunks[1], df[2:4])\n tm.assert_frame_equal(chunks[2], df[4:])\n\n # pass skiprows\n parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])\n chunks = list(parser)\n tm.assert_frame_equal(chunks[0], df[1:3])\n\n # test bad parameter (skip_footer)\n reader = self.read_csv(StringIO(self.data1), index_col=0,\n iterator=True, skip_footer=True)\n self.assertRaises(ValueError, reader.read, 3)\n\n treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,\n iterator=True)\n tm.assertIsInstance(treader, TextFileReader)\n\n # stopping iteration when on chunksize is specified, GH 3967\n data = \"\"\"A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n reader = self.read_csv(StringIO(data), iterator=True)\n result = list(reader)\n expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])\n tm.assert_frame_equal(result[0], expected)\n\n # chunksize = 1\n reader = self.read_csv(StringIO(data), chunksize=1)\n result = list(reader)\n expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])\n self.assertEqual(len(result), 3)\n tm.assert_frame_equal(pd.concat(result), expected)\n\n def test_header_not_first_line(self):\n data = \"\"\"got,to,ignore,this,line\ngot,to,ignore,this,line\nindex,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\n\"\"\"\n data2 = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\n\"\"\"\n\n df = self.read_csv(StringIO(data), header=2, index_col=0)\n expected = self.read_csv(StringIO(data2), header=0, index_col=0)\n tm.assert_frame_equal(df, expected)\n\n def test_header_multi_index(self):\n expected = tm.makeCustomDataframe(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n\n data = \"\"\"\\\nC0,,C_l0_g0,C_l0_g1,C_l0_g2\n\nC1,,C_l1_g0,C_l1_g1,C_l1_g2\nC2,,C_l2_g0,C_l2_g1,C_l2_g2\nC3,,C_l3_g0,C_l3_g1,C_l3_g2\nR0,R1,,,\nR_l0_g0,R_l1_g0,R0C0,R0C1,R0C2\nR_l0_g1,R_l1_g1,R1C0,R1C1,R1C2\nR_l0_g2,R_l1_g2,R2C0,R2C1,R2C2\nR_l0_g3,R_l1_g3,R3C0,R3C1,R3C2\nR_l0_g4,R_l1_g4,R4C0,R4C1,R4C2\n\"\"\"\n\n df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1], tupleize_cols=False)\n tm.assert_frame_equal(df, expected)\n\n # skipping lines in the header\n df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1], tupleize_cols=False)\n tm.assert_frame_equal(df, expected)\n\n #### invalid options ####\n\n # no as_recarray\n self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3],\n index_col=[0,1], as_recarray=True, tupleize_cols=False)\n\n # names\n self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3],\n index_col=[0,1], names=['foo','bar'], tupleize_cols=False)\n # usecols\n self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3],\n index_col=[0,1], usecols=['foo','bar'], tupleize_cols=False)\n # non-numeric index_col\n 
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3],\n index_col=['foo','bar'], tupleize_cols=False)\n\n def test_header_multiindex_common_format(self):\n\n df = DataFrame([[1,2,3,4,5,6],[7,8,9,10,11,12]],\n index=['one','two'],\n columns=MultiIndex.from_tuples([('a','q'),('a','r'),('a','s'),\n ('b','t'),('c','u'),('c','v')]))\n\n # to_csv\n data = \"\"\",a,a,a,b,c,c\n,q,r,s,t,u,v\n,,,,,,\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=0)\n tm.assert_frame_equal(df,result)\n\n # common\n data = \"\"\",a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=0)\n tm.assert_frame_equal(df,result)\n\n # common, no index_col\n data = \"\"\"a,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=None)\n tm.assert_frame_equal(df.reset_index(drop=True),result)\n\n # malformed case 1\n expected = DataFrame(np.array([[2, 3, 4, 5, 6],\n [8, 9, 10, 11, 12]], dtype='int64'),\n index=Index([1, 7]),\n columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],\n labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],\n names=[u('a'), u('q')]))\n\n data = \"\"\"a,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=0)\n tm.assert_frame_equal(expected,result)\n\n # malformed case 2\n expected = DataFrame(np.array([[2, 3, 4, 5, 6],\n [8, 9, 10, 11, 12]], dtype='int64'),\n index=Index([1, 7]),\n columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],\n labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],\n names=[None, u('q')]))\n\n data = \"\"\",a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=0)\n tm.assert_frame_equal(expected,result)\n\n # mi on columns and index (malformed)\n expected = DataFrame(np.array([[ 3, 4, 5, 6],\n [ 9, 10, 11, 12]], dtype='int64'),\n index=MultiIndex(levels=[[1, 7], [2, 8]],\n labels=[[0, 1], [0, 1]]),\n columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],\n labels=[[0, 1, 2, 2], [0, 1, 2, 3]],\n names=[None, u('q')]))\n\n data = \"\"\",a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data),header=[0,1],index_col=[0, 1])\n tm.assert_frame_equal(expected,result)\n\n def test_pass_names_with_index(self):\n lines = self.data1.split('\\n')\n no_header = '\\n'.join(lines[1:])\n\n # regular index\n names = ['index', 'A', 'B', 'C', 'D']\n df = self.read_csv(StringIO(no_header), index_col=0, names=names)\n expected = self.read_csv(StringIO(self.data1), index_col=0)\n tm.assert_frame_equal(df, expected)\n\n # multi index\n data = \"\"\"index1,index2,A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\"\n lines = data.split('\\n')\n no_header = '\\n'.join(lines[1:])\n names = ['index1', 'index2', 'A', 'B', 'C', 'D']\n df = self.read_csv(StringIO(no_header), index_col=[0, 1],\n names=names)\n expected = self.read_csv(StringIO(data), index_col=[0, 1])\n tm.assert_frame_equal(df, expected)\n\n df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])\n tm.assert_frame_equal(df, expected)\n\n def test_multi_index_no_level_names(self):\n data = 
\"\"\"index1,index2,A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\"\n\n data2 = \"\"\"A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n\"\"\"\n\n lines = data.split('\\n')\n no_header = '\\n'.join(lines[1:])\n names = ['A', 'B', 'C', 'D']\n\n df = self.read_csv(StringIO(no_header), index_col=[0, 1],\n header=None, names=names)\n expected = self.read_csv(StringIO(data), index_col=[0, 1])\n tm.assert_frame_equal(df, expected, check_names=False)\n\n # 2 implicit first cols\n df2 = self.read_csv(StringIO(data2))\n tm.assert_frame_equal(df2, df)\n\n # reverse order of index\n df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,\n header=None)\n expected = self.read_csv(StringIO(data), index_col=[1, 0])\n tm.assert_frame_equal(df, expected, check_names=False)\n\n def test_multi_index_parse_dates(self):\n data = \"\"\"index1,index2,A,B,C\n20090101,one,a,1,2\n20090101,two,b,3,4\n20090101,three,c,4,5\n20090102,one,a,1,2\n20090102,two,b,3,4\n20090102,three,c,4,5\n20090103,one,a,1,2\n20090103,two,b,3,4\n20090103,three,c,4,5\n\"\"\"\n df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)\n self.assertIsInstance(df.index.levels[0][0],\n (datetime, np.datetime64, Timestamp))\n\n # specify columns out of order!\n df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)\n self.assertIsInstance(df2.index.levels[1][0],\n (datetime, np.datetime64, Timestamp))\n\n def test_skip_footer(self):\n # GH 6607\n # Test currently only valid with python engine because\n # skip_footer != 0. Temporarily copied to TestPythonParser.\n # Test for ValueError with other engines:\n\n with tm.assertRaisesRegexp(ValueError, 'skip_footer'):\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\nwant to skip this\nalso also skip this\n\"\"\"\n result = self.read_csv(StringIO(data), skip_footer=2)\n no_footer = '\\n'.join(data.split('\\n')[:-3])\n expected = self.read_csv(StringIO(no_footer))\n\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data), nrows=3)\n tm.assert_frame_equal(result, expected)\n\n # skipfooter alias\n result = read_csv(StringIO(data), skipfooter=2)\n no_footer = '\\n'.join(data.split('\\n')[:-3])\n expected = read_csv(StringIO(no_footer))\n\n tm.assert_frame_equal(result, expected)\n\n def test_no_unnamed_index(self):\n data = \"\"\" id c0 c1 c2\n0 1 0 a b\n1 2 0 c d\n2 2 2 e f\n\"\"\"\n df = self.read_table(StringIO(data), sep=' ')\n self.assertIsNone(df.index.name)\n\n def test_converters(self):\n data = \"\"\"A,B,C,D\na,1,2,01/01/2009\nb,3,4,01/02/2009\nc,4,5,01/03/2009\n\"\"\"\n from pandas.compat import parse_date\n\n result = self.read_csv(StringIO(data), converters={'D': parse_date})\n result2 = self.read_csv(StringIO(data), converters={3: parse_date})\n\n expected = self.read_csv(StringIO(data))\n expected['D'] = expected['D'].map(parse_date)\n\n tm.assertIsInstance(result['D'][0], (datetime, Timestamp))\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # produce integer\n converter = lambda x: int(x.split('/')[2])\n result = self.read_csv(StringIO(data), converters={'D': converter})\n expected = self.read_csv(StringIO(data))\n expected['D'] = expected['D'].map(converter)\n tm.assert_frame_equal(result, expected)\n\n def test_converters_no_implicit_conv(self):\n # GH2184\n data = \"\"\"000102,1.2,A\\n001245,2,B\"\"\"\n f = lambda x: x.strip()\n converter = {0: f}\n df = 
self.read_csv(StringIO(data), header=None, converters=converter)\n self.assertEqual(df[0].dtype, object)\n\n def test_converters_euro_decimal_format(self):\n data = \"\"\"Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,738797819\n2;121,12;14897,76;DEF;uyt;0,377320872\n3;878,158;108013,434;GHI;rez;2,735694704\"\"\"\n f = lambda x: float(x.replace(\",\", \".\"))\n converter = {'Number1': f, 'Number2': f, 'Number3': f}\n df2 = self.read_csv(StringIO(data), sep=';', converters=converter)\n self.assertEqual(df2['Number1'].dtype, float)\n self.assertEqual(df2['Number2'].dtype, float)\n self.assertEqual(df2['Number3'].dtype, float)\n\n def test_converter_return_string_bug(self):\n # GH #583\n data = \"\"\"Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,738797819\n2;121,12;14897,76;DEF;uyt;0,377320872\n3;878,158;108013,434;GHI;rez;2,735694704\"\"\"\n f = lambda x: float(x.replace(\",\", \".\"))\n converter = {'Number1': f, 'Number2': f, 'Number3': f}\n df2 = self.read_csv(StringIO(data), sep=';', converters=converter)\n self.assertEqual(df2['Number1'].dtype, float)\n\n def test_read_table_buglet_4x_multiindex(self):\n # GH 6607\n # Parsing multi-level index currently causes an error in the C parser.\n # Temporarily copied to TestPythonParser.\n # Here test that CParserError is raised:\n\n with tm.assertRaises(pandas.parser.CParserError):\n text = \"\"\" A B C D E\none two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838\"\"\"\n\n # it works!\n df = self.read_table(StringIO(text), sep='\\s+')\n self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))\n\n def test_line_comment(self):\n data = \"\"\"# empty\nA,B,C\n1,2.,4.#hello world\n#ignore this line\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_almost_equal(df.values, expected)\n\n def test_comment_skiprows(self):\n data = \"\"\"# empty\nrandom line\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # this should ignore the first four lines (including comments)\n df = self.read_csv(StringIO(data), comment='#', skiprows=4)\n tm.assert_almost_equal(df.values, expected)\n\n def test_comment_header(self):\n data = \"\"\"# empty\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # header should begin at the second non-comment line\n df = self.read_csv(StringIO(data), comment='#', header=1)\n tm.assert_almost_equal(df.values, expected)\n\n def test_comment_skiprows_header(self):\n data = \"\"\"# empty\n# second empty line\n# third empty line\nX,Y,Z\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # skiprows should skip the first 4 lines (including comments), while\n # header should start from the second non-commented line starting\n # with line 5\n df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)\n tm.assert_almost_equal(df.values, expected)\n\n def test_read_csv_parse_simple_list(self):\n text = \"\"\"foo\nbar baz\nqux foo\nfoo\nbar\"\"\"\n df = read_csv(StringIO(text), header=None)\n expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',\n 'foo', 'bar']})\n tm.assert_frame_equal(df, expected)\n\n def test_parse_dates_custom_euroformat(self):\n text = 
\"\"\"foo,bar,baz\n31/01/2010,1,2\n01/02/2010,1,NA\n02/02/2010,1,2\n\"\"\"\n parser = lambda d: parse_date(d, dayfirst=True)\n df = self.read_csv(StringIO(text),\n names=['time', 'Q', 'NTU'], header=0,\n index_col=0, parse_dates=True,\n date_parser=parser, na_values=['NA'])\n\n exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),\n datetime(2010, 2, 2)], name='time')\n expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},\n index=exp_index, columns=['Q', 'NTU'])\n tm.assert_frame_equal(df, expected)\n\n parser = lambda d: parse_date(d, day_first=True)\n self.assertRaises(Exception, self.read_csv,\n StringIO(text), skiprows=[0],\n names=['time', 'Q', 'NTU'], index_col=0,\n parse_dates=True, date_parser=parser,\n na_values=['NA'])\n\n def test_na_value_dict(self):\n data = \"\"\"A,B,C\nfoo,bar,NA\nbar,foo,foo\nfoo,bar,NA\nbar,foo,foo\"\"\"\n\n df = self.read_csv(StringIO(data),\n na_values={'A': ['foo'], 'B': ['bar']})\n expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],\n 'B': [np.nan, 'foo', np.nan, 'foo'],\n 'C': [np.nan, 'foo', np.nan, 'foo']})\n tm.assert_frame_equal(df, expected)\n\n data = \"\"\"\\\na,b,c,d\n0,NA,1,5\n\"\"\"\n xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])\n xp.index.name = 'a'\n df = self.read_csv(StringIO(data), na_values={}, index_col=0)\n tm.assert_frame_equal(df, xp)\n\n xp = DataFrame({'b': [np.nan], 'd': [5]},\n MultiIndex.from_tuples([(0, 1)]))\n xp.index.names = ['a', 'c']\n df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])\n tm.assert_frame_equal(df, xp)\n\n xp = DataFrame({'b': [np.nan], 'd': [5]},\n MultiIndex.from_tuples([(0, 1)]))\n xp.index.names = ['a', 'c']\n df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])\n tm.assert_frame_equal(df, xp)\n\n @tm.network\n def test_url(self):\n # HTTP(S)\n url = ('https://raw.github.com/pydata/pandas/master/'\n 'pandas/io/tests/data/salary.table')\n url_table = self.read_table(url)\n dirpath = tm.get_data_path()\n localtable = os.path.join(dirpath, 'salary.table')\n local_table = self.read_table(localtable)\n tm.assert_frame_equal(url_table, local_table)\n # TODO: ftp testing\n\n @slow\n def test_file(self):\n\n # FILE\n if sys.version_info[:2] < (2, 6):\n raise nose.SkipTest(\"file:// not supported with Python < 2.6\")\n dirpath = tm.get_data_path()\n localtable = os.path.join(dirpath, 'salary.table')\n local_table = self.read_table(localtable)\n\n try:\n url_table = self.read_table('file://localhost/' + localtable)\n except URLError:\n # fails on some systems\n raise nose.SkipTest(\"failing on %s\" %\n ' '.join(platform.uname()).strip())\n\n tm.assert_frame_equal(url_table, local_table)\n\n def test_parse_tz_aware(self):\n import pytz\n # #1693\n data = StringIO(\"Date,x\\n2012-06-13T01:39:00Z,0.5\")\n\n # it works\n result = read_csv(data, index_col=0, parse_dates=True)\n stamp = result.index[0]\n self.assertEqual(stamp.minute, 39)\n try:\n self.assertIs(result.index.tz, pytz.utc)\n except AssertionError: # hello Yaroslav\n arr = result.index.to_pydatetime()\n result = tools.to_datetime(arr, utc=True)[0]\n self.assertEqual(stamp.minute, result.minute)\n self.assertEqual(stamp.hour, result.hour)\n self.assertEqual(stamp.day, result.day)\n\n def test_multiple_date_cols_index(self):\n data = \"\"\"\\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 
21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})\n df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},\n index_col='nominal')\n tm.assert_frame_equal(xp.set_index('nominal'), df)\n df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},\n index_col=0)\n tm.assert_frame_equal(df2, df)\n\n df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)\n tm.assert_frame_equal(df3, df, check_names=False)\n\n def test_multiple_date_cols_chunked(self):\n df = self.read_csv(StringIO(self.ts_data), parse_dates={\n 'nominal': [1, 2]}, index_col='nominal')\n reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':\n [1, 2]}, index_col='nominal', chunksize=2)\n\n chunks = list(reader)\n\n self.assertNotIn('nominalTime', df)\n\n tm.assert_frame_equal(chunks[0], df[:2])\n tm.assert_frame_equal(chunks[1], df[2:4])\n tm.assert_frame_equal(chunks[2], df[4:])\n\n def test_multiple_date_col_named_components(self):\n xp = self.read_csv(StringIO(self.ts_data),\n parse_dates={'nominal': [1, 2]},\n index_col='nominal')\n colspec = {'nominal': ['date', 'nominalTime']}\n df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,\n index_col='nominal')\n tm.assert_frame_equal(df, xp)\n\n def test_multiple_date_col_multiple_index(self):\n df = self.read_csv(StringIO(self.ts_data),\n parse_dates={'nominal': [1, 2]},\n index_col=['nominal', 'ID'])\n\n xp = self.read_csv(StringIO(self.ts_data),\n parse_dates={'nominal': [1, 2]})\n\n tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)\n\n def test_comment(self):\n data = \"\"\"A,B,C\n1,2.,4.#hello world\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_almost_equal(df.values, expected)\n\n df = self.read_table(StringIO(data), sep=',', comment='#',\n na_values=['NaN'])\n tm.assert_almost_equal(df.values, expected)\n\n def test_bool_na_values(self):\n data = \"\"\"A,B,C\nTrue,False,True\nNA,True,False\nFalse,NA,True\"\"\"\n\n result = self.read_csv(StringIO(data))\n expected = DataFrame({'A': np.array([True, nan, False], dtype=object),\n 'B': np.array([False, True, nan], dtype=object),\n 'C': [True, False, True]})\n\n tm.assert_frame_equal(result, expected)\n\n def test_nonexistent_path(self):\n # don't segfault pls #2428\n path = '%s.csv' % tm.rands(10)\n self.assertRaises(Exception, self.read_csv, path)\n\n def test_missing_trailing_delimiters(self):\n data = \"\"\"A,B,C,D\n1,2,3,4\n1,3,3,\n1,4,5\"\"\"\n result = self.read_csv(StringIO(data))\n self.assertTrue(result['D'].isnull()[1:].all())\n\n def test_skipinitialspace(self):\n s = ('\"09-Apr-2012\", \"01:10:18.300\", 2456026.548822908, 12849, '\n '1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '\n '314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '\n '70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '\n '0.212036, 14.7674, 41.605, -9999.0, -9999.0, '\n '-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')\n\n sfile = StringIO(s)\n # it's 33 columns\n result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],\n header=None, skipinitialspace=True)\n self.assertTrue(pd.isnull(result.ix[0, 29]))\n\n def 
test_utf16_bom_skiprows(self):\n # #2298\n data = u(\"\"\"skip this\nskip this too\nA\\tB\\tC\n1\\t2\\t3\n4\\t5\\t6\"\"\")\n\n data2 = u(\"\"\"skip this\nskip this too\nA,B,C\n1,2,3\n4,5,6\"\"\")\n\n path = '__%s__.csv' % tm.rands(10)\n\n with tm.ensure_clean(path) as path:\n for sep, dat in [('\\t', data), (',', data2)]:\n for enc in ['utf-16', 'utf-16le', 'utf-16be']:\n bytes = dat.encode(enc)\n with open(path, 'wb') as f:\n f.write(bytes)\n\n s = BytesIO(dat.encode('utf-8'))\n if compat.PY3:\n # somewhat False since the code never sees bytes\n from io import TextIOWrapper\n s = TextIOWrapper(s, encoding='utf-8')\n\n result = self.read_csv(path, encoding=enc, skiprows=2,\n sep=sep)\n expected = self.read_csv(s, encoding='utf-8', skiprows=2,\n sep=sep)\n\n tm.assert_frame_equal(result, expected)\n\n def test_utf16_example(self):\n path = tm.get_data_path('utf16_ex.txt')\n\n # it works! and is the right length\n result = self.read_table(path, encoding='utf-16')\n self.assertEqual(len(result), 50)\n\n if not compat.PY3:\n buf = BytesIO(open(path, 'rb').read())\n result = self.read_table(buf, encoding='utf-16')\n self.assertEqual(len(result), 50)\n\n def test_converters_corner_with_nas(self):\n # skip aberration observed on Win64 Python 3.2.2\n if hash(np.int64(-1)) != -2:\n raise nose.SkipTest(\"skipping because of windows hash on Python\"\n \" 3.2.2\")\n\n csv = \"\"\"id,score,days\n1,2,12\n2,2-5,\n3,,14+\n4,6-12,2\"\"\"\n\n def convert_days(x):\n x = x.strip()\n if not x:\n return np.nan\n\n is_plus = x.endswith('+')\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n return x\n\n def convert_days_sentinel(x):\n x = x.strip()\n if not x:\n return np.nan\n\n is_plus = x.endswith('+')\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n return x\n\n def convert_score(x):\n x = x.strip()\n if not x:\n return np.nan\n if x.find('-') > 0:\n valmin, valmax = lmap(int, x.split('-'))\n val = 0.5 * (valmin + valmax)\n else:\n val = float(x)\n\n return val\n\n fh = StringIO(csv)\n result = self.read_csv(fh, converters={'score': convert_score,\n 'days': convert_days},\n na_values=['', None])\n self.assertTrue(pd.isnull(result['days'][1]))\n\n fh = StringIO(csv)\n result2 = self.read_csv(fh, converters={'score': convert_score,\n 'days': convert_days_sentinel},\n na_values=['', None])\n tm.assert_frame_equal(result, result2)\n\n def test_unicode_encoding(self):\n pth = tm.get_data_path('unicode_series.csv')\n\n result = self.read_csv(pth, header=None, encoding='latin-1')\n result = result.set_index(0)\n\n got = result[1][1632]\n expected = u('\\xc1 k\\xf6ldum klaka (Cold Fever) (1994)')\n\n self.assertEqual(got, expected)\n\n def test_trailing_delimiters(self):\n # #2442. 
grumble grumble\n data = \"\"\"A,B,C\n1,2,3,\n4,5,6,\n7,8,9,\"\"\"\n result = self.read_csv(StringIO(data), index_col=False)\n\n expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],\n 'C': [3, 6, 9]})\n\n tm.assert_frame_equal(result, expected)\n\n def test_escapechar(self):\n # http://stackoverflow.com/questions/13824840/feature-request-for-\n # pandas-read-csv\n data = '''SEARCH_TERM,ACTUAL_URL\n\"bra tv bord\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"\n\"tv p\\xc3\\xa5 hjul\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"\n\"SLAGBORD, \\\\\"Bergslagen\\\\\", IKEA:s 1700-tals serie\",\"http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord\"'''\n\n result = self.read_csv(StringIO(data), escapechar='\\\\',\n quotechar='\"', encoding='utf-8')\n self.assertEqual(result['SEARCH_TERM'][2],\n 'SLAGBORD, \"Bergslagen\", IKEA:s 1700-tals serie')\n self.assertTrue(np.array_equal(result.columns,\n ['SEARCH_TERM', 'ACTUAL_URL']))\n\n def test_header_names_backward_compat(self):\n # #2539\n data = '1,2,3\\n4,5,6'\n\n result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])\n expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],\n header=None)\n tm.assert_frame_equal(result, expected)\n\n data2 = 'foo,bar,baz\\n' + data\n result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],\n header=0)\n tm.assert_frame_equal(result, expected)\n\n def test_int64_min_issues(self):\n # #2599\n data = 'A,B\\n0,0\\n0,'\n\n result = self.read_csv(StringIO(data))\n expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})\n\n tm.assert_frame_equal(result, expected)\n\n def test_parse_integers_above_fp_precision(self):\n data = \"\"\"Numbers\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000194\"\"\"\n\n result = self.read_csv(StringIO(data))\n expected = DataFrame({'Numbers': [17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000194]})\n\n self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))\n\n def test_usecols_index_col_conflict(self):\n # Issue 4201 Test that index_col as integer reflects usecols\n data = \"\"\"SecId,Time,Price,P2,P3\n10000,2013-5-11,100,10,1\n500,2013-5-12,101,11,1\n\"\"\"\n expected = DataFrame({'Price': [100, 101]}, index=[datetime(2013, 5, 11), datetime(2013, 5, 12)])\n expected.index.name = 'Time'\n\n df = self.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col=0)\n tm.assert_frame_equal(expected, df)\n\n df = self.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col='Time')\n tm.assert_frame_equal(expected, df)\n\n df = self.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col='Time')\n tm.assert_frame_equal(expected, df)\n\n df = self.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col=0)\n tm.assert_frame_equal(expected, df)\n\n expected = DataFrame({'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})\n expected = expected.set_index(['Price', 'P2'])\n df = self.read_csv(StringIO(data), usecols=['Price', 'P2', 'P3'], parse_dates=True, 
index_col=['Price', 'P2'])\n tm.assert_frame_equal(expected, df)\n\n def test_chunks_have_consistent_numerical_type(self):\n integers = [str(i) for i in range(499999)]\n data = \"a\\n\" + \"\\n\".join(integers + [\"1.0\", \"2.0\"] + integers)\n\n with tm.assert_produces_warning(False):\n df = self.read_csv(StringIO(data))\n self.assertTrue(type(df.a[0]) is np.float64) # Assert that types were coerced.\n self.assertEqual(df.a.dtype, np.float)\n\n def test_warn_if_chunks_have_mismatched_type(self):\n # See test in TestCParserLowMemory.\n integers = [str(i) for i in range(499999)]\n data = \"a\\n\" + \"\\n\".join(integers + ['a', 'b'] + integers)\n\n with tm.assert_produces_warning(False):\n df = self.read_csv(StringIO(data))\n self.assertEqual(df.a.dtype, np.object)\n\n def test_usecols(self):\n data = \"\"\"\\\na,b,c\n1,2,3\n4,5,6\n7,8,9\n10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data), usecols=(1, 2))\n result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))\n exp = self.read_csv(StringIO(data))\n\n self.assertEqual(len(result.columns), 2)\n self.assertTrue((result['b'] == exp['b']).all())\n self.assertTrue((result['c'] == exp['c']).all())\n\n tm.assert_frame_equal(result, result2)\n\n result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,\n names=['foo', 'bar'])\n expected = self.read_csv(StringIO(data), usecols=[1, 2])\n expected.columns = ['foo', 'bar']\n tm.assert_frame_equal(result, expected)\n\n data = \"\"\"\\\n1,2,3\n4,5,6\n7,8,9\n10,11,12\"\"\"\n result = self.read_csv(StringIO(data), names=['b', 'c'],\n header=None, usecols=[1, 2])\n\n expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],\n header=None)\n expected = expected[['b', 'c']]\n tm.assert_frame_equal(result, expected)\n\n result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],\n header=None, usecols=['b', 'c'])\n tm.assert_frame_equal(result2, result)\n\n\n # 5766\n result = self.read_csv(StringIO(data), names=['a', 'b'],\n header=None, usecols=[0, 1])\n\n expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],\n header=None)\n expected = expected[['a', 'b']]\n tm.assert_frame_equal(result, expected)\n\n # length conflict, passed names and usecols disagree\n self.assertRaises(ValueError, self.read_csv, StringIO(data),\n names=['a', 'b'], usecols=[1], header=None)\n\n def test_integer_overflow_bug(self):\n # #2601\n data = \"65248E10 11\\n55555E55 22\\n\"\n\n result = self.read_csv(StringIO(data), header=None, sep=' ')\n self.assertTrue(result[0].dtype == np.float64)\n\n result = self.read_csv(StringIO(data), header=None, sep='\\s+')\n self.assertTrue(result[0].dtype == np.float64)\n\n def test_catch_too_many_names(self):\n # Issue 5156\n data = \"\"\"\\\n1,2,3\n4,,6\n7,8,9\n10,11,12\\n\"\"\"\n tm.assertRaises(Exception, read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd'])\n\n def test_ignore_leading_whitespace(self):\n # GH 6607, GH 3374\n data = ' a b c\\n 1 2 3\\n 4 5 6\\n 7 8 9'\n result = self.read_table(StringIO(data), sep='\\s+')\n expected = DataFrame({'a':[1,4,7], 'b':[2,5,8], 'c': [3,6,9]})\n tm.assert_frame_equal(result, expected)\n\n def test_nrows_and_chunksize_raises_notimplemented(self):\n data = 'a b c'\n self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),\n nrows=10, chunksize=5)\n\n def test_single_char_leading_whitespace(self):\n # GH 9710\n data = \"\"\"\\\nMyColumn\n a\n b\n a\n b\\n\"\"\"\n\n expected = DataFrame({'MyColumn' : list('abab')})\n\n result = self.read_csv(StringIO(data), skipinitialspace=True)\n 
tm.assert_frame_equal(result, expected)\n\n def test_chunk_begins_with_newline_whitespace(self):\n # GH 10022\n data = '\\n hello\\nworld\\n'\n result = self.read_csv(StringIO(data), header=None)\n self.assertEqual(len(result), 2)\n\n # GH 9735\n chunk1 = 'a' * (1024 * 256 - 2) + '\\na'\n chunk2 = '\\n a'\n result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)\n expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_index(self):\n # GH 10184\n data = 'x,y'\n result = self.read_csv(StringIO(data), index_col=0)\n expected = DataFrame([], columns=['y'], index=Index([], name='x'))\n tm.assert_frame_equal(result, expected)\n\n def test_emtpy_with_multiindex(self):\n # GH 10467\n data = 'x,y,z'\n result = self.read_csv(StringIO(data), index_col=['x', 'y'])\n expected = DataFrame([], columns=['z'],\n index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_reversed_multiindex(self):\n data = 'x,y,z'\n result = self.read_csv(StringIO(data), index_col=[1, 0])\n expected = DataFrame([], columns=['z'],\n index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))\n tm.assert_frame_equal(result, expected)\n\n def test_empty_index_col_scenarios(self):\n data = 'x,y,z'\n\n # None, no index\n index_col, expected = None, DataFrame([], columns=list('xyz')),\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # False, no index\n index_col, expected = False, DataFrame([], columns=list('xyz')),\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # int, first column\n index_col, expected = 0, DataFrame([], columns=['y', 'z'], index=Index([], name='x'))\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # int, not first column\n index_col, expected = 1, DataFrame([], columns=['x', 'z'], index=Index([], name='y'))\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # str, first column\n index_col, expected = 'x', DataFrame([], columns=['y', 'z'], index=Index([], name='x'))\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # str, not the first column\n index_col, expected = 'y', DataFrame([], columns=['x', 'z'], index=Index([], name='y'))\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # list of int\n index_col, expected = [0, 1], DataFrame([], columns=['z'],\n index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # list of str\n index_col, expected = (\n ['x', 'y'],\n DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))\n )\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # list of int, reversed sequence\n index_col, expected = (\n [1, 0],\n DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))\n )\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n # list of str, reversed sequence\n index_col, expected = (\n ['y', 'x'],\n DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))\n )\n tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)\n\n def test_empty_with_index_col_false(self):\n # GH 10413\n data = 'x,y'\n result = 
self.read_csv(StringIO(data), index_col=False)\n expected = DataFrame([], columns=['x', 'y'])\n tm.assert_frame_equal(result, expected)\n\n def test_float_parser(self):\n # GH 9565\n data = '45e-1,4.5,45.,inf,-inf'\n result = self.read_csv(StringIO(data), header=None)\n expected = pd.DataFrame([[float(s) for s in data.split(',')]])\n tm.assert_frame_equal(result, expected)\n\n def test_int64_overflow(self):\n data = \"\"\"ID\n00013007854817840016671868\n00013007854817840016749251\n00013007854817840016754630\n00013007854817840016781876\n00013007854817840017028824\n00013007854817840017963235\n00013007854817840018860166\"\"\"\n\n result = self.read_csv(StringIO(data))\n self.assertTrue(result['ID'].dtype == object)\n\n self.assertRaises((OverflowError, pandas.parser.OverflowError),\n self.read_csv, StringIO(data),\n converters={'ID' : np.int64})\n\n # Just inside int64 range: parse as integer\n i_max = np.iinfo(np.int64).max\n i_min = np.iinfo(np.int64).min\n for x in [i_max, i_min]:\n result = pd.read_csv(StringIO(str(x)), header=None)\n expected = pd.DataFrame([x])\n tm.assert_frame_equal(result, expected)\n\n # Just outside int64 range: parse as string\n too_big = i_max + 1\n too_small = i_min - 1\n for x in [too_big, too_small]:\n result = pd.read_csv(StringIO(str(x)), header=None)\n expected = pd.DataFrame([str(x)])\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_nrows_chunksize(self):\n # GH 9535\n expected = pd.DataFrame([], columns=['foo', 'bar'])\n\n result = self.read_csv(StringIO('foo,bar\\n'), nrows=10)\n tm.assert_frame_equal(result, expected)\n\n result = next(iter(pd.read_csv(StringIO('foo,bar\\n'), chunksize=10)))\n tm.assert_frame_equal(result, expected)\n\n result = pd.read_csv(StringIO('foo,bar\\n'), nrows=10, as_recarray=True)\n result = pd.DataFrame(result[2], columns=result[1], index=result[0])\n tm.assert_frame_equal(pd.DataFrame.from_records(result), expected)\n\n result = next(iter(pd.read_csv(StringIO('foo,bar\\n'), chunksize=10, as_recarray=True)))\n result = pd.DataFrame(result[2], columns=result[1], index=result[0])\n tm.assert_frame_equal(pd.DataFrame.from_records(result), expected)\n\n def test_eof_states(self):\n # GH 10728 and 10548\n\n ## With skip_blank_lines = True\n expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])\n\n # GH 10728\n # WHITESPACE_LINE\n data = 'a,b,c\\n4,5,6\\n '\n result = self.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n # GH 10548\n # EAT_LINE_COMMENT\n data = 'a,b,c\\n4,5,6\\n#comment'\n result = self.read_csv(StringIO(data), comment='#')\n tm.assert_frame_equal(result, expected)\n\n # EAT_CRNL_NOP\n data = 'a,b,c\\n4,5,6\\n\\r'\n result = self.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n # EAT_COMMENT\n data = 'a,b,c\\n4,5,6#comment'\n result = self.read_csv(StringIO(data), comment='#')\n tm.assert_frame_equal(result, expected)\n\n # SKIP_LINE\n data = 'a,b,c\\n4,5,6\\nskipme'\n result = self.read_csv(StringIO(data), skiprows=[2])\n tm.assert_frame_equal(result, expected)\n\n ## With skip_blank_lines = False\n\n # EAT_LINE_COMMENT\n data = 'a,b,c\\n4,5,6\\n#comment'\n result = self.read_csv(StringIO(data), comment='#', skip_blank_lines=False)\n expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])\n tm.assert_frame_equal(result, expected)\n\n # IN_FIELD\n data = 'a,b,c\\n4,5,6\\n '\n result = self.read_csv(StringIO(data), skip_blank_lines=False)\n expected = pd.DataFrame([['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])\n 
tm.assert_frame_equal(result, expected)\n\n # EAT_CRNL\n data = 'a,b,c\\n4,5,6\\n\\r'\n result = self.read_csv(StringIO(data), skip_blank_lines=False)\n expected = pd.DataFrame([[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])\n tm.assert_frame_equal(result, expected)\n\n ## Should produce exceptions\n\n # ESCAPED_CHAR\n data = \"a,b,c\\n4,5,6\\n\\\\\"\n self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\\\')\n\n # ESCAPE_IN_QUOTED_FIELD\n data = 'a,b,c\\n4,5,6\\n\"\\\\'\n self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\\\')\n\n # IN_QUOTED_FIELD\n # Python 2.6 won't throw an exception for this case (see http://bugs.python.org/issue16013)\n tm._skip_if_python26()\n data = 'a,b,c\\n4,5,6\\n\"'\n self.assertRaises(Exception, self.read_csv, StringIO(data), escapechar='\\\\')\n\n\n\nclass TestPythonParser(ParserTests, tm.TestCase):\n def test_negative_skipfooter_raises(self):\n text = \"\"\"#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n1/1/2000,1.,2.,3.\n1/2/2000,4,5,6\n1/3/2000,7,8,9\n\"\"\"\n\n with tm.assertRaisesRegexp(ValueError,\n 'skip footer cannot be negative'):\n df = self.read_csv(StringIO(text), skipfooter=-1)\n\n def read_csv(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'python'\n return read_csv(*args, **kwds)\n\n def read_table(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'python'\n return read_table(*args, **kwds)\n\n def test_sniff_delimiter(self):\n text = \"\"\"index|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n data = self.read_csv(StringIO(text), index_col=0, sep=None)\n self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))\n\n data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')\n tm.assert_frame_equal(data, data2)\n\n text = \"\"\"ignore this\nignore this too\nindex|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n data3 = self.read_csv(StringIO(text), index_col=0,\n sep=None, skiprows=2)\n tm.assert_frame_equal(data, data3)\n\n text = u(\"\"\"ignore this\nignore this too\nindex|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\").encode('utf-8')\n\n s = BytesIO(text)\n if compat.PY3:\n # somewhat False since the code never sees bytes\n from io import TextIOWrapper\n s = TextIOWrapper(s, encoding='utf-8')\n\n data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,\n encoding='utf-8')\n tm.assert_frame_equal(data, data4)\n\n def test_regex_separator(self):\n data = \"\"\" A B C D\na 1 2 3 4\nb 1 2 3 4\nc 1 2 3 4\n\"\"\"\n df = self.read_table(StringIO(data), sep='\\s+')\n expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),\n index_col=0)\n self.assertIsNone(expected.index.name)\n tm.assert_frame_equal(df, expected)\n\n def test_1000_fwf(self):\n data = \"\"\"\n 1 2,334.0 5\n10 13 10.\n\"\"\"\n expected = [[1, 2334., 5],\n [10, 13, 10]]\n df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],\n thousands=',')\n tm.assert_almost_equal(df.values, expected)\n\n def test_1000_sep_with_decimal(self):\n data = \"\"\"A|B|C\n1|2,334.01|5\n10|13|10.\n\"\"\"\n\n expected = DataFrame({\n 'A': [1, 10],\n 'B': [2334.01, 13],\n 'C': [5, 10.]\n })\n\n df = self.read_csv(StringIO(data), sep='|', thousands=',')\n tm.assert_frame_equal(df, expected)\n\n df = self.read_table(StringIO(data), sep='|', thousands=',')\n tm.assert_frame_equal(df, expected)\n\n def test_comment_fwf(self):\n data = \"\"\"\n 1 2. 
4 #hello world\n 5 NaN 10.0\n\"\"\"\n expected = [[1, 2., 4],\n [5, np.nan, 10.]]\n df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],\n comment='#')\n tm.assert_almost_equal(df.values, expected)\n\n def test_fwf(self):\n data_expected = \"\"\"\\\n2011,58,360.242940,149.910199,11950.7\n2011,59,444.953632,166.985655,11788.4\n2011,60,364.136849,183.628767,11806.2\n2011,61,413.836124,184.375703,11916.8\n2011,62,502.953953,173.237159,12468.3\n\"\"\"\n expected = self.read_csv(StringIO(data_expected), header=None)\n\n data1 = \"\"\"\\\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n\"\"\"\n colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]\n df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)\n tm.assert_frame_equal(df, expected)\n\n data2 = \"\"\"\\\n2011 58 360.242940 149.910199 11950.7\n2011 59 444.953632 166.985655 11788.4\n2011 60 364.136849 183.628767 11806.2\n2011 61 413.836124 184.375703 11916.8\n2011 62 502.953953 173.237159 12468.3\n\"\"\"\n df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)\n tm.assert_frame_equal(df, expected)\n\n # From Thomas Kluyver: apparently some non-space filler characters can\n # be seen, this is supported by specifying the 'delimiter' character:\n # http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html\n data3 = \"\"\"\\\n201158~~~~360.242940~~~149.910199~~~11950.7\n201159~~~~444.953632~~~166.985655~~~11788.4\n201160~~~~364.136849~~~183.628767~~~11806.2\n201161~~~~413.836124~~~184.375703~~~11916.8\n201162~~~~502.953953~~~173.237159~~~12468.3\n\"\"\"\n df = read_fwf(\n StringIO(data3), colspecs=colspecs, delimiter='~', header=None)\n tm.assert_frame_equal(df, expected)\n\n with tm.assertRaisesRegexp(ValueError, \"must specify only one of\"):\n read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])\n\n with tm.assertRaisesRegexp(ValueError, \"Must specify either\"):\n read_fwf(StringIO(data3), colspecs=None, widths=None)\n\n def test_fwf_colspecs_is_list_or_tuple(self):\n with tm.assertRaisesRegexp(TypeError,\n 'column specifications must be a list or '\n 'tuple.+'):\n pd.io.parsers.FixedWidthReader(StringIO(self.data1),\n {'a': 1}, ',', '#')\n\n def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):\n with tm.assertRaisesRegexp(TypeError,\n 'Each column specification must be.+'):\n read_fwf(StringIO(self.data1), [('a', 1)])\n\n def test_fwf_colspecs_None(self):\n # GH 7079\n data = \"\"\"\\\n123456\n456789\n\"\"\"\n colspecs = [(0, 3), (3, None)]\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None)\n expected = DataFrame([[123, 456], [456, 789]])\n tm.assert_frame_equal(result, expected)\n\n colspecs = [(None, 3), (3, 6)]\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None)\n expected = DataFrame([[123, 456], [456, 789]])\n tm.assert_frame_equal(result, expected)\n\n colspecs = [(0, None), (3, None)]\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None)\n expected = DataFrame([[123456, 456], [456789, 789]])\n tm.assert_frame_equal(result, expected)\n\n colspecs = [(None, None), (3, 6)]\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None)\n expected = DataFrame([[123456, 456], [456789, 789]])\n tm.assert_frame_equal(result, expected)\n\n\n def test_fwf_regression(self):\n # GH 3594\n #### turns out 'T060' is 
parsable as a datetime slice!\n\n tzlist = [1,10,20,30,60,80,100]\n ntz = len(tzlist)\n tcolspecs = [16]+[8]*ntz\n tcolnames = ['SST'] + [\"T%03d\" % z for z in tzlist[1:]]\n data = \"\"\" 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192\n 2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869\n 2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657\n 2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379\n 2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039\n\"\"\"\n\n df = read_fwf(StringIO(data),\n index_col=0,\n header=None,\n names=tcolnames,\n widths=tcolspecs,\n parse_dates=True,\n date_parser=lambda s: datetime.strptime(s,'%Y%j%H%M%S'))\n\n for c in df.columns:\n res = df.loc[:,c]\n self.assertTrue(len(res))\n\n def test_fwf_for_uint8(self):\n data = \"\"\"1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127\n1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71\"\"\"\n df = read_fwf(StringIO(data),\n colspecs=[(0,17),(25,26),(33,37),(49,51),(58,62),(63,1000)],\n names=['time','pri','pgn','dst','src','data'],\n converters={\n 'pgn':lambda x: int(x,16),\n 'src':lambda x: int(x,16),\n 'dst':lambda x: int(x,16),\n 'data':lambda x: len(x.split(' '))})\n\n expected = DataFrame([[1421302965.213420,3,61184,23,40,8],\n [1421302964.226776,6,61442,None, 71,8]],\n columns = [\"time\", \"pri\", \"pgn\", \"dst\", \"src\",\"data\"])\n expected[\"dst\"] = expected[\"dst\"].astype(object)\n\n tm.assert_frame_equal(df, expected)\n\n def test_fwf_compression(self):\n try:\n import gzip\n import bz2\n except ImportError:\n raise nose.SkipTest(\"Need gzip and bz2 to run this test\")\n\n data = \"\"\"1111111111\n 2222222222\n 3333333333\"\"\".strip()\n widths = [5, 5]\n names = ['one', 'two']\n expected = read_fwf(StringIO(data), widths=widths, names=names)\n if compat.PY3:\n data = bytes(data, encoding='utf-8')\n comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]\n for comp_name, compresser in comps:\n with tm.ensure_clean() as path:\n tmp = compresser(path, mode='wb')\n tmp.write(data)\n tmp.close()\n result = read_fwf(path, widths=widths, names=names,\n compression=comp_name)\n tm.assert_frame_equal(result, expected)\n\n def test_BytesIO_input(self):\n if not compat.PY3:\n raise nose.SkipTest(\"Bytes-related test - only needs to work on Python 3\")\n result = pd.read_fwf(BytesIO(\"שלום\\nשלום\".encode('utf8')), widths=[2,2], encoding='utf8')\n expected = pd.DataFrame([[\"של\", \"ום\"]], columns=[\"של\", \"ום\"])\n tm.assert_frame_equal(result, expected)\n data = BytesIO(\"שלום::1234\\n562::123\".encode('cp1255'))\n result = pd.read_table(data, sep=\"::\", engine='python',\n encoding='cp1255')\n expected = pd.DataFrame([[562, 123]], columns=[\"שלום\",\"1234\"])\n tm.assert_frame_equal(result, expected)\n\n def test_verbose_import(self):\n text = \"\"\"a,b,c,d\none,1,2,3\none,1,2,3\n,1,2,3\none,1,2,3\n,1,2,3\n,1,2,3\none,1,2,3\ntwo,1,2,3\"\"\"\n\n buf = StringIO()\n sys.stdout = buf\n\n try:\n # it works!\n df = self.read_csv(StringIO(text), verbose=True)\n self.assertEqual(buf.getvalue(), 'Filled 3 NA values in column a\\n')\n finally:\n sys.stdout = sys.__stdout__\n\n buf = StringIO()\n sys.stdout = buf\n\n text = \"\"\"a,b,c,d\none,1,2,3\ntwo,1,2,3\nthree,1,2,3\nfour,1,2,3\nfive,1,2,3\n,1,2,3\nseven,1,2,3\neight,1,2,3\"\"\"\n\n try:\n # it works!\n df = self.read_csv(StringIO(text), verbose=True, index_col=0)\n self.assertEqual(buf.getvalue(), 'Filled 1 NA values in column a\\n')\n finally:\n sys.stdout 
= sys.__stdout__\n\n def test_float_precision_specified(self):\n # Should raise an error if float_precision (C parser option) is specified\n with tm.assertRaisesRegexp(ValueError, \"The 'float_precision' option \"\n \"is not supported with the 'python' engine\"):\n self.read_csv(StringIO('a,b,c\\n1,2,3'), float_precision='high')\n\n def test_iteration_open_handle(self):\n if PY3:\n raise nose.SkipTest(\"won't work in Python 3 {0}\".format(sys.version_info))\n\n with tm.ensure_clean() as path:\n with open(path, 'wb') as f:\n f.write('AAA\\nBBB\\nCCC\\nDDD\\nEEE\\nFFF\\nGGG')\n\n with open(path, 'rb') as f:\n for line in f:\n if 'CCC' in line:\n break\n\n try:\n read_table(f, squeeze=True, header=None, engine='c')\n except Exception:\n pass\n else:\n raise ValueError('this should not happen')\n\n result = read_table(f, squeeze=True, header=None,\n engine='python')\n\n expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)\n tm.assert_series_equal(result, expected)\n\n def test_iterator(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the issue with the C parser is fixed\n\n reader = self.read_csv(StringIO(self.data1), index_col=0,\n iterator=True)\n df = self.read_csv(StringIO(self.data1), index_col=0)\n\n chunk = reader.read(3)\n tm.assert_frame_equal(chunk, df[:3])\n\n last_chunk = reader.read(5)\n tm.assert_frame_equal(last_chunk, df[3:])\n\n # pass list\n lines = list(csv.reader(StringIO(self.data1)))\n parser = TextParser(lines, index_col=0, chunksize=2)\n\n df = self.read_csv(StringIO(self.data1), index_col=0)\n\n chunks = list(parser)\n tm.assert_frame_equal(chunks[0], df[:2])\n tm.assert_frame_equal(chunks[1], df[2:4])\n tm.assert_frame_equal(chunks[2], df[4:])\n\n # pass skiprows\n parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])\n chunks = list(parser)\n tm.assert_frame_equal(chunks[0], df[1:3])\n\n # test bad parameter (skip_footer)\n reader = self.read_csv(StringIO(self.data1), index_col=0,\n iterator=True, skip_footer=True)\n self.assertRaises(ValueError, reader.read, 3)\n\n treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,\n iterator=True)\n tm.assertIsInstance(treader, TextFileReader)\n\n # stopping iteration when on chunksize is specified, GH 3967\n data = \"\"\"A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n\"\"\"\n reader = self.read_csv(StringIO(data), iterator=True)\n result = list(reader)\n expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])\n tm.assert_frame_equal(result[0], expected)\n\n # chunksize = 1\n reader = self.read_csv(StringIO(data), chunksize=1)\n result = list(reader)\n expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])\n self.assertEqual(len(result), 3)\n tm.assert_frame_equal(pd.concat(result), expected)\n\n def test_single_line(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the issue with the C parser is fixed\n\n # sniff separator\n buf = StringIO()\n sys.stdout = buf\n\n # printing warning message when engine == 'c' for now\n\n try:\n # it works!\n df = self.read_csv(StringIO('1,2'), names=['a', 'b'],\n header=None, sep=None)\n tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)\n finally:\n sys.stdout = sys.__stdout__\n\n def test_malformed(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the issue with the C parser is fixed\n\n # all\n data = \"\"\"ignore\nA,B,C\n1,2,3 # 
comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n\n try:\n df = self.read_table(\n StringIO(data), sep=',', header=1, comment='#')\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))\n\n # skip_footer\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\nfooter\n\"\"\"\n\n try:\n df = self.read_table(\n StringIO(data), sep=',', header=1, comment='#',\n skip_footer=1)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))\n\n # first chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',',\n header=1, comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(5)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n # middle chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',', header=1,\n comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(1)\n it.read(2)\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n # last chunk\n data = \"\"\"ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n\"\"\"\n try:\n it = self.read_table(StringIO(data), sep=',',\n header=1, comment='#', iterator=True, chunksize=1,\n skiprows=[2])\n df = it.read(1)\n it.read()\n self.assertTrue(False)\n except Exception as inst:\n self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))\n\n def test_skip_footer(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the issue with the C parser is fixed\n\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\nwant to skip this\nalso also skip this\n\"\"\"\n result = self.read_csv(StringIO(data), skip_footer=2)\n no_footer = '\\n'.join(data.split('\\n')[:-3])\n expected = self.read_csv(StringIO(no_footer))\n\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data), nrows=3)\n tm.assert_frame_equal(result, expected)\n\n # skipfooter alias\n result = self.read_csv(StringIO(data), skipfooter=2)\n no_footer = '\\n'.join(data.split('\\n')[:-3])\n expected = self.read_csv(StringIO(no_footer))\n\n tm.assert_frame_equal(result, expected)\n\n def test_decompression_regex_sep(self):\n # GH 6607\n # This is a copy which should eventually be moved to ParserTests\n # when the issue with the C parser is fixed\n\n try:\n import gzip\n import bz2\n except ImportError:\n raise nose.SkipTest('need gzip and bz2 to run')\n\n data = open(self.csv1, 'rb').read()\n data = data.replace(b',', b'::')\n expected = self.read_csv(self.csv1)\n\n with tm.ensure_clean() as path:\n tmp = gzip.GzipFile(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n result = self.read_csv(path, sep='::', compression='gzip')\n tm.assert_frame_equal(result, expected)\n\n with tm.ensure_clean() as path:\n tmp = bz2.BZ2File(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n result = self.read_csv(path, sep='::', compression='bz2')\n tm.assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, self.read_csv,\n path, compression='bz3')\n\n def test_read_table_buglet_4x_multiindex(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the issue with multi-level index is fixed in the C parser.\n\n text = \"\"\" A B C D E\none 
two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838\"\"\"\n\n # it works!\n df = self.read_table(StringIO(text), sep='\\s+')\n self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))\n\n # GH 6893\n data = ' A B C\\na b c\\n1 3 7 0 3 6\\n3 1 4 1 5 9'\n expected = DataFrame.from_records([(1,3,7,0,3,6), (3,1,4,1,5,9)],\n columns=list('abcABC'), index=list('abc'))\n actual = self.read_table(StringIO(data), sep='\\s+')\n tm.assert_frame_equal(actual, expected)\n\n def test_line_comment(self):\n data = \"\"\"# empty\nA,B,C\n1,2.,4.#hello world\n#ignore this line\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_almost_equal(df.values, expected)\n\n def test_empty_lines(self):\n data = \"\"\"\\\nA,B,C\n1,2.,4.\n\n\n5.,NaN,10.0\n\n-70,.4,1\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.],\n [-70., .4, 1.]]\n df = self.read_csv(StringIO(data))\n tm.assert_almost_equal(df.values, expected)\n df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\\s+')\n tm.assert_almost_equal(df.values, expected)\n expected = [[1., 2., 4.],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [5., np.nan, 10.],\n [np.nan, np.nan, np.nan],\n [-70., .4, 1.]]\n df = self.read_csv(StringIO(data), skip_blank_lines=False)\n tm.assert_almost_equal(list(df.values), list(expected))\n\n def test_whitespace_lines(self):\n data = \"\"\"\n\n\\t \\t\\t\n \\t\nA,B,C\n \\t 1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1, 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data))\n tm.assert_almost_equal(df.values, expected)\n\n\nclass TestFwfColspaceSniffing(tm.TestCase):\n def test_full_file(self):\n # File with all values\n test = '''index A B C\n2000-01-03T00:00:00 0.980268513777 3 foo\n2000-01-04T00:00:00 1.04791624281 -4 bar\n2000-01-05T00:00:00 0.498580885705 73 baz\n2000-01-06T00:00:00 1.12020151869 1 foo\n2000-01-07T00:00:00 0.487094399463 0 bar\n2000-01-10T00:00:00 0.836648671666 2 baz\n2000-01-11T00:00:00 0.157160753327 34 foo'''\n colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n tm.assert_frame_equal(expected, read_fwf(StringIO(test)))\n\n def test_full_file_with_missing(self):\n # File with missing values\n test = '''index A B C\n2000-01-03T00:00:00 0.980268513777 3 foo\n2000-01-04T00:00:00 1.04791624281 -4 bar\n 0.498580885705 73 baz\n2000-01-06T00:00:00 1.12020151869 1 foo\n2000-01-07T00:00:00 0 bar\n2000-01-10T00:00:00 0.836648671666 2 baz\n 34'''\n colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n tm.assert_frame_equal(expected, read_fwf(StringIO(test)))\n\n def test_full_file_with_spaces(self):\n # File with spaces in columns\n test = '''\nAccount Name Balance CreditLimit AccountCreated\n101 Keanu Reeves 9315.45 10000.00 1/17/1998\n312 Gerard Butler 90.00 1000.00 8/6/2003\n868 Jennifer Love Hewitt 0 17000.00 5/25/1985\n761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006\n317 Bill Murray 789.65 5000.00 2/5/2007\n'''.strip('\\r\\n')\n colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n tm.assert_frame_equal(expected, read_fwf(StringIO(test)))\n\n def test_full_file_with_spaces_and_missing(self):\n # File with spaces and missing values in columsn\n test = '''\nAccount Name Balance CreditLimit AccountCreated\n101 
10000.00 1/17/1998\n312 Gerard Butler 90.00 1000.00 8/6/2003\n868 5/25/1985\n761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006\n317 Bill Murray 789.65\n'''.strip('\\r\\n')\n colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n tm.assert_frame_equal(expected, read_fwf(StringIO(test)))\n\n def test_messed_up_data(self):\n # Completely messed up file\n test = '''\n Account Name Balance Credit Limit Account Created\n 101 10000.00 1/17/1998\n 312 Gerard Butler 90.00 1000.00\n\n 761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006\n 317 Bill Murray 789.65\n'''.strip('\\r\\n')\n colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n tm.assert_frame_equal(expected, read_fwf(StringIO(test)))\n\n def test_multiple_delimiters(self):\n test = r'''\ncol1~~~~~col2 col3++++++++++++++++++col4\n~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves\n 33+++122.33\\\\\\bar.........Gerard Butler\n++44~~~~12.01 baz~~Jennifer Love Hewitt\n~~55 11+++foo++++Jada Pinkett-Smith\n..66++++++.03~~~bar Bill Murray\n'''.strip('\\r\\n')\n colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))\n expected = read_fwf(StringIO(test), colspecs=colspecs,\n delimiter=' +~.\\\\')\n tm.assert_frame_equal(expected, read_fwf(StringIO(test),\n delimiter=' +~.\\\\'))\n\n def test_variable_width_unicode(self):\n if not compat.PY3:\n raise nose.SkipTest('Bytes-related test - only needs to work on Python 3')\n test = '''\nשלום שלום\nום שלל\nשל ום\n'''.strip('\\r\\n')\n expected = pd.read_fwf(BytesIO(test.encode('utf8')),\n colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')\n tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),\n header=None, encoding='utf8'))\n\n\nclass TestCParserHighMemory(ParserTests, tm.TestCase):\n\n def read_csv(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'c'\n kwds['low_memory'] = False\n return read_csv(*args, **kwds)\n\n def read_table(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'c'\n kwds['low_memory'] = False\n return read_table(*args, **kwds)\n\n def test_compact_ints(self):\n if compat.is_platform_windows():\n raise nose.SkipTest(\"segfaults on win-64, only when all tests are run\")\n\n data = ('0,1,0,0\\n'\n '1,1,0,0\\n'\n '0,1,0,1')\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n compact_ints=True, as_recarray=True)\n ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])\n self.assertEqual(result.dtype, ex_dtype)\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n as_recarray=True, compact_ints=True,\n use_unsigned=True)\n ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])\n self.assertEqual(result.dtype, ex_dtype)\n\n def test_parse_dates_empty_string(self):\n # #2263\n s = StringIO(\"Date, test\\n2012-01-01, 1\\n,2\")\n result = self.read_csv(s, parse_dates=[\"Date\"], na_filter=False)\n self.assertTrue(result['Date'].isnull()[1])\n\n def test_usecols(self):\n raise nose.SkipTest(\"Usecols is not supported in C High Memory engine.\")\n\n def test_line_comment(self):\n data = \"\"\"# empty\nA,B,C\n1,2.,4.#hello world\n#ignore this line\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data), comment='#')\n tm.assert_almost_equal(df.values, expected)\n # check with delim_whitespace=True\n df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',\n delim_whitespace=True)\n tm.assert_almost_equal(df.values, expected)\n # check with custom 
line terminator\n df = self.read_csv(StringIO(data.replace('\\n', '*')), comment='#',\n lineterminator='*')\n tm.assert_almost_equal(df.values, expected)\n\n def test_comment_skiprows(self):\n data = \"\"\"# empty\nrandom line\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # this should ignore the first four lines (including comments)\n df = self.read_csv(StringIO(data), comment='#', skiprows=4)\n tm.assert_almost_equal(df.values, expected)\n\n def test_skiprows_lineterminator(self):\n #GH #9079\n data = '\\n'.join(['SMOSMANIA ThetaProbe-ML2X ',\n '2007/01/01 01:00 0.2140 U M ',\n '2007/01/01 02:00 0.2141 M O ',\n '2007/01/01 04:00 0.2142 D M '])\n expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],\n ['2007/01/01', '02:00', 0.2141, 'M', 'O'],\n ['2007/01/01', '04:00', 0.2142, 'D', 'M']],\n columns=['date', 'time', 'var', 'flag',\n 'oflag'])\n # test with the three default lineterminators LF, CR and CRLF\n df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,\n names=['date', 'time', 'var', 'flag', 'oflag'])\n tm.assert_frame_equal(df, expected)\n df = self.read_csv(StringIO(data.replace('\\n', '\\r')),\n skiprows=1, delim_whitespace=True,\n names=['date', 'time', 'var', 'flag', 'oflag'])\n tm.assert_frame_equal(df, expected)\n df = self.read_csv(StringIO(data.replace('\\n', '\\r\\n')),\n skiprows=1, delim_whitespace=True,\n names=['date', 'time', 'var', 'flag', 'oflag'])\n tm.assert_frame_equal(df, expected)\n\n def test_trailing_spaces(self):\n data = \"A B C \\nrandom line with trailing spaces \\nskip\\n1,2,3\\n1,2.,4.\\nrandom line with trailing tabs\\t\\t\\t\\n \\n5.1,NaN,10.0\\n\"\n expected = pd.DataFrame([[1., 2., 4.],\n [5.1, np.nan, 10.]])\n # this should ignore six lines including lines with trailing\n # whitespace and blank lines. 
issues 8661, 8679\n df = self.read_csv(StringIO(data.replace(',', ' ')),\n header=None, delim_whitespace=True,\n skiprows=[0,1,2,3,5,6], skip_blank_lines=True)\n tm.assert_frame_equal(df, expected)\n df = self.read_table(StringIO(data.replace(',', ' ')),\n header=None, delim_whitespace=True,\n skiprows=[0,1,2,3,5,6], skip_blank_lines=True)\n tm.assert_frame_equal(df, expected)\n # test skipping set of rows after a row with trailing spaces, issue #8983\n expected = pd.DataFrame({\"A\":[1., 5.1], \"B\":[2., np.nan],\n \"C\":[4., 10]})\n df = self.read_table(StringIO(data.replace(',', ' ')),\n delim_whitespace=True,\n skiprows=[1,2,3,5,6], skip_blank_lines=True)\n tm.assert_frame_equal(df, expected)\n\n def test_comment_header(self):\n data = \"\"\"# empty\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # header should begin at the second non-comment line\n df = self.read_csv(StringIO(data), comment='#', header=1)\n tm.assert_almost_equal(df.values, expected)\n\n def test_comment_skiprows_header(self):\n data = \"\"\"# empty\n# second empty line\n# third empty line\nX,Y,Z\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.]]\n # skiprows should skip the first 4 lines (including comments), while\n # header should start from the second non-commented line starting\n # with line 5\n df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)\n tm.assert_almost_equal(df.values, expected)\n\n def test_empty_lines(self):\n data = \"\"\"\\\nA,B,C\n1,2.,4.\n\n\n5.,NaN,10.0\n\n-70,.4,1\n\"\"\"\n expected = [[1., 2., 4.],\n [5., np.nan, 10.],\n [-70., .4, 1.]]\n df = self.read_csv(StringIO(data))\n tm.assert_almost_equal(df.values, expected)\n df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\\s+')\n tm.assert_almost_equal(df.values, expected)\n expected = [[1., 2., 4.],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [5., np.nan, 10.],\n [np.nan, np.nan, np.nan],\n [-70., .4, 1.]]\n df = self.read_csv(StringIO(data), skip_blank_lines=False)\n tm.assert_almost_equal(list(df.values), list(expected))\n\n def test_whitespace_lines(self):\n data = \"\"\"\n\n\\t \\t\\t\n \\t\nA,B,C\n \\t 1,2.,4.\n5.,NaN,10.0\n\"\"\"\n expected = [[1, 2., 4.],\n [5., np.nan, 10.]]\n df = self.read_csv(StringIO(data))\n tm.assert_almost_equal(df.values, expected)\n\n def test_passing_dtype(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the dtype argument is supported by all engines.\n\n df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E'])\n\n with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:\n df.to_csv(path)\n\n # GH 3795\n # passing 'str' as the dtype\n result = self.read_csv(path, dtype=str, index_col=0)\n tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' }))\n\n # we expect all object columns, so need to convert to test for equivalence\n result = result.astype(float)\n tm.assert_frame_equal(result,df)\n\n # invalid dtype\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' },\n index_col=0)\n\n # valid but we don't support it (date)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0, parse_dates=['B'])\n\n # valid but we don't support it\n 
self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' },\n index_col=0)\n\n def test_dtype_and_names_error(self):\n\n # GH 8833\n # passing both dtype and names resulting in an error reporting issue\n\n data = \"\"\"\n1.0 1\n2.0 2\n3.0 3\n\"\"\"\n # base cases\n result = self.read_csv(StringIO(data),sep='\\s+',header=None)\n expected = DataFrame([[1.0,1],[2.0,2],[3.0,3]])\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data),sep='\\s+',header=None,names=['a','b'])\n expected = DataFrame([[1.0,1],[2.0,2],[3.0,3]],columns=['a','b'])\n tm.assert_frame_equal(result, expected)\n\n # fallback casting\n result = self.read_csv(StringIO(data),sep='\\s+',header=None,names=['a','b'],dtype={'a' : np.int32})\n expected = DataFrame([[1,1],[2,2],[3,3]],columns=['a','b'])\n expected['a'] = expected['a'].astype(np.int32)\n tm.assert_frame_equal(result, expected)\n\n data = \"\"\"\n1.0 1\nnan 2\n3.0 3\n\"\"\"\n # fallback casting, but not castable\n with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):\n self.read_csv(StringIO(data),sep='\\s+',header=None,names=['a','b'],dtype={'a' : np.int32})\n\n def test_fallback_to_python(self):\n # GH 6607\n data = 'a b c\\n1 2 3'\n\n # specify C engine with unsupported options (raise)\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', sep=None,\n delim_whitespace=False)\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', sep='\\s')\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', skip_footer=1)\n\n\n def test_buffer_overflow(self):\n # GH9205\n # test certain malformed input files that cause buffer overflows in\n # tokenizer.c\n malfw = \"1\\r1\\r1\\r 1\\r 1\\r\" # buffer overflow in words pointer\n malfs = \"1\\r1\\r1\\r 1\\r 1\\r11\\r\" # buffer overflow in stream pointer\n malfl = \"1\\r1\\r1\\r 1\\r 1\\r11\\r1\\r\" # buffer overflow in lines pointer\n for malf in (malfw, malfs, malfl):\n try:\n df = self.read_table(StringIO(malf))\n except Exception as cperr:\n self.assertIn('Buffer overflow caught - possible malformed input file.', str(cperr))\n\n def test_single_char_leading_whitespace(self):\n # GH 9710\n data = \"\"\"\\\nMyColumn\n a\n b\n a\n b\\n\"\"\"\n\n expected = DataFrame({'MyColumn' : list('abab')})\n\n result = self.read_csv(StringIO(data), delim_whitespace=True,\n skipinitialspace=True)\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data), lineterminator='\\n',\n skipinitialspace=True)\n tm.assert_frame_equal(result, expected)\n\nclass TestCParserLowMemory(ParserTests, tm.TestCase):\n\n def read_csv(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'c'\n kwds['low_memory'] = True\n kwds['buffer_lines'] = 2\n return read_csv(*args, **kwds)\n\n def read_table(self, *args, **kwds):\n kwds = kwds.copy()\n kwds['engine'] = 'c'\n kwds['low_memory'] = True\n kwds['buffer_lines'] = 2\n return read_table(*args, **kwds)\n\n def test_compact_ints(self):\n data = ('0,1,0,0\\n'\n '1,1,0,0\\n'\n '0,1,0,1')\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n compact_ints=True)\n ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])\n self.assertEqual(result.to_records(index=False).dtype, ex_dtype)\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n\t\t\t compact_ints=True,\n use_unsigned=True)\n ex_dtype = np.dtype([(str(i), 'u1') for i in 
range(4)])\n self.assertEqual(result.to_records(index=False).dtype, ex_dtype)\n\n def test_compact_ints_as_recarray(self):\n if compat.is_platform_windows():\n raise nose.SkipTest(\"segfaults on win-64, only when all tests are run\")\n\n data = ('0,1,0,0\\n'\n '1,1,0,0\\n'\n '0,1,0,1')\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n compact_ints=True, as_recarray=True)\n ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])\n self.assertEqual(result.dtype, ex_dtype)\n\n result = read_csv(StringIO(data), delimiter=',', header=None,\n as_recarray=True, compact_ints=True,\n use_unsigned=True)\n ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])\n self.assertEqual(result.dtype, ex_dtype)\n\n def test_precise_conversion(self):\n # GH #8002\n tm._skip_if_32bit()\n from decimal import Decimal\n normal_errors = []\n precise_errors = []\n for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2\n text = 'a\\n{0:.25}'.format(num) # 25 decimal digits of precision\n normal_val = float(self.read_csv(StringIO(text))['a'][0])\n precise_val = float(self.read_csv(StringIO(text), float_precision='high')['a'][0])\n roundtrip_val = float(self.read_csv(StringIO(text), float_precision='round_trip')['a'][0])\n actual_val = Decimal(text[2:])\n def error(val):\n return abs(Decimal('{0:.100}'.format(val)) - actual_val)\n normal_errors.append(error(normal_val))\n precise_errors.append(error(precise_val))\n self.assertEqual(roundtrip_val, float(text[2:])) # round-trip should match float()\n self.assertTrue(sum(precise_errors) <= sum(normal_errors))\n self.assertTrue(max(precise_errors) <= max(normal_errors))\n\n def test_pass_dtype(self):\n data = \"\"\"\\\none,two\n1,2.5\n2,3.5\n3,4.5\n4,5.5\"\"\"\n\n result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})\n self.assertEqual(result['one'].dtype, 'u1')\n self.assertEqual(result['two'].dtype, 'object')\n\n def test_pass_dtype_as_recarray(self):\n data = \"\"\"\\\none,two\n1,2.5\n2,3.5\n3,4.5\n4,5.5\"\"\"\n\n if compat.is_platform_windows():\n raise nose.SkipTest(\"segfaults on win-64, only when all tests are run\")\n\n result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},\n as_recarray=True)\n self.assertEqual(result['one'].dtype, 'u1')\n self.assertEqual(result['two'].dtype, 'S1')\n\n def test_empty_pass_dtype(self):\n data = 'one,two'\n result = self.read_csv(StringIO(data), dtype={'one': 'u1'})\n\n expected = DataFrame({'one': np.empty(0, dtype='u1'),\n 'two': np.empty(0, dtype=np.object)})\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_index_pass_dtype(self):\n data = 'one,two'\n result = self.read_csv(StringIO(data), index_col=['one'],\n dtype={'one': 'u1', 1: 'f'})\n\n expected = DataFrame({'two': np.empty(0, dtype='f')},\n index=Index([], dtype='u1', name='one'))\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_multiindex_pass_dtype(self):\n data = 'one,two,three'\n result = self.read_csv(StringIO(data), index_col=['one', 'two'],\n dtype={'one': 'u1', 1: 'f8'})\n\n expected = DataFrame({'three': np.empty(0, dtype=np.object)}, index=MultiIndex.from_arrays(\n [np.empty(0, dtype='u1'), np.empty(0, dtype='O')],\n names=['one', 'two'])\n )\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_mangled_column_pass_dtype_by_names(self):\n data = 'one,one'\n result = self.read_csv(StringIO(data), dtype={'one': 'u1', 'one.1': 'f'})\n\n expected = DataFrame({'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})\n tm.assert_frame_equal(result, 
expected)\n\n def test_empty_with_mangled_column_pass_dtype_by_indexes(self):\n data = 'one,one'\n result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})\n\n expected = DataFrame({'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_dup_column_pass_dtype_by_names(self):\n data = 'one,one'\n result = self.read_csv(StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})\n expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_empty_with_dup_column_pass_dtype_by_indexes(self):\n ### FIXME in GH9424\n raise nose.SkipTest(\"GH 9424; known failure read_csv with duplicate columns\")\n\n data = 'one,one'\n result = self.read_csv(StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})\n expected = pd.concat([Series([], name='one', dtype='u1'),\n Series([], name='one', dtype='f')], axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_usecols_dtypes(self):\n data = \"\"\"\\\n1,2,3\n4,5,6\n7,8,9\n10,11,12\"\"\"\n\n result = self.read_csv(StringIO(data), usecols=(0, 1, 2),\n names=('a', 'b', 'c'),\n header=None,\n converters={'a': str},\n dtype={'b': int, 'c': float},\n )\n result2 = self.read_csv(StringIO(data), usecols=(0, 2),\n names=('a', 'b', 'c'),\n header=None,\n converters={'a': str},\n dtype={'b': int, 'c': float},\n )\n self.assertTrue((result.dtypes == [object, np.int, np.float]).all())\n self.assertTrue((result2.dtypes == [object, np.float]).all())\n\n def test_usecols_implicit_index_col(self):\n # #2654\n data = 'a,b,c\\n4,apple,bat,5.7\\n8,orange,cow,10'\n\n result = self.read_csv(StringIO(data), usecols=['a', 'b'])\n expected = DataFrame({'a': ['apple', 'orange'],\n 'b': ['bat', 'cow']}, index=[4, 8])\n\n tm.assert_frame_equal(result, expected)\n\n def test_usecols_with_whitespace(self):\n data = 'a b c\\n4 apple bat 5.7\\n8 orange cow 10'\n\n result = self.read_csv(StringIO(data), delim_whitespace=True,\n usecols=('a', 'b'))\n expected = DataFrame({'a': ['apple', 'orange'],\n 'b': ['bat', 'cow']}, index=[4, 8])\n\n tm.assert_frame_equal(result, expected)\n\n def test_usecols_regex_sep(self):\n # #2733\n data = 'a b c\\n4 apple bat 5.7\\n8 orange cow 10'\n\n df = self.read_csv(StringIO(data), sep='\\s+', usecols=('a', 'b'))\n\n expected = DataFrame({'a': ['apple', 'orange'],\n 'b': ['bat', 'cow']}, index=[4, 8])\n tm.assert_frame_equal(df, expected)\n\n def test_pure_python_failover(self):\n data = \"a,b,c\\n1,2,3#ignore this!\\n4,5,6#ignorethistoo\"\n\n result = self.read_csv(StringIO(data), comment='#')\n expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})\n tm.assert_frame_equal(result, expected)\n\n def test_decompression(self):\n try:\n import gzip\n import bz2\n except ImportError:\n raise nose.SkipTest('need gzip and bz2 to run')\n\n data = open(self.csv1, 'rb').read()\n expected = self.read_csv(self.csv1)\n\n with tm.ensure_clean() as path:\n tmp = gzip.GzipFile(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n result = self.read_csv(path, compression='gzip')\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(open(path, 'rb'), compression='gzip')\n tm.assert_frame_equal(result, expected)\n\n with tm.ensure_clean() as path:\n tmp = bz2.BZ2File(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n result = self.read_csv(path, compression='bz2')\n tm.assert_frame_equal(result, expected)\n\n # result = self.read_csv(open(path, 'rb'), compression='bz2')\n # 
tm.assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, self.read_csv,\n path, compression='bz3')\n\n with open(path, 'rb') as fin:\n if compat.PY3:\n result = self.read_csv(fin, compression='bz2')\n tm.assert_frame_equal(result, expected)\n else:\n self.assertRaises(ValueError, self.read_csv,\n fin, compression='bz2')\n\n def test_decompression_regex_sep(self):\n try:\n import gzip\n import bz2\n except ImportError:\n raise nose.SkipTest('need gzip and bz2 to run')\n\n data = open(self.csv1, 'rb').read()\n data = data.replace(b',', b'::')\n expected = self.read_csv(self.csv1)\n\n with tm.ensure_clean() as path:\n tmp = gzip.GzipFile(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n # GH 6607\n # Test currently only valid with the python engine because of\n # regex sep. Temporarily copied to TestPythonParser.\n # Here test for ValueError when passing regex sep:\n\n with tm.assertRaisesRegexp(ValueError, 'regex sep'): #XXX\n result = self.read_csv(path, sep='::', compression='gzip')\n tm.assert_frame_equal(result, expected)\n\n with tm.ensure_clean() as path:\n tmp = bz2.BZ2File(path, mode='wb')\n tmp.write(data)\n tmp.close()\n\n # GH 6607\n with tm.assertRaisesRegexp(ValueError, 'regex sep'): #XXX\n result = self.read_csv(path, sep='::', compression='bz2')\n tm.assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, self.read_csv,\n path, compression='bz3')\n\n def test_memory_map(self):\n # it works!\n result = self.read_csv(self.csv1, memory_map=True)\n\n def test_disable_bool_parsing(self):\n # #2090\n\n data = \"\"\"A,B,C\nYes,No,Yes\nNo,Yes,Yes\nYes,,Yes\nNo,No,No\"\"\"\n\n result = read_csv(StringIO(data), dtype=object)\n self.assertTrue((result.dtypes == object).all())\n\n result = read_csv(StringIO(data), dtype=object, na_filter=False)\n self.assertEqual(result['B'][2], '')\n\n def test_euro_decimal_format(self):\n data = \"\"\"Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,738797819\n2;121,12;14897,76;DEF;uyt;0,377320872\n3;878,158;108013,434;GHI;rez;2,735694704\"\"\"\n\n df2 = self.read_csv(StringIO(data), sep=';', decimal=',')\n self.assertEqual(df2['Number1'].dtype, float)\n self.assertEqual(df2['Number2'].dtype, float)\n self.assertEqual(df2['Number3'].dtype, float)\n\n def test_custom_lineterminator(self):\n data = 'a,b,c~1,2,3~4,5,6'\n\n result = self.read_csv(StringIO(data), lineterminator='~')\n expected = self.read_csv(StringIO(data.replace('~', '\\n')))\n\n tm.assert_frame_equal(result, expected)\n\n data2 = data.replace('~', '~~')\n result = self.assertRaises(ValueError, read_csv, StringIO(data2),\n lineterminator='~~')\n\n def test_raise_on_passed_int_dtype_with_nas(self):\n # #2631\n data = \"\"\"YEAR, DOY, a\n2001,106380451,10\n2001,,11\n2001,106380451,67\"\"\"\n self.assertRaises(Exception, read_csv, StringIO(data), sep=\",\",\n skipinitialspace=True,\n dtype={'DOY': np.int64})\n\n def test_na_trailing_columns(self):\n data = \"\"\"Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax\n2012-03-14,USD,AAPL,BUY,1000\n2012-05-12,USD,SBUX,SELL,500\"\"\"\n\n result = self.read_csv(StringIO(data))\n self.assertEqual(result['Date'][1], '2012-05-12')\n self.assertTrue(result['UnitPrice'].isnull().all())\n\n def test_parse_ragged_csv(self):\n data = \"\"\"1,2,3\n1,2,3,4\n1,2,3,4,5\n1,2\n1,2,3,4\"\"\"\n\n nice_data = \"\"\"1,2,3,,\n1,2,3,4,\n1,2,3,4,5\n1,2,,,\n1,2,3,4,\"\"\"\n result = self.read_csv(StringIO(data), header=None,\n names=['a', 'b', 'c', 'd', 'e'])\n\n expected = self.read_csv(StringIO(nice_data), 
header=None,\n names=['a', 'b', 'c', 'd', 'e'])\n\n tm.assert_frame_equal(result, expected)\n\n # too many columns, cause segfault if not careful\n data = \"1,2\\n3,4,5\"\n\n result = self.read_csv(StringIO(data), header=None,\n names=lrange(50))\n expected = self.read_csv(StringIO(data), header=None,\n names=lrange(3)).reindex(columns=lrange(50))\n\n tm.assert_frame_equal(result, expected)\n\n def test_tokenize_CR_with_quoting(self):\n # #3453, this doesn't work with Python parser for some reason\n\n data = ' a,b,c\\r\"a,b\",\"e,d\",\"f,f\"'\n\n result = self.read_csv(StringIO(data), header=None)\n expected = self.read_csv(StringIO(data.replace('\\r', '\\n')),\n header=None)\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data))\n expected = self.read_csv(StringIO(data.replace('\\r', '\\n')))\n tm.assert_frame_equal(result, expected)\n\n def test_raise_on_no_columns(self):\n # single newline\n data = \"\\n\"\n self.assertRaises(ValueError, self.read_csv, StringIO(data))\n\n # test with more than a single newline\n data = \"\\n\\n\\n\"\n self.assertRaises(ValueError, self.read_csv, StringIO(data))\n\n def test_warn_if_chunks_have_mismatched_type(self):\n # Issue #3866 If chunks are different types and can't\n # be coerced using numerical types, then issue warning.\n integers = [str(i) for i in range(499999)]\n data = \"a\\n\" + \"\\n\".join(integers + ['a', 'b'] + integers)\n\n with tm.assert_produces_warning(DtypeWarning):\n df = self.read_csv(StringIO(data))\n self.assertEqual(df.a.dtype, np.object)\n\n def test_invalid_c_parser_opts_with_not_c_parser(self):\n from pandas.io.parsers import _c_parser_defaults as c_defaults\n\n data = \"\"\"1,2,3,,\n1,2,3,4,\n1,2,3,4,5\n1,2,,,\n1,2,3,4,\"\"\"\n\n engines = 'python', 'python-fwf'\n for default in c_defaults:\n for engine in engines:\n kwargs = {default: object()}\n with tm.assertRaisesRegexp(ValueError,\n 'The %r option is not supported '\n 'with the %r engine' % (default,\n engine)):\n read_csv(StringIO(data), engine=engine, **kwargs)\n\n def test_passing_dtype(self):\n # GH 6607\n # This is a copy which should eventually be merged into ParserTests\n # when the dtype argument is supported by all engines.\n\n df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E'])\n\n with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:\n df.to_csv(path)\n\n # GH 3795\n # passing 'str' as the dtype\n result = self.read_csv(path, dtype=str, index_col=0)\n tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' }))\n\n # we expect all object columns, so need to convert to test for equivalence\n result = result.astype(float)\n tm.assert_frame_equal(result,df)\n\n # invalid dtype\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' },\n index_col=0)\n\n # valid but we don't support it (date)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0)\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },\n index_col=0, parse_dates=['B'])\n\n # valid but we don't support it\n self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' },\n index_col=0)\n\n def test_fallback_to_python(self):\n # GH 6607\n data = 'a b c\\n1 2 3'\n\n # specify C engine with C-unsupported options (raise)\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', sep=None,\n 
delim_whitespace=False)\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', sep='\\s')\n with tm.assertRaisesRegexp(ValueError, 'does not support'):\n self.read_table(StringIO(data), engine='c', skip_footer=1)\n\n def test_raise_on_sep_with_delim_whitespace(self):\n # GH 6607\n data = 'a b c\\n1 2 3'\n with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):\n self.read_table(StringIO(data), sep='\\s', delim_whitespace=True)\n\n\n def test_buffer_overflow(self):\n # GH9205\n # test certain malformed input files that cause buffer overflows in\n # tokenizer.c\n malfw = \"1\\r1\\r1\\r 1\\r 1\\r\" # buffer overflow in words pointer\n malfs = \"1\\r1\\r1\\r 1\\r 1\\r11\\r\" # buffer overflow in stream pointer\n malfl = \"1\\r1\\r1\\r 1\\r 1\\r11\\r1\\r\" # buffer overflow in lines pointer\n for malf in (malfw, malfs, malfl):\n try:\n df = self.read_table(StringIO(malf))\n except Exception as cperr:\n self.assertIn('Buffer overflow caught - possible malformed input file.', str(cperr))\n\n def test_single_char_leading_whitespace(self):\n # GH 9710\n data = \"\"\"\\\nMyColumn\n a\n b\n a\n b\\n\"\"\"\n\n expected = DataFrame({'MyColumn' : list('abab')})\n\n result = self.read_csv(StringIO(data), delim_whitespace=True,\n skipinitialspace=True)\n tm.assert_frame_equal(result, expected)\n\n result = self.read_csv(StringIO(data), lineterminator='\\n',\n skipinitialspace=True)\n tm.assert_frame_equal(result, expected)\n\n def test_bool_header_arg(self):\n # GH 6114\n data = \"\"\"\\\nMyColumn\n a\n b\n a\n b\"\"\"\n for arg in [True, False]:\n with tm.assertRaises(TypeError):\n pd.read_csv(StringIO(data), header=arg)\n with tm.assertRaises(TypeError):\n pd.read_table(StringIO(data), header=arg)\n with tm.assertRaises(TypeError):\n pd.read_fwf(StringIO(data), header=arg)\n\nclass TestMiscellaneous(tm.TestCase):\n\n # for tests that don't fit into any of the other classes, e.g. 
those that\n # compare results for different engines or test the behavior when 'engine'\n # is not passed\n\n def test_compare_whitespace_regex(self):\n # GH 6607\n data = ' a b c\\n1 2 3 \\n4 5 6\\n 7 8 9'\n result_c = pd.read_table(StringIO(data), sep='\\s+', engine='c')\n result_py = pd.read_table(StringIO(data), sep='\\s+', engine='python')\n print(result_c)\n tm.assert_frame_equal(result_c, result_py)\n\n def test_fallback_to_python(self):\n # GH 6607\n data = 'a b c\\n1 2 3'\n\n # specify C-unsupported options with python-unsupported option\n # (options will be ignored on fallback, raise)\n with tm.assertRaisesRegexp(ValueError, 'Falling back'):\n pd.read_table(StringIO(data), sep=None,\n delim_whitespace=False, dtype={'a': float})\n with tm.assertRaisesRegexp(ValueError, 'Falling back'):\n pd.read_table(StringIO(data), sep='\\s', dtype={'a': float})\n with tm.assertRaisesRegexp(ValueError, 'Falling back'):\n pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float})\n\n # specify C-unsupported options without python-unsupported options\n with tm.assert_produces_warning(parsers.ParserWarning):\n pd.read_table(StringIO(data), sep=None, delim_whitespace=False)\n with tm.assert_produces_warning(parsers.ParserWarning):\n pd.read_table(StringIO(data), sep='\\s')\n with tm.assert_produces_warning(parsers.ParserWarning):\n pd.read_table(StringIO(data), skip_footer=1)\n\n\nclass TestParseSQL(tm.TestCase):\n\n def test_convert_sql_column_floats(self):\n arr = np.array([1.5, None, 3, 4.2], dtype=object)\n result = lib.convert_sql_column(arr)\n expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_strings(self):\n arr = np.array(['1.5', None, '3', '4.2'], dtype=object)\n result = lib.convert_sql_column(arr)\n expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_unicode(self):\n arr = np.array([u('1.5'), None, u('3'), u('4.2')],\n dtype=object)\n result = lib.convert_sql_column(arr)\n expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],\n dtype=object)\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_ints(self):\n arr = np.array([1, 2, 3, 4], dtype='O')\n arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')\n result = lib.convert_sql_column(arr)\n result2 = lib.convert_sql_column(arr2)\n expected = np.array([1, 2, 3, 4], dtype='i8')\n assert_same_values_and_dtype(result, expected)\n assert_same_values_and_dtype(result2, expected)\n\n arr = np.array([1, 2, 3, None, 4], dtype='O')\n result = lib.convert_sql_column(arr)\n expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_longs(self):\n arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')\n result = lib.convert_sql_column(arr)\n expected = np.array([1, 2, 3, 4], dtype='i8')\n assert_same_values_and_dtype(result, expected)\n\n arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')\n result = lib.convert_sql_column(arr)\n expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_bools(self):\n arr = np.array([True, False, True, False], dtype='O')\n result = lib.convert_sql_column(arr)\n expected = np.array([True, False, True, False], dtype=bool)\n assert_same_values_and_dtype(result, expected)\n\n arr = np.array([True, False, None, False], dtype='O')\n 
result = lib.convert_sql_column(arr)\n expected = np.array([True, False, np.nan, False], dtype=object)\n assert_same_values_and_dtype(result, expected)\n\n def test_convert_sql_column_decimals(self):\n from decimal import Decimal\n arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])\n result = lib.convert_sql_column(arr)\n expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')\n assert_same_values_and_dtype(result, expected)\n\n\nclass TestUrlGz(tm.TestCase):\n def setUp(self):\n dirpath = tm.get_data_path()\n localtable = os.path.join(dirpath, 'salary.table')\n self.local_table = read_table(localtable)\n\n @tm.network\n def test_url_gz(self):\n url = 'https://raw.github.com/pydata/pandas/master/pandas/io/tests/data/salary.table.gz'\n url_table = read_table(url, compression=\"gzip\", engine=\"python\")\n tm.assert_frame_equal(url_table, self.local_table)\n\n @tm.network\n def test_url_gz_infer(self):\n url = ('https://s3.amazonaws.com/pandas-test/salary.table.gz')\n url_table = read_table(url, compression=\"infer\", engine=\"python\")\n tm.assert_frame_equal(url_table, self.local_table)\n\n\nclass TestS3(tm.TestCase):\n def setUp(self):\n try:\n import boto\n except ImportError:\n raise nose.SkipTest(\"boto not installed\")\n\n @tm.network\n def test_parse_public_s3_bucket(self):\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n if comp == 'bz2' and compat.PY2:\n # The Python 2 C parser can't read bz2 from S3.\n self.assertRaises(ValueError, pd.read_csv,\n 's3://pandas-test/tips.csv' + ext,\n compression=comp)\n else:\n df = pd.read_csv('s3://pandas-test/tips.csv' + ext, compression=comp)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)\n\n # Read public file from bucket with not-public contents\n df = pd.read_csv('s3://cant_get_it/tips.csv')\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)\n\n @tm.network\n def test_parse_public_s3n_bucket(self):\n # Read from AWS s3 as \"s3n\" URL\n df = pd.read_csv('s3n://pandas-test/tips.csv', nrows=10)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')).iloc[:10], df)\n\n @tm.network\n def test_parse_public_s3a_bucket(self):\n # Read from AWS s3 as \"s3a\" URL\n df = pd.read_csv('s3a://pandas-test/tips.csv', nrows=10)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')).iloc[:10], df)\n\n @tm.network\n def test_parse_public_s3_bucket_nrows(self):\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n if comp == 'bz2' and compat.PY2:\n # The Python 2 C parser can't read bz2 from S3.\n self.assertRaises(ValueError, pd.read_csv,\n 's3://pandas-test/tips.csv' + ext,\n compression=comp)\n else:\n df = pd.read_csv('s3://pandas-test/tips.csv' + ext, nrows=10, compression=comp)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')).iloc[:10], df)\n\n @tm.network\n def test_parse_public_s3_bucket_chunked(self):\n # Read with a chunksize\n chunksize = 5\n local_tips = pd.read_csv(tm.get_data_path('tips.csv'))\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n if comp == 'bz2' and compat.PY2:\n # The Python 2 C parser can't read bz2 from 
S3.\n self.assertRaises(ValueError, pd.read_csv,\n 's3://pandas-test/tips.csv' + ext,\n compression=comp)\n else:\n df_reader = pd.read_csv('s3://pandas-test/tips.csv' + ext,\n chunksize=chunksize, compression=comp)\n self.assertEqual(df_reader.chunksize, chunksize)\n for i_chunk in [0, 1, 2]:\n # Read a couple of chunks and make sure we see them properly.\n df = df_reader.get_chunk()\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n true_df = local_tips.iloc[chunksize * i_chunk: chunksize * (i_chunk + 1)]\n true_df = true_df.reset_index().drop('index', axis=1) # Chunking doesn't preserve row numbering\n tm.assert_frame_equal(true_df, df)\n\n @tm.network\n def test_parse_public_s3_bucket_chunked_python(self):\n # Read with a chunksize using the Python parser\n chunksize = 5\n local_tips = pd.read_csv(tm.get_data_path('tips.csv'))\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n df_reader = pd.read_csv('s3://pandas-test/tips.csv' + ext,\n chunksize=chunksize, compression=comp,\n engine='python')\n self.assertEqual(df_reader.chunksize, chunksize)\n for i_chunk in [0, 1, 2]:\n # Read a couple of chunks and make sure we see them properly.\n df = df_reader.get_chunk()\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n true_df = local_tips.iloc[chunksize * i_chunk: chunksize * (i_chunk + 1)]\n true_df = true_df.reset_index().drop('index', axis=1) # Chunking doesn't preserve row numbering\n tm.assert_frame_equal(true_df, df)\n\n @tm.network\n def test_parse_public_s3_bucket_python(self):\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n df = pd.read_csv('s3://pandas-test/tips.csv' + ext, engine='python',\n compression=comp)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)\n\n @tm.network\n def test_infer_s3_compression(self):\n for ext in ['', '.gz', '.bz2']:\n df = pd.read_csv('s3://pandas-test/tips.csv' + ext,\n engine='python', compression='infer')\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)\n\n @tm.network\n def test_parse_public_s3_bucket_nrows_python(self):\n for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:\n df = pd.read_csv('s3://pandas-test/tips.csv' + ext, engine='python',\n nrows=10, compression=comp)\n self.assertTrue(isinstance(df, pd.DataFrame))\n self.assertFalse(df.empty)\n tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')).iloc[:10], df)\n\n @tm.network\n def test_s3_fails(self):\n import boto\n with tm.assertRaisesRegexp(boto.exception.S3ResponseError,\n 'S3ResponseError: 404 Not Found'):\n pd.read_csv('s3://nyqpug/asdf.csv')\n\n # Receive a permission error when trying to read a private bucket.\n # It's irrelevant here that this isn't actually a table.\n with tm.assertRaisesRegexp(boto.exception.S3ResponseError,\n 'S3ResponseError: 403 Forbidden'):\n pd.read_csv('s3://cant_get_it/')\n\n\ndef assert_same_values_and_dtype(res, exp):\n tm.assert_equal(res.dtype, exp.dtype)\n tm.assert_almost_equal(res, exp)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport re\nimport sys\nfrom datetime import datetime, timedelta\nimport operator\nimport string\nfrom inspect import getargspec\nfrom itertools import product, starmap\nfrom distutils.version import LooseVersion\nimport warnings\nimport random\n\nimport nose\n\nfrom numpy import nan, inf\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\n\nfrom pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,\n date_range, period_range, timedelta_range, _np_version_under1p8)\nfrom pandas.core.index import MultiIndex\nfrom pandas.core.indexing import IndexingError\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import Timestamp, DatetimeIndex\nfrom pandas.tseries.tdi import Timedelta, TimedeltaIndex\nimport pandas.core.common as com\nimport pandas.core.config as cf\nimport pandas.lib as lib\n\nimport pandas.core.datetools as datetools\nimport pandas.core.nanops as nanops\n\nfrom pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long\nfrom pandas import compat\nfrom pandas.util.testing import (assert_series_equal,\n assert_almost_equal,\n assert_frame_equal,\n assert_index_equal,\n ensure_clean)\nimport pandas.util.testing as tm\n\n\n#------------------------------------------------------------------------------\n# Series test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\n\n\nclass CheckNameIntegration(object):\n\n _multiprocess_can_split_ = True\n\n def test_scalarop_preserve_name(self):\n result = self.ts * 2\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_name(self):\n result = self.ts.copy()\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n\n self.ts.index.name = None\n self.assertIsNone(self.ts.index.name)\n self.assertIs(self.ts, self.ts)\n\n cp = self.ts.copy()\n cp.index.name = 'foo'\n com.pprint_thing(self.ts.index.name)\n self.assertIsNone(self.ts.index.name)\n\n def test_append_preserve_name(self):\n result = self.ts[:5].append(self.ts[5:])\n self.assertEqual(result.name, self.ts.name)\n\n def test_dt_namespace_accessor(self):\n\n # GH 7207\n # test .dt namespace accessor\n\n ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq','days_in_month','daysinmonth']\n ok_for_period = ok_for_base + ['qyear']\n ok_for_period_methods = ['strftime']\n ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',\n 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']\n ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime']\n ok_for_td = ['days','seconds','microseconds','nanoseconds']\n ok_for_td_methods = ['components','to_pytimedelta','total_seconds']\n\n def get_expected(s, name):\n result = getattr(Index(s._values),prop)\n if isinstance(result, np.ndarray):\n if com.is_integer_dtype(result):\n result = result.astype('int64')\n elif not com.is_list_like(result):\n return result\n return Series(result,index=s.index)\n\n def compare(s, name):\n a = getattr(s.dt,prop)\n b = get_expected(s,prop)\n if not (com.is_list_like(a) and com.is_list_like(b)):\n self.assertEqual(a,b)\n else:\n tm.assert_series_equal(a,b)\n\n # datetimeindex\n for s in [Series(date_range('20130101',periods=5)),\n Series(date_range('20130101',periods=5,freq='s')),\n 
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:\n for prop in ok_for_dt:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_dt_methods:\n getattr(s.dt, prop)\n\n result = s.dt.to_pydatetime()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.tz_localize('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n self.assertEqual(str(tz_result), 'US/Eastern')\n freq_result = s.dt.freq\n self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)\n\n # let's localize, then convert\n result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n # datetimeindex with tz\n s = Series(date_range('20130101',periods=5,tz='US/Eastern'))\n for prop in ok_for_dt:\n\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_dt_methods:\n getattr(s.dt,prop)\n\n result = s.dt.to_pydatetime()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.tz_convert('CET')\n expected = Series(s._values.tz_convert('CET'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n self.assertEqual(str(tz_result), 'CET')\n freq_result = s.dt.freq\n self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)\n\n # timedeltaindex\n for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),\n Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),\n Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:\n for prop in ok_for_td:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_td_methods:\n getattr(s.dt, prop)\n\n result = s.dt.components\n self.assertIsInstance(result,DataFrame)\n tm.assert_index_equal(result.index,s.index)\n\n result = s.dt.to_pytimedelta()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.total_seconds()\n self.assertIsInstance(result,pd.Series)\n self.assertTrue(result.dtype == 'float64')\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)\n\n # both\n index = date_range('20130101',periods=3,freq='D')\n s = Series(date_range('20140204',periods=3,freq='s'),index=index)\n tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index))\n\n # periodindex\n for s in [Series(period_range('20130101',periods=5,freq='D'))]:\n for prop in ok_for_period:\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_period_methods:\n getattr(s.dt, prop)\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, PeriodIndex(s.values).freq)\n\n # test limited display api\n def get_dir(s):\n results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]\n return list(sorted(set(results)))\n\n s = Series(date_range('20130101',periods=5,freq='D'))\n results = get_dir(s)\n tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + 
ok_for_dt_methods))))\n\n s = Series(period_range('20130101',periods=5,freq='D').asobject)\n results = get_dir(s)\n tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods))))\n\n # no setting allowed\n s = Series(date_range('20130101',periods=5,freq='D'))\n with tm.assertRaisesRegexp(ValueError, \"modifications\"):\n s.dt.hour = 5\n\n # trying to set a copy\n with pd.option_context('chained_assignment','raise'):\n def f():\n s.dt.hour[0] = 5\n self.assertRaises(com.SettingWithCopyError, f)\n\n def test_strftime(self):\n # GH 10086\n s = Series(date_range('20130101', periods=5))\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))\n result = s.dt.strftime('%Y/%m/%d %H-%M-%S')\n expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33', '2015/02/05 11-22-33',\n '2015/02/06 11-22-33', '2015/02/07 11-22-33'])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('20130101', periods=5))\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('2015-02-03 11:22:33.4567', periods=5, freq='s'))\n result = s.dt.strftime('%Y/%m/%d %H-%M-%S')\n expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34', '2015/02/03 11-22-35',\n '2015/02/03 11-22-36', '2015/02/03 11-22-37'])\n tm.assert_series_equal(result, expected)\n\n s = Series(date_range('20130101', periods=5))\n s.iloc[0] = pd.NaT\n result = s.dt.strftime('%Y/%m/%d')\n expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])\n tm.assert_series_equal(result, expected)\n\n datetime_index = date_range('20150301', periods=5)\n result = datetime_index.strftime(\"%Y/%m/%d\")\n expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n period_index = period_range('20150301', periods=5)\n result = period_index.strftime(\"%Y/%m/%d\")\n expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])\n result = s.dt.strftime('%Y-%m-%d %H:%M:%S')\n expected = Series([\"2013-01-01 02:32:59\", \"2013-01-02 14:32:01\"])\n tm.assert_series_equal(result, expected)\n\n s = Series(period_range('20130101', periods=4, freq='H'))\n result = s.dt.strftime('%Y/%m/%d %H:%M:%S')\n expected = Series([\"2013/01/01 00:00:00\", \"2013/01/01 01:00:00\",\n \"2013/01/01 02:00:00\", \"2013/01/01 03:00:00\"])\n\n s = Series(period_range('20130101', periods=4, freq='L'))\n result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')\n expected = Series([\"2013/01/01 00:00:00.000\", \"2013/01/01 00:00:00.001\",\n \"2013/01/01 00:00:00.002\", \"2013/01/01 00:00:00.003\"])\n tm.assert_series_equal(result, expected)\n\n def test_valid_dt_with_missing_values(self):\n\n from datetime import date, time\n\n # GH 8689\n s = Series(date_range('20130101',periods=5,freq='D'))\n s.iloc[2] = pd.NaT\n\n for attr in ['microsecond','nanosecond','second','minute','hour','day']:\n expected = getattr(s.dt,attr).copy()\n expected.iloc[2] = np.nan\n result = getattr(s.dt,attr)\n tm.assert_series_equal(result, expected)\n\n result = 
s.dt.date\n expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n result = s.dt.time\n expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n def test_dt_accessor_api(self):\n # GH 9322\n from pandas.tseries.common import (CombinedDatetimelikeProperties,\n DatetimeProperties)\n self.assertIs(Series.dt, CombinedDatetimelikeProperties)\n\n s = Series(date_range('2000-01-01', periods=3))\n self.assertIsInstance(s.dt, DatetimeProperties)\n\n for s in [Series(np.arange(5)),\n Series(list('abcde')),\n Series(np.random.randn(5))]:\n with tm.assertRaisesRegexp(AttributeError,\n \"only use .dt accessor\"):\n s.dt\n self.assertFalse(hasattr(s, 'dt'))\n\n def test_tab_completion(self):\n # GH 9910\n s = Series(list('abcd'))\n # Series of str values should have .str but not .dt/.cat in __dir__\n self.assertTrue('str' in dir(s))\n self.assertTrue('dt' not in dir(s))\n self.assertTrue('cat' not in dir(s))\n\n # similiarly for .dt\n s = Series(date_range('1/1/2015', periods=5))\n self.assertTrue('dt' in dir(s))\n self.assertTrue('str' not in dir(s))\n self.assertTrue('cat' not in dir(s))\n\n # similiarly for .cat\n s = Series(list('abbcd'), dtype=\"category\")\n self.assertTrue('cat' in dir(s))\n self.assertTrue('str' not in dir(s))\n self.assertTrue('dt' not in dir(s))\n\n def test_binop_maybe_preserve_name(self):\n # names match, preserve\n result = self.ts * self.ts\n self.assertEqual(result.name, self.ts.name)\n result = self.ts.mul(self.ts)\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts * self.ts[:-2]\n self.assertEqual(result.name, self.ts.name)\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'something else'\n result = self.ts + cp\n self.assertIsNone(result.name)\n result = self.ts.add(cp)\n self.assertIsNone(result.name)\n\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']\n ops = ops + ['r' + op for op in ops]\n for op in ops:\n # names match, preserve\n s = self.ts.copy()\n result = getattr(s, op)(s)\n self.assertEqual(result.name, self.ts.name)\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'changed'\n result = getattr(s, op)(cp)\n self.assertIsNone(result.name)\n\n def test_combine_first_name(self):\n result = self.ts.combine_first(self.ts[:5])\n self.assertEqual(result.name, self.ts.name)\n\n def test_combine_first_dt64(self):\n from pandas.tseries.tools import to_datetime\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = to_datetime(Series([np.NaN, \"2011\"]))\n rs = s0.combine_first(s1)\n xp = to_datetime(Series(['2010', '2011']))\n assert_series_equal(rs, xp)\n\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = Series([np.NaN, \"2011\"])\n rs = s0.combine_first(s1)\n xp = Series([datetime(2010, 1, 1), '2011'])\n assert_series_equal(rs, xp)\n\n def test_get(self):\n\n # GH 6383\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]))\n\n result = s.get(25, 0)\n expected = 0\n self.assertEqual(result,expected)\n\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]),\n index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,\n 121.0, 144.0, 169.0, 196.0, 1225.0,\n 1296.0, 1369.0, 1444.0, 1521.0, 1600.0,\n 1681.0, 1764.0, 1849.0, 1936.0],\n dtype='object'))\n\n result = s.get(25, 0)\n expected = 43\n 
self.assertEqual(result,expected)\n\n # GH 7407\n # with a boolean accessor\n df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})\n vc = df.i.value_counts()\n result = vc.get(99,default='Missing')\n self.assertEqual(result,'Missing')\n\n vc = df.b.value_counts()\n result = vc.get(False,default='Missing')\n self.assertEqual(result,3)\n\n result = vc.get(True,default='Missing')\n self.assertEqual(result,'Missing')\n\n def test_delitem(self):\n\n # GH 5542\n # should delete the item inplace\n s = Series(lrange(5))\n del s[0]\n\n expected = Series(lrange(1,5),index=lrange(1,5))\n assert_series_equal(s, expected)\n\n del s[1]\n expected = Series(lrange(2,5),index=lrange(2,5))\n assert_series_equal(s, expected)\n\n # empty\n s = Series()\n def f():\n del s[0]\n self.assertRaises(KeyError, f)\n\n # only 1 left, del, add, del\n s = Series(1)\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n s[0] = 1\n assert_series_equal(s, Series(1))\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n\n def test_getitem_preserve_name(self):\n result = self.ts[self.ts > 0]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[[0, 2, 4]]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[5:10]\n self.assertEqual(result.name, self.ts.name)\n\n def test_getitem_setitem_ellipsis(self):\n s = Series(np.random.randn(10))\n\n np.fix(s)\n\n result = s[...]\n assert_series_equal(result, s)\n\n s[...] = 5\n self.assertTrue((result == 5).all())\n\n def test_getitem_negative_out_of_bounds(self):\n s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))\n\n self.assertRaises(IndexError, s.__getitem__, -11)\n self.assertRaises(IndexError, s.__setitem__, -11, 'foo')\n\n def test_multilevel_name_print(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(lrange(0, len(index)), index=index, name='sth')\n expected = [\"first second\",\n \"foo one 0\",\n \" two 1\",\n \" three 2\",\n \"bar one 3\",\n \" two 4\",\n \"baz two 5\",\n \" three 6\",\n \"qux one 7\",\n \" two 8\",\n \" three 9\",\n \"Name: sth, dtype: int64\"]\n expected = \"\\n\".join(expected)\n self.assertEqual(repr(s), expected)\n\n def test_multilevel_preserve_name(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(np.random.randn(len(index)), index=index, name='sth')\n\n result = s['foo']\n result2 = s.ix['foo']\n self.assertEqual(result.name, s.name)\n self.assertEqual(result2.name, s.name)\n\n def test_name_printing(self):\n # test small series\n s = Series([0, 1, 2])\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n # test big series (diff code path)\n s = Series(lrange(0, 1000))\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n\n s = Series(index=date_range('20010101', '20020101'), name='test')\n self.assertIn(\"Name: test\", repr(s))\n\n def test_pickle_preserve_name(self):\n unpickled = self._pickle_roundtrip_name(self.ts)\n self.assertEqual(unpickled.name, self.ts.name)\n\n def _pickle_roundtrip_name(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def 
test_argsort_preserve_name(self):\n result = self.ts.argsort()\n self.assertEqual(result.name, self.ts.name)\n\n def test_sort_index_name(self):\n result = self.ts.sort_index(ascending=False)\n self.assertEqual(result.name, self.ts.name)\n\n def test_to_sparse_pass_name(self):\n result = self.ts.to_sparse()\n self.assertEqual(result.name, self.ts.name)\n\n\nclass TestNanops(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def test_comparisons(self):\n left = np.random.randn(10)\n right = np.random.randn(10)\n left[:3] = np.nan\n\n result = nanops.nangt(left, right)\n expected = (left > right).astype('O')\n expected[:3] = np.nan\n\n assert_almost_equal(result, expected)\n\n s = Series(['a', 'b', 'c'])\n s2 = Series([False, True, False])\n\n # it works!\n s == s2\n s2 == s\n\n def test_sum_zero(self):\n arr = np.array([])\n self.assertEqual(nanops.nansum(arr), 0)\n\n arr = np.empty((10, 0))\n self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())\n\n # GH #844\n s = Series([], index=[])\n self.assertEqual(s.sum(), 0)\n\n df = DataFrame(np.empty((10, 0)))\n self.assertTrue((df.sum(1) == 0).all())\n\n def test_nansum_buglet(self):\n s = Series([1.0, np.nan], index=[0, 1])\n result = np.nansum(s)\n assert_almost_equal(result, 1)\n\n def test_overflow(self):\n # GH 6915\n # overflowing on the smaller int dtypes\n for dtype in ['int32','int64']:\n v = np.arange(5000000,dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = s.sum(skipna=False)\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min(skipna=False)\n self.assertEqual(int(result),0)\n result = s.max(skipna=False)\n self.assertEqual(int(result),v[-1])\n\n # use bottleneck if available\n result = s.sum()\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min()\n self.assertEqual(int(result),0)\n result = s.max()\n self.assertEqual(int(result),v[-1])\n\n for dtype in ['float32', 'float64']:\n v = np.arange(5000000, dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = s.sum(skipna=False)\n self.assertEqual(result, v.sum(dtype=dtype))\n result = s.min(skipna=False)\n self.assertTrue(np.allclose(float(result), 0.0))\n result = s.max(skipna=False)\n self.assertTrue(np.allclose(float(result), v[-1]))\n\n # use bottleneck if available\n result = s.sum()\n self.assertEqual(result, v.sum(dtype=dtype))\n result = s.min()\n self.assertTrue(np.allclose(float(result), 0.0))\n result = s.max()\n self.assertTrue(np.allclose(float(result), v[-1]))\n\nclass SafeForSparse(object):\n pass\n\n_ts = tm.makeTimeSeries()\n\nclass TestSeries(tm.TestCase, CheckNameIntegration):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n import warnings\n\n self.ts = _ts.copy()\n self.ts.name = 'ts'\n\n self.series = tm.makeStringSeries()\n self.series.name = 'series'\n\n self.objSeries = tm.makeObjectSeries()\n self.objSeries.name = 'objects'\n\n self.empty = Series([], index=[])\n\n def test_scalar_conversion(self):\n\n # Pass in scalar is disabled\n scalar = Series(0.5)\n self.assertNotIsInstance(scalar, float)\n\n # coercion\n self.assertEqual(float(Series([1.])), 1.0)\n self.assertEqual(int(Series([1.])), 1)\n self.assertEqual(long(Series([1.])), 1)\n\n\n def test_astype(self):\n s = Series(np.random.randn(5),name='foo')\n\n for dtype in ['float32','float64','int64','int32']:\n astyped = s.astype(dtype)\n self.assertEqual(astyped.dtype, dtype)\n self.assertEqual(astyped.name, s.name)\n\n def test_TimeSeries_deprecation(self):\n\n # deprecation TimeSeries, #10890\n with 
tm.assert_produces_warning(FutureWarning):\n pd.TimeSeries(1,index=date_range('20130101',periods=3))\n\n def test_constructor(self):\n # Recognize TimeSeries\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(self.ts.is_time_series)\n self.assertTrue(self.ts.index.is_all_dates)\n\n # Pass in Series\n derived = Series(self.ts)\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(derived.is_time_series)\n self.assertTrue(derived.index.is_all_dates)\n\n self.assertTrue(tm.equalContents(derived.index, self.ts.index))\n # Ensure new index is not created\n self.assertEqual(id(self.ts.index), id(derived.index))\n\n # Mixed type Series\n mixed = Series(['hello', np.NaN], index=[0, 1])\n self.assertEqual(mixed.dtype, np.object_)\n self.assertIs(mixed[1], np.NaN)\n\n with tm.assert_produces_warning(FutureWarning):\n self.assertFalse(self.empty.is_time_series)\n self.assertFalse(self.empty.index.is_all_dates)\n with tm.assert_produces_warning(FutureWarning):\n self.assertFalse(Series({}).is_time_series)\n self.assertFalse(Series({}).index.is_all_dates)\n self.assertRaises(Exception, Series, np.random.randn(3, 3),\n index=np.arange(3))\n\n mixed.name = 'Series'\n rs = Series(mixed).name\n xp = 'Series'\n self.assertEqual(rs, xp)\n\n # raise on MultiIndex GH4187\n m = MultiIndex.from_arrays([[1, 2], [3, 4]])\n self.assertRaises(NotImplementedError, Series, m)\n\n def test_constructor_empty(self):\n empty = Series()\n empty2 = Series([])\n assert_series_equal(empty, empty2)\n\n empty = Series(index=lrange(10))\n empty2 = Series(np.nan, index=lrange(10))\n assert_series_equal(empty, empty2)\n\n def test_constructor_series(self):\n index1 = ['d', 'b', 'a', 'c']\n index2 = sorted(index1)\n s1 = Series([4, 7, -5, 3], index=index1)\n s2 = Series(s1, index=index2)\n\n assert_series_equal(s2, s1.sort_index())\n\n def test_constructor_iterator(self):\n\n expected = Series(list(range(10)),dtype='int64')\n result = Series(range(10),dtype='int64')\n assert_series_equal(result, expected)\n\n def test_constructor_generator(self):\n gen = (i for i in range(10))\n\n result = Series(gen)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n gen = (i for i in range(10))\n result = Series(gen, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_map(self):\n # GH8909\n m = map(lambda x: x, range(10))\n\n result = Series(m)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n m = map(lambda x: x, range(10))\n result = Series(m, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_categorical(self):\n cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)\n res = Series(cat)\n self.assertTrue(res.values.equals(cat))\n\n def test_constructor_maskedarray(self):\n data = ma.masked_all((3,), dtype=float)\n result = Series(data)\n expected = Series([nan, nan, nan])\n assert_series_equal(result, expected)\n\n data[0] = 0.0\n data[2] = 2.0\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([0.0, nan, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data[1] = 1.0\n result = Series(data, index=index)\n expected = Series([0.0, 1.0, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=int)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=float)\n assert_series_equal(result, expected)\n\n data[0] = 0\n data[2] = 2\n index = ['a', 'b', 'c']\n 
result = Series(data, index=index)\n expected = Series([0, nan, 2], index=index, dtype=float)\n assert_series_equal(result, expected)\n\n data[1] = 1\n result = Series(data, index=index)\n expected = Series([0, 1, 2], index=index, dtype=int)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=bool)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=object)\n assert_series_equal(result, expected)\n\n data[0] = True\n data[2] = False\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([True, nan, False], index=index, dtype=object)\n assert_series_equal(result, expected)\n\n data[1] = True\n result = Series(data, index=index)\n expected = Series([True, True, False], index=index, dtype=bool)\n assert_series_equal(result, expected)\n\n from pandas import tslib\n data = ma.masked_all((3,), dtype='M8[ns]')\n result = Series(data)\n expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[0] = datetime(2001, 1, 1)\n data[2] = datetime(2001, 1, 3)\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), tslib.iNaT,\n datetime(2001, 1, 3)], index=index, dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[1] = datetime(2001, 1, 2)\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),\n datetime(2001, 1, 3)], index=index, dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n def test_constructor_default_index(self):\n s = Series([0, 1, 2])\n assert_almost_equal(s.index, np.arange(3))\n\n def test_constructor_corner(self):\n df = tm.makeTimeDataFrame()\n objs = [df, df]\n s = Series(objs, index=[0, 1])\n tm.assertIsInstance(s, Series)\n\n def test_constructor_sanitize(self):\n s = Series(np.array([1., 1., 8.]), dtype='i8')\n self.assertEqual(s.dtype, np.dtype('i8'))\n\n s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')\n self.assertEqual(s.dtype, np.dtype('f8'))\n\n def test_constructor_pass_none(self):\n s = Series(None, index=lrange(5))\n self.assertEqual(s.dtype, np.float64)\n\n s = Series(None, index=lrange(5), dtype=object)\n self.assertEqual(s.dtype, np.object_)\n\n # GH 7431\n # inference on the index\n s = Series(index=np.array([None]))\n expected = Series(index=Index([None]))\n assert_series_equal(s,expected)\n\n def test_constructor_cast(self):\n self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)\n\n def test_constructor_dtype_nocast(self):\n # 1572\n s = Series([1, 2, 3])\n\n s2 = Series(s, dtype=np.int64)\n\n s2[1] = 5\n self.assertEqual(s[1], 5)\n\n def test_constructor_datelike_coercion(self):\n\n # GH 9477\n # incorrectly infering on dateimelike looking when object dtype is specified\n s = Series([Timestamp('20130101'),'NOV'],dtype=object)\n self.assertEqual(s.iloc[0],Timestamp('20130101'))\n self.assertEqual(s.iloc[1],'NOV')\n self.assertTrue(s.dtype == object)\n\n # the dtype was being reset on the slicing and re-inferred to datetime even\n # thought the blocks are mixed\n belly = '216 3T19'.split()\n wing1 = '2T15 4H19'.split()\n wing2 = '416 4T20'.split()\n mat = pd.to_datetime('2016-01-22 2019-09-07'.split())\n df = pd.DataFrame({'wing1':wing1, 'wing2':wing2, 'mat':mat}, index=belly)\n\n result = df.loc['3T19']\n self.assertTrue(result.dtype == object)\n result = df.loc['216']\n self.assertTrue(result.dtype == object)\n\n def test_constructor_dtype_datetime64(self):\n import pandas.tslib as tslib\n\n s = 
Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n # in theory this should be all nulls, but since\n # we are not specifying a dtype is ambiguous\n s = Series(tslib.iNaT, index=lrange(5))\n self.assertFalse(isnull(s).all())\n\n s = Series(nan, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # GH3416\n dates = [\n np.datetime64(datetime(2013, 1, 1)),\n np.datetime64(datetime(2013, 1, 2)),\n np.datetime64(datetime(2013, 1, 3)),\n ]\n\n s = Series(dates)\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s.ix[0] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # invalid astypes\n for t in ['s', 'D', 'us', 'ms']:\n self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)\n\n # GH3414 related\n self.assertRaises(TypeError, lambda x: Series(\n Series(dates).astype('int') / 1000000, dtype='M8[ms]'))\n self.assertRaises(\n TypeError, lambda x: Series(dates, dtype='datetime64'))\n\n # invalid dates can be help as object\n result = Series([datetime(2,1,1)])\n self.assertEqual(result[0], datetime(2,1,1,0,0))\n\n result = Series([datetime(3000,1,1)])\n self.assertEqual(result[0], datetime(3000,1,1,0,0))\n\n # don't mix types\n result = Series([ Timestamp('20130101'), 1],index=['a','b'])\n self.assertEqual(result['a'], Timestamp('20130101'))\n self.assertEqual(result['b'], 1)\n\n # GH6529\n # coerce datetime64 non-ns properly\n dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')\n values2 = dates.view(np.ndarray).astype('datetime64[ns]')\n expected = Series(values2, dates)\n\n for dtype in ['s', 'D', 'ms', 'us', 'ns']:\n values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))\n result = Series(values1, dates)\n assert_series_equal(result,expected)\n\n # leave datetime.date alone\n dates2 = np.array([d.date() for d in dates.to_pydatetime()],\n dtype=object)\n series1 = Series(dates2, dates)\n self.assert_numpy_array_equal(series1.values,dates2)\n self.assertEqual(series1.dtype,object)\n\n # these will correctly infer a datetime\n s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range('20130101',periods=3)\n self.assertTrue(Series(dr).iloc[0].tz is None)\n dr = date_range('20130101',periods=3,tz='UTC')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')\n\n # non-convertible\n s = Series([1479596223000, -1479590, pd.NaT])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is pd.NaT)\n self.assertTrue('NaT' in str(s))\n\n # if we passed a NaT it remains\n s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is pd.NaT)\n self.assertTrue('NaT' in str(s))\n\n # if we passed a nan it remains\n s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), 
np.nan])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(s[2] is np.nan)\n self.assertTrue('NaN' in str(s))\n\n def test_constructor_with_datetime_tz(self):\n\n # 8260\n # support datetime64 with tz\n\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n s = Series(dr)\n self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')\n self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')\n self.assertTrue(com.is_datetime64tz_dtype(s.dtype))\n self.assertTrue('datetime64[ns, US/Eastern]' in str(s))\n\n # export\n result = s.values\n self.assertIsInstance(result, np.ndarray)\n self.assertTrue(result.dtype == 'datetime64[ns]')\n self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz)))\n\n # indexing\n result = s.iloc[0]\n self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))\n result = s[0]\n self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))\n\n result = s[Series([True,True,False],index=s.index)]\n assert_series_equal(result,s[0:2])\n\n result = s.iloc[0:1]\n assert_series_equal(result,Series(dr[0:1]))\n\n # concat\n result = pd.concat([s.iloc[0:1],s.iloc[1:]])\n assert_series_equal(result,s)\n\n # astype\n result = s.astype(object)\n expected = Series(DatetimeIndex(s._values).asobject)\n assert_series_equal(result, expected)\n\n result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)\n assert_series_equal(result, s)\n\n # astype - datetime64[ns, tz]\n result = Series(s.values).astype('datetime64[ns, US/Eastern]')\n assert_series_equal(result, s)\n\n result = Series(s.values).astype(s.dtype)\n assert_series_equal(result, s)\n\n result = s.astype('datetime64[ns, CET]')\n expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET'))\n assert_series_equal(result, expected)\n\n # short str\n self.assertTrue('datetime64[ns, US/Eastern]' in str(s))\n\n # formatting with NaT\n result = s.shift()\n self.assertTrue('datetime64[ns, US/Eastern]' in str(result))\n self.assertTrue('NaT' in str(result))\n\n # long str\n t = Series(date_range('20130101',periods=1000,tz='US/Eastern'))\n self.assertTrue('datetime64[ns, US/Eastern]' in str(t))\n\n result = pd.DatetimeIndex(s,freq='infer')\n tm.assert_index_equal(result, dr)\n\n # inference\n s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])\n self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')\n self.assertTrue(lib.infer_dtype(s) == 'datetime64')\n\n s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])\n self.assertTrue(s.dtype == 'object')\n self.assertTrue(lib.infer_dtype(s) == 'datetime')\n\n def test_constructor_periodindex(self):\n # GH7932\n # converting a PeriodIndex when put in a Series\n\n pi = period_range('20130101',periods=5,freq='D')\n s = Series(pi)\n expected = Series(pi.asobject)\n assert_series_equal(s, expected)\n\n def test_constructor_dict(self):\n d = {'a': 0., 'b': 1., 'c': 2.}\n result = Series(d, index=['b', 'c', 'd', 'a'])\n expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])\n assert_series_equal(result, expected)\n\n pidx = tm.makePeriodIndex(100)\n d = {pidx[0]: 0, pidx[1]: 1}\n result = Series(d, index=pidx)\n expected = Series(np.nan, pidx)\n expected.ix[0] = 0\n expected.ix[1] = 1\n assert_series_equal(result, expected)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: 
tm.assert_series_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_series_type=True)\n d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}\n _d = sorted(d.items())\n ser = Series(d)\n expected = Series([x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in _d]))\n check(ser, expected)\n\n d['z'] = 111.\n _d.insert(0, ('z', d['z']))\n ser = Series(d)\n expected = Series(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False))\n ser = ser.reindex(index=expected.index)\n check(ser, expected)\n\n def test_constructor_subclass_dict(self):\n data = tm.TestSubDict((x, 10.0 * x) for x in range(10))\n series = Series(data)\n refseries = Series(dict(compat.iteritems(data)))\n assert_series_equal(refseries, series)\n\n def test_constructor_dict_datetime64_index(self):\n # GH 9456\n\n dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']\n values = [42544017.198965244, 1234565, 40512335.181958228, -1]\n\n def create_data(constructor):\n return dict(zip((constructor(x) for x in dates_as_str), values))\n\n data_datetime64 = create_data(np.datetime64)\n data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))\n data_Timestamp = create_data(Timestamp)\n\n expected = Series(values, (Timestamp(x) for x in dates_as_str))\n\n result_datetime64 = Series(data_datetime64)\n result_datetime = Series(data_datetime)\n result_Timestamp = Series(data_Timestamp)\n\n assert_series_equal(result_datetime64, expected)\n assert_series_equal(result_datetime, expected)\n assert_series_equal(result_Timestamp, expected)\n\n def test_orderedDict_ctor(self):\n # GH3283\n import pandas\n import random\n data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_orderedDict_subclass_ctor(self):\n # GH3283\n import pandas\n import random\n\n class A(OrderedDict):\n pass\n data = A([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_constructor_list_of_tuples(self):\n data = [(1, 1), (2, 2), (2, 3)]\n s = Series(data)\n self.assertEqual(list(s), data)\n\n def test_constructor_tuple_of_tuples(self):\n data = ((1, 1), (2, 2), (2, 3))\n s = Series(data)\n self.assertEqual(tuple(s), data)\n\n def test_constructor_set(self):\n values = set([1, 2, 3, 4, 5])\n self.assertRaises(TypeError, Series, values)\n values = frozenset(values)\n self.assertRaises(TypeError, Series, values)\n\n def test_fromDict(self):\n data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}\n\n series = Series(data)\n self.assertTrue(tm.is_sorted(series.index))\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': '0', 'b': '1'}\n series = Series(data, dtype=float)\n self.assertEqual(series.dtype, np.float64)\n\n def test_setindex(self):\n # wrong type\n series = self.series.copy()\n self.assertRaises(TypeError, setattr, series, 'index', None)\n\n # wrong length\n series = self.series.copy()\n self.assertRaises(Exception, setattr, series, 'index',\n np.arange(len(series) - 1))\n\n # works\n series = self.series.copy()\n series.index = np.arange(len(series))\n tm.assertIsInstance(series.index, Index)\n\n def test_array_finalize(self):\n pass\n\n def test_pop(self):\n # GH 6600\n df = 
DataFrame({\n 'A': 0,\n 'B': np.arange(5,dtype='int64'),\n 'C': 0,\n })\n k = df.iloc[4]\n\n result = k.pop('B')\n self.assertEqual(result, 4)\n\n expected = Series([0, 0], index=['A', 'C'], name=4)\n assert_series_equal(k, expected)\n\n def test_not_hashable(self):\n s_empty = Series()\n s = Series([1])\n self.assertRaises(TypeError, hash, s_empty)\n self.assertRaises(TypeError, hash, s)\n\n def test_fromValue(self):\n\n nans = Series(np.NaN, index=self.ts.index)\n self.assertEqual(nans.dtype, np.float_)\n self.assertEqual(len(nans), len(self.ts))\n\n strings = Series('foo', index=self.ts.index)\n self.assertEqual(strings.dtype, np.object_)\n self.assertEqual(len(strings), len(self.ts))\n\n d = datetime.now()\n dates = Series(d, index=self.ts.index)\n self.assertEqual(dates.dtype, 'M8[ns]')\n self.assertEqual(len(dates), len(self.ts))\n\n def test_contains(self):\n tm.assert_contains_all(self.ts.index, self.ts)\n\n def test_pickle(self):\n unp_series = self._pickle_roundtrip(self.series)\n unp_ts = self._pickle_roundtrip(self.ts)\n assert_series_equal(unp_series, self.series)\n assert_series_equal(unp_ts, self.ts)\n\n def _pickle_roundtrip(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_getitem_get(self):\n idx1 = self.series.index[5]\n idx2 = self.objSeries.index[5]\n\n self.assertEqual(self.series[idx1], self.series.get(idx1))\n self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))\n\n self.assertEqual(self.series[idx1], self.series[5])\n self.assertEqual(self.objSeries[idx2], self.objSeries[5])\n\n self.assertEqual(\n self.series.get(-1), self.series.get(self.series.index[-1]))\n self.assertEqual(self.series[5], self.series.get(self.series.index[5]))\n\n # missing\n d = self.ts.index[0] - datetools.bday\n self.assertRaises(KeyError, self.ts.__getitem__, d)\n\n # None\n # GH 5652\n for s in [Series(), Series(index=list('abc'))]:\n result = s.get(None)\n self.assertIsNone(result)\n\n def test_iget(self):\n\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.iget(1)\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.irow(1)\n\n # 10711, deprecated\n with tm.assert_produces_warning(FutureWarning):\n s.iget_value(1)\n\n for i in range(len(s)):\n result = s.iloc[i]\n exp = s[s.index[i]]\n assert_almost_equal(result, exp)\n\n # pass a slice\n result = s.iloc[slice(1, 3)]\n expected = s.ix[2:4]\n assert_series_equal(result, expected)\n\n # test slice is a view\n result[:] = 0\n self.assertTrue((s[1:3] == 0).all())\n\n # list of integers\n result = s.iloc[[0, 2, 3, 4, 5]]\n expected = s.reindex(s.index[[0, 2, 3, 4, 5]])\n assert_series_equal(result, expected)\n\n def test_iget_nonunique(self):\n s = Series([0, 1, 2], index=[0, 1, 0])\n self.assertEqual(s.iloc[2], 2)\n\n def test_getitem_regression(self):\n s = Series(lrange(5), index=lrange(5))\n result = s[lrange(5)]\n assert_series_equal(result, s)\n\n def test_getitem_setitem_slice_bug(self):\n s = Series(lrange(10), lrange(10))\n result = s[-12:]\n assert_series_equal(result, s)\n\n result = s[-7:]\n assert_series_equal(result, s[3:])\n\n result = s[:-12]\n assert_series_equal(result, s[:0])\n\n s = Series(lrange(10), lrange(10))\n s[-12:] = 0\n self.assertTrue((s == 0).all())\n\n s[:-12] = 5\n self.assertTrue((s == 0).all())\n\n def test_getitem_int64(self):\n idx = np.int64(5)\n self.assertEqual(self.ts[idx], self.ts[5])\n\n def 
test_getitem_fancy(self):\n slice1 = self.series[[1, 2, 3]]\n slice2 = self.objSeries[[1, 2, 3]]\n self.assertEqual(self.series.index[2], slice1.index[1])\n self.assertEqual(self.objSeries.index[2], slice2.index[1])\n self.assertEqual(self.series[2], slice1[1])\n self.assertEqual(self.objSeries[2], slice2[1])\n\n def test_getitem_boolean(self):\n s = self.series\n mask = s > s.median()\n\n # passing list is OK\n result = s[list(mask)]\n expected = s[mask]\n assert_series_equal(result, expected)\n self.assert_numpy_array_equal(result.index, s.index[mask])\n\n def test_getitem_boolean_empty(self):\n s = Series([], dtype=np.int64)\n s.index.name = 'index_name'\n s = s[s.isnull()]\n self.assertEqual(s.index.name, 'index_name')\n self.assertEqual(s.dtype, np.int64)\n\n # GH5877\n # indexing with empty series\n s = Series(['A', 'B'])\n expected = Series(np.nan,index=['C'],dtype=object)\n result = s[Series(['C'], dtype=object)]\n assert_series_equal(result, expected)\n\n s = Series(['A', 'B'])\n expected = Series(dtype=object)\n result = s[Series([], dtype=object)]\n assert_series_equal(result, expected)\n\n # invalid because of the boolean indexer\n # that's empty or not-aligned\n def f():\n s[Series([], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def f():\n s[Series([True], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def test_getitem_generator(self):\n gen = (x > 0 for x in self.series)\n result = self.series[gen]\n result2 = self.series[iter(self.series > 0)]\n expected = self.series[self.series > 0]\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n def test_getitem_boolean_object(self):\n # using column from DataFrame\n\n s = self.series\n mask = s > s.median()\n omask = mask.astype(object)\n\n # getitem\n result = s[omask]\n expected = s[mask]\n assert_series_equal(result, expected)\n\n # setitem\n s2 = s.copy()\n cop = s.copy()\n cop[omask] = 5\n s2[mask] = 5\n assert_series_equal(cop, s2)\n\n # nans raise exception\n omask[5:10] = np.nan\n self.assertRaises(Exception, s.__getitem__, omask)\n self.assertRaises(Exception, s.__setitem__, omask, 5)\n\n def test_getitem_setitem_boolean_corner(self):\n ts = self.ts\n mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()\n\n # these used to raise...??\n\n self.assertRaises(Exception, ts.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)\n #ts[mask_shifted]\n #ts[mask_shifted] = 1\n\n self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)\n #ts.ix[mask_shifted]\n #ts.ix[mask_shifted] = 2\n\n def test_getitem_setitem_slice_integers(self):\n s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])\n\n result = s[:4]\n expected = s.reindex([2, 4, 6, 8])\n assert_series_equal(result, expected)\n\n s[:4] = 0\n self.assertTrue((s[:4] == 0).all())\n self.assertTrue(not (s[4:] == 0).any())\n\n def test_getitem_out_of_bounds(self):\n # don't segfault, GH #495\n self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))\n\n # GH #917\n s = Series([])\n self.assertRaises(IndexError, s.__getitem__, -1)\n\n def test_getitem_setitem_integers(self):\n # caused bug without test\n s = Series([1, 2, 3], ['a', 'b', 'c'])\n\n self.assertEqual(s.ix[0], s['a'])\n s.ix[0] = 5\n self.assertAlmostEqual(s['a'], 5)\n\n def test_getitem_box_float64(self):\n value = self.ts[5]\n tm.assertIsInstance(value, np.float64)\n\n def test_getitem_ambiguous_keyerror(self):\n s = 
Series(lrange(10), index=lrange(0, 20, 2))\n self.assertRaises(KeyError, s.__getitem__, 1)\n self.assertRaises(KeyError, s.ix.__getitem__, 1)\n\n def test_getitem_unordered_dup(self):\n obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])\n self.assertTrue(np.isscalar(obj['c']))\n self.assertEqual(obj['c'], 0)\n\n def test_getitem_dups_with_missing(self):\n\n # breaks reindex, so need to use .ix internally\n # GH 4246\n s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])\n expected = s.ix[['foo', 'bar', 'bah', 'bam']]\n result = s[['foo', 'bar', 'bah', 'bam']]\n assert_series_equal(result, expected)\n\n def test_getitem_dups(self):\n s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)\n expected = Series([3,4],index=['C','C'],dtype=np.int64)\n result = s['C']\n assert_series_equal(result, expected)\n\n def test_getitem_dataframe(self):\n rng = list(range(10))\n s = pd.Series(10, index=rng)\n df = pd.DataFrame(rng, index=rng)\n self.assertRaises(TypeError, s.__getitem__, df>5)\n\n def test_setitem_ambiguous_keyerror(self):\n s = Series(lrange(10), index=lrange(0, 20, 2))\n\n # equivalent of an append\n s2 = s.copy()\n s2[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n s2 = s.copy()\n s2.ix[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n def test_setitem_float_labels(self):\n # note labels are floats\n s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])\n tmp = s.copy()\n\n s.ix[1] = 'zoo'\n tmp.iloc[2] = 'zoo'\n\n assert_series_equal(s, tmp)\n\n def test_slice(self):\n numSlice = self.series[10:20]\n numSliceEnd = self.series[-10:]\n objSlice = self.objSeries[10:20]\n\n self.assertNotIn(self.series.index[9], numSlice.index)\n self.assertNotIn(self.objSeries.index[9], objSlice.index)\n\n self.assertEqual(len(numSlice), len(numSlice.index))\n self.assertEqual(self.series[numSlice.index[0]],\n numSlice[numSlice.index[0]])\n\n self.assertEqual(numSlice.index[1], self.series.index[11])\n\n self.assertTrue(tm.equalContents(numSliceEnd,\n np.array(self.series)[-10:]))\n\n # test return view\n sl = self.series[10:20]\n sl[:] = 0\n self.assertTrue((self.series[10:20] == 0).all())\n\n def test_slice_can_reorder_not_uniquely_indexed(self):\n s = Series(1, index=['a', 'a', 'b', 'b', 'c'])\n result = s[::-1] # it works!\n\n def test_slice_float_get_set(self):\n\n self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])\n def f():\n self.ts[4.0:10.0] = 0\n self.assertRaises(TypeError, f)\n\n self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))\n self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)\n\n def test_slice_floats2(self):\n s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))\n\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n i = np.arange(10, 20, dtype=float)\n i[2] = 12.2\n s.index = i\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n def test_slice_float64(self):\n\n values = np.arange(10., 50., 2)\n index = Index(values)\n\n start, end = values[[5, 15]]\n\n s = Series(np.random.randn(20), index=index)\n\n result = s[start:end]\n expected = s.iloc[5:16]\n assert_series_equal(result, expected)\n\n result = s.loc[start:end]\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(20, 3), index=index)\n\n result = df[start:end]\n expected = df.iloc[5:16]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[start:end]\n tm.assert_frame_equal(result, 
expected)\n\n def test_setitem(self):\n self.ts[self.ts.index[5]] = np.NaN\n self.ts[[1, 2, 17]] = np.NaN\n self.ts[6] = np.NaN\n self.assertTrue(np.isnan(self.ts[6]))\n self.assertTrue(np.isnan(self.ts[2]))\n self.ts[np.isnan(self.ts)] = 5\n self.assertFalse(np.isnan(self.ts[2]))\n\n # caught this bug when writing tests\n series = Series(tm.makeIntIndex(20).astype(float),\n index=tm.makeIntIndex(20))\n\n series[::2] = 0\n self.assertTrue((series[::2] == 0).all())\n\n # set item that's not contained\n s = self.series.copy()\n s['foobar'] = 1\n\n app = Series([1], index=['foobar'], name='series')\n expected = self.series.append(app)\n assert_series_equal(s, expected)\n\n # Test for issue #10193\n key = pd.Timestamp('2012-01-01')\n series = pd.Series()\n series[key] = 47\n expected = pd.Series(47, [key])\n assert_series_equal(series, expected)\n\n series = pd.Series([], pd.DatetimeIndex([], freq='D'))\n series[key] = 47\n expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))\n assert_series_equal(series, expected)\n\n def test_setitem_dtypes(self):\n\n # change dtypes\n # GH 4463\n expected = Series([np.nan,2,3])\n\n s = Series([1,2,3])\n s.iloc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s.loc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([False])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan]))\n\n s = Series([False,True])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan,1.0]))\n\n def test_set_value(self):\n idx = self.ts.index[10]\n res = self.ts.set_value(idx, 0)\n self.assertIs(res, self.ts)\n self.assertEqual(self.ts[idx], 0)\n\n # equiv\n s = self.series.copy()\n res = s.set_value('foobar', 0)\n self.assertIs(res, s)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res['foobar'], 0)\n\n s = self.series.copy()\n s.loc['foobar'] = 0\n self.assertEqual(s.index[-1], 'foobar')\n self.assertEqual(s['foobar'], 0)\n\n def test_setslice(self):\n sl = self.ts[5:20]\n self.assertEqual(len(sl), len(sl.index))\n self.assertTrue(sl.index.is_unique)\n\n def test_basic_getitem_setitem_corner(self):\n # invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2] = 2\n\n # weird lists. 
[slice(0, 5)] will work but not two slices\n result = self.ts[[slice(None, 5)]]\n expected = self.ts[:5]\n assert_series_equal(result, expected)\n\n # OK\n self.assertRaises(Exception, self.ts.__getitem__,\n [5, slice(None, None)])\n self.assertRaises(Exception, self.ts.__setitem__,\n [5, slice(None, None)], 2)\n\n def test_reshape_non_2d(self):\n # GH 4554\n x = Series(np.random.random(201), name='x')\n self.assertTrue(x.reshape(x.shape,) is x)\n\n # GH 2719\n a = Series([1, 2, 3, 4])\n result = a.reshape(2, 2)\n expected = a.values.reshape(2, 2)\n tm.assert_numpy_array_equal(result, expected)\n self.assertTrue(type(result) is type(expected))\n\n def test_reshape_2d_return_array(self):\n x = Series(np.random.random(201), name='x')\n result = x.reshape((-1, 1))\n self.assertNotIsInstance(result, Series)\n\n result2 = np.reshape(x, (-1, 1))\n self.assertNotIsInstance(result2, Series)\n\n result = x[:, None]\n expected = x.reshape((-1, 1))\n assert_almost_equal(result, expected)\n\n def test_basic_getitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n result = self.ts[indices]\n expected = self.ts.reindex(indices)\n assert_series_equal(result, expected)\n\n result = self.ts[indices[0]:indices[2]]\n expected = self.ts.ix[indices[0]:indices[2]]\n assert_series_equal(result, expected)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 2, 5, 7, 8]\n arr_inds = np.array([0, 2, 5, 7, 8])\n result = s[inds]\n expected = s.reindex(inds)\n assert_series_equal(result, expected)\n\n result = s[arr_inds]\n expected = s.reindex(arr_inds)\n assert_series_equal(result, expected)\n\n def test_basic_setitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices] = 0\n exp.ix[indices] = 0\n assert_series_equal(cp, exp)\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices[0]:indices[2]] = 0\n exp.ix[indices[0]:indices[2]] = 0\n assert_series_equal(cp, exp)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 4, 6]\n arr_inds = np.array([0, 4, 6])\n\n cp = s.copy()\n exp = s.copy()\n s[inds] = 0\n s.ix[inds] = 0\n assert_series_equal(cp, exp)\n\n cp = s.copy()\n exp = s.copy()\n s[arr_inds] = 0\n s.ix[arr_inds] = 0\n assert_series_equal(cp, exp)\n\n inds_notfound = [0, 4, 5, 6]\n arr_inds_notfound = np.array([0, 4, 5, 6])\n self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)\n self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)\n\n def test_ix_getitem(self):\n inds = self.series.index[[3, 4, 7]]\n assert_series_equal(self.series.ix[inds], self.series.reindex(inds))\n assert_series_equal(self.series.ix[5::2], self.series[5::2])\n\n # slice with indices\n d1, d2 = self.ts.index[[5, 15]]\n result = self.ts.ix[d1:d2]\n expected = self.ts.truncate(d1, d2)\n assert_series_equal(result, expected)\n\n # boolean\n mask = self.series > self.series.median()\n assert_series_equal(self.series.ix[mask], self.series[mask])\n\n # ask for index value\n self.assertEqual(self.ts.ix[d1], self.ts[d1])\n self.assertEqual(self.ts.ix[d2], self.ts[d2])\n\n def test_ix_getitem_not_monotonic(self):\n d1, d2 = self.ts.index[[5, 15]]\n\n ts2 = self.ts[::2][[1, 2, 0]]\n\n self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))\n self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)\n\n def test_ix_getitem_setitem_integer_slice_keyerrors(self):\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # this 
is OK\n cp = s.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).all())\n\n # so is this\n cp = s.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = s.ix[4:10]\n result2 = s.ix[3:11]\n expected = s.reindex([4, 6, 8, 10])\n\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]\n self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)\n\n def test_ix_getitem_iterator(self):\n idx = iter(self.series.index[:10])\n result = self.series.ix[idx]\n assert_series_equal(result, self.series[:10])\n\n def test_where(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(cond).dropna()\n rs2 = s[cond]\n assert_series_equal(rs, rs2)\n\n rs = s.where(cond, -s)\n assert_series_equal(rs, s.abs())\n\n rs = s.where(cond)\n assert(s.shape == rs.shape)\n assert(rs is not s)\n\n # test alignment\n cond = Series([True,False,False,True,False],index=s.index)\n s2 = -(s.abs())\n\n expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)\n rs = s2.where(cond[:3])\n assert_series_equal(rs, expected)\n\n expected = s2.abs()\n expected.ix[0] = s2[0]\n rs = s2.where(cond[:3], -s2)\n assert_series_equal(rs, expected)\n\n self.assertRaises(ValueError, s.where, 1)\n self.assertRaises(ValueError, s.where, cond[:3].values, -s)\n\n # GH 2745\n s = Series([1, 2])\n s[[True, False]] = [0, 1]\n expected = Series([0, 2])\n assert_series_equal(s, expected)\n\n # failures\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [])\n\n # unsafe dtype changes\n for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # these are allowed operations, but are upcasted\n for dtype in [np.int64, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n s[mask] = values\n expected = Series(values + lrange(5, 10), dtype='float64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # GH 9731\n s = Series(np.arange(10), dtype='int64')\n mask = s > 5\n values = [2.5, 3.5, 4.5, 5.5]\n s[mask] = values\n expected = Series(lrange(6) + values, dtype='float64')\n assert_series_equal(s, expected)\n\n # can't do these as we are forced to change the itemsize of the input\n # to something we cannot\n for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n self.assertRaises(Exception, s.__setitem__, tuple(mask), values)\n\n # GH3235\n s = Series(np.arange(10), dtype='int64')\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n s = Series(np.arange(10), dtype='int64')\n mask = s > 5\n s[mask] = [0] * 4\n expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')\n assert_series_equal(s, expected)\n\n s = Series(np.arange(10))\n mask = s > 5\n def f():\n s[mask] = [5,4,3,2,1]\n self.assertRaises(ValueError, f)\n def f():\n 
s[mask] = [0] * 5\n self.assertRaises(ValueError, f)\n\n # dtype changes\n s = Series([1,2,3,4])\n result = s.where(s>2,np.nan)\n expected = Series([np.nan,np.nan,3,4])\n assert_series_equal(result, expected)\n\n # GH 4667\n # setting with None changes dtype\n s = Series(range(10)).astype(float)\n s[8] = None\n result = s[8]\n self.assertTrue(isnull(result))\n\n s = Series(range(10)).astype(float)\n s[s > 8] = None\n result = s[isnull(s)]\n expected = Series(np.nan,index=[9])\n assert_series_equal(result, expected)\n\n def test_where_setitem_invalid(self):\n\n # GH 2702\n # make sure correct exceptions are raised on invalid list assignment\n\n # slice\n s = Series(list('abc'))\n def f():\n s[0:3] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[0:3] = list(range(3))\n expected = Series([0,1,2])\n assert_series_equal(s.astype(np.int64), expected, )\n\n # slice with step\n s = Series(list('abcdef'))\n def f():\n s[0:4:2] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abcdef'))\n s[0:4:2] = list(range(2))\n expected = Series([0,'b',1,'d','e','f'])\n assert_series_equal(s, expected)\n\n # neg slices\n s = Series(list('abcdef'))\n def f():\n s[:-1] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[-3:-1] = list(range(2))\n expected = Series(['a','b','c',0,1,'f'])\n assert_series_equal(s, expected)\n\n # list\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(2))\n self.assertRaises(ValueError, f)\n\n # scalar\n s = Series(list('abc'))\n s[0] = list(range(10))\n expected = Series([list(range(10)),'b','c'])\n assert_series_equal(s, expected)\n\n def test_where_broadcast(self):\n # Test a variety of differently sized series\n for size in range(2, 6):\n # Test a variety of boolean indices\n for selection in [np.resize([True, False, False, False, False], size), # First element should be set\n # Set alternating elements]\n np.resize([True, False], size),\n np.resize([False], size)]: # No element should be set\n # Test a variety of different numbers as content\n for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:\n # Test numpy arrays, lists and tuples as the input to be\n # broadcast\n for arr in [np.array([item]), [item], (item,)]:\n data = np.arange(size, dtype=float)\n s = Series(data)\n s[selection] = arr\n # Construct the expected series by taking the source\n # data or item based on the selection\n expected = Series([item if use_item else data[i]\n for i, use_item in enumerate(selection)])\n assert_series_equal(s, expected)\n\n s = Series(data)\n result = s.where(~selection, arr)\n assert_series_equal(result, expected)\n\n def test_where_inplace(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n\n rs.where(cond, inplace=True)\n assert_series_equal(rs.dropna(), s[cond])\n assert_series_equal(rs, s.where(cond))\n\n rs = s.copy()\n rs.where(cond, -s, inplace=True)\n assert_series_equal(rs, s.where(cond, -s))\n\n def test_where_dups(self):\n # GH 4550\n # where crashes with dups in index\n s1 = Series(list(range(3)))\n s2 = Series(list(range(3)))\n comb = pd.concat([s1,s2])\n result = comb.where(comb < 2)\n expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])\n assert_series_equal(result, expected)\n\n # GH 4548\n # inplace updating not working with dups\n comb[comb<1] = 5\n expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n comb[comb<2] 
+= 10\n expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n def test_where_datetime(self):\n s = Series(date_range('20130102', periods=2))\n expected = Series([10, 10], dtype='datetime64[ns]')\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype='datetime64[ns]')\n assert_series_equal(rs, expected)\n\n def test_where_timedelta(self):\n s = Series([1, 2], dtype='timedelta64[ns]')\n expected = Series([10, 10], dtype='timedelta64[ns]')\n mask = np.array([False, False])\n\n rs = s.where(mask, [10, 10])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, 10.0)\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, 10.0])\n assert_series_equal(rs, expected)\n\n rs = s.where(mask, [10.0, np.nan])\n expected = Series([10, None], dtype='timedelta64[ns]')\n assert_series_equal(rs, expected)\n\n def test_mask(self):\n # compare with tested results in test_where\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(~cond, np.nan)\n assert_series_equal(rs, s.mask(cond))\n\n rs = s.where(~cond)\n rs2 = s.mask(cond)\n assert_series_equal(rs, rs2)\n\n rs = s.where(~cond, -s)\n rs2 = s.mask(cond, -s)\n assert_series_equal(rs, rs2)\n\n cond = Series([True, False, False, True, False], index=s.index)\n s2 = -(s.abs())\n rs = s2.where(~cond[:3])\n rs2 = s2.mask(cond[:3])\n assert_series_equal(rs, rs2)\n\n rs = s2.where(~cond[:3], -s2)\n rs2 = s2.mask(cond[:3], -s2)\n assert_series_equal(rs, rs2)\n\n self.assertRaises(ValueError, s.mask, 1)\n self.assertRaises(ValueError, s.mask, cond[:3].values, -s)\n\n # dtype changes\n s = Series([1,2,3,4])\n result = s.mask(s>2, np.nan)\n expected = Series([1, 2, np.nan, np.nan])\n assert_series_equal(result, expected)\n\n def test_mask_broadcast(self):\n # GH 8801\n # copied from test_where_broadcast\n for size in range(2, 6):\n for selection in [np.resize([True, False, False, False, False], size), # First element should be set\n # Set alternating elements]\n np.resize([True, False], size),\n np.resize([False], size)]: # No element should be set\n for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:\n for arr in [np.array([item]), [item], (item,)]:\n data = np.arange(size, dtype=float)\n s = Series(data)\n result = s.mask(selection, arr)\n expected = Series([item if use_item else data[i]\n for i, use_item in enumerate(selection)])\n assert_series_equal(result, expected)\n\n def test_mask_inplace(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n rs.mask(cond, inplace=True)\n assert_series_equal(rs.dropna(), s[~cond])\n assert_series_equal(rs, s.mask(cond))\n\n rs = s.copy()\n rs.mask(cond, -s, inplace=True)\n assert_series_equal(rs, s.mask(cond, -s))\n\n def test_drop(self):\n\n # unique\n s = Series([1,2],index=['one','two'])\n expected = Series([1],index=['one'])\n result = s.drop(['two'])\n assert_series_equal(result,expected)\n result = s.drop('two', axis='rows')\n assert_series_equal(result,expected)\n\n # non-unique\n # GH 5248\n s = Series([1,1,2],index=['one','two','one'])\n expected = Series([1,2],index=['one','one'])\n result = s.drop(['two'], axis=0)\n 
assert_series_equal(result,expected)\n result = s.drop('two')\n assert_series_equal(result,expected)\n\n expected = Series([1],index=['two'])\n result = s.drop(['one'])\n assert_series_equal(result,expected)\n result = s.drop('one')\n assert_series_equal(result,expected)\n\n # single string/tuple-like\n s = Series(range(3),index=list('abc'))\n self.assertRaises(ValueError, s.drop, 'bc')\n self.assertRaises(ValueError, s.drop, ('a',))\n\n # errors='ignore'\n s = Series(range(3),index=list('abc'))\n result = s.drop('bc', errors='ignore')\n assert_series_equal(result, s)\n result = s.drop(['a', 'd'], errors='ignore')\n expected = s.ix[1:]\n assert_series_equal(result, expected)\n\n # bad axis\n self.assertRaises(ValueError, s.drop, 'one', axis='columns')\n\n # GH 8522\n s = Series([2,3], index=[True, False])\n self.assertTrue(s.index.is_object())\n result = s.drop(True)\n expected = Series([3],index=[False])\n assert_series_equal(result,expected)\n\n def test_ix_setitem(self):\n inds = self.series.index[[3, 4, 7]]\n\n result = self.series.copy()\n result.ix[inds] = 5\n\n expected = self.series.copy()\n expected[[3, 4, 7]] = 5\n assert_series_equal(result, expected)\n\n result.ix[5:10] = 10\n expected[5:10] = 10\n assert_series_equal(result, expected)\n\n # set slice with indices\n d1, d2 = self.series.index[[5, 15]]\n result.ix[d1:d2] = 6\n expected[5:16] = 6 # because it's inclusive\n assert_series_equal(result, expected)\n\n # set index value\n self.series.ix[d1] = 4\n self.series.ix[d2] = 6\n self.assertEqual(self.series[d1], 4)\n self.assertEqual(self.series[d2], 6)\n\n def test_where_numeric_with_string(self):\n # GH 9280\n s = pd.Series([1, 2, 3])\n w = s.where(s>1, 'X')\n\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, ['X', 'Y', 'Z'])\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, np.array(['X', 'Y', 'Z']))\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n def test_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n # similarly indexed series\n result = self.series.copy()\n result[mask] = self.series * 2\n expected = self.series * 2\n assert_series_equal(result[mask], expected[mask])\n\n # needs alignment\n result = self.series.copy()\n result[mask] = (self.series * 2)[0:5]\n expected = (self.series * 2)[0:5].reindex_like(self.series)\n expected[-mask] = self.series[mask]\n assert_series_equal(result[mask], expected[mask])\n\n def test_ix_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n result = self.series.copy()\n result.ix[mask] = 0\n expected = self.series\n expected[mask] = 0\n assert_series_equal(result, expected)\n\n def test_ix_setitem_corner(self):\n inds = list(self.series.index[[5, 8, 12]])\n self.series.ix[inds] = 5\n self.assertRaises(Exception, self.series.ix.__setitem__,\n inds + ['foo'], 5)\n\n def test_get_set_boolean_different_order(self):\n ordered = self.series.sort_values()\n\n # setting\n copy = self.series.copy()\n copy[ordered > 0] = 0\n\n expected = self.series.copy()\n expected[expected > 0] = 0\n\n assert_series_equal(copy, 
expected)\n\n # getting\n sel = self.series[ordered > 0]\n exp = self.series[self.series > 0]\n assert_series_equal(sel, exp)\n\n def test_repr(self):\n str(self.ts)\n str(self.series)\n str(self.series.astype(int))\n str(self.objSeries)\n\n str(Series(tm.randn(1000), index=np.arange(1000)))\n str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))\n\n # empty\n str(self.empty)\n\n # with NaNs\n self.series[5:7] = np.NaN\n str(self.series)\n\n # with Nones\n ots = self.ts.astype('O')\n ots[::2] = None\n repr(ots)\n\n # various names\n for name in ['', 1, 1.2, 'foo', u('\\u03B1\\u03B2\\u03B3'),\n 'loooooooooooooooooooooooooooooooooooooooooooooooooooong',\n ('foo', 'bar', 'baz'),\n (1, 2),\n ('foo', 1, 2.3),\n (u('\\u03B1'), u('\\u03B2'), u('\\u03B3')),\n (u('\\u03B1'), 'bar')]:\n self.series.name = name\n repr(self.series)\n\n biggie = Series(tm.randn(1000), index=np.arange(1000),\n name=('foo', 'bar', 'baz'))\n repr(biggie)\n\n # 0 as name\n ser = Series(np.random.randn(100), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n # tidy repr\n ser = Series(np.random.randn(1001), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n ser = Series([\"a\\n\\r\\tb\"], name=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(ser))\n self.assertFalse(\"\\r\" in repr(ser))\n self.assertFalse(\"a\\n\" in repr(ser))\n\n # with empty series (#4651)\n s = Series([], dtype=np.int64, name='foo')\n self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')\n\n s = Series([], dtype=np.int64, name=None)\n self.assertEqual(repr(s), 'Series([], dtype: int64)')\n\n def test_tidy_repr(self):\n a = Series([u(\"\\u05d0\")] * 1000)\n a.name = 'title1'\n repr(a) # should not raise exception\n\n def test_repr_bool_fails(self):\n s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])\n\n import sys\n\n buf = StringIO()\n tmp = sys.stderr\n sys.stderr = buf\n try:\n # it works (with no Cython exception barf)!\n repr(s)\n finally:\n sys.stderr = tmp\n self.assertEqual(buf.getvalue(), '')\n\n def test_repr_name_iterable_indexable(self):\n s = Series([1, 2, 3], name=np.int64(3))\n\n # it works!\n repr(s)\n\n s.name = (u(\"\\u05d0\"),) * 2\n repr(s)\n\n def test_repr_should_return_str(self):\n # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__\n # http://docs.python.org/reference/datamodel.html#object.__repr__\n # ...The return value must be a string object.\n\n # (str on py2.x, str (unicode) on py3)\n\n data = [8, 5, 3, 5]\n index1 = [u(\"\\u03c3\"), u(\"\\u03c4\"), u(\"\\u03c5\"), u(\"\\u03c6\")]\n df = Series(data, index=index1)\n self.assertTrue(type(df.__repr__() == str)) # both py2 / 3\n\n def test_repr_max_rows(self):\n # GH 6863\n with pd.option_context('max_rows', None):\n str(Series(range(1001))) # should not raise exception\n\n def test_unicode_string_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_timeseries_repr_object_dtype(self):\n index = Index([datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)], dtype=object)\n ts = Series(np.random.randn(len(index)), index)\n repr(ts)\n\n ts = tm.makeTimeSeries(1000)\n self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))\n\n ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]\n repr(ts2).splitlines()[-1]\n\n def 
test_timeseries_periodindex(self):\n # GH2891\n from pandas import period_range\n prng = period_range('1/1/2011', '1/1/2012', freq='M')\n ts = Series(np.random.randn(len(prng)), prng)\n new_ts = self.round_trip_pickle(ts)\n self.assertEqual(new_ts.index.freq, 'M')\n\n def test_iter(self):\n for i, val in enumerate(self.series):\n self.assertEqual(val, self.series[i])\n\n for i, val in enumerate(self.ts):\n self.assertEqual(val, self.ts[i])\n\n def test_keys(self):\n # HACK: By doing this in two stages, we avoid 2to3 wrapping the call\n # to .keys() in a list()\n getkeys = self.ts.keys\n self.assertIs(getkeys(), self.ts.index)\n\n def test_values(self):\n self.assert_numpy_array_equal(self.ts, self.ts.values)\n\n def test_iteritems(self):\n for idx, val in compat.iteritems(self.series):\n self.assertEqual(val, self.series[idx])\n\n for idx, val in compat.iteritems(self.ts):\n self.assertEqual(val, self.ts[idx])\n\n # assert is lazy (generators don't define reverse, lists do)\n self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, check_allna=True)\n\n def test_sum_inf(self):\n import pandas.core.nanops as nanops\n\n s = Series(np.random.randn(10))\n s2 = s.copy()\n\n s[5:8] = np.inf\n s2[5:8] = np.nan\n\n self.assertTrue(np.isinf(s.sum()))\n\n arr = np.random.randn(100, 100).astype('f4')\n arr[:, 2] = np.inf\n\n with cf.option_context(\"mode.use_inf_as_null\", True):\n assert_almost_equal(s.sum(), s2.sum())\n\n res = nanops.nansum(arr, axis=1)\n self.assertTrue(np.isinf(res).all())\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean)\n\n def test_median(self):\n self._check_stat_op('median', np.median)\n\n # test with integers, test failure\n int_ts = Series(np.ones(10, dtype=int), index=lrange(10))\n self.assertAlmostEqual(np.median(int_ts), int_ts.median())\n\n def test_mode(self):\n s = Series([12, 12, 11, 10, 19, 11])\n exp = Series([11, 12])\n assert_series_equal(s.mode(), exp)\n\n assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))\n\n lst = [5] * 20 + [1] * 10 + [6] * 25\n np.random.shuffle(lst)\n s = Series(lst)\n assert_series_equal(s.mode(), Series([6]))\n\n s = Series([5] * 10)\n assert_series_equal(s.mode(), Series([5]))\n\n s = Series(lst)\n s[0] = np.nan\n assert_series_equal(s.mode(), Series([6.]))\n\n s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))\n assert_series_equal(s.mode(), Series(['e']))\n\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series([], dtype=\"M8[ns]\"))\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',\n '2013-01-02'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],\n dtype='M8[ns]'))\n\n def test_prod(self):\n self._check_stat_op('prod', np.prod)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_objects=True)\n\n def test_max(self):\n self._check_stat_op('max', np.max, check_objects=True)\n\n def test_var_std(self):\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n result = self.ts.std(ddof=4)\n expected = np.std(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n result = self.ts.var(ddof=4)\n expected = np.var(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.var(ddof=1)\n self.assertTrue(isnull(result))\n\n result = 
s.std(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.ts.sem(ddof=4)\n expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.sem(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import skew\n alt = lambda x: skew(x, bias=False)\n self._check_stat_op('skew', alt)\n\n # test corner cases, skew() returns NaN unless there's at least 3 values\n min_N = 3\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.skew()))\n self.assertTrue(np.isnan(df.skew()).all())\n else:\n self.assertEqual(0, s.skew())\n self.assertTrue((df.skew() == 0).all())\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n alt = lambda x: kurtosis(x, bias=False)\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])\n\n # test corner cases, kurt() returns NaN unless there's at least 4 values\n min_N = 4\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.kurt()))\n self.assertTrue(np.isnan(df.kurt()).all())\n else:\n self.assertEqual(0, s.kurt())\n self.assertTrue((df.kurt() == 0).all())\n\n def test_argsort(self):\n self._check_accum_op('argsort')\n argsorted = self.ts.argsort()\n self.assertTrue(issubclass(argsorted.dtype.type, np.integer))\n\n # GH 2967 (introduced bug in 0.11-dev I think)\n s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])\n self.assertEqual(s.dtype, 'datetime64[ns]')\n shifted = s.shift(-1)\n self.assertEqual(shifted.dtype, 'datetime64[ns]')\n self.assertTrue(isnull(shifted[4]))\n\n result = s.argsort()\n expected = Series(lrange(5), dtype='int64')\n assert_series_equal(result, expected)\n\n result = shifted.argsort()\n expected = Series(lrange(4) + [-1], dtype='int64')\n assert_series_equal(result, expected)\n\n def test_argsort_stable(self):\n s = Series(np.random.randint(0, 100, size=10000))\n mindexer = s.argsort(kind='mergesort')\n qindexer = s.argsort()\n\n mexpected = np.argsort(s.values, kind='mergesort')\n qexpected = np.argsort(s.values, kind='quicksort')\n\n self.assert_numpy_array_equal(mindexer, mexpected)\n self.assert_numpy_array_equal(qindexer, qexpected)\n self.assertFalse(np.array_equal(qindexer, mindexer))\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n s = Series(np.arange(6), index=index)\n\n # no change, position\n result = s.reorder_levels([0, 1, 2])\n assert_series_equal(s, result)\n\n # no change, labels\n result = s.reorder_levels(['L0', 'L1', 'L2'])\n assert_series_equal(s, result)\n\n # rotate, position\n result = s.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = Series(np.arange(6), index=e_idx)\n 
assert_series_equal(result, expected)\n\n result = s.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = Series(range(6), index=e_idx)\n assert_series_equal(result, expected)\n\n result = s.reorder_levels(['L0', 'L0', 'L0'])\n assert_series_equal(result, expected)\n\n def test_cumsum(self):\n self._check_accum_op('cumsum')\n\n def test_cumprod(self):\n self._check_accum_op('cumprod')\n\n def test_cummin(self):\n self.assert_numpy_array_equal(self.ts.cummin(),\n np.minimum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummin()[1::2]\n expected = np.minimum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummax(self):\n self.assert_numpy_array_equal(self.ts.cummax(),\n np.maximum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummax()[1::2]\n expected = np.maximum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummin_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))\n result = s.cummin(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummax_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummin_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))\n result = s.cummin(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummax_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_npdiff(self):\n raise nose.SkipTest(\"skipping due to Series no longer being an \"\n \"ndarray\")\n\n # no longer works as the return type of np.diff is now nd.array\n s = Series(np.arange(5))\n\n r = np.diff(s)\n assert_series_equal(Series([nan, 0, 0, 0, nan]), r)\n\n def _check_stat_op(self, name, alternate, check_objects=False, check_allna=False):\n import pandas.core.nanops as nanops\n\n def testit():\n f = getattr(Series, name)\n\n # add some 
NaNs\n self.series[5:15] = np.NaN\n\n # idxmax, idxmin, min, and max are valid for dates\n if name not in ['max','min']:\n ds = Series(date_range('1/1/2001', periods=10))\n self.assertRaises(TypeError, f, ds)\n\n # skipna or no\n self.assertTrue(notnull(f(self.series)))\n self.assertTrue(isnull(f(self.series, skipna=False)))\n\n # check the result is correct\n nona = self.series.dropna()\n assert_almost_equal(f(nona), alternate(nona.values))\n assert_almost_equal(f(self.series), alternate(nona.values))\n\n allna = self.series * nan\n\n if check_allna:\n # xref 9422\n # bottleneck >= 1.0 give 0.0 for an allna Series sum\n try:\n self.assertTrue(nanops._USE_BOTTLENECK)\n import bottleneck as bn\n self.assertTrue(bn.__version__ >= LooseVersion('1.0'))\n self.assertEqual(f(allna),0.0)\n except:\n self.assertTrue(np.isnan(f(allna)))\n\n # dtype=object with None, it works!\n s = Series([1, 2, 3, None, 5])\n f(s)\n\n # 2888\n l = [0]\n l.extend(lrange(2 ** 40, 2 ** 40+1000))\n s = Series(l, dtype='int64')\n assert_almost_equal(float(f(s)), float(alternate(s.values)))\n\n # check date range\n if check_objects:\n s = Series(bdate_range('1/1/2000', periods=10))\n res = f(s)\n exp = alternate(s)\n self.assertEqual(res, exp)\n\n # check on string data\n if name not in ['sum','min','max']:\n self.assertRaises(TypeError, f, Series(list('abc')))\n\n # Invalid axis.\n self.assertRaises(ValueError, f, self.series, axis=1)\n\n # Unimplemented numeric_only parameter.\n if 'numeric_only' in getargspec(f).args:\n self.assertRaisesRegexp(NotImplementedError, name, f,\n self.series, numeric_only=True)\n\n testit()\n\n try:\n import bottleneck as bn\n nanops._USE_BOTTLENECK = False\n testit()\n nanops._USE_BOTTLENECK = True\n except ImportError:\n pass\n\n def _check_accum_op(self, name):\n func = getattr(np, name)\n self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))\n\n # with missing values\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = func(ts)[1::2]\n expected = func(np.array(ts.valid()))\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_round(self):\n # numpy.round doesn't preserve metadata, probably a numpy bug,\n # re: GH #314\n result = np.round(self.ts, 2)\n expected = Series(np.round(self.ts.values, 2), index=self.ts.index,\n name='ts')\n assert_series_equal(result, expected)\n self.assertEqual(result.name, self.ts.name)\n\n def test_prod_numpy16_bug(self):\n s = Series([1., 1., 1.], index=lrange(3))\n result = s.prod()\n self.assertNotIsInstance(result, Series)\n\n def test_quantile(self):\n from numpy import percentile\n\n q = self.ts.quantile(0.1)\n self.assertEqual(q, percentile(self.ts.valid(), 10))\n\n q = self.ts.quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # object dtype\n q = Series(self.ts,dtype=object).quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # datetime64[ns] dtype\n dts = self.ts.index.to_series()\n q = dts.quantile(.2)\n self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))\n\n # timedelta64[ns] dtype\n tds = dts.diff()\n q = tds.quantile(.25)\n self.assertEqual(q, pd.to_timedelta('24:00:00'))\n\n # GH7661\n result = Series([np.timedelta64('NaT')]).sum()\n self.assertTrue(result is pd.NaT)\n\n msg = 'percentiles should all be in the interval \\\\[0, 1\\\\]'\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with tm.assertRaisesRegexp(ValueError, msg):\n self.ts.quantile(invalid)\n\n def test_quantile_multi(self):\n from numpy import percentile\n\n qs = [.1, .9]\n result = 
self.ts.quantile(qs)\n expected = pd.Series([percentile(self.ts.valid(), 10),\n percentile(self.ts.valid(), 90)],\n index=qs, name=self.ts.name)\n assert_series_equal(result, expected)\n\n dts = self.ts.index.to_series()\n dts.name = 'xxx'\n result = dts.quantile((.2, .2))\n expected = Series([Timestamp('2000-01-10 19:12:00'),\n Timestamp('2000-01-10 19:12:00')],\n index=[.2, .2], name='xxx')\n assert_series_equal(result, expected)\n\n result = self.ts.quantile([])\n expected = pd.Series([], name=self.ts.name)\n assert_series_equal(result, expected)\n\n def test_append(self):\n appendedSeries = self.series.append(self.objSeries)\n for idx, value in compat.iteritems(appendedSeries):\n if idx in self.series.index:\n self.assertEqual(value, self.series[idx])\n elif idx in self.objSeries.index:\n self.assertEqual(value, self.objSeries[idx])\n else:\n self.fail(\"orphaned index!\")\n\n self.assertRaises(ValueError, self.ts.append, self.ts,\n verify_integrity=True)\n\n def test_append_many(self):\n pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]\n\n result = pieces[0].append(pieces[1:])\n assert_series_equal(result, self.ts)\n\n def test_all_any(self):\n ts = tm.makeTimeSeries()\n bool_series = ts > 0\n self.assertFalse(bool_series.all())\n self.assertTrue(bool_series.any())\n\n # Alternative types, with implicit 'object' dtype.\n s = Series(['abc', True])\n self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'\n\n def test_all_any_params(self):\n # Check skipna, with implicit 'object' dtype.\n s1 = Series([np.nan, True])\n s2 = Series([np.nan, False])\n self.assertTrue(s1.all(skipna=False)) # nan && True => True\n self.assertTrue(s1.all(skipna=True))\n self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan\n self.assertFalse(s2.any(skipna=True))\n\n # Check level.\n s = pd.Series([False, False, True, True, False, True],\n index=[0, 0, 1, 1, 2, 2])\n assert_series_equal(s.all(level=0), Series([False, True, False]))\n assert_series_equal(s.any(level=0), Series([False, True, True]))\n\n # bool_only is not implemented with level option.\n self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)\n self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)\n\n # bool_only is not implemented alone.\n self.assertRaises(NotImplementedError, s.any, bool_only=True)\n self.assertRaises(NotImplementedError, s.all, bool_only=True)\n\n def test_op_method(self):\n def check(series, other, check_reverse=False):\n simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']\n if not compat.PY3:\n simple_ops.append('div')\n\n for opname in simple_ops:\n op = getattr(Series, opname)\n\n if op == 'div':\n alt = operator.truediv\n else:\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n tm.assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, \"r\" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n tm.assert_almost_equal(result, expected)\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts[::2])\n check(self.ts, 5, check_reverse=True)\n check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)\n\n def test_neg(self):\n assert_series_equal(-self.series, -1 * self.series)\n\n def test_invert(self):\n assert_series_equal(-(self.series < 0), ~(self.series < 0))\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] % p['second']\n expected = Series(p['first'].values 
%\n p['second'].values, dtype='float64')\n expected.iloc[0:3] = np.nan\n assert_series_equal(result, expected)\n\n result = p['first'] % 0\n expected = Series(np.nan, index=p.index, name='first')\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n expected = Series(p['first'].values % p['second'].values)\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n result2 = p['second'] % p['first']\n self.assertFalse(np.array_equal(result, result2))\n\n # GH 9144\n s = Series([0, 1])\n\n result = s % 0\n expected = Series([nan, nan])\n assert_series_equal(result, expected)\n\n result = 0 % s\n expected = Series([nan, 0.0])\n assert_series_equal(result, expected)\n\n def test_div(self):\n\n # no longer do integer div for any ops, but deal with the 0's\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] / p['second']\n expected = Series(p['first'].values.astype(float) / p['second'].values,\n dtype='float64')\n expected.iloc[0:3] = np.inf\n assert_series_equal(result, expected)\n\n result = p['first'] / 0\n expected = Series(np.inf, index=p.index, name='first')\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] / p['second']\n expected = Series(p['first'].values / p['second'].values)\n assert_series_equal(result, expected)\n\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})\n result = p['first'] / p['second']\n assert_series_equal(result, p['first'].astype('float64'), check_names=False)\n self.assertTrue(result.name is None)\n self.assertFalse(np.array_equal(result, p['second'] / p['first']))\n\n # inf signing\n s = Series([np.nan,1.,-1.])\n result = s / 0\n expected = Series([np.nan,np.inf,-np.inf])\n assert_series_equal(result, expected)\n\n # float/integer issue\n # GH 7785\n p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})\n expected = Series([-0.01,-np.inf])\n\n result = p['second'].div(p['first'])\n assert_series_equal(result, expected, check_names=False)\n\n result = p['second'] / p['first']\n assert_series_equal(result, expected)\n\n # GH 9144\n s = Series([-1, 0, 1])\n\n result = 0 / s\n expected = Series([0.0, nan, 0.0])\n assert_series_equal(result, expected)\n\n result = s / 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n result = s // 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n def test_operators(self):\n\n def _check_op(series, other, op, pos_only=False):\n left = np.abs(series) if pos_only else series\n right = np.abs(other) if pos_only else other\n\n cython_or_numpy = op(left, right)\n python = left.combine(right, op)\n tm.assert_almost_equal(cython_or_numpy, python)\n\n def check(series, other):\n simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']\n\n for opname in simple_ops:\n _check_op(series, other, getattr(operator, opname))\n\n _check_op(series, other, operator.pow, pos_only=True)\n\n _check_op(series, other, lambda x, y: operator.add(y, x))\n _check_op(series, other, lambda x, y: operator.sub(y, x))\n _check_op(series, other, lambda x, y: operator.truediv(y, x))\n _check_op(series, other, lambda x, y: operator.floordiv(y, x))\n _check_op(series, other, lambda x, y: operator.mul(y, x))\n _check_op(series, other, lambda x, y: operator.pow(y, x),\n pos_only=True)\n _check_op(series, other, lambda x, y: operator.mod(y, x))\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts * 0)\n 
check(self.ts, self.ts[::2])\n check(self.ts, 5)\n\n def check_comparators(series, other):\n _check_op(series, other, operator.gt)\n _check_op(series, other, operator.ge)\n _check_op(series, other, operator.eq)\n _check_op(series, other, operator.lt)\n _check_op(series, other, operator.le)\n\n check_comparators(self.ts, 5)\n check_comparators(self.ts, self.ts + 1)\n\n def test_operators_empty_int_corner(self):\n s1 = Series([], [], dtype=np.int32)\n s2 = Series({'x': 0.})\n tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))\n\n def test_constructor_dtype_timedelta64(self):\n\n # basic\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # mixed with NaT\n from pandas import tslib\n td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # improved inference\n # GH5689\n td = Series([np.timedelta64(300000000), pd.NaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), tslib.iNaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), np.nan])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([pd.NaT, np.timedelta64(300000000)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # these are frequency conversion astypes\n #for t in ['s', 'D', 'us', 'ms']:\n # self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)\n\n # valid astype\n td.astype('int64')\n\n # invalid casting\n self.assertRaises(TypeError, td.astype, 'int32')\n\n # this is an invalid casting\n def f():\n Series([timedelta(days=1), 'foo'],dtype='m8[ns]')\n self.assertRaises(Exception, f)\n\n # leave as object here\n td = Series([timedelta(days=i) for i in range(3)] + ['foo'])\n self.assertEqual(td.dtype, 'object')\n\n # these will correctly infer a timedelta\n s = Series([None, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([np.nan, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([pd.NaT, None, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([pd.NaT, np.nan, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n\n def test_operators_timedelta64(self):\n\n # invalid ops\n self.assertRaises(Exception, self.objSeries.__add__, 1)\n self.assertRaises(\n Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))\n self.assertRaises(Exception, self.objSeries.__sub__, 1)\n self.assertRaises(\n Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))\n\n # seriese ops\n v1 = date_range('2012-1-1', periods=3, freq='D')\n v2 = date_range('2012-1-2', periods=3, freq='D')\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24, rs.index).astype(\n 'int64').astype('timedelta64[ns]')\n assert_series_equal(rs, xp)\n self.assertEqual(rs.dtype, 'timedelta64[ns]')\n\n df = DataFrame(dict(A=v1))\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # series on the rhs\n 
result = df['A'] - df['A'].shift()\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n result = df['A'] + td\n self.assertEqual(result.dtype, 'M8[ns]')\n\n # scalar Timestamp on rhs\n maxa = df['A'].max()\n tm.assertIsInstance(maxa, Timestamp)\n\n resultb = df['A'] - df['A'].max()\n self.assertEqual(resultb.dtype, 'timedelta64[ns]')\n\n # timestamp on lhs\n result = resultb + df['A']\n values = [Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')]\n expected = Series(values, name='A')\n assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df['A'] - datetime(2001, 1, 1)\n expected = Series([timedelta(days=4017 + i) for i in range(3)], name='A')\n assert_series_equal(result, expected)\n self.assertEqual(result.dtype, 'm8[ns]')\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df['A'] - d\n self.assertEqual(resulta.dtype, 'm8[ns]')\n\n # roundtrip\n resultb = resulta + d\n assert_series_equal(df['A'], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(resultb, df['A'])\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(df['A'], resultb)\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))\n self.assertEqual(rs[2], value)\n\n def test_timedeltas_with_DateOffset(self):\n\n # GH 4532\n # operate with pd.offsets\n s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])\n\n result = s + pd.offsets.Second(5)\n result2 = pd.offsets.Second(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])\n assert_series_equal(result, expected)\n\n # operate with np.timedelta64 correctly\n result = s + np.timedelta64(1, 's')\n result2 = np.timedelta64(1, 's') + s\n expected = Series(\n [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + np.timedelta64(5, 'ms')\n result2 = np.timedelta64(5, 'ms') + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # valid DateOffsets\n for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',\n 'Milli', 'Nano' ]:\n op = getattr(pd.offsets,do)\n s + op(5)\n op(5) + s\n\n\n def test_timedelta64_operations_with_DateOffset(self):\n # GH 10699\n td = Series([timedelta(minutes=5, seconds=3)] * 3)\n result = td + pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=6, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td - pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=4, seconds=3)] * 3)\n assert_series_equal(result, expected)\n\n result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),\n pd.offsets.Hour(2)])\n expected 
= Series([timedelta(minutes=6, seconds=3),\n timedelta(minutes=5, seconds=6),\n timedelta(hours=2, minutes=5, seconds=3)])\n assert_series_equal(result, expected)\n\n result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)\n expected = Series([timedelta(minutes=6, seconds=15)] * 3)\n assert_series_equal(result, expected)\n\n # valid DateOffsets\n for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',\n 'Milli', 'Nano' ]:\n op = getattr(pd.offsets,do)\n td + op(5)\n op(5) + td\n td - op(5)\n op(5) - td\n\n def test_timedelta64_operations_with_timedeltas(self):\n\n # td operate with td\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td2 = timedelta(minutes=5, seconds=4)\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n # Now again, using pd.to_timedelta, which should build\n # a Series or a scalar, depending on input.\n td1 = Series(pd.to_timedelta(['00:05:03'] * 3))\n td2 = pd.to_timedelta('00:05:04')\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n def test_timedelta64_operations_with_integers(self):\n\n # GH 4521\n # divide/multiply by integers\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n s2 = Series([2, 3, 4])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n result = s1 / 2\n expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:\n s2 = Series([20, 30, 40],dtype=dtype)\n expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n result = s1 * 2\n expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n result = s1 * -1\n expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n # invalid ops\n for op in ['__true_div__','__div__','__mul__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, sop, s2.astype(float))\n self.assertRaises(TypeError, sop, 2.)\n\n for op in ['__add__','__sub__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, 
sop, 1)\n self.assertRaises(TypeError, sop, s2.values)\n\n def test_timedelta64_conversions(self):\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n\n for m in [1, 3, 10]:\n for unit in ['D','h','m','s','ms','us','ns']:\n\n # op\n expected = s1.apply(lambda x: x / np.timedelta64(m,unit))\n result = s1 / np.timedelta64(m,unit)\n assert_series_equal(result, expected)\n\n if m == 1 and unit != 'ns':\n\n # astype\n result = s1.astype(\"timedelta64[{0}]\".format(unit))\n assert_series_equal(result, expected)\n\n # reverse op\n expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)\n result = np.timedelta64(m,unit) / s1\n\n # astype\n s = Series(date_range('20130101',periods=3))\n result = s.astype(object)\n self.assertIsInstance(result.iloc[0],datetime)\n self.assertTrue(result.dtype == np.object_)\n\n result = s1.astype(object)\n self.assertIsInstance(result.iloc[0],timedelta)\n self.assertTrue(result.dtype == np.object_)\n\n def test_timedelta64_equal_timedelta_supported_ops(self):\n ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),\n Timestamp('20130228 22:00:00'),\n Timestamp('20130228 21:00:00')])\n\n intervals = 'D', 'h', 'm', 's', 'us'\n npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,\n 'm': 60 * 1000000, 's': 1000000, 'us': 1}\n\n def timedelta64(*args):\n return sum(starmap(np.timedelta64, zip(args, intervals)))\n\n for op, d, h, m, s, us in product([operator.add, operator.sub],\n *([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,\n microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n try:\n assert_series_equal(lhs, rhs)\n except:\n raise AssertionError(\n \"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\\n{6}\\n{7}\\n\".format(op, d, h, m, s, us, lhs, rhs))\n\n def test_timedelta_assignment(self):\n # GH 8209\n s = Series([])\n s.loc['B'] = timedelta(1)\n tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))\n\n s = s.reindex(s.index.insert(0, 'A'))\n tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))\n\n result = s.fillna(timedelta(1))\n expected = Series(Timedelta('1 days'),index=['A','B'])\n tm.assert_series_equal(result, expected)\n\n s.loc['A'] = timedelta(1)\n tm.assert_series_equal(s, expected)\n\n def test_operators_datetimelike(self):\n\n def run_ops(ops, get_ser, test_ser):\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not defined\n for op_str in ops:\n op = getattr(get_ser, op_str, None)\n with tm.assertRaisesRegexp(TypeError, 'operate'):\n op(test_ser)\n\n ### timedelta64 ###\n td1 = Series([timedelta(minutes=5,seconds=3)]*3)\n td1.iloc[2] = np.nan\n td2 = timedelta(minutes=5,seconds=4)\n ops = ['__mul__','__floordiv__','__pow__',\n '__rmul__','__rfloordiv__','__rpow__']\n run_ops(ops, td1, td2)\n td1 + td2\n td2 + td1\n td1 - td2\n td2 - td1\n td1 / td2\n td2 / td1\n\n ### datetime64 ###\n dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),\n Timestamp('20120103')])\n dt1.iloc[2] = np.nan\n dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),\n Timestamp('20120104')])\n ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__radd__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, dt1, dt2)\n dt1 - dt2\n dt2 - dt1\n\n ### datetime64 with 
timetimedelta ###\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n run_ops(ops, dt1, td1)\n dt1 + td1\n td1 + dt1\n dt1 - td1\n # TODO: Decide if this ought to work.\n # td1 - dt1\n\n ### timetimedelta with datetime64 ###\n ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__rsub__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, td1, dt1)\n td1 + dt1\n dt1 + td1\n\n # 8260, 10763\n # datetime64 with tz\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n dt1 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(timedelta_range('1 days 1 min',periods=5, freq='H'))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n run_ops(ops, dt1, td1)\n\n result = dt1 + td1[0]\n expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 + td2[0]\n expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n # odd numpy behavior with scalar timedeltas\n if not _np_version_under1p8:\n result = td1[0] + dt1\n expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = td2[0] + dt2\n expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt1 - td1[0]\n expected = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n self.assertRaises(TypeError, lambda: td1[0] - dt1)\n\n result = dt2 - td2[0]\n expected = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n self.assertRaises(TypeError, lambda: td2[0] - dt2)\n\n result = dt1 + td1\n expected = (dt1.dt.tz_localize(None) + td1).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 + td2\n expected = (dt2.dt.tz_localize(None) + td2).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt1 - td1\n expected = (dt1.dt.tz_localize(None) - td1).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n result = dt2 - td2\n expected = (dt2.dt.tz_localize(None) - td2).dt.tz_localize('US/Eastern')\n assert_series_equal(result, expected)\n\n self.assertRaises(TypeError, lambda: td1 - dt1)\n self.assertRaises(TypeError, lambda: td2 - dt2)\n\n def test_ops_datetimelike_align(self):\n # GH 7500\n # datetimelike ops need to align\n dt = Series(date_range('2012-1-1', periods=3, freq='D'))\n dt.iloc[2] = np.nan\n dt2 = dt[::-1]\n\n expected = Series([timedelta(0), timedelta(0), pd.NaT])\n # name is reset\n result = dt2 - dt\n assert_series_equal(result, expected)\n\n expected = Series(expected, name=0)\n result = (dt2.to_frame() - dt.to_frame())[0]\n assert_series_equal(result, expected)\n\n def test_timedelta64_functions(self):\n\n from datetime import timedelta\n from pandas import date_range\n\n # index min/max\n td = Series(date_range('2012-1-1', periods=3, freq='D')) - \\\n Timestamp('20120101')\n\n result = td.idxmin()\n self.assertEqual(result, 0)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n # GH 2982\n # with NaT\n td[0] = np.nan\n\n result = td.idxmin()\n 
self.assertEqual(result, 1)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n # abs\n s1 = Series(date_range('20120101', periods=3))\n s2 = Series(date_range('20120102', periods=3))\n expected = Series(s2 - s1)\n\n # this fails as numpy returns timedelta64[us]\n #result = np.abs(s1-s2)\n # assert_frame_equal(result,expected)\n\n result = (s1 - s2).abs()\n assert_series_equal(result, expected)\n\n # max/min\n result = td.max()\n expected = Timedelta('2 days')\n self.assertEqual(result, expected)\n\n result = td.min()\n expected = Timedelta('1 days')\n self.assertEqual(result, expected)\n\n def test_ops_consistency_on_empty(self):\n\n # GH 7869\n # consistency on empty\n\n # float\n result = Series(dtype=float).sum()\n self.assertEqual(result,0)\n\n result = Series(dtype=float).mean()\n self.assertTrue(isnull(result))\n\n result = Series(dtype=float).median()\n self.assertTrue(isnull(result))\n\n # timedelta64[ns]\n result = Series(dtype='m8[ns]').sum()\n self.assertEqual(result, Timedelta(0))\n\n result = Series(dtype='m8[ns]').mean()\n self.assertTrue(result is pd.NaT)\n\n result = Series(dtype='m8[ns]').median()\n self.assertTrue(result is pd.NaT)\n\n def test_timedelta_fillna(self):\n #GH 3371\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n td = s.diff()\n\n # reg fillna\n result = td.fillna(0)\n expected = Series([timedelta(0), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n # interprested as seconds\n result = td.fillna(1)\n expected = Series([timedelta(seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(timedelta(days=1, seconds=1))\n expected = Series([timedelta(days=1, seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(np.timedelta64(int(1e9)))\n expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = td.fillna(tslib.NaT)\n expected = Series([tslib.NaT, timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')\n assert_series_equal(result, expected)\n\n # ffill\n td[2] = np.nan\n result = td.ffill()\n expected = td.fillna(0)\n expected[0] = np.nan\n assert_series_equal(result, expected)\n\n # bfill\n td[2] = np.nan\n result = td.bfill()\n expected = td.fillna(0)\n expected[2] = timedelta(days=1, seconds=9*3600+60+1)\n assert_series_equal(result, expected)\n\n def test_datetime64_fillna(self):\n\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n s[2] = np.nan\n\n # reg fillna\n result = s.fillna(Timestamp('20130104'))\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130104'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = s.fillna(tslib.NaT)\n expected = s\n assert_series_equal(result, expected)\n\n # ffill\n result = s.ffill()\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130101'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # bfill\n result = s.bfill()\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130103 9:01:01'),\n 
Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # GH 6587\n # make sure that we are treating as integer when filling\n # this also tests inference of a datetime-like with NaT's\n s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])\n expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')\n result = s.fillna(method='backfill')\n assert_series_equal(result, expected)\n\n def test_datetime64_tz_fillna(self):\n for tz in ['US/Eastern', 'Asia/Tokyo']:\n # DatetimeBlock\n s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,\n Timestamp('2011-01-03 10:00'), pd.NaT])\n result = s.fillna(pd.Timestamp('2011-01-02 10:00'))\n expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz)])\n self.assert_series_equal(expected, result)\n\n result = s.fillna('AAA')\n expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',\n Timestamp('2011-01-03 10:00'), 'AAA'], dtype=object)\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00'),\n Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n # DatetimeBlockTZ\n idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,\n '2011-01-03 10:00', pd.NaT], tz=tz)\n s = pd.Series(idx)\n result = s.fillna(pd.Timestamp('2011-01-02 10:00'))\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00'),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-02 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))\n idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',\n '2011-01-03 10:00', '2011-01-02 10:00'],\n tz=tz)\n expected = Series(idx)\n self.assert_series_equal(expected, result)\n\n result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz).to_pydatetime())\n idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',\n '2011-01-03 10:00', '2011-01-02 10:00'],\n tz=tz)\n expected = Series(idx)\n self.assert_series_equal(expected, result)\n\n result = s.fillna('AAA')\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',\n Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],\n dtype=object)\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00')})\n expected = Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-04 10:00')])\n self.assert_series_equal(expected, result)\n\n result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),\n 3: pd.Timestamp('2011-01-04 10:00', tz=tz)})\n expected 
= Series([Timestamp('2011-01-01 10:00', tz=tz),\n Timestamp('2011-01-02 10:00', tz=tz),\n Timestamp('2011-01-03 10:00', tz=tz),\n Timestamp('2011-01-04 10:00', tz=tz)])\n self.assert_series_equal(expected, result)\n\n def test_fillna_int(self):\n s = Series(np.random.randint(-100, 100, 50))\n s.fillna(method='ffill', inplace=True)\n assert_series_equal(s.fillna(method='ffill', inplace=False), s)\n\n def test_fillna_raise(self):\n s = Series(np.random.randint(-100, 100, 50))\n self.assertRaises(TypeError, s.fillna, [1, 2])\n self.assertRaises(TypeError, s.fillna, (1, 2))\n\n def test_raise_on_info(self):\n s = Series(np.random.randn(10))\n with tm.assertRaises(AttributeError):\n s.info()\n\n def test_isnull_for_inf(self):\n s = Series(['a', np.inf, np.nan, 1.0])\n with pd.option_context('mode.use_inf_as_null', True):\n r = s.isnull()\n dr = s.dropna()\n e = Series([False, True, True, False])\n de = Series(['a', 1.0], index=[0, 3])\n tm.assert_series_equal(r, e)\n tm.assert_series_equal(dr, de)\n\n\n# TimeSeries-specific\n\n def test_fillna(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n\n self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))\n\n ts[2] = np.NaN\n\n self.assert_numpy_array_equal(ts.fillna(method='ffill'),\n [0., 1., 1., 3., 4.])\n self.assert_numpy_array_equal(ts.fillna(method='backfill'),\n [0., 1., 3., 3., 4.])\n\n self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])\n\n self.assertRaises(ValueError, ts.fillna)\n self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')\n\n # GH 5703\n s1 = Series([np.nan])\n s2 = Series([1])\n result = s1.fillna(s2)\n expected = Series([1.])\n assert_series_equal(result,expected)\n result = s1.fillna({})\n assert_series_equal(result,s1)\n result = s1.fillna(Series(()))\n assert_series_equal(result,s1)\n result = s2.fillna(s1)\n assert_series_equal(result,s2)\n result = s1.fillna({ 0 : 1})\n assert_series_equal(result,expected)\n result = s1.fillna({ 1 : 1})\n assert_series_equal(result,Series([np.nan]))\n result = s1.fillna({ 0 : 1, 1 : 1})\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1}))\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))\n assert_series_equal(result,s1)\n\n s1 = Series([0, 1, 2], list('abc'))\n s2 = Series([0, np.nan, 2], list('bac'))\n result = s2.fillna(s1)\n expected = Series([0,0,2.], list('bac'))\n assert_series_equal(result,expected)\n\n # limit\n s = Series(np.nan,index=[0,1,2])\n result = s.fillna(999,limit=1)\n expected = Series([999,np.nan,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n result = s.fillna(999,limit=2)\n expected = Series([999,999,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n # GH 9043\n # make sure a string representation of int/float values can be filled\n # correctly without raising errors or being converted\n vals = ['0', '1.5', '-0.3']\n for val in vals:\n s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')\n result = s.fillna(val)\n expected = Series([0, 1, val, val, 4], dtype='object')\n assert_series_equal(result, expected)\n\n def test_fillna_bug(self):\n x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])\n filled = x.fillna(method='ffill')\n expected = Series([nan, 1., 1., 3., 3.], x.index)\n assert_series_equal(filled, expected)\n\n filled = x.fillna(method='bfill')\n expected = Series([1., 1., 3., 3., nan], x.index)\n assert_series_equal(filled, expected)\n\n def test_fillna_inplace(self):\n 
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])\n y = x.copy()\n\n y.fillna(value=0, inplace=True)\n\n expected = x.fillna(value=0)\n assert_series_equal(y, expected)\n\n def test_fillna_invalid_method(self):\n try:\n self.ts.fillna(method='ffil')\n except ValueError as inst:\n self.assertIn('ffil', str(inst))\n\n def test_ffill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))\n\n def test_bfill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))\n\n def test_sub_of_datetime_from_TimeSeries(self):\n from pandas.tseries.timedeltas import to_timedelta\n from datetime import datetime\n a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))\n b = datetime(1993, 6, 22, 13, 30)\n a = Series([a])\n result = to_timedelta(np.abs(a - b))\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n def test_datetime64_with_index(self):\n\n # arithmetic integer ops with an index\n s = Series(np.random.randn(5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n # GH 4629\n # arithmetic datetime64 ops with an index\n s = Series(date_range('20130101', periods=5),\n index=date_range('20130101', periods=5))\n expected = s - s.index.to_series()\n result = s - s.index\n assert_series_equal(result, expected)\n\n result = s - s.index.to_period()\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(5,2),\n index=date_range('20130101', periods=5))\n df['date'] = Timestamp('20130102')\n df['expected'] = df['date'] - df.index.to_series()\n df['result'] = df['date'] - df.index\n assert_series_equal(df['result'], df['expected'], check_names=False)\n\n def test_timedelta64_nan(self):\n\n from pandas import tslib\n td = Series([timedelta(days=i) for i in range(10)])\n\n # nan ops on timedeltas\n td1 = td.copy()\n td1[0] = np.nan\n self.assertTrue(isnull(td1[0]))\n self.assertEqual(td1[0].value, tslib.iNaT)\n td1[0] = td[0]\n self.assertFalse(isnull(td1[0]))\n\n td1[1] = tslib.iNaT\n self.assertTrue(isnull(td1[1]))\n self.assertEqual(td1[1].value, tslib.iNaT)\n td1[1] = td[1]\n self.assertFalse(isnull(td1[1]))\n\n td1[2] = tslib.NaT\n self.assertTrue(isnull(td1[2]))\n self.assertEqual(td1[2].value, tslib.iNaT)\n td1[2] = td[2]\n self.assertFalse(isnull(td1[2]))\n\n # boolean setting\n # this doesn't work, not sure numpy even supports it\n #result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan\n #self.assertEqual(isnull(result).sum(), 7)\n\n # NumPy limitiation =(\n\n # def test_logical_range_select(self):\n # np.random.seed(12345)\n # selector = -0.5 <= self.ts <= 0.5\n # expected = (self.ts >= -0.5) & (self.ts <= 0.5)\n # assert_series_equal(selector, expected)\n\n def test_operators_na_handling(self):\n from decimal import Decimal\n from datetime import date\n s = Series([Decimal('1.3'), Decimal('2.3')],\n index=[date(2012, 1, 1), date(2012, 1, 2)])\n\n result = s + s.shift(1)\n result2 = s.shift(1) + s\n self.assertTrue(isnull(result[0]))\n self.assertTrue(isnull(result2[0]))\n\n s = Series(['foo', 'bar', 'baz', np.nan])\n result = 'prefix_' + s\n expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])\n assert_series_equal(result, expected)\n\n result = s + '_suffix'\n expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])\n assert_series_equal(result, expected)\n\n 
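# A minimal standalone sketch (not part of the test class) of the NaN comparison
# semantics exercised by test_object_comparisons and test_more_na_comparisons
# below: element-wise equality against a missing value evaluates to False and
# inequality to True. This assumes the same pandas behaviour the surrounding
# tests assert; the names below are illustrative only.
import numpy as np
import pandas as pd

s = pd.Series(['a', np.nan, 'c'])
print((s == 'a').tolist())     # [True, False, False] -- NaN never equals anything
print((s != 'a').tolist())     # [False, True, True]
print((s == np.nan).tolist())  # [False, False, False] -- even NaN == NaN is False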
def test_object_comparisons(self):\n s = Series(['a', 'b', np.nan, 'c', 'a'])\n\n result = s == 'a'\n expected = Series([True, False, False, False, True])\n assert_series_equal(result, expected)\n\n result = s < 'a'\n expected = Series([False, False, False, False, False])\n assert_series_equal(result, expected)\n\n result = s != 'a'\n expected = -(s == 'a')\n assert_series_equal(result, expected)\n\n def test_comparison_operators_with_nas(self):\n s = Series(bdate_range('1/1/2000', periods=10), dtype=object)\n s[::2] = np.nan\n\n # test that comparisons work\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n val = s[5]\n\n f = getattr(operator, op)\n result = f(s, val)\n\n expected = f(s.dropna(), val).reindex(s.index)\n\n if op == 'ne':\n expected = expected.fillna(True).astype(bool)\n else:\n expected = expected.fillna(False).astype(bool)\n\n assert_series_equal(result, expected)\n\n # fffffffuuuuuuuuuuuu\n # result = f(val, s)\n # expected = f(val, s.dropna()).reindex(s.index)\n # assert_series_equal(result, expected)\n\n # boolean &, |, ^ should work with object arrays and propagate NAs\n\n ops = ['and_', 'or_', 'xor']\n mask = s.isnull()\n for bool_op in ops:\n f = getattr(operator, bool_op)\n\n filled = s.fillna(s[0])\n\n result = f(s < s[9], s > s[3])\n\n expected = f(filled < filled[9], filled > filled[3])\n expected[mask] = False\n assert_series_equal(result, expected)\n\n def test_comparison_object_numeric_nas(self):\n s = Series(np.random.randn(10), dtype=object)\n shifted = s.shift(2)\n\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n f = getattr(operator, op)\n\n result = f(s, shifted)\n expected = f(s.astype(float), shifted.astype(float))\n assert_series_equal(result, expected)\n\n def test_comparison_invalid(self):\n\n # GH4968\n # invalid date/int comparisons\n s = Series(range(5))\n s2 = Series(date_range('20010101', periods=5))\n\n for (x, y) in [(s,s2),(s2,s)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n def test_more_na_comparisons(self):\n left = Series(['a', np.nan, 'c'])\n right = Series(['a', np.nan, 'd'])\n\n result = left == right\n expected = Series([True, False, False])\n assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n assert_series_equal(result, expected)\n\n def test_comparison_different_length(self):\n a = Series(['a', 'b', 'c'])\n b = Series(['b', 'a'])\n self.assertRaises(ValueError, a.__lt__, b)\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n self.assertRaises(ValueError, a.__eq__, b)\n\n def test_comparison_label_based(self):\n\n # GH 4947\n # comparisons should be label based\n\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False], list('abc'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n assert_series_equal(result,expected)\n\n expected = Series([False, False, True], list('bca'))\n result = a ^ b\n assert_series_equal(result,expected)\n\n # 
rhs is bigger\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False, True], list('abcd'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n assert_series_equal(result,expected)\n\n # filling\n\n # vs empty\n result = a & Series([])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # vs non-matching\n result = a & Series([1],['z'])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([1],['z'])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # identity\n # we would like s[s|e] == s to hold for any e, whether empty or not\n for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:\n result = a[a | e]\n assert_series_equal(result,a[a])\n\n # vs scalars\n index = list('bca')\n t = Series([True,False,True])\n\n for v in [True,1,2]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,True,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [np.nan,'foo']:\n self.assertRaises(TypeError, lambda : t | v)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [True,1]:\n result = Series([True,False,True],index=index) & v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) & v\n expected = Series([False,False,False],index=index)\n assert_series_equal(result,expected)\n for v in [np.nan]:\n self.assertRaises(TypeError, lambda : t & v)\n\n def test_operators_bitwise(self):\n # GH 9016: support bitwise op for integer types\n index = list('bca')\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n s_tff = Series([True, False, False], index=index)\n s_empty = Series([])\n s_0101 = Series([0,1,0,1])\n s_0123 = Series(range(4),dtype='int64')\n s_3333 = Series([3] * 4)\n s_4444 = Series([4] * 4)\n\n res = s_tft & s_empty\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_tft | s_empty\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & s_3333\n expected = Series(range(4),dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123 | s_4444\n expected = Series(range(4, 8),dtype='int64')\n assert_series_equal(res, expected)\n\n s_a0b1c0 = Series([1], list('b'))\n\n res = s_tft & s_a0b1c0\n expected = s_tff\n assert_series_equal(res, expected)\n\n res = s_tft | s_a0b1c0\n expected = s_tft\n assert_series_equal(res, expected)\n\n n0 = 0\n res = s_tft & n0\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_0123 & n0\n expected = Series([0] * 4)\n assert_series_equal(res, expected)\n\n n1 = 1\n res = s_tft & n1\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & n1\n expected = Series([0, 1, 0, 1])\n assert_series_equal(res, expected)\n\n s_1111 = Series([1]*4, dtype='int8')\n res = s_0123 & s_1111\n expected = Series([0, 1, 0, 1], dtype='int64')\n assert_series_equal(res, expected)\n\n res = 
s_0123.astype(np.int16) | s_1111.astype(np.int32)\n expected = Series([1, 1, 3, 3], dtype='int32')\n assert_series_equal(res, expected)\n\n self.assertRaises(TypeError, lambda: s_1111 & 'a')\n self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])\n self.assertRaises(TypeError, lambda: s_0123 & np.NaN)\n self.assertRaises(TypeError, lambda: s_0123 & 3.14)\n self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])\n\n # s_0123 will be all false now because of reindexing like s_tft\n assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))\n # s_tft will be all false now because of reindexing like s_0123\n assert_series_equal(s_0123 & s_tft, Series([False] * 4))\n assert_series_equal(s_0123 & False, Series([False] * 4))\n assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))\n assert_series_equal(s_0123 & [False], Series([False] * 4))\n assert_series_equal(s_0123 & (False), Series([False] * 4))\n assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))\n\n s_ftft = Series([False, True, False, True])\n assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)\n\n s_abNd = Series(['a','b',np.NaN,'d'])\n res = s_0123 & s_abNd\n expected = s_ftft\n assert_series_equal(res, expected)\n\n def test_between(self):\n s = Series(bdate_range('1/1/2000', periods=20).asobject)\n s[::2] = np.nan\n\n result = s[s.between(s[3], s[17])]\n expected = s[3:18].dropna()\n assert_series_equal(result, expected)\n\n result = s[s.between(s[3], s[17], inclusive=False)]\n expected = s[5:16].dropna()\n assert_series_equal(result, expected)\n\n def test_setitem_na(self):\n # these induce dtype changes\n expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n # get's coerced to float, right?\n expected = Series([np.nan, 1, np.nan, 0])\n s = Series([True, True, False, False])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])\n s = Series(np.arange(10))\n s[:5] = np.nan\n assert_series_equal(s, expected)\n\n def test_scalar_na_cmp_corners(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def tester(a, b):\n return a & b\n\n self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))\n\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n\n expected = Series(True,index=s.index)\n expected[::2] = False\n assert_series_equal(tester(s, list(s)), expected)\n\n d = DataFrame({'A': s})\n # TODO: Fix this exception - needs to be fixed! 
(see GH5035)\n # (previously this was a TypeError because series returned\n # NotImplemented\n self.assertRaises(ValueError, tester, s, d)\n\n def test_idxmin(self):\n # test idxmin\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmin()], self.series.min())\n self.assertTrue(isnull(self.series.idxmin(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmin()], nona.min())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),\n nona.values.argmin())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmin()))\n\n # datetime64[ns]\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmin()\n self.assertEqual(result, 0)\n\n s[0] = np.nan\n result = s.idxmin()\n self.assertEqual(result, 1)\n\n def test_idxmax(self):\n # test idxmax\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmax()], self.series.max())\n self.assertTrue(isnull(self.series.idxmax(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmax()], nona.max())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),\n nona.values.argmax())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmax()))\n\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmax()\n self.assertEqual(result, 5)\n\n s[5] = np.nan\n result = s.idxmax()\n self.assertEqual(result, 4)\n\n # Float64Index\n # GH 5914\n s = pd.Series([1,2,3],[1.1,2.1,3.1])\n result = s.idxmax()\n self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n s = pd.Series(s.index, s.index)\n result = s.idxmax()\n self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n def test_ndarray_compat(self):\n\n # test numpy compat with Series as sub-class of NDFrame\n tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],\n index=date_range('1/1/2000', periods=1000))\n\n def f(x):\n return x[x.argmax()]\n result = tsdf.apply(f)\n expected = tsdf.max()\n assert_series_equal(result,expected)\n\n # .item()\n s = Series([1])\n result = s.item()\n self.assertEqual(result, 1)\n self.assertEqual(s.item(), s.iloc[0])\n\n # using an ndarray like function\n s = Series(np.random.randn(10))\n result = np.ones_like(s)\n expected = Series(1,index=range(10),dtype='float64')\n #assert_series_equal(result,expected)\n\n # ravel\n s = Series(np.random.randn(10))\n tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))\n\n # compress\n # GH 6658\n s = Series([0,1.,-1],index=list('abc'))\n result = np.compress(s>0,s)\n assert_series_equal(result, Series([1.],index=['b']))\n\n result = np.compress(s<-1,s)\n assert_series_equal(result, Series([],dtype='float64'))\n\n def test_complexx(self):\n\n # GH4819\n # complex access for ndarray compat\n a = np.arange(5)\n b = Series(a + 4j*a)\n tm.assert_almost_equal(a,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n b.real = np.arange(5)+5\n tm.assert_almost_equal(a+5,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n def test_underlying_data_conversion(self):\n\n # GH 4080\n df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))\n df.set_index(['a', 'b', 'c'], inplace=True)\n s = 
Series([1], index=[(2,2,2)])\n df['val'] = 0\n df\n df['val'].update(s)\n\n expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))\n expected.set_index(['a', 'b', 'c'], inplace=True)\n tm.assert_frame_equal(df,expected)\n\n # GH 3970\n # these are chained assignments as well\n pd.set_option('chained_assignment',None)\n df = DataFrame({ \"aa\":range(5), \"bb\":[2.2]*5})\n df[\"cc\"] = 0.0\n ck = [True]*len(df)\n df[\"bb\"].iloc[0] = .13\n df_tmp = df.iloc[ck]\n df[\"bb\"].iloc[0] = .15\n self.assertEqual(df['bb'].iloc[0], 0.15)\n pd.set_option('chained_assignment','raise')\n\n # GH 3217\n df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))\n df['c'] = np.nan\n df['c'].update(pd.Series(['foo'],index=[0]))\n\n expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))\n tm.assert_frame_equal(df,expected)\n\n def test_operators_corner(self):\n series = self.ts\n\n empty = Series([], index=Index([]))\n\n result = series + empty\n self.assertTrue(np.isnan(result).all())\n\n result = empty + Series([], index=Index([]))\n self.assertEqual(len(result), 0)\n\n # TODO: this returned NotImplemented earlier, what to do?\n # deltas = Series([timedelta(1)] * 5, index=np.arange(5))\n # sub_deltas = deltas[::2]\n # deltas5 = deltas * 5\n # deltas = deltas + sub_deltas\n\n # float + int\n int_ts = self.ts.astype(int)[:-5]\n added = self.ts + int_ts\n expected = self.ts.values[:-5] + int_ts.values\n self.assert_numpy_array_equal(added[:-5], expected)\n\n def test_operators_reverse_object(self):\n # GH 56\n arr = Series(np.random.randn(10), index=np.arange(10),\n dtype=object)\n\n def _check_op(arr, op):\n result = op(1., arr)\n expected = op(1., arr.astype(float))\n assert_series_equal(result.astype(float), expected)\n\n _check_op(arr, operator.add)\n _check_op(arr, operator.sub)\n _check_op(arr, operator.mul)\n _check_op(arr, operator.truediv)\n _check_op(arr, operator.floordiv)\n\n def test_series_frame_radd_bug(self):\n import operator\n\n # GH 353\n vals = Series(tm.rands_array(5, 10))\n result = 'foo_' + vals\n expected = vals.map(lambda x: 'foo_' + x)\n assert_series_equal(result, expected)\n\n frame = DataFrame({'vals': vals})\n result = 'foo_' + frame\n expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})\n tm.assert_frame_equal(result, expected)\n\n # really raise this time\n self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)\n\n def test_operators_frame(self):\n # rpow does not work with DataFrame\n df = DataFrame({'A': self.ts})\n\n tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A'])\n tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A'])\n tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A'])\n tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])\n\n def test_operators_combine(self):\n def _check_fill(meth, op, a, b, fill_value=0):\n exp_index = a.index.union(b.index)\n a = a.reindex(exp_index)\n b = b.reindex(exp_index)\n\n amask = isnull(a)\n bmask = isnull(b)\n\n exp_values = []\n for i in range(len(exp_index)):\n if amask[i]:\n if bmask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(fill_value, b[i]))\n elif bmask[i]:\n if amask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(a[i], fill_value))\n else:\n exp_values.append(op(a[i], b[i]))\n\n result = meth(a, b, fill_value=fill_value)\n expected = Series(exp_values, exp_index)\n assert_series_equal(result, expected)\n\n a = Series([nan, 1., 2., 3., nan], index=np.arange(5))\n b = Series([nan, 1, nan, 3, nan, 4.], 
index=np.arange(6))\n\n pairings = []\n for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:\n fv = 0\n lop = getattr(Series, op)\n lequiv = getattr(operator, op)\n rop = getattr(Series, 'r' + op)\n # bind op at definition time...\n requiv = lambda x, y, op=op: getattr(operator, op)(y, x)\n pairings.append((lop, lequiv, fv))\n pairings.append((rop, requiv, fv))\n\n if compat.PY3:\n pairings.append((Series.div, operator.truediv, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))\n else:\n pairings.append((Series.div, operator.div, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))\n\n for op, equiv_op, fv in pairings:\n result = op(a, b)\n exp = equiv_op(a, b)\n assert_series_equal(result, exp)\n _check_fill(op, equiv_op, a, b, fill_value=fv)\n # should accept axis=0 or axis='rows'\n op(a, b, axis=0)\n\n def test_combine_first(self):\n values = tm.makeIntIndex(20).values.astype(float)\n series = Series(values, index=tm.makeIntIndex(20))\n\n series_copy = series * 2\n series_copy[::2] = np.NaN\n\n # nothing used from the input\n combined = series.combine_first(series_copy)\n\n self.assert_numpy_array_equal(combined, series)\n\n # Holes filled from input\n combined = series_copy.combine_first(series)\n self.assertTrue(np.isfinite(combined).all())\n\n self.assert_numpy_array_equal(combined[::2], series[::2])\n self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])\n\n # mixed types\n index = tm.makeStringIndex(20)\n floats = Series(tm.randn(20), index=index)\n strings = Series(tm.makeStringIndex(10), index=index[::2])\n\n combined = strings.combine_first(floats)\n\n tm.assert_dict_equal(strings, combined, compare_keys=False)\n tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)\n\n # corner case\n s = Series([1., 2, 3], index=[0, 1, 2])\n result = s.combine_first(Series([], index=[]))\n assert_series_equal(s, result)\n\n def test_update(self):\n s = Series([1.5, nan, 3., 4., nan])\n s2 = Series([nan, 3.5, nan, 5.])\n s.update(s2)\n\n expected = Series([1.5, 3.5, 3., 5., np.nan])\n assert_series_equal(s, expected)\n\n # GH 3217\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df['c'] = np.nan\n\n # this will fail as long as series is a sub-class of ndarray\n # df['c'].update(Series(['foo'],index=[0])) #####\n\n def test_corr(self):\n tm._skip_if_no_scipy()\n\n import scipy.stats as stats\n\n # full overlap\n self.assertAlmostEqual(self.ts.corr(self.ts), 1)\n\n # partial overlap\n self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)\n\n self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.corr(cp)))\n\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n result = A.corr(B)\n expected, _ = stats.pearsonr(A, B)\n self.assertAlmostEqual(result, expected)\n\n def test_corr_rank(self):\n tm._skip_if_no_scipy()\n\n import scipy\n import scipy.stats as stats\n\n # kendall and spearman\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n A[-5:] = A[:5]\n result = A.corr(B, method='kendall')\n expected = stats.kendalltau(A, B)[0]\n self.assertAlmostEqual(result, expected)\n\n result = A.corr(B, method='spearman')\n expected = stats.spearmanr(A, B)[0]\n self.assertAlmostEqual(result, 
expected)\n\n # these methods got rewritten in 0.8\n if scipy.__version__ < LooseVersion('0.9'):\n raise nose.SkipTest(\"skipping corr rank because of scipy version \"\n \"{0}\".format(scipy.__version__))\n\n # results from R\n A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,\n 0.76910310, -0.06430576, -2.09704447, 0.40660407,\n -0.89926396, 0.94209606])\n B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,\n -0.01680292, 1.17258718, -1.06009347, -0.10222060,\n -0.89076239, 0.89372375])\n kexp = 0.4319297\n sexp = 0.5853767\n self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)\n self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)\n\n def test_cov(self):\n # full overlap\n self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)\n\n # partial overlap\n self.assertAlmostEqual(\n self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.cov(cp)))\n\n # min_periods\n self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))\n\n def test_copy(self):\n ts = self.ts.copy()\n\n ts[::2] = np.NaN\n\n # Did not modify original Series\n self.assertFalse(np.isnan(self.ts[0]))\n\n def test_count(self):\n self.assertEqual(self.ts.count(), len(self.ts))\n\n self.ts[::2] = np.NaN\n\n self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())\n\n mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])\n ts = Series(np.arange(len(mi)), index=mi)\n\n left = ts.count(level=1)\n right = Series([2, 3, 1], index=[1, 2, nan])\n assert_series_equal(left, right)\n\n ts.iloc[[0, 3, 5]] = nan\n assert_series_equal(ts.count(level=1), right - 1)\n\n def test_dtype(self):\n\n self.assertEqual(self.ts.dtype, np.dtype('float64'))\n self.assertEqual(self.ts.dtypes, np.dtype('float64'))\n self.assertEqual(self.ts.ftype, 'float64:dense')\n self.assertEqual(self.ts.ftypes, 'float64:dense')\n assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))\n assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))\n\n def test_dot(self):\n a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],\n columns=['p', 'q', 'r', 's']).T\n\n result = a.dot(b)\n expected = Series(np.dot(a.values, b.values),\n index=['1', '2', '3'])\n assert_series_equal(result, expected)\n\n # Check index alignment\n b2 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n assert_series_equal(result, expected)\n\n # Check ndarray argument\n result = a.dot(b.values)\n self.assertTrue(np.all(result == expected.values))\n assert_almost_equal(a.dot(b['2'].values), expected['2'])\n\n # Check series argument\n assert_almost_equal(a.dot(b['1']), expected['1'])\n assert_almost_equal(a.dot(b2['1']), expected['1'])\n\n self.assertRaises(Exception, a.dot, a.values[:3])\n self.assertRaises(ValueError, a.dot, b.T)\n\n def test_value_counts_nunique(self):\n\n # basics.rst doc example\n series = Series(np.random.randn(500))\n series[20:500] = np.nan\n series[10:20] = 5000\n result = series.nunique()\n self.assertEqual(result, 11)\n\n def test_unique(self):\n\n # 714 also, dtype=float\n s = Series([1.2345] * 100)\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n s = 
Series([1.2345] * 100, dtype='f4')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # NAs in object arrays #714\n s = Series(['foo'] * 100, dtype='O')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # decision about None\n s = Series([1, 2, 3, None, None, None], dtype=object)\n result = s.unique()\n expected = np.array([1, 2, 3, None], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n def test_dropna_empty(self):\n s = Series([])\n self.assertEqual(len(s.dropna()), 0)\n s.dropna(inplace=True)\n self.assertEqual(len(s), 0)\n\n # invalid axis\n self.assertRaises(ValueError, s.dropna, axis=1)\n\n\n def test_datetime64_tz_dropna(self):\n # DatetimeBlock\n s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,\n Timestamp('2011-01-03 10:00'), pd.NaT])\n result = s.dropna()\n expected = Series([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-03 10:00')], index=[0, 2])\n self.assert_series_equal(result, expected)\n\n # DatetimeBlockTZ\n idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,\n '2011-01-03 10:00', pd.NaT],\n tz='Asia/Tokyo')\n s = pd.Series(idx)\n self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')\n result = s.dropna()\n expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],\n index=[0, 2])\n self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')\n self.assert_series_equal(result, expected)\n\n def test_axis_alias(self):\n s = Series([1, 2, np.nan])\n assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))\n self.assertEqual(s.dropna().sum('rows'), 3)\n self.assertEqual(s._get_axis_number('rows'), 0)\n self.assertEqual(s._get_axis_name('rows'), 'index')\n\n def test_drop_duplicates(self):\n # check both int and object\n for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:\n expected = Series([False, False, False, True])\n assert_series_equal(s.duplicated(), expected)\n assert_series_equal(s.drop_duplicates(), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, False, True, False])\n assert_series_equal(s.duplicated(keep='last'), expected)\n assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep='last', inplace=True)\n assert_series_equal(sc, s[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])\n sc = s.copy()\n with tm.assert_produces_warning(FutureWarning):\n sc.drop_duplicates(take_last=True, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, False, True, True])\n assert_series_equal(s.duplicated(keep=False), expected)\n assert_series_equal(s.drop_duplicates(keep=False), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep=False, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n for s in [Series([1, 2, 3, 5, 3, 2, 4]),\n Series(['1', '2', '3', '5', '3', '2', '4'])]:\n expected = Series([False, False, False, False, True, True, False])\n assert_series_equal(s.duplicated(), expected)\n assert_series_equal(s.drop_duplicates(), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, True, True, False, False, False, False])\n 
assert_series_equal(s.duplicated(keep='last'), expected)\n assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep='last', inplace=True)\n assert_series_equal(sc, s[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])\n sc = s.copy()\n with tm.assert_produces_warning(FutureWarning):\n sc.drop_duplicates(take_last=True, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n expected = Series([False, True, True, False, True, True, False])\n assert_series_equal(s.duplicated(keep=False), expected)\n assert_series_equal(s.drop_duplicates(keep=False), s[~expected])\n sc = s.copy()\n sc.drop_duplicates(keep=False, inplace=True)\n assert_series_equal(sc, s[~expected])\n\n def test_sort_values(self):\n\n ts = self.ts.copy()\n\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n ts.sort()\n\n self.assert_numpy_array_equal(ts, self.ts.sort_values())\n self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index)\n\n ts.sort_values(ascending=False, inplace=True)\n self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False))\n self.assert_numpy_array_equal(ts.index,\n self.ts.sort_values(ascending=False).index)\n\n # GH 5856/5853\n # Series.sort_values operating on a view\n df = DataFrame(np.random.randn(10,4))\n s = df.iloc[:,0]\n def f():\n s.sort_values(inplace=True)\n self.assertRaises(ValueError, f)\n\n # test order/sort inplace\n # GH6859\n ts1 = self.ts.copy()\n ts1.sort_values(ascending=False, inplace=True)\n ts2 = self.ts.copy()\n ts2.sort_values(ascending=False, inplace=True)\n assert_series_equal(ts1,ts2)\n\n ts1 = self.ts.copy()\n ts1 = ts1.sort_values(ascending=False, inplace=False)\n ts2 = self.ts.copy()\n ts2 = ts.sort_values(ascending=False)\n assert_series_equal(ts1,ts2)\n\n def test_sort_index(self):\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n random_order = self.ts.reindex(rindex)\n sorted_series = random_order.sort_index()\n assert_series_equal(sorted_series, self.ts)\n\n # descending\n sorted_series = random_order.sort_index(ascending=False)\n assert_series_equal(sorted_series,\n self.ts.reindex(self.ts.index[::-1]))\n\n def test_sort_API(self):\n\n # API for 9816\n\n # sortlevel\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n s = Series([1, 2], mi)\n backwards = s.iloc[[1, 0]]\n\n res = s.sort_index(level='A')\n assert_series_equal(backwards, res)\n\n # sort_index\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n random_order = self.ts.reindex(rindex)\n sorted_series = random_order.sort_index(level=0)\n assert_series_equal(sorted_series, self.ts)\n\n # compat on axis\n sorted_series = random_order.sort_index(axis=0)\n assert_series_equal(sorted_series, self.ts)\n\n self.assertRaises(ValueError, lambda : random_order.sort_values(axis=1))\n\n sorted_series = random_order.sort_index(level=0, axis=0)\n assert_series_equal(sorted_series, self.ts)\n\n self.assertRaises(ValueError, lambda : random_order.sort_index(level=0, axis=1))\n\n def test_order(self):\n\n # 9816 deprecated\n with tm.assert_produces_warning(FutureWarning):\n self.ts.order()\n\n ts = self.ts.copy()\n ts[:5] = np.NaN\n vals = ts.values\n\n result = ts.sort_values()\n self.assertTrue(np.isnan(result[-5:]).all())\n self.assert_numpy_array_equal(result[:-5], 
np.sort(vals[5:]))\n\n result = ts.sort_values(na_position='first')\n self.assertTrue(np.isnan(result[:5]).all())\n self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))\n\n # something object-type\n ser = Series(['A', 'B'], [1, 2])\n # no failure\n ser.sort_values()\n\n # ascending=False\n ordered = ts.sort_values(ascending=False)\n expected = np.sort(ts.valid().values)[::-1]\n assert_almost_equal(expected, ordered.valid().values)\n ordered = ts.sort_values(ascending=False, na_position='first')\n assert_almost_equal(expected, ordered.valid().values)\n\n def test_nsmallest_nlargest(self):\n # float, int, datetime64 (use i8), timedelts64 (same),\n # object that are numbers, object that are strings\n\n base = [3, 2, 1, 2, 5]\n\n s_list = [\n Series(base, dtype='int8'),\n Series(base, dtype='int16'),\n Series(base, dtype='int32'),\n Series(base, dtype='int64'),\n Series(base, dtype='float32'),\n Series(base, dtype='float64'),\n Series(base, dtype='uint8'),\n Series(base, dtype='uint16'),\n Series(base, dtype='uint32'),\n Series(base, dtype='uint64'),\n Series(base).astype('timedelta64[ns]'),\n Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),\n ]\n\n raising = [\n Series([3., 2, 1, 2, '5'], dtype='object'),\n Series([3., 2, 1, 2, 5], dtype='object'),\n # not supported on some archs\n # Series([3., 2, 1, 2, 5], dtype='complex256'),\n Series([3., 2, 1, 2, 5], dtype='complex128'),\n ]\n\n for r in raising:\n dt = r.dtype\n msg = \"Cannot use method 'n(larg|small)est' with dtype %s\" % dt\n args = 2, len(r), 0, -1\n methods = r.nlargest, r.nsmallest\n for method, arg in product(methods, args):\n with tm.assertRaisesRegexp(TypeError, msg):\n method(arg)\n\n for s in s_list:\n\n assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])\n\n assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])\n\n assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])\n\n assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])\n with tm.assert_produces_warning(FutureWarning):\n assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])\n\n empty = s.iloc[0:0]\n assert_series_equal(s.nsmallest(0), empty)\n assert_series_equal(s.nsmallest(-1), empty)\n assert_series_equal(s.nlargest(0), empty)\n assert_series_equal(s.nlargest(-1), empty)\n\n assert_series_equal(s.nsmallest(len(s)), s.sort_values())\n assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())\n assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])\n assert_series_equal(s.nlargest(len(s) + 1),\n s.iloc[[4, 0, 1, 3, 2]])\n\n s = Series([3., np.nan, 1, 2, 5])\n assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])\n assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])\n\n msg = 'keep must be either \"first\", \"last\"'\n with tm.assertRaisesRegexp(ValueError, msg):\n s.nsmallest(keep='invalid')\n with tm.assertRaisesRegexp(ValueError, msg):\n s.nlargest(keep='invalid')\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.ts[::2] = np.nan\n self.ts[:10][::3] = 4.\n\n ranks = self.ts.rank()\n oranks = self.ts.astype('O').rank()\n\n assert_series_equal(ranks, oranks)\n\n mask = np.isnan(self.ts)\n filled = self.ts.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(rankdata(filled),index=filled.index)\n exp[mask] = np.nan\n\n assert_almost_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = 
iseries.rank()\n exp = iseries.astype(float).rank()\n assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n rng = date_range('1/1/1990', periods=5)\n iseries = Series(np.arange(5), rng) + 1\n iseries.ix[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])\n exp = Series([2, 1, 3, 5, 4, 6.0])\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n def test_rank_inf(self):\n raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly')\n\n values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n\n def test_from_csv(self):\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n ts = Series.from_csv(path)\n assert_series_equal(self.ts, ts, check_names=False)\n self.assertTrue(ts.name is None)\n self.assertTrue(ts.index.name is None)\n\n # GH10483\n self.ts.to_csv(path, header=True)\n ts_h = Series.from_csv(path, header=0)\n self.assertTrue(ts_h.name == 'ts')\n\n self.series.to_csv(path)\n series = Series.from_csv(path)\n self.assertIsNone(series.name)\n self.assertIsNone(series.index.name)\n assert_series_equal(self.series, series, check_names=False)\n self.assertTrue(series.name is None)\n self.assertTrue(series.index.name is None)\n\n self.series.to_csv(path, header=True)\n series_h = Series.from_csv(path, header=0)\n self.assertTrue(series_h.name == 'series')\n\n outfile = open(path, 'w')\n outfile.write('1998-01-01|1.0\\n1999-01-01|2.0')\n outfile.close()\n series = Series.from_csv(path, sep='|')\n checkseries = Series(\n {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})\n assert_series_equal(checkseries, series)\n\n series = Series.from_csv(path, sep='|', parse_dates=False)\n checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})\n assert_series_equal(checkseries, series)\n\n def test_to_csv(self):\n import io\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n\n lines = io.open(path, newline=None).readlines()\n assert(lines[1] != '\\n')\n\n self.ts.to_csv(path, index=False)\n arr = np.loadtxt(path)\n 
assert_almost_equal(arr, self.ts.values)\n\n def test_to_csv_unicode_index(self):\n buf = StringIO()\n s = Series([u(\"\\u05d0\"), \"d2\"], index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n s.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')\n\n assert_series_equal(s, s2)\n\n def test_tolist(self):\n rs = self.ts.tolist()\n xp = self.ts.values.tolist()\n assert_almost_equal(rs, xp)\n\n # datetime64\n s = Series(self.ts.index)\n rs = s.tolist()\n self.assertEqual(self.ts.index[0], rs[0])\n\n def test_to_frame(self):\n self.ts.name = None\n rs = self.ts.to_frame()\n xp = pd.DataFrame(self.ts.values, index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n self.ts.name = 'testname'\n rs = self.ts.to_frame()\n xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n rs = self.ts.to_frame(name='testdifferent')\n xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n def test_to_dict(self):\n self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)\n\n def test_to_csv_float_format(self):\n\n with ensure_clean() as filename:\n ser = Series([0.123456, 0.234567, 0.567567])\n ser.to_csv(filename, float_format='%.2f')\n\n rs = Series.from_csv(filename)\n xp = Series([0.12, 0.23, 0.57])\n assert_series_equal(rs, xp)\n\n def test_to_csv_list_entries(self):\n s = Series(['jack and jill', 'jesse and frank'])\n\n split = s.str.split(r'\\s+and\\s+')\n\n buf = StringIO()\n split.to_csv(buf)\n\n def test_to_csv_path_is_none(self):\n # GH 8215\n # Series.to_csv() was returning None, inconsistent with\n # DataFrame.to_csv() which returned string\n s = Series([1, 2, 3])\n csv_str = s.to_csv(path=None)\n self.assertIsInstance(csv_str, str)\n\n def test_str_attribute(self):\n # GH9068\n methods = ['strip', 'rstrip', 'lstrip']\n s = Series([' jack', 'jill ', ' jesse ', 'frank'])\n for method in methods:\n expected = Series([getattr(str, method)(x) for x in s.values])\n assert_series_equal(getattr(Series.str, method)(s.str), expected)\n\n # str accessor only valid with string values\n s = Series(range(5))\n with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):\n s.str.repeat(2)\n\n def test_clip(self):\n val = self.ts.median()\n\n self.assertEqual(self.ts.clip_lower(val).min(), val)\n self.assertEqual(self.ts.clip_upper(val).max(), val)\n\n self.assertEqual(self.ts.clip(lower=val).min(), val)\n self.assertEqual(self.ts.clip(upper=val).max(), val)\n\n result = self.ts.clip(-0.5, 0.5)\n expected = np.clip(self.ts, -0.5, 0.5)\n assert_series_equal(result, expected)\n tm.assertIsInstance(expected, Series)\n\n def test_clip_types_and_nulls(self):\n\n sers = [Series([np.nan, 1.0, 2.0, 3.0]),\n Series([None, 'a', 'b', 'c']),\n Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]\n\n for s in sers:\n thresh = s[2]\n l = s.clip_lower(thresh)\n u = s.clip_upper(thresh)\n self.assertEqual(l[notnull(l)].min(), thresh)\n self.assertEqual(u[notnull(u)].max(), thresh)\n self.assertEqual(list(isnull(s)), list(isnull(l)))\n self.assertEqual(list(isnull(s)), list(isnull(u)))\n\n def test_clip_against_series(self):\n # GH #6966\n\n s = Series([1.0, 1.0, 4.0])\n threshold = Series([1.0, 2.0, 3.0])\n\n assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))\n assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))\n\n lower = Series([1.0, 2.0, 3.0])\n upper = Series([1.5, 2.5, 3.5])\n assert_series_equal(s.clip(lower, upper), Series([1.0, 
2.0, 3.5]))\n assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))\n\n def test_valid(self):\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = ts.valid()\n self.assertEqual(len(result), ts.count())\n\n tm.assert_dict_equal(result, ts, compare_keys=False)\n\n def test_isnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.isnull(), Series([False, False, False, True, False]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.isnull(), Series([False, False, True]).values)\n\n def test_notnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.notnull(), Series([True, True, True, False, True]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.notnull(), Series([True, True, False]).values)\n\n def test_shift(self):\n shifted = self.ts.shift(1)\n unshifted = shifted.shift(-1)\n\n tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)\n\n offset = datetools.bday\n shifted = self.ts.shift(1, freq=offset)\n unshifted = shifted.shift(-1, freq=offset)\n\n assert_series_equal(unshifted, self.ts)\n\n unshifted = self.ts.shift(0, freq=offset)\n assert_series_equal(unshifted, self.ts)\n\n shifted = self.ts.shift(1, freq='B')\n unshifted = shifted.shift(-1, freq='B')\n\n assert_series_equal(unshifted, self.ts)\n\n # corner case\n unshifted = self.ts.shift(0)\n assert_series_equal(unshifted, self.ts)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_series_equal(shifted2, shifted3)\n assert_series_equal(ps, shifted2.shift(-1, 'B'))\n\n self.assertRaises(ValueError, ps.shift, freq='D')\n\n # legacy support\n shifted4 = ps.shift(1, freq='B')\n assert_series_equal(shifted2, shifted4)\n\n shifted5 = ps.shift(1, freq=datetools.bday)\n assert_series_equal(shifted5, shifted4)\n\n # 32-bit taking\n # GH 8129\n index=date_range('2000-01-01',periods=5)\n for dtype in ['int32','int64']:\n s1 = Series(np.arange(5,dtype=dtype),index=index)\n p = s1.iloc[1]\n result = s1.shift(periods=p)\n expected = Series([np.nan,0,1,2,3],index=index)\n assert_series_equal(result,expected)\n\n # xref 8260\n # with tz\n s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')\n result = s-s.shift()\n assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))\n\n # incompat tz\n s2 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='CET'),name='foo')\n self.assertRaises(ValueError, lambda : s-s2)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n assert_series_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_series_equal(shifted, shifted3)\n\n self.assertRaises(ValueError, ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.ts.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(self.ts, unshifted)\n\n shifted2 = self.ts.tshift(freq=self.ts.index.freq)\n assert_series_equal(shifted, shifted2)\n\n inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),\n name='ts')\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_series_equal(shifted, self.ts.tshift(1))\n assert_series_equal(unshifted, inferred_ts)\n\n no_freq = self.ts[[0, 
5, 7]]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_shift_int(self):\n ts = self.ts.astype(int)\n shifted = ts.shift(1)\n expected = ts.astype(float).shift(1)\n assert_series_equal(shifted, expected)\n\n def test_shift_categorical(self):\n # GH 9416\n s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')\n\n assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())\n\n sp1 = s.shift(1)\n assert_index_equal(s.index, sp1.index)\n self.assertTrue(np.all(sp1.values.codes[:1] == -1))\n self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))\n\n sn2 = s.shift(-2)\n assert_index_equal(s.index, sn2.index)\n self.assertTrue(np.all(sn2.values.codes[-2:] == -1))\n self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))\n\n assert_index_equal(s.values.categories, sp1.values.categories)\n assert_index_equal(s.values.categories, sn2.values.categories)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.ts[::3]\n\n start, end = self.ts.index[3], self.ts.index[6]\n start_missing, end_missing = self.ts.index[2], self.ts.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_series_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_series_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_series_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_series_equal(truncated, expected)\n\n # corner case, empty series returned\n truncated = ts.truncate(after=self.ts.index[0] - offset)\n assert(len(truncated) == 0)\n\n truncated = ts.truncate(before=self.ts.index[-1] + offset)\n assert(len(truncated) == 0)\n\n self.assertRaises(ValueError, ts.truncate,\n before=self.ts.index[-1] + offset,\n after=self.ts.index[0] - offset)\n\n def test_ptp(self):\n N = 1000\n arr = np.random.randn(N)\n ser = Series(arr)\n self.assertEqual(np.ptp(ser), np.ptp(arr))\n\n def test_asof(self):\n # array or list or dates\n N = 50\n rng = date_range('1/1/1990', periods=N, freq='53s')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='25s')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n mask = (result.index >= lb) & (result.index < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n val = result[result.index[result.index >= ub][0]]\n self.assertEqual(ts[ub], val)\n\n self.ts[5:10] = np.NaN\n self.ts[15:20] = np.NaN\n\n val1 = self.ts.asof(self.ts.index[7])\n val2 = self.ts.asof(self.ts.index[19])\n\n self.assertEqual(val1, self.ts[4])\n self.assertEqual(val2, self.ts[14])\n\n # accepts strings\n val1 = self.ts.asof(str(self.ts.index[7]))\n self.assertEqual(val1, self.ts[4])\n\n # in there\n self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])\n\n # no as of value\n d = self.ts.index[0] - datetools.bday\n self.assertTrue(np.isnan(self.ts.asof(d)))\n\n def test_getitem_setitem_datetimeindex(self):\n from pandas import 
date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04:00:00\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\"] = 0\n result[\"1990-01-01 04:00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = 0\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04:00:00\"\n rb = \"1990-01-01 07:00:00\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # repeat all the above with naive datetimes\n result = ts[datetime(1990, 1, 1, 4)]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4)] = 0\n result[datetime(1990, 1, 1, 4)] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = datetime(1990, 1, 1, 4)\n rb = datetime(1990, 1, 1, 7)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts[\"1990-01-02\"]\n expected = ts[24:48]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-02\"] = 0\n result[\"1990-01-02\"] = ts[24:48]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_datetime_tz_pytz(self):\n tm._skip_if_no_pytz();\n from pytz import timezone as tz\n\n from pandas import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n\n # comparison dates with datetime MUST be localized!\n date = tz('US/Central').localize(datetime(1990, 1, 1, 3))\n result[date] = 0\n result[date] = ts[4]\n assert_series_equal(result, ts)\n\n\n def test_getitem_setitem_datetime_tz_dateutil(self):\n tm._skip_if_no_dateutil();\n from dateutil.tz import tzutc\n from pandas.tslib import _dateutil_gettz as gettz\n\n tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil\n\n from pandas 
import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_periodindex(self):\n from pandas import period_range\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\"] = 0\n result[\"1990-01-01 04\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04\":\"1990-01-01 07\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = 0\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04\"\n rb = \"1990-01-01 07\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n def test_asof_periodindex(self):\n from pandas import period_range, PeriodIndex\n # array or list or dates\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='37min')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n pix = PeriodIndex(result.index.values, freq='H')\n mask = (pix >= lb) & (pix < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n ts[5:10] = np.NaN\n ts[15:20] = np.NaN\n\n val1 = ts.asof(ts.index[7])\n val2 = ts.asof(ts.index[19])\n\n self.assertEqual(val1, ts[4])\n self.assertEqual(val2, ts[14])\n\n # accepts strings\n val1 = ts.asof(str(ts.index[7]))\n self.assertEqual(val1, ts[4])\n\n # in there\n self.assertEqual(ts.asof(ts.index[3]), ts[3])\n\n # no as of value\n d = ts.index[0].to_timestamp() - datetools.bday\n self.assertTrue(np.isnan(ts.asof(d)))\n\n def test_asof_more(self):\n from pandas import date_range\n s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],\n index=date_range('1/1/2000', periods=9))\n\n dates = s.index[[4, 5, 6, 2, 1]]\n\n result = s.asof(dates)\n expected = Series([2, 2, 3, 1, np.nan], index=dates)\n\n assert_series_equal(result, expected)\n\n s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],\n 
index=date_range('1/1/2000', periods=9))\n result = s.asof(s.index[0])\n self.assertEqual(result, s[0])\n\n def test_cast_on_putmask(self):\n\n # GH 2746\n\n # need to upcast\n s = Series([1, 2], index=[1, 2], dtype='int64')\n s[[True, False]] = Series([0], index=[1], dtype='int64')\n expected = Series([0, 2], index=[1, 2], dtype='int64')\n\n assert_series_equal(s, expected)\n\n def test_type_promote_putmask(self):\n\n # GH8387: test that changing types does not break alignment\n ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)\n left, mask = ts.copy(), ts > 0\n right = ts[mask].copy().map(str)\n left[mask] = right\n assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))\n\n s = Series([0, 1, 2, 0 ])\n mask = s > 0\n s2 = s[ mask ].map( str )\n s[mask] = s2\n assert_series_equal(s, Series([0, '1', '2', 0]))\n\n s = Series([0, 'foo', 'bar', 0 ])\n mask = Series([False, True, True, False])\n s2 = s[ mask ]\n s[mask] = s2\n assert_series_equal(s, Series([0, 'foo','bar', 0]))\n\n def test_astype_cast_nan_int(self):\n df = Series([1.0, 2.0, 3.0, np.nan])\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_astype_cast_object_int(self):\n arr = Series([\"car\", \"house\", \"tree\", \"1\"])\n\n self.assertRaises(ValueError, arr.astype, int)\n self.assertRaises(ValueError, arr.astype, np.int64)\n self.assertRaises(ValueError, arr.astype, np.int8)\n\n arr = Series(['1', '2', '3', '4'], dtype=object)\n result = arr.astype(int)\n self.assert_numpy_array_equal(result, np.arange(1, 5))\n\n def test_astype_datetimes(self):\n import pandas.tslib as tslib\n\n s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0)])\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])\n s[1] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n def test_astype_str(self):\n # GH4405\n digits = string.digits\n s1 = Series([digits * 10, tm.rands(63), tm.rands(64),\n tm.rands(1000)])\n s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])\n types = (compat.text_type, np.str_)\n for typ in types:\n for s in (s1, s2):\n res = s.astype(typ)\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n\n # GH9757\n # Test str and unicode on python 2.x and just str on python 3.x\n for tt in set([str, compat.text_type]):\n ts = Series([Timestamp('2010-01-04 00:00:00')])\n s = ts.astype(tt)\n expected = Series([tt('2010-01-04')])\n assert_series_equal(s, expected)\n\n ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])\n s = ts.astype(tt)\n expected = Series([tt('2010-01-04 00:00:00-05:00')])\n assert_series_equal(s, expected)\n\n td = Series([Timedelta(1, unit='d')])\n s = td.astype(tt)\n expected = Series([tt('1 days 00:00:00.000000000')])\n assert_series_equal(s, expected)\n\n def test_astype_unicode(self):\n\n # GH7758\n # a bit of magic is required to set default encoding encoding to utf-8\n digits = string.digits\n test_series = [\n Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),\n Series([u('データーサイエンス、お前はもう死んでいる')]),\n\n ]\n\n former_encoding = None\n if not compat.PY3:\n # in python we can force the default encoding\n # for this test\n former_encoding = sys.getdefaultencoding()\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n if sys.getdefaultencoding() == \"utf-8\":\n 
test_series.append(Series([u('野菜食べないとやばい').encode(\"utf-8\")]))\n for s in test_series:\n res = s.astype(\"unicode\")\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n # restore the former encoding\n if former_encoding is not None and former_encoding != \"utf-8\":\n reload(sys)\n sys.setdefaultencoding(former_encoding)\n\n\n def test_map(self):\n index, data = tm.getMixedTypeDict()\n\n source = Series(data['B'], index=data['C'])\n target = Series(data['C'][:4], index=data['D'][:4])\n\n merged = target.map(source)\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # input could be a dict\n merged = target.map(source.to_dict())\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # function\n result = self.ts.map(lambda x: x * 2)\n self.assert_numpy_array_equal(result, self.ts * 2)\n\n # GH 10324\n a = Series([1, 2, 3, 4])\n b = Series([\"even\", \"odd\", \"even\", \"odd\"], dtype=\"category\")\n c = Series([\"even\", \"odd\", \"even\", \"odd\"])\n\n exp = Series([\"odd\", \"even\", \"odd\", np.nan], dtype=\"category\")\n self.assert_series_equal(a.map(b), exp)\n exp = Series([\"odd\", \"even\", \"odd\", np.nan])\n self.assert_series_equal(a.map(c), exp)\n\n a = Series(['a', 'b', 'c', 'd'])\n b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))\n c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))\n\n exp = Series([np.nan, 1, 2, 3])\n self.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, 1, 2, 3])\n self.assert_series_equal(a.map(c), exp)\n\n a = Series(['a', 'b', 'c', 'd'])\n b = Series(['B', 'C', 'D', 'E'], dtype='category',\n index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))\n c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))\n\n exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')\n self.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, 'B', 'C', 'D'])\n self.assert_series_equal(a.map(c), exp)\n\n def test_map_compat(self):\n # related GH 8024\n s = Series([True,True,False],index=[1,2,3])\n result = s.map({ True : 'foo', False : 'bar' })\n expected = Series(['foo','foo','bar'],index=[1,2,3])\n assert_series_equal(result,expected)\n\n def test_map_int(self):\n left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})\n right = Series({1: 11, 2: 22, 3: 33})\n\n self.assertEqual(left.dtype, np.float_)\n self.assertTrue(issubclass(right.dtype.type, np.integer))\n\n merged = left.map(right)\n self.assertEqual(merged.dtype, np.float_)\n self.assertTrue(isnull(merged['d']))\n self.assertTrue(not isnull(merged['c']))\n\n def test_map_type_inference(self):\n s = Series(lrange(3))\n s2 = s.map(lambda x: np.where(x == 0, 0, 1))\n self.assertTrue(issubclass(s2.dtype.type, np.integer))\n\n def test_divide_decimal(self):\n ''' resolves issue #9787 '''\n from decimal import Decimal\n\n expected = Series([Decimal(5)])\n\n s = Series([Decimal(10)])\n s = s/Decimal(2)\n\n tm.assert_series_equal(expected, s)\n\n s = Series([Decimal(10)])\n s = s//Decimal(2)\n\n tm.assert_series_equal(expected, s)\n\n def test_map_decimal(self):\n from decimal import Decimal\n\n result = self.series.map(lambda x: Decimal(str(x)))\n self.assertEqual(result.dtype, np.object_)\n tm.assertIsInstance(result[0], Decimal)\n\n def test_map_na_exclusion(self):\n s = Series([1.5, np.nan, 3, np.nan, 5])\n\n result = s.map(lambda x: x * 2, na_action='ignore')\n exp = s * 2\n assert_series_equal(result, exp)\n\n def test_map_dict_with_tuple_keys(self):\n '''\n Due to new MultiIndex-ing 
behaviour in v0.14.0,\n dicts with tuple keys passed to map were being\n converted to a multi-index, preventing tuple values\n from being mapped properly.\n '''\n df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})\n label_mappings = {\n (1,): 'A',\n (2,): 'B',\n (3, 4): 'A',\n (5, 6): 'B'\n }\n df['labels'] = df['a'].map(label_mappings)\n df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)\n # All labels should be filled now\n tm.assert_series_equal(df['labels'], df['expected_labels'], check_names=False)\n\n def test_apply(self):\n assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))\n\n # elementwise-apply\n import math\n assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))\n\n # how to handle Series result, #2316\n result = self.ts.apply(lambda x: Series([x, x ** 2],\n index=['x', 'x^2']))\n expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})\n tm.assert_frame_equal(result, expected)\n\n # empty series\n s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n # check all metadata (GH 9322)\n self.assertIsNot(s, rs)\n self.assertIs(s.index, rs.index)\n self.assertEqual(s.dtype, rs.dtype)\n self.assertEqual(s.name, rs.name)\n\n # index but no data\n s = Series(index=[1, 2, 3])\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n\n def test_apply_same_length_inference_bug(self):\n s = Series([1, 2])\n f = lambda x: (x, x + 1)\n\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n s = Series([1, 2, 3])\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n def test_apply_dont_convert_dtype(self):\n s = Series(np.random.randn(10))\n\n f = lambda x: x if x > 0 else np.nan\n result = s.apply(f, convert_dtype=False)\n self.assertEqual(result.dtype, object)\n\n def test_convert_objects(self):\n\n s = Series([1., 2, 3], index=['a', 'b', 'c'])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n # force numeric conversion\n r = s.copy().astype('O')\n r['a'] = '1'\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = '1.'\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = 'garbled'\n expected = s.copy()\n expected['a'] = np.nan\n with tm.assert_produces_warning(FutureWarning):\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, expected)\n\n # GH 4119, not converting a mixed type (e.g.floats and object)\n s = Series([1, 'na', 3, 4])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n s = Series([1, '', 3, 4])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n # dates\n s = Series(\n [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])\n s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(\n 2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), 
'20010105'], dtype='O')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates=True, convert_numeric=False)\n expected = Series(\n [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=True)\n assert_series_equal(result, expected)\n\n expected = Series(\n [Timestamp(\n '20010101'), Timestamp('20010102'), Timestamp('20010103'),\n lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')\n with tm.assert_produces_warning(FutureWarning):\n result = s2.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, expected)\n with tm.assert_produces_warning(FutureWarning):\n result = s2.convert_objects(convert_dates='coerce',\n convert_numeric=True)\n assert_series_equal(result, expected)\n\n # preserver all-nans (if convert_dates='coerce')\n s = Series(['foo', 'bar', 1, 1.0], dtype='O')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, s)\n\n # preserver if non-object\n s = Series([1], dtype='float32')\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce',\n convert_numeric=False)\n assert_series_equal(result, s)\n\n #r = s.copy()\n #r[0] = np.nan\n #result = r.convert_objects(convert_dates=True,convert_numeric=False)\n #self.assertEqual(result.dtype, 'M8[ns]')\n\n # dateutil parses some single letters into today's value as a date\n for x in 'abcdefghijklmnopqrstuvwxyz':\n s = Series([x])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n s = Series([x.upper()])\n with tm.assert_produces_warning(FutureWarning):\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n\n def test_convert_objects_preserve_bool(self):\n s = Series([1, True, 3, 5], dtype=object)\n with tm.assert_produces_warning(FutureWarning):\n r = s.convert_objects(convert_numeric=True)\n e = Series([1, 1, 3, 5], dtype='i8')\n tm.assert_series_equal(r, e)\n\n def test_convert_objects_preserve_all_bool(self):\n s = Series([False, True, False, False], dtype=object)\n with tm.assert_produces_warning(FutureWarning):\n r = s.convert_objects(convert_numeric=True)\n e = Series([False, True, False, False], dtype=bool)\n tm.assert_series_equal(r, e)\n\n # GH 10265\n def test_convert(self):\n # Tests: All to nans, coerce, true\n # Test coercion returns correct type\n s = Series(['a', 'b', 'c'])\n results = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT] * 3)\n assert_series_equal(results, expected)\n\n results = s._convert(numeric=True, coerce=True)\n expected = Series([np.nan] * 3)\n assert_series_equal(results, expected)\n\n expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))\n results = s._convert(timedelta=True, coerce=True)\n assert_series_equal(results, expected)\n\n dt = datetime(2001, 1, 1, 0, 0)\n td = dt - datetime(2000, 1, 1, 0, 0)\n\n # Test coercion with mixed types\n s = Series(['a', '3.1415', dt, td])\n results = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])\n 
assert_series_equal(results, expected)\n\n results = s._convert(numeric=True, coerce=True)\n expected = Series([nan, 3.1415, nan, nan])\n assert_series_equal(results, expected)\n\n results = s._convert(timedelta=True, coerce=True)\n expected = Series([lib.NaT, lib.NaT, lib.NaT, td],\n dtype=np.dtype('m8[ns]'))\n assert_series_equal(results, expected)\n\n # Test standard conversion returns original\n results = s._convert(datetime=True)\n assert_series_equal(results, s)\n results = s._convert(numeric=True)\n expected = Series([nan, 3.1415, nan, nan])\n assert_series_equal(results, expected)\n results = s._convert(timedelta=True)\n assert_series_equal(results, s)\n\n # test pass-through and non-conversion when other types selected\n s = Series(['1.0','2.0','3.0'])\n results = s._convert(datetime=True, numeric=True, timedelta=True)\n expected = Series([1.0,2.0,3.0])\n assert_series_equal(results, expected)\n results = s._convert(True,False,True)\n assert_series_equal(results, s)\n\n s = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)],\n dtype='O')\n results = s._convert(datetime=True, numeric=True, timedelta=True)\n expected = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)])\n assert_series_equal(results, expected)\n results = s._convert(datetime=False,numeric=True,timedelta=True)\n assert_series_equal(results, s)\n\n td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)\n s = Series([td, td], dtype='O')\n results = s._convert(datetime=True, numeric=True, timedelta=True)\n expected = Series([td, td])\n assert_series_equal(results, expected)\n results = s._convert(True,True,False)\n assert_series_equal(results, s)\n\n\n s = Series([1., 2, 3], index=['a', 'b', 'c'])\n result = s._convert(numeric=True)\n assert_series_equal(result, s)\n\n # force numeric conversion\n r = s.copy().astype('O')\n r['a'] = '1'\n result = r._convert(numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = '1.'\n result = r._convert(numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = 'garbled'\n result = r._convert(numeric=True)\n expected = s.copy()\n expected['a'] = nan\n assert_series_equal(result, expected)\n\n # GH 4119, not converting a mixed type (e.g.floats and object)\n s = Series([1, 'na', 3, 4])\n result = s._convert(datetime=True, numeric=True)\n expected = Series([1, nan, 3, 4])\n assert_series_equal(result, expected)\n\n s = Series([1, '', 3, 4])\n result = s._convert(datetime=True, numeric=True)\n assert_series_equal(result, expected)\n\n # dates\n s = Series(\n [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])\n s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(\n 2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')\n\n result = s._convert(datetime=True)\n expected = Series(\n [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n expected = Series(\n [Timestamp(\n '20010101'), Timestamp('20010102'), Timestamp('20010103'),\n lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')\n result = s2._convert(datetime=True,\n numeric=False,\n timedelta=False,\n coerce=True)\n assert_series_equal(result, expected)\n result = s2._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n s = 
Series(['foo', 'bar', 1, 1.0], dtype='O')\n result = s._convert(datetime=True, coerce=True)\n expected = Series([lib.NaT]*4)\n assert_series_equal(result, expected)\n\n # preserver if non-object\n s = Series([1], dtype='float32')\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, s)\n\n #r = s.copy()\n #r[0] = np.nan\n #result = r._convert(convert_dates=True,convert_numeric=False)\n #self.assertEqual(result.dtype, 'M8[ns]')\n\n # dateutil parses some single letters into today's value as a date\n expected = Series([lib.NaT])\n for x in 'abcdefghijklmnopqrstuvwxyz':\n s = Series([x])\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n s = Series([x.upper()])\n result = s._convert(datetime=True, coerce=True)\n assert_series_equal(result, expected)\n\n def test_convert_no_arg_error(self):\n s = Series(['1.0','2'])\n self.assertRaises(ValueError, s._convert)\n\n def test_convert_preserve_bool(self):\n s = Series([1, True, 3, 5], dtype=object)\n r = s._convert(datetime=True, numeric=True)\n e = Series([1, 1, 3, 5], dtype='i8')\n tm.assert_series_equal(r, e)\n\n def test_convert_preserve_all_bool(self):\n s = Series([False, True, False, False], dtype=object)\n r = s._convert(datetime=True, numeric=True)\n e = Series([False, True, False, False], dtype=bool)\n tm.assert_series_equal(r, e)\n\n def test_apply_args(self):\n s = Series(['foo,bar'])\n\n result = s.apply(str.split, args=(',',))\n self.assertEqual(result[0], ['foo', 'bar'])\n tm.assertIsInstance(result[0], list)\n\n def test_align(self):\n def _check_align(a, b, how='left', fill=None):\n aa, ab = a.align(b, join=how, fill_value=fill)\n\n join_index = a.index.join(b.index, how=how)\n if fill is not None:\n diff_a = aa.index.difference(join_index)\n diff_b = ab.index.difference(join_index)\n if len(diff_a) > 0:\n self.assertTrue((aa.reindex(diff_a) == fill).all())\n if len(diff_b) > 0:\n self.assertTrue((ab.reindex(diff_b) == fill).all())\n\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n if fill is not None:\n ea = ea.fillna(fill)\n eb = eb.fillna(fill)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n self.assertEqual(aa.name, 'ts')\n self.assertEqual(ea.name, 'ts')\n self.assertEqual(ab.name, 'ts')\n self.assertEqual(eb.name, 'ts')\n\n for kind in JOIN_TYPES:\n _check_align(self.ts[2:], self.ts[:-5], how=kind)\n _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind)\n _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind)\n _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind)\n _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)\n\n def test_align_fill_method(self):\n def _check_align(a, b, how='left', method='pad', limit=None):\n aa, ab = a.align(b, join=how, method=method, limit=limit)\n\n join_index = a.index.join(b.index, how=how)\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n ea = ea.fillna(method=method, limit=limit)\n eb = eb.fillna(method=method, limit=limit)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n\n for kind in JOIN_TYPES:\n for meth in ['pad', 'bfill']:\n _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)\n _check_align(self.ts[2:], self.ts[:-5], how=kind,\n method=meth, limit=1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind, 
method=meth)\n _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,\n limit=1)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n def test_align_nocopy(self):\n b = self.ts[:5].copy()\n\n # do copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left')\n ra[:5] = 5\n self.assertFalse((a[:5] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left', copy=False)\n ra[:5] = 5\n self.assertTrue((a[:5] == 5).all())\n\n # do copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right')\n rb[:3] = 5\n self.assertFalse((b[:3] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right', copy=False)\n rb[:2] = 5\n self.assertTrue((b[:2] == 5).all())\n\n def test_align_sameindex(self):\n a, b = self.ts.align(self.ts, copy=False)\n self.assertIs(a.index, self.ts.index)\n self.assertIs(b.index, self.ts.index)\n\n # a, b = self.ts.align(self.ts, copy=True)\n # self.assertIsNot(a.index, self.ts.index)\n # self.assertIsNot(b.index, self.ts.index)\n\n def test_align_multiindex(self):\n # GH 10665\n\n midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],\n names=('a', 'b', 'c'))\n idx = pd.Index(range(2), name='b')\n s1 = pd.Series(np.arange(12,dtype='int64'), index=midx)\n s2 = pd.Series(np.arange(2,dtype='int64'), index=idx)\n\n # these must be the same results (but flipped)\n res1l, res1r = s1.align(s2, join='left')\n res2l, res2r = s2.align(s1, join='right')\n\n expl = s1\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n res1l, res1r = s1.align(s2, join='right')\n res2l, res2r = s2.align(s1, join='left')\n\n exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],\n names=('a', 'b', 'c'))\n expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n def test_reindex(self):\n\n identity = self.series.reindex(self.series.index)\n\n # __array_interface__ is not defined for older numpies\n # and on some pythons\n try:\n self.assertTrue(np.may_share_memory(self.series.index, identity.index))\n except (AttributeError):\n pass\n\n self.assertTrue(identity.index.is_(self.series.index))\n self.assertTrue(identity.index.identical(self.series.index))\n\n subIndex = self.series.index[10:20]\n subSeries = self.series.reindex(subIndex)\n\n for idx, val in compat.iteritems(subSeries):\n self.assertEqual(val, self.series[idx])\n\n subIndex2 = self.ts.index[10:20]\n subTS = self.ts.reindex(subIndex2)\n\n for idx, val in compat.iteritems(subTS):\n self.assertEqual(val, self.ts[idx])\n stuffSeries = self.ts.reindex(subIndex)\n\n self.assertTrue(np.isnan(stuffSeries).all())\n\n # This is extremely important for the Cython code to not screw up\n nonContigIndex = self.ts.index[::2]\n subNonContig = self.ts.reindex(nonContigIndex)\n for idx, val in compat.iteritems(subNonContig):\n self.assertEqual(val, self.ts[idx])\n\n # return a copy the same index here\n 
result = self.ts.reindex()\n self.assertFalse((result is self.ts))\n\n def test_reindex_nan(self):\n ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])\n\n i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]\n assert_series_equal(ts.reindex(i), ts.iloc[j])\n\n ts.index = ts.index.astype('object')\n assert_series_equal(ts.reindex(i), ts.iloc[j])\n\n def test_reindex_corner(self):\n # (don't forget to fix this) I think it's fixed\n reindexed_dep = self.empty.reindex(self.ts.index, method='pad')\n\n # corner case: pad empty series\n reindexed = self.empty.reindex(self.ts.index, method='pad')\n\n # pass non-Index\n reindexed = self.ts.reindex(list(self.ts.index))\n assert_series_equal(self.ts, reindexed)\n\n # bad fill method\n ts = self.ts[::2]\n self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')\n\n def test_reindex_pad(self):\n\n s = Series(np.arange(10),dtype='int64')\n s2 = s[::2]\n\n reindexed = s2.reindex(s.index, method='pad')\n reindexed2 = s2.reindex(s.index, method='ffill')\n assert_series_equal(reindexed, reindexed2)\n\n expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))\n assert_series_equal(reindexed, expected)\n\n # GH4604\n s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])\n new_index = ['a','g','c','f']\n expected = Series([1,1,3,3],index=new_index)\n\n # this changes dtype because the ffill happens after\n result = s.reindex(new_index).ffill()\n assert_series_equal(result, expected.astype('float64'))\n\n result = s.reindex(new_index).ffill(downcast='infer')\n assert_series_equal(result, expected)\n\n expected = Series([1, 5, 3, 5], index=new_index)\n result = s.reindex(new_index, method='ffill')\n assert_series_equal(result, expected)\n\n # inferrence of new dtype\n s = Series([True,False,False,True],index=list('abcd'))\n new_index='agc'\n result = s.reindex(list(new_index)).ffill()\n expected = Series([True,True,False],index=list(new_index))\n assert_series_equal(result, expected)\n\n # GH4618 shifted series downcasting\n s = Series(False,index=lrange(0,5))\n result = s.shift(1).fillna(method='bfill')\n expected = Series(False,index=lrange(0,5))\n assert_series_equal(result, expected)\n\n def test_reindex_nearest(self):\n s = Series(np.arange(10, dtype='int64'))\n target = [0.1, 0.9, 1.5, 2.0]\n actual = s.reindex(target, method='nearest')\n expected = Series(np.around(target).astype('int64'), target)\n assert_series_equal(expected, actual)\n\n actual = s.reindex_like(actual, method='nearest')\n assert_series_equal(expected, actual)\n\n actual = s.reindex_like(actual, method='nearest', tolerance=1)\n assert_series_equal(expected, actual)\n\n actual = s.reindex(target, method='nearest', tolerance=0.2)\n expected = Series([0, 1, np.nan, 2], target)\n assert_series_equal(expected, actual)\n\n def test_reindex_backfill(self):\n pass\n\n def test_reindex_int(self):\n ts = self.ts[::2]\n int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)\n\n # this should work fine\n reindexed_int = int_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_int.dtype, np.float_)\n\n # NO NaNs introduced\n reindexed_int = int_ts.reindex(int_ts.index[::2])\n self.assertEqual(reindexed_int.dtype, np.int_)\n\n def test_reindex_bool(self):\n\n # A series other than float, int, string, or object\n ts = self.ts[::2]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n\n # this should work fine\n reindexed_bool = bool_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_bool.dtype, 
np.object_)\n\n # NO NaNs introduced\n reindexed_bool = bool_ts.reindex(bool_ts.index[::2])\n self.assertEqual(reindexed_bool.dtype, np.bool_)\n\n def test_reindex_bool_pad(self):\n # fail\n ts = self.ts[5:]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n filled_bool = bool_ts.reindex(self.ts.index, method='pad')\n self.assertTrue(isnull(filled_bool[:5]).all())\n\n def test_reindex_like(self):\n other = self.ts[::2]\n assert_series_equal(self.ts.reindex(other.index),\n self.ts.reindex_like(other))\n\n # GH 7179\n day1 = datetime(2013,3,5)\n day2 = datetime(2013,5,5)\n day3 = datetime(2014,3,5)\n\n series1 = Series([5, None, None],[day1, day2, day3])\n series2 = Series([None, None], [day1, day3])\n\n result = series1.reindex_like(series2, method='pad')\n expected = Series([5, np.nan], index=[day1, day3])\n assert_series_equal(result, expected)\n\n def test_reindex_fill_value(self):\n #------------------------------------------------------------\n # floats\n floats = Series([1., 2., 3.])\n result = floats.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = floats.reindex([1, 2, 3], fill_value=0)\n expected = Series([2., 3., 0], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # ints\n ints = Series([1, 2, 3])\n\n result = ints.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n # don't upcast\n result = ints.reindex([1, 2, 3], fill_value=0)\n expected = Series([2, 3, 0], index=[1, 2, 3])\n self.assertTrue(issubclass(result.dtype.type, np.integer))\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # objects\n objects = Series([1, 2, 3], dtype=object)\n\n result = objects.reindex([1, 2, 3])\n expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = objects.reindex([1, 2, 3], fill_value='foo')\n expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # bools\n bools = Series([True, False, True])\n\n result = bools.reindex([1, 2, 3])\n expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = bools.reindex([1, 2, 3], fill_value=False)\n expected = Series([False, True, False], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n def test_rename(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n renamed = self.ts.rename(renamer)\n self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))\n\n # dict\n rename_dict = dict(zip(self.ts.index, renamed.index))\n renamed2 = self.ts.rename(rename_dict)\n assert_series_equal(renamed, renamed2)\n\n # partial dict\n s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')\n renamed = s.rename({'b': 'foo', 'd': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])\n\n # index with name\n renamer = Series(\n np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')\n renamed = renamer.rename({})\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n def test_rename_inplace(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n expected = renamer(self.ts.index[0])\n\n self.ts.rename(renamer, inplace=True)\n self.assertEqual(self.ts.index[0], expected)\n\n def 
test_preserveRefs(self):\n seq = self.ts[[5, 10, 15]]\n seq[1] = np.NaN\n self.assertFalse(np.isnan(self.ts[10]))\n\n def test_ne(self):\n ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n expected = [True, True, False, True, True]\n self.assertTrue(tm.equalContents(ts.index != 5, expected))\n self.assertTrue(tm.equalContents(~(ts.index == 5), expected))\n\n def test_pad_nan(self):\n x = Series([np.nan, 1., np.nan, 3., np.nan],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n\n x.fillna(method='pad', inplace=True)\n\n expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n assert_series_equal(x[1:], expected[1:])\n self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))\n\n def test_unstack(self):\n from numpy import nan\n from pandas.util.testing import assert_frame_equal\n\n index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],\n labels=[[1, 1, 0, 0], [0, 1, 0, 2]])\n\n s = Series(np.arange(4.), index=index)\n unstacked = s.unstack()\n\n expected = DataFrame([[2., nan, 3.], [0., 1., nan]],\n index=['bar', 'foo'],\n columns=['one', 'three', 'two'])\n\n assert_frame_equal(unstacked, expected)\n\n unstacked = s.unstack(level=0)\n assert_frame_equal(unstacked, expected.T)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)\n unstacked = s.unstack(0)\n assert_frame_equal(unstacked, expected)\n\n # GH5873\n idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])\n ts = pd.Series([1,2], index=idx)\n left = ts.unstack()\n right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],\n columns=[nan, 3.5])\n print(left)\n print(right)\n assert_frame_equal(left, right)\n\n idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],\n ['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])\n ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)\n right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],\n columns=['cat', 'dog'])\n tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]\n right.index = pd.MultiIndex.from_tuples(tpls)\n assert_frame_equal(ts.unstack(level=0), right)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n s = Series([1, 2], mi)\n backwards = s.iloc[[1, 0]]\n\n res = s.sortlevel('A')\n assert_series_equal(backwards, res)\n\n res = s.sortlevel(['A', 'B'])\n assert_series_equal(backwards, res)\n\n res = s.sortlevel('A', sort_remaining=False)\n assert_series_equal(s, res)\n\n res = s.sortlevel(['A', 'B'], sort_remaining=False)\n assert_series_equal(s, res)\n\n def test_head_tail(self):\n assert_series_equal(self.series.head(), self.series[:5])\n assert_series_equal(self.series.tail(), self.series[-5:])\n\n def test_isin(self):\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n\n result = s.isin(['A', 'C'])\n expected = Series([True, False, True, False, False, False, True, True])\n assert_series_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH4763\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n with tm.assertRaises(TypeError):\n s.isin('a')\n\n with tm.assertRaises(TypeError):\n s = Series(['aaa', 'b', 'c'])\n s.isin('aaa')\n\n def test_isin_with_i8(self):\n # GH 5021\n\n expected = 
Series([True,True,False,False,False])\n expected2 = Series([False,True,False,False,False])\n\n # datetime64[ns]\n s = Series(date_range('jan-01-2013','jan-05-2013'))\n\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n result = s.isin(s[0:2].values)\n assert_series_equal(result, expected)\n\n # fails on dtype conversion in the first place\n result = s.isin(s[0:2].values.astype('datetime64[D]'))\n assert_series_equal(result, expected)\n\n result = s.isin([s[1]])\n assert_series_equal(result, expected2)\n\n result = s.isin([np.datetime64(s[1])])\n assert_series_equal(result, expected2)\n\n # timedelta64[ns]\n s = Series(pd.to_timedelta(lrange(5),unit='d'))\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# TimeSeries-specific\n def test_cummethods_bool(self):\n # GH 6270\n # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2\n def cummin(x):\n return np.minimum.accumulate(x)\n\n def cummax(x):\n return np.maximum.accumulate(x)\n\n a = pd.Series([False, False, False, True, True, False, False])\n b = ~a\n c = pd.Series([False] * len(b))\n d = ~c\n methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,\n 'cummin': cummin, 'cummax': cummax}\n args = product((a, b, c, d), methods)\n for s, method in args:\n expected = Series(methods[method](s.values))\n result = getattr(s, method)()\n assert_series_equal(result, expected)\n\n e = pd.Series([False, True, nan, False])\n cse = pd.Series([0, 1, nan, 1], dtype=object)\n cpe = pd.Series([False, 0, nan, 0])\n cmin = pd.Series([False, False, nan, False])\n cmax = pd.Series([False, True, nan, True])\n expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,\n 'cummax': cmax}\n\n for method in methods:\n res = getattr(e, method)()\n assert_series_equal(res, expecteds[method])\n\n def test_replace(self):\n N = 100\n ser = Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n ser.replace([np.nan], -1, inplace=True)\n\n exp = ser.fillna(-1)\n assert_series_equal(ser, exp)\n\n rs = ser.replace(0., np.nan)\n ser[ser == 0.] 
= np.nan\n assert_series_equal(rs, ser)\n\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n self.assertTrue((ser[20:30] == -1).all())\n\n ser = Series([np.nan, 0, np.inf])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = Series(self.ts.index)\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n assert_series_equal(result, Series([4, 3, 2, 1, 0]))\n\n # API change from 0.12?\n # GH 5319\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n assert_series_equal(result, expected)\n\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n assert_series_equal(result, expected)\n #GH 5797\n ser = Series(date_range('20130101', periods=5))\n expected = ser.copy()\n expected.loc[2] = Timestamp('20120101')\n result = ser.replace({Timestamp('20130103'):\n Timestamp('20120101')})\n assert_series_equal(result, expected)\n result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))\n assert_series_equal(result, expected)\n\n def test_replace_with_single_list(self):\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([1,2,3])\n assert_series_equal(result, Series([0,0,0,0,4]))\n\n s = ser.copy()\n s.replace([1,2,3],inplace=True)\n assert_series_equal(s, Series([0,0,0,0,4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n with tm.assertRaises(ValueError):\n s.replace([1,2,3],inplace=True,method='crash_cymbal')\n assert_series_equal(s, ser)\n\n def test_replace_mixed_types(self):\n s = Series(np.arange(5),dtype='int64')\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n sc.replace(to_rep, val, inplace=True)\n assert_series_equal(expected, r)\n assert_series_equal(expected, sc)\n\n # should NOT upcast to float\n e = Series([0,1,2,3,4])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = Series([0,1,2,3.5,4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = 
Series([0,1,2,3.5,'a'])\n tr, v = [3,4], [3.5,'a']\n check_replace(tr, v, e)\n\n # again casts to object\n e = Series([0,1,2,3.5,Timestamp('20130101')])\n tr, v = [3,4],[3.5,Timestamp('20130101')]\n check_replace(tr, v, e)\n\n # casts to float\n e = Series([0,1,2,3.5,1])\n tr, v = [3,4],[3.5,True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = date_range('1/1/2001', '1/10/2001',\n freq='D').to_series().reset_index(drop=True)\n result = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])\n expected = Series([1.0,2,'a'] + dr[3:].tolist(),dtype=object)\n assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = Series([True, False, True])\n result = s.replace('fun', 'in-the-sun')\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = Series([True, False, True])\n result = s.replace(True, '2u')\n expected = Series(['2u', False, '2u'])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = Series([True, False, True])\n result = s.replace(True, False)\n expected = Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = Series([True, False, True])\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n s.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_asfreq(self):\n ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),\n datetime(2009, 11, 30),\n datetime(2009, 12, 31)])\n\n daily_ts = ts.asfreq('B')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq('B', method='pad')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq(datetools.bday)\n monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n result = ts[:0].asfreq('M')\n self.assertEqual(len(result), 0)\n self.assertIsNot(result, ts)\n\n def test_diff(self):\n # Just run the function\n self.ts.diff()\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = s.diff()\n self.assertEqual(rs[1], 1)\n\n # neg n\n rs = self.ts.diff(-1)\n xp = self.ts - self.ts.shift(-1)\n assert_series_equal(rs, xp)\n\n # 0\n rs = self.ts.diff(0)\n xp = self.ts - self.ts\n assert_series_equal(rs, xp)\n\n # datetime diff (GH3100)\n s = Series(date_range('20130102', periods=5))\n rs = s - s.shift(1)\n xp = s.diff()\n assert_series_equal(rs, xp)\n\n # timedelta diff\n nrs = rs - rs.shift(1)\n nxp = xp.diff()\n assert_series_equal(nrs, nxp)\n\n # with tz\n s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo')\n result = s.diff()\n assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))\n\n def test_pct_change(self):\n rs = self.ts.pct_change(fill_method=None)\n assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)\n\n rs = self.ts.pct_change(2)\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.ts.pct_change(fill_method='bfill', limit=1)\n filled = self.ts.fillna(method='bfill', limit=1)\n assert_series_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.ts.pct_change(freq='5D')\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n chg = s.pct_change()\n 
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n assert_series_equal(chg, expected)\n\n def test_autocorr(self):\n # Just run the function\n corr1 = self.ts.autocorr()\n\n # Now run it with the lag parameter\n corr2 = self.ts.autocorr(lag=1)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n # Choose a random lag between 1 and length of Series - 2\n # and compare the result with the Series corr() function\n n = 1 + np.random.randint(max(1, len(self.ts) - 2))\n corr1 = self.ts.corr(self.ts.shift(n))\n corr2 = self.ts.autocorr(lag=n)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n def test_first_last_valid(self):\n ts = self.ts.copy()\n ts[:5] = np.NaN\n\n index = ts.first_valid_index()\n self.assertEqual(index, ts.index[5])\n\n ts[-5:] = np.NaN\n index = ts.last_valid_index()\n self.assertEqual(index, ts.index[-6])\n\n ts[:] = np.nan\n self.assertIsNone(ts.last_valid_index())\n self.assertIsNone(ts.first_valid_index())\n\n ser = Series([], index=[])\n self.assertIsNone(ser.last_valid_index())\n self.assertIsNone(ser.first_valid_index())\n\n def test_mpl_compat_hack(self):\n result = self.ts[:, np.newaxis]\n expected = self.ts.values[:, np.newaxis]\n assert_almost_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# GroupBy\n\n def test_select(self):\n n = len(self.ts)\n result = self.ts.select(lambda x: x >= self.ts.index[n // 2])\n expected = self.ts.reindex(self.ts.index[n // 2:])\n assert_series_equal(result, expected)\n\n result = self.ts.select(lambda x: x.weekday() == 2)\n expected = self.ts[self.ts.index.weekday == 2]\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# Misc not safe for sparse\n\n def test_dropna_preserve_name(self):\n self.ts[:5] = np.nan\n result = self.ts.dropna()\n self.assertEqual(result.name, self.ts.name)\n name = self.ts.name\n ts = self.ts.copy()\n ts.dropna(inplace=True)\n self.assertEqual(ts.name, name)\n\n def test_numpy_unique(self):\n # it works!\n result = np.unique(self.ts)\n\n def test_concat_empty_series_dtypes_roundtrips(self):\n\n # round-tripping with self & like self\n dtypes = map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]'])\n\n for dtype in dtypes:\n self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)\n self.assertEqual(pd.concat([Series(dtype=dtype),\n Series(dtype=dtype)]).dtype, dtype)\n\n def int_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):\n return 'i'\n elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):\n return 'u'\n return None\n\n def float_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):\n return 'f'\n return None\n\n def get_result_type(dtype, dtype2):\n result = float_result_type(dtype, dtype2)\n if result is not None:\n return result\n result = int_result_type(dtype, dtype2)\n if result is not None:\n return result\n return 'O'\n\n for dtype in dtypes:\n for dtype2 in dtypes:\n if dtype == dtype2:\n continue\n\n expected = 
get_result_type(dtype, dtype2)\n result = pd.concat([Series(dtype=dtype),\n Series(dtype=dtype2)]).dtype\n self.assertEqual(result.kind, expected)\n\n def test_concat_empty_series_dtypes(self):\n\n # bools\n self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.int32)]).dtype, np.int32)\n self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.float32)]).dtype, np.object_)\n\n # datetimelike\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool_),\n Series(dtype=np.int64)]).dtype, np.object_)\n\n # categorical\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='category')]).dtype, 'category')\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='float64')]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='object')]).dtype, 'category')\n\n # sparse\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64').to_sparse()])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64')])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='object')])\n self.assertEqual(result.dtype,np.object_)\n self.assertEqual(result.ftype,'object:dense')\n\n def test_searchsorted_numeric_dtypes_scalar(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted(30)\n e = 2\n tm.assert_equal(r, e)\n\n r = s.searchsorted([30])\n e = np.array([2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_searchsorted_numeric_dtypes_vector(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted([91, 2e6])\n e = np.array([3, 4])\n tm.assert_numpy_array_equal(r, e)\n\n def test_search_sorted_datetime64_scalar(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = pd.Timestamp('20120102')\n r = s.searchsorted(v)\n e = 1\n tm.assert_equal(r, e)\n\n def test_search_sorted_datetime64_list(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]\n r = s.searchsorted(v)\n e = np.array([1, 2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_searchsorted_sorter(self):\n # GH8490\n s = Series([3, 1, 2])\n r = s.searchsorted([0, 3], sorter=np.argsort(s))\n e = np.array([0, 2])\n tm.assert_numpy_array_equal(r, e)\n\n def test_to_frame_expanddim(self):\n # GH 9762\n\n class SubclassedSeries(Series):\n @property\n def _constructor_expanddim(self):\n return SubclassedFrame\n\n class SubclassedFrame(DataFrame):\n pass\n\n s = SubclassedSeries([1, 2, 3], name='X')\n result = s.to_frame()\n self.assertTrue(isinstance(result, SubclassedFrame))\n expected = SubclassedFrame({'X': [1, 2, 3]})\n assert_frame_equal(result, expected)\n\n\nclass TestSeriesNonUnique(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n pass\n\n def test_basic_indexing(self):\n s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])\n\n 
self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n s = s.sort_index()\n\n self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n\n def test_int_indexing(self):\n s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n # not monotonic\n s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n def test_datetime_indexing(self):\n from pandas import date_range\n\n index = date_range('1/1/2000', '1/7/2000')\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp('1/8/2000')\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n def test_reset_index(self):\n df = tm.makeDataFrame()[:5]\n ser = df.stack()\n ser.index.names = ['hash', 'category']\n\n ser.name = 'value'\n df = ser.reset_index()\n self.assertIn('value', df)\n\n df = ser.reset_index(name='value2')\n self.assertIn('value2', df)\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n s2.reset_index(drop=True, inplace=True)\n assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n rs = s.reset_index(level=1)\n self.assertEqual(len(rs.columns), 2)\n\n rs = s.reset_index(level=[0, 2], drop=True)\n self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))\n tm.assertIsInstance(rs, Series)\n\n def test_set_index_makes_timeseries(self):\n idx = tm.makeDateIndex(10)\n\n s = Series(lrange(10))\n s.index = idx\n\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(s.is_time_series == True)\n self.assertTrue(s.index.is_all_dates == True)\n\n def test_timeseries_coercion(self):\n idx = tm.makeDateIndex(10000)\n ser = Series(np.random.randn(len(idx)), idx.astype(object))\n with tm.assert_produces_warning(FutureWarning):\n self.assertTrue(ser.is_time_series)\n self.assertTrue(ser.index.is_all_dates)\n self.assertIsInstance(ser.index, DatetimeIndex)\n\n def test_replace(self):\n N = 100\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n 
self.assertTrue((ser[20:30] == -1).all())\n\n def test_repeat(self):\n s = Series(np.random.randn(3), index=['a', 'b', 'c'])\n\n reps = s.repeat(5)\n exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))\n assert_series_equal(reps, exp)\n\n to_rep = [2, 3, 4]\n reps = s.repeat(to_rep)\n exp = Series(s.values.repeat(to_rep),\n index=s.index.values.repeat(to_rep))\n assert_series_equal(reps, exp)\n\n def test_unique_data_ownership(self):\n # it works! #1807\n Series(Series([\"a\", \"c\", \"b\"]).unique()).sort_values()\n\n def test_datetime_timedelta_quantiles(self):\n # covers #9694\n self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5)))\n self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5)))\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nFolium\n-------\n\nMake beautiful, interactive maps with Python and Leaflet.js\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport codecs\nimport functools\nimport json\nfrom uuid import uuid4\n\nfrom jinja2 import Environment, PackageLoader\nfrom pkg_resources import resource_string\n\nfrom folium import utilities\nfrom folium.six import text_type, binary_type, iteritems\n\nimport sys\n\n\nENV = Environment(loader=PackageLoader('folium', 'templates'))\n\n\ndef initialize_notebook():\n \"\"\"Initialize the IPython notebook display elements.\"\"\"\n try:\n from IPython.core.display import display, HTML\n except ImportError:\n print(\"IPython Notebook could not be loaded.\")\n\n lib_css = ENV.get_template('ipynb_init_css.html')\n lib_js = ENV.get_template('ipynb_init_js.html')\n leaflet_dvf = ENV.get_template('leaflet-dvf.markers.min.js')\n\n display(HTML(lib_css.render()))\n display(HTML(lib_js.render({'leaflet_dvf': leaflet_dvf.render()})))\n\n\ndef iter_obj(type):\n \"\"\"Decorator to keep count of different map object types in self.mk_cnt.\"\"\"\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n self.mark_cnt[type] = self.mark_cnt.get(type, 0) + 1\n func_result = func(self, *args, **kwargs)\n return func_result\n return wrapper\n return decorator\n\n\nclass Map(object):\n \"\"\"Create a Map with Folium.\"\"\"\n\n def __init__(self, location=None, width='100%', height='100%',\n tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,\n zoom_start=10, attr=None, min_lat=-90, max_lat=90,\n min_lon=-180, max_lon=180):\n \"\"\"Create a Map with Folium and Leaflet.js\n\n Generate a base map of given width and height with either default\n tilesets or a custom tileset URL. The following tilesets are built-in\n to Folium. Pass any of the following to the \"tiles\" keyword:\n - \"OpenStreetMap\"\n - \"MapQuest Open\"\n - \"MapQuest Open Aerial\"\n - \"Mapbox Bright\" (Limited levels of zoom for free tiles)\n - \"Mapbox Control Room\" (Limited levels of zoom for free tiles)\n - \"Stamen\" (Terrain, Toner, and Watercolor)\n - \"Cloudmade\" (Must pass API key)\n - \"Mapbox\" (Must pass API key)\n - \"CartoDB\" (positron and dark_matter)\n You can pass a custom tileset to Folium by passing a Leaflet-style\n URL to the tiles parameter:\n http://{s}.yourtiles.com/{z}/{x}/{y}.png\n\n Parameters\n ----------\n location: tuple or list, default None\n Latitude and Longitude of Map (Northing, Easting).\n width: pixel int or percentage string (default: '100%')\n Width of the map.\n height: pixel int or percentage string (default: '100%')\n Height of the map.\n tiles: str, default 'OpenStreetMap'\n Map tileset to use. 
Can use defaults or pass a custom URL.\n API_key: str, default None\n API key for Cloudmade or Mapbox tiles.\n max_zoom: int, default 18\n Maximum zoom depth for the map.\n zoom_start: int, default 10\n Initial zoom level for the map.\n attr: string, default None\n Map tile attribution; only required if passing custom tile URL.\n\n Returns\n -------\n Folium Map Object\n\n Examples\n --------\n >>>map = folium.Map(location=[45.523, -122.675], width=750, height=500)\n >>>map = folium.Map(location=[45.523, -122.675],\n tiles='Mapbox Control Room')\n >>>map = folium.Map(location=(45.523, -122.675), max_zoom=20,\n tiles='Cloudmade', API_key='YourKey')\n >>>map = folium.Map(location=[45.523, -122.675], zoom_start=2,\n tiles=('http://{s}.tiles.mapbox.com/v3/'\n 'mapbox.control-room/{z}/{x}/{y}.png'),\n attr='Mapbox attribution')\n\n \"\"\"\n\n # Inits.\n self.map_path = None\n self.render_iframe = False\n self.map_type = 'base'\n self.map_id = '_'.join(['folium', uuid4().hex])\n\n # Mark counter, JSON, Plugins.\n self.mark_cnt = {}\n self.json_data = {}\n self.plugins = {}\n\n # No location means we will use automatic bounds and ignore zoom\n self.location = location\n\n # If location is not passed, we center the map at 0,0\n if not location:\n location = [0, 0]\n zoom_start = min_zoom\n\n # Map Size Parameters.\n try:\n if isinstance(width, int):\n width_type = 'px'\n assert width > 0\n else:\n width_type = '%'\n width = int(width.strip('%'))\n assert 0 <= width <= 100\n except:\n msg = \"Cannot parse width {!r} as {!r}\".format\n raise ValueError(msg(width, width_type))\n self.width = width\n\n try:\n if isinstance(height, int):\n height_type = 'px'\n assert height > 0\n else:\n height_type = '%'\n height = int(height.strip('%'))\n assert 0 <= height <= 100\n except:\n msg = \"Cannot parse height {!r} as {!r}\".format\n raise ValueError(msg(height, height_type))\n self.height = height\n\n self.map_size = {'width': width, 'height': height}\n self._size = ('style=\"width: {0}{1}; height: {2}{3}\"'\n .format(width, width_type, height, height_type))\n # Templates.\n self.env = ENV\n self.template_vars = dict(lat=location[0],\n lon=location[1],\n size=self._size,\n max_zoom=max_zoom,\n zoom_level=zoom_start,\n map_id=self.map_id,\n min_zoom=min_zoom,\n min_lat=min_lat,\n max_lat=max_lat,\n min_lon=min_lon,\n max_lon=max_lon)\n\n # Tiles.\n self.tiles = ''.join(tiles.lower().strip().split())\n if self.tiles in ('cloudmade', 'mapbox') and not API_key:\n raise ValueError('You must pass an API key if using Cloudmade'\n ' or non-default Mapbox tiles.')\n\n self.default_tiles = ['openstreetmap', 'mapboxcontrolroom',\n 'mapquestopen', 'mapquestopenaerial',\n 'mapboxbright', 'mapbox', 'cloudmade',\n 'stamenterrain', 'stamentoner',\n 'stamenwatercolor',\n 'cartodbpositron', 'cartodbdark_matter']\n self.tile_types = {}\n for tile in self.default_tiles:\n tile_path = 'tiles/%s' % tile\n self.tile_types[tile] = {\n 'templ': self.env.get_template('%s/%s' % (tile_path,\n 'tiles.txt')),\n 'attr': self.env.get_template('%s/%s' % (tile_path,\n 'attr.txt')),\n }\n\n if self.tiles in self.tile_types:\n self.template_vars['Tiles'] = (self.tile_types[self.tiles]['templ']\n .render(API_key=API_key))\n self.template_vars['attr'] = (self.tile_types[self.tiles]['attr']\n .render())\n else:\n self.template_vars['Tiles'] = tiles\n if not attr:\n raise ValueError('Custom tiles must'\n ' also be passed an attribution')\n if isinstance(attr, binary_type):\n attr = text_type(attr, 'utf8')\n self.template_vars['attr'] = attr\n 
self.tile_types.update({'Custom': {'template': tiles,\n 'attr': attr}})\n\n self.added_layers = []\n self.template_vars.setdefault('wms_layers', [])\n self.template_vars.setdefault('tile_layers', [])\n\n @iter_obj('simple')\n def add_tile_layer(self, tile_name=None, tile_url=None, active=False):\n \"\"\"Adds a simple tile layer.\n\n Parameters\n ----------\n tile_name: string\n name of the tile layer\n tile_url: string\n url location of the tile layer\n active: boolean\n should the layer be active when added\n \"\"\"\n if tile_name not in self.added_layers:\n tile_name = tile_name.replace(\" \", \"_\")\n tile_temp = self.env.get_template('tile_layer.js')\n\n tile = tile_temp.render({'tile_name': tile_name,\n 'tile_url': tile_url})\n\n self.template_vars.setdefault('tile_layers', []).append((tile))\n\n self.added_layers.append({tile_name: tile_url})\n\n @iter_obj('simple')\n def add_wms_layer(self, wms_name=None, wms_url=None, wms_format=None,\n wms_layers=None, wms_transparent=True):\n \"\"\"Adds a simple tile layer.\n\n Parameters\n ----------\n wms_name: string\n name of wms layer\n wms_url : string\n url of wms layer\n \"\"\"\n if wms_name not in self.added_layers:\n wms_name = wms_name.replace(\" \", \"_\")\n wms_temp = self.env.get_template('wms_layer.js')\n\n wms = wms_temp.render({\n 'wms_name': wms_name,\n 'wms_url': wms_url,\n 'wms_format': wms_format,\n 'wms_layer_names': wms_layers,\n 'wms_transparent': str(wms_transparent).lower()})\n self.template_vars.setdefault('wms_layers', []).append((wms))\n self.added_layers.append({wms_name: wms_url})\n\n @iter_obj('simple')\n def add_layers_to_map(self):\n \"\"\"\n Required function to actually add the layers to the HTML packet.\n \"\"\"\n layers_temp = self.env.get_template('add_layers.js')\n\n data_string = ''\n for i, layer in enumerate(self.added_layers):\n name = list(layer.keys())[0]\n data_string += '\\\"'\n data_string += name\n data_string += '\\\"'\n data_string += ': '\n data_string += name\n if i < len(self.added_layers)-1:\n data_string += \",\\n\"\n else:\n data_string += \"\\n\"\n\n data_layers = layers_temp.render({'layers': data_string})\n self.template_vars.setdefault('data_layers', []).append((data_string))\n\n @iter_obj('simple')\n def simple_marker(self, location=None, popup=None,\n marker_color='blue', marker_icon='info-sign',\n clustered_marker=False, icon_angle=0, width=300):\n \"\"\"Create a simple stock Leaflet marker on the map, with optional\n popup text or Vincent visualization.\n\n Parameters\n ----------\n location: tuple or list, default None\n Latitude and Longitude of Marker (Northing, Easting)\n popup: string or tuple, default 'Pop Text'\n Input text or visualization for object. Can pass either text,\n or a tuple of the form (Vincent object, 'vis_path.json')\n It is possible to adjust the width of text/HTML popups\n using the optional keywords `width`. 
(Leaflet default is 300px.)\n marker_color\n color of marker you want\n marker_icon\n icon from (http://getbootstrap.com/components/) you want on the\n marker\n clustered_marker\n boolean of whether or not you want the marker clustered with\n other markers\n\n Returns\n -------\n Marker names and HTML in obj.template_vars\n\n Example\n -------\n >>>map.simple_marker(location=[45.5, -122.3], popup='Portland, OR')\n >>>map.simple_marker(location=[45.5, -122.3], popup=(vis, 'vis.json'))\n\n \"\"\"\n count = self.mark_cnt['simple']\n\n mark_temp = self.env.get_template('simple_marker.js')\n\n marker_num = 'marker_{0}'.format(count)\n add_line = \"{'icon':\"+marker_num+\"_icon}\"\n\n icon_temp = self.env.get_template('simple_icon.js')\n icon = icon_temp.render({'icon': marker_icon,\n 'icon_name': marker_num+\"_icon\",\n 'markerColor': marker_color,\n 'icon_angle': icon_angle})\n\n # Get marker and popup.\n marker = mark_temp.render({'marker': 'marker_' + str(count),\n 'lat': location[0],\n 'lon': location[1],\n 'icon': add_line\n })\n\n popup_out = self._popup_render(popup=popup, mk_name='marker_',\n count=count, width=width)\n if clustered_marker:\n add_mark = 'clusteredmarkers.addLayer(marker_{0})'.format(count)\n name = 'cluster_markers'\n else:\n add_mark = 'map.addLayer(marker_{0})'.format(count)\n name = 'custom_markers'\n append = (icon, marker, popup_out, add_mark)\n self.template_vars.setdefault(name, []).append(append)\n\n @iter_obj('line')\n def line(self, locations,\n line_color=None, line_opacity=None, line_weight=None,\n popup=None, popup_width=300):\n \"\"\"Add a line to the map with optional styles.\n\n Parameters\n ----------\n locations: list of points (latitude, longitude)\n Latitude and Longitude of line (Northing, Easting)\n line_color: string, default Leaflet's default ('#03f')\n line_opacity: float, default Leaflet's default (0.5)\n line_weight: float, default Leaflet's default (5)\n popup: string or tuple, default 'Pop Text'\n Input text or visualization for object. 
Can pass either text,\n or a tuple of the form (Vincent object, 'vis_path.json')\n\n Note: If the optional styles are omitted, they will not be included\n in the HTML output and will obtain the Leaflet defaults listed above.\n\n Example\n -------\n >>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)])\n >>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)],\n line_color='red', line_opacity=1.0)\n\n \"\"\"\n count = self.mark_cnt['line']\n\n line_temp = self.env.get_template('polyline.js')\n\n polyline_opts = {'color': line_color, 'weight': line_weight,\n 'opacity': line_opacity}\n\n varname = 'line_{}'.format(count)\n line_rendered = line_temp.render({'line': varname,\n 'locations': locations,\n 'options': polyline_opts})\n\n popup_out = self._popup_render(popup=popup, mk_name='line_',\n count=count, width=popup_width)\n\n add_line = 'map.addLayer({});'.format(varname)\n append = (line_rendered, popup_out, add_line)\n self.template_vars.setdefault('lines', []).append((append))\n\n @iter_obj('multiline')\n def multiline(self, locations, line_color=None, line_opacity=None,\n line_weight=None):\n \"\"\"Add a multiPolyline to the map with optional styles.\n\n A multiPolyline is single layer that consists of several polylines that\n share styling/popup.\n\n Parameters\n ----------\n locations: list of lists of points (latitude, longitude)\n Latitude and Longitude of line (Northing, Easting)\n line_color: string, default Leaflet's default ('#03f')\n line_opacity: float, default Leaflet's default (0.5)\n line_weight: float, default Leaflet's default (5)\n\n Note: If the optional styles are omitted, they will not be included\n in the HTML output and will obtain the Leaflet defaults listed above.\n\n Example\n -------\n # FIXME: Add another example.\n >>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],\n [(45.5237, -122.675), (45.5237, -122.675)],\n [(45.5238, -122.675), (45.5238, -122.675)]])\n >>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],\n [(45.5237, -122.675), (45.5237, -122.675)],\n [(45.5238, -122.675), (45.5238, -122.675)]],\n line_color='red', line_weight=2,\n line_opacity=1.0)\n \"\"\"\n\n count = self.mark_cnt['multiline']\n\n multiline_temp = self.env.get_template('multi_polyline.js')\n\n multiline_opts = {'color': line_color, 'weight': line_weight,\n 'opacity': line_opacity}\n\n varname = 'multiline_{}'.format(count)\n multiline_rendered = multiline_temp.render({'multiline': varname,\n 'locations': locations,\n 'options': multiline_opts})\n\n add_multiline = 'map.addLayer({});'.format(varname)\n append = (multiline_rendered, add_multiline)\n self.template_vars.setdefault('multilines', []).append(append)\n\n @iter_obj('circle')\n def circle_marker(self, location=None, radius=500, popup=None,\n line_color='black', fill_color='black',\n fill_opacity=0.6):\n \"\"\"Create a simple circle marker on the map, with optional popup text\n or Vincent visualization.\n\n Parameters\n ----------\n location: tuple or list, default None\n Latitude and Longitude of Marker (Northing, Easting)\n radius: int, default 500\n Circle radius, in pixels\n popup: string or tuple, default 'Pop Text'\n Input text or visualization for object. Can pass either text,\n or a tuple of the form (Vincent object, 'vis_path.json')\n line_color: string, default black\n Line color. Can pass hex value here as well.\n fill_color: string, default black\n Fill color. 
Can pass hex value here as well.\n fill_opacity: float, default 0.6\n Circle fill opacity\n\n Returns\n -------\n Circle names and HTML in obj.template_vars\n\n Example\n -------\n >>>map.circle_marker(location=[45.5, -122.3],\n radius=1000, popup='Portland, OR')\n >>>map.circle_marker(location=[45.5, -122.3],\n radius=1000, popup=(bar_chart, 'bar_data.json'))\n\n \"\"\"\n count = self.mark_cnt['circle']\n\n circle_temp = self.env.get_template('circle_marker.js')\n\n circle = circle_temp.render({'circle': 'circle_' + str(count),\n 'radius': radius,\n 'lat': location[0], 'lon': location[1],\n 'line_color': line_color,\n 'fill_color': fill_color,\n 'fill_opacity': fill_opacity})\n\n popup_out = self._popup_render(popup=popup, mk_name='circle_',\n count=count)\n\n add_mark = 'map.addLayer(circle_{0})'.format(count)\n\n self.template_vars.setdefault('markers', []).append((circle,\n popup_out,\n add_mark))\n\n @iter_obj('polygon')\n def polygon_marker(self, location=None, line_color='black', line_opacity=1,\n line_weight=2, fill_color='blue', fill_opacity=1,\n num_sides=4, rotation=0, radius=15, popup=None):\n \"\"\"Custom markers using the Leaflet Data Vis Framework.\n\n\n Parameters\n ----------\n location: tuple or list, default None\n Latitude and Longitude of Marker (Northing, Easting)\n line_color: string, default 'black'\n Marker line color\n line_opacity: float, default 1\n Line opacity, scale 0-1\n line_weight: int, default 2\n Stroke weight in pixels\n fill_color: string, default 'blue'\n Marker fill color\n fill_opacity: float, default 1\n Marker fill opacity\n num_sides: int, default 4\n Number of polygon sides\n rotation: int, default 0\n Rotation angle in degrees\n radius: int, default 15\n Marker radius, in pixels\n popup: string or tuple, default 'Pop Text'\n Input text or visualization for object. Can pass either text,\n or a tuple of the form (Vincent object, 'vis_path.json')\n\n Returns\n -------\n Polygon marker names and HTML in obj.template_vars\n\n \"\"\"\n\n count = self.mark_cnt['polygon']\n\n poly_temp = self.env.get_template('poly_marker.js')\n\n polygon = poly_temp.render({'marker': 'polygon_' + str(count),\n 'lat': location[0],\n 'lon': location[1],\n 'line_color': line_color,\n 'line_opacity': line_opacity,\n 'line_weight': line_weight,\n 'fill_color': fill_color,\n 'fill_opacity': fill_opacity,\n 'num_sides': num_sides,\n 'rotation': rotation,\n 'radius': radius})\n\n popup_out = self._popup_render(popup=popup, mk_name='polygon_',\n count=count)\n\n add_mark = 'map.addLayer(polygon_{0})'.format(count)\n\n self.template_vars.setdefault('markers', []).append((polygon,\n popup_out,\n add_mark))\n # Update JS/CSS and other Plugin files.\n js_temp = self.env.get_template('dvf_js_ref.txt').render()\n self.template_vars.update({'dvf_js': js_temp})\n\n polygon_js = resource_string('folium',\n 'plugins/leaflet-dvf.markers.min.js')\n\n self.plugins.update({'leaflet-dvf.markers.min.js': polygon_js})\n\n def lat_lng_popover(self):\n \"\"\"Enable popovers to display Lat and Lon on each click.\"\"\"\n\n latlng_temp = self.env.get_template('lat_lng_popover.js')\n self.template_vars.update({'lat_lng_pop': latlng_temp.render()})\n\n def click_for_marker(self, popup=None):\n \"\"\"Enable the addition of markers via clicking on the map. The marker\n popup defaults to Lat/Lon, but custom text can be passed via the\n popup parameter. 
Double click markers to remove them.\n\n Parameters\n ----------\n popup:\n Custom popup text\n\n Example\n -------\n >>>map.click_for_marker(popup='Your Custom Text')\n\n \"\"\"\n latlng = '\"Latitude: \" + lat + \"<br>Longitude: \" + lng '\n click_temp = self.env.get_template('click_for_marker.js')\n if popup:\n popup_txt = ''.join(['\"', popup, '\"'])\n else:\n popup_txt = latlng\n click_str = click_temp.render({'popup': popup_txt})\n self.template_vars.update({'click_pop': click_str})\n\n def fit_bounds(self, bounds, padding_top_left=None,\n padding_bottom_right=None, padding=None, max_zoom=None):\n \"\"\"Fit the map to contain a bounding box with the maximum zoom level possible.\n\n Parameters\n ----------\n bounds: list of (latitude, longitude) points\n Bounding box specified as two points [southwest, northeast]\n padding_top_left: (x, y) point, default None\n Padding in the top left corner. Useful if some elements in\n the corner, such as controls, might obscure objects you're zooming\n to.\n padding_bottom_right: (x, y) point, default None\n Padding in the bottom right corner.\n padding: (x, y) point, default None\n Equivalent to setting both top left and bottom right padding to\n the same value.\n max_zoom: int, default None\n Maximum zoom to be used.\n\n Example\n -------\n >>> map.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])\n\n \"\"\"\n options = {\n 'paddingTopLeft': padding_top_left,\n 'paddingBottomRight': padding_bottom_right,\n 'padding': padding,\n 'maxZoom': max_zoom,\n }\n fit_bounds_options = {}\n for key, opt in options.items():\n if opt:\n fit_bounds_options[key] = opt\n fit_bounds = self.env.get_template('fit_bounds.js')\n fit_bounds_str = fit_bounds.render({\n 'bounds': json.dumps(bounds),\n 'fit_bounds_options': json.dumps(fit_bounds_options),\n })\n\n self.template_vars.update({'fit_bounds': fit_bounds_str})\n\n\n def _auto_bounds(self):\n if 'fit_bounds' in self.template_vars:\n return\n # Get count for each feature type\n ft_names = [\"marker\", \"line\", \"circle\", \"polygon\", \"multiline\"]\n ft_names = [i for i in ft_names if i in self.mark_cnt]\n\n # Make a comprehensive list of all the features we want to fit\n feat_str = [\"{name}_{count}\".format(name=ft_name,\n count=self.mark_cnt[ft_name])\n for ft_name in ft_names\n for count in range(1, self.mark_cnt[ft_name]+1)\n ]\n feat_str = \"[\" + ', '.join(feat_str) + \"]\"\n\n fit_bounds = self.env.get_template('fit_bounds.js')\n fit_bounds_str = fit_bounds.render({\n 'autobounds': not self.location,\n 'features': feat_str,\n 'fit_bounds_options': json.dumps({'padding' : [30, 30]}),\n })\n\n self.template_vars.update({'fit_bounds': fit_bounds_str.strip()})\n\n\n def _popup_render(self, popup=None, mk_name=None, count=None,\n width=300):\n \"\"\"Popup renderer: either text or Vincent/Vega.\n\n Parameters\n ----------\n popup: str or Vincent tuple, default None\n String for text popup, or tuple of (Vincent object, json_path)\n mk_name: str, default None\n Type of marker. 
Simple, Circle, etc.\n count: int, default None\n Count of marker\n \"\"\"\n if not popup:\n return ''\n else:\n if sys.version_info >= (3, 0):\n utype, stype = str, bytes\n else:\n utype, stype = unicode, str\n\n if isinstance(popup, (utype, stype)):\n popup_temp = self.env.get_template('simple_popup.js')\n if isinstance(popup, utype):\n popup_txt = popup.encode('ascii', 'xmlcharrefreplace')\n else:\n popup_txt = popup\n if sys.version_info >= (3, 0):\n popup_txt = popup_txt.decode()\n pop_txt = json.dumps(str(popup_txt))\n return popup_temp.render({'pop_name': mk_name + str(count),\n 'pop_txt': pop_txt, 'width': width})\n elif isinstance(popup, tuple):\n # Update template with JS libs.\n vega_temp = self.env.get_template('vega_ref.txt').render()\n jquery_temp = self.env.get_template('jquery_ref.txt').render()\n d3_temp = self.env.get_template('d3_ref.txt').render()\n vega_parse = self.env.get_template('vega_parse.js').render()\n self.template_vars.update({'vega': vega_temp,\n 'd3': d3_temp,\n 'jquery': jquery_temp,\n 'vega_parse': vega_parse})\n\n # Parameters for Vega template.\n vega = popup[0]\n mark = ''.join([mk_name, str(count)])\n json_out = popup[1]\n div_id = popup[1].split('.')[0]\n width = vega.width\n height = vega.height\n if isinstance(vega.padding, dict):\n width += vega.padding['left']+vega.padding['right']\n height += vega.padding['top']+vega.padding['bottom']\n else:\n width += 75\n height += 50\n max_width = self.map_size['width']\n vega_id = '#' + div_id\n popup_temp = self.env.get_template('vega_marker.js')\n return popup_temp.render({'mark': mark, 'div_id': div_id,\n 'width': width, 'height': height,\n 'max_width': max_width,\n 'json_out': json_out,\n 'vega_id': vega_id})\n else:\n raise TypeError(\"Unrecognized popup type: {!r}\".format(popup))\n\n @iter_obj('geojson')\n def geo_json(self, geo_path=None, geo_str=None, data_out='data.json',\n data=None, columns=None, key_on=None, threshold_scale=None,\n fill_color='blue', fill_opacity=0.6, line_color='black',\n line_weight=1, line_opacity=1, legend_name=None,\n topojson=None, reset=False):\n \"\"\"Apply a GeoJSON overlay to the map.\n\n Plot a GeoJSON overlay on the base map. There is no requirement\n to bind data (passing just a GeoJSON plots a single-color overlay),\n but there is a data binding option to map your columnar data to\n different feature objects with a color scale.\n\n If data is passed as a Pandas dataframe, the \"columns\" and \"key-on\"\n keywords must be included, the first to indicate which DataFrame\n columns to use, the second to indicate the layer in the GeoJSON\n on which to key the data. The 'columns' keyword does not need to be\n passed for a Pandas series.\n\n Colors are generated from color brewer (http://colorbrewer2.org/)\n sequential palettes on a D3 threshold scale. The scale defaults to the\n following quantiles: [0, 0.5, 0.75, 0.85, 0.9]. 
A custom scale can be\n passed to `threshold_scale` of length <=6, in order to match the\n color brewer range.\n\n TopoJSONs can be passed as \"geo_path\", but the \"topojson\" keyword must\n also be passed with the reference to the topojson objects to convert.\n See the topojson.feature method in the TopoJSON API reference:\n https://github.com/mbostock/topojson/wiki/API-Reference\n\n\n Parameters\n ----------\n geo_path: string, default None\n URL or File path to your GeoJSON data\n geo_str: string, default None\n String of GeoJSON, alternative to geo_path\n data_out: string, default 'data.json'\n Path to write Pandas DataFrame/Series to JSON if binding data\n data: Pandas DataFrame or Series, default None\n Data to bind to the GeoJSON.\n columns: dict or tuple, default None\n If the data is a Pandas DataFrame, the columns of data to be bound.\n Must pass column 1 as the key, and column 2 the values.\n key_on: string, default None\n Variable in the GeoJSON file to bind the data to. Must always\n start with 'feature' and be in JavaScript objection notation.\n Ex: 'feature.id' or 'feature.properties.statename'.\n threshold_scale: list, default None\n Data range for D3 threshold scale. Defaults to the following range\n of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest\n order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.\n fill_color: string, default 'blue'\n Area fill color. Can pass a hex code, color name, or if you are\n binding data, one of the following color brewer palettes:\n 'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',\n 'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.\n fill_opacity: float, default 0.6\n Area fill opacity, range 0-1.\n line_color: string, default 'black'\n GeoJSON geopath line color.\n line_weight: int, default 1\n GeoJSON geopath line weight.\n line_opacity: float, default 1\n GeoJSON geopath line opacity, range 0-1.\n legend_name: string, default None\n Title for data legend. 
If not passed, defaults to columns[1].\n topojson: string, default None\n If using a TopoJSON, passing \"objects.yourfeature\" to the topojson\n keyword argument will enable conversion to GeoJSON.\n reset: boolean, default False\n Remove all current geoJSON layers, start with new layer\n\n Output\n ------\n GeoJSON data layer in obj.template_vars\n\n Example\n -------\n >>> m.geo_json(geo_path='us-states.json', line_color='blue',\n line_weight=3)\n >>> m.geo_json(geo_path='geo.json', data=df,\n columns=['Data 1', 'Data 2'],\n key_on='feature.properties.myvalue', fill_color='PuBu',\n threshold_scale=[0, 20, 30, 40, 50, 60])\n >>> m.geo_json(geo_path='countries.json', topojson='objects.countries')\n \"\"\"\n\n if reset:\n reset_vars = ['json_paths', 'func_vars', 'color_scales',\n 'geo_styles', 'gjson_layers', 'map_legends',\n 'topo_convert']\n for var in reset_vars:\n self.template_vars.update({var: []})\n self.mark_cnt['geojson'] = 1\n\n def json_style(style_cnt, line_color, line_weight, line_opacity,\n fill_color, fill_opacity, quant_fill):\n \"\"\"Generate JSON styling function from template\"\"\"\n style_temp = self.env.get_template('geojson_style.js')\n style = style_temp.render({'style': style_cnt,\n 'line_color': line_color,\n 'line_weight': line_weight,\n 'line_opacity': line_opacity,\n 'fill_color': fill_color,\n 'fill_opacity': fill_opacity,\n 'quantize_fill': quant_fill})\n return style\n\n # Set map type to geojson.\n self.map_type = 'geojson'\n\n # Get JSON map layer template pieces, convert TopoJSON if necessary.\n # geo_str is really a hack.\n if geo_path:\n geo_path = \".defer(d3.json, '{0}')\".format(geo_path)\n elif geo_str:\n fmt = (\".defer(function(callback)\"\n \"{{callback(null, JSON.parse('{}'))}})\").format\n geo_path = fmt(geo_str)\n if topojson is None:\n map_var = '_'.join(['gjson', str(self.mark_cnt['geojson'])])\n layer_var = map_var\n else:\n map_var = '_'.join(['tjson', str(self.mark_cnt['geojson'])])\n topo_obj = '.'.join([map_var, topojson])\n layer_var = '_'.join(['topo', str(self.mark_cnt['geojson'])])\n topo_templ = self.env.get_template('topo_func.js')\n topo_func = topo_templ.render({'map_var': layer_var,\n 't_var': map_var,\n 't_var_obj': topo_obj})\n topo_lib = self.env.get_template('topojson_ref.txt').render()\n self.template_vars.update({'topojson': topo_lib})\n self.template_vars.setdefault('topo_convert',\n []).append(topo_func)\n\n style_count = '_'.join(['style', str(self.mark_cnt['geojson'])])\n\n # Get Data binding pieces if available.\n if data is not None:\n\n import pandas as pd\n\n # Create DataFrame with only the relevant columns.\n if isinstance(data, pd.DataFrame):\n data = pd.concat([data[columns[0]], data[columns[1]]], axis=1)\n\n # Save data to JSON.\n self.json_data[data_out] = utilities.transform_data(data)\n\n # Add data to queue.\n d_path = \".defer(d3.json, '{0}')\".format(data_out)\n self.template_vars.setdefault('json_paths', []).append(d_path)\n\n # Add data variable to makeMap function.\n data_var = '_'.join(['data', str(self.mark_cnt['geojson'])])\n self.template_vars.setdefault('func_vars', []).append(data_var)\n\n # D3 Color scale.\n series = data[columns[1]]\n if threshold_scale and len(threshold_scale) > 6:\n raise ValueError\n domain = threshold_scale or utilities.split_six(series=series)\n if len(domain) > 253:\n raise ValueError('The threshold scale must be length <= 253')\n if not utilities.color_brewer(fill_color):\n raise ValueError('Please pass a valid color brewer code to '\n 'fill_local. 
See docstring for valid codes.')\n\n palette = utilities.color_brewer(fill_color, len(domain))\n d3range = palette[0: len(domain) + 1]\n tick_labels = utilities.legend_scaler(domain)\n\n color_temp = self.env.get_template('d3_threshold.js')\n d3scale = color_temp.render({'domain': domain,\n 'range': d3range})\n self.template_vars.setdefault('color_scales', []).append(d3scale)\n\n # Create legend.\n name = legend_name or columns[1]\n leg_templ = self.env.get_template('d3_map_legend.js')\n legend = leg_templ.render({'lin_max': int(domain[-1]*1.1),\n 'tick_labels': tick_labels,\n 'caption': name})\n self.template_vars.setdefault('map_legends', []).append(legend)\n\n # Style with color brewer colors.\n matchColor = 'color(matchKey({0}, {1}))'.format(key_on, data_var)\n style = json_style(style_count, line_color, line_weight,\n line_opacity, None, fill_opacity, matchColor)\n else:\n style = json_style(style_count, line_color, line_weight,\n line_opacity, fill_color, fill_opacity, None)\n\n layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'\n 'onEachFeature: onEachFeature}}).addTo(map)'\n .format(self.mark_cnt['geojson'], layer_var, style_count))\n\n self.template_vars.setdefault('json_paths', []).append(geo_path)\n self.template_vars.setdefault('func_vars', []).append(map_var)\n self.template_vars.setdefault('geo_styles', []).append(style)\n self.template_vars.setdefault('gjson_layers', []).append(layer)\n\n def _build_map(self, html_templ=None, templ_type='string'):\n self._auto_bounds()\n \"\"\"Build HTML/JS/CSS from Templates given current map type.\"\"\"\n if html_templ is None:\n map_types = {'base': 'fol_template.html',\n 'geojson': 'geojson_template.html'}\n\n # Check current map type.\n type_temp = map_types[self.map_type]\n\n html_templ = self.env.get_template(type_temp)\n else:\n if templ_type == 'string':\n html_templ = self.env.from_string(html_templ)\n\n self.HTML = html_templ.render(self.template_vars)\n\n def create_map(self, path='map.html', plugin_data_out=True, template=None):\n \"\"\"Write Map output to HTML and data output to JSON if available.\n\n Parameters:\n -----------\n path: string, default 'map.html'\n Path for HTML output for map\n plugin_data_out: boolean, default True\n If using plugins such as awesome markers, write all plugin\n data such as JS/CSS/images to path\n template: string, default None\n Custom template to render\n\n \"\"\"\n self.map_path = path\n self._build_map(template)\n\n with codecs.open(path, 'w', 'utf8') as f:\n f.write(self.HTML)\n\n if self.json_data:\n for path, data in iteritems(self.json_data):\n with open(path, 'w') as g:\n json.dump(data, g)\n\n if self.plugins and plugin_data_out:\n for name, plugin in iteritems(self.plugins):\n with open(name, 'w') as f:\n if isinstance(plugin, binary_type):\n plugin = text_type(plugin, 'utf8')\n f.write(plugin)\n\n def _repr_html_(self):\n \"\"\"Build the HTML representation for IPython.\"\"\"\n map_types = {'base': 'ipynb_repr.html',\n 'geojson': 'ipynb_iframe.html'}\n\n # Check current map type.\n type_temp = map_types[self.map_type]\n if self.render_iframe:\n type_temp = 'ipynb_iframe.html'\n templ = self.env.get_template(type_temp)\n self._build_map(html_templ=templ, templ_type='temp')\n if self.map_type == 'geojson' or self.render_iframe:\n if not self.map_path:\n raise ValueError('Use create_map to set the path!')\n return templ.render(path=self.map_path, width=self.width,\n height=self.height)\n return self.HTML\n\n def display(self):\n \"\"\"Display the visualization inline in the 
IPython notebook.\n\n This is deprecated, use the following instead::\n\n from IPython.display import display\n display(viz)\n \"\"\"\n from IPython.core.display import display, HTML\n display(HTML(self._repr_html_()))\n",
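The geo_json/create_map pair in the entry above is easiest to see end to end with a short usage sketch. This is illustrative only: the map instance `m`, the 'us-states.json' file, and the DataFrame `df` are assumed to exist and are not defined in the code above; parameter names follow the geo_json docstring.

    # Hedged usage sketch for the layer-binding and output methods above.
    # Assumptions: `m` is an instance of the map class these methods belong to,
    # 'us-states.json' is a local GeoJSON file, and `df` is a pandas DataFrame
    # with 'State' and 'Unemployment' columns.
    m.geo_json(geo_path='us-states.json', data=df,
               columns=['State', 'Unemployment'],
               key_on='feature.id', fill_color='YlGn',
               fill_opacity=0.7, line_opacity=0.3,
               threshold_scale=[0, 2, 4, 6, 8, 10])
    # Writes the HTML output plus the bound data (default 'data.json').
    m.create_map(path='us_states.html')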
"\"\"\"\n Tests for the pandas.io.common functionalities\n\"\"\"\nfrom pandas.compat import StringIO\nimport os\nfrom os.path import isabs\n\nimport pandas.util.testing as tm\n\nfrom pandas.io import common\n\n\nclass TestCommonIOCapabilities(tm.TestCase):\n\n def test_expand_user(self):\n filename = '~/sometest'\n expanded_name = common._expand_user(filename)\n\n self.assertNotEqual(expanded_name, filename)\n self.assertTrue(isabs(expanded_name))\n self.assertEqual(os.path.expanduser(filename), expanded_name)\n\n def test_expand_user_normal_path(self):\n filename = '/somefolder/sometest'\n expanded_name = common._expand_user(filename)\n\n self.assertEqual(expanded_name, filename)\n self.assertEqual(os.path.expanduser(filename), expanded_name)\n\n def test_get_filepath_or_buffer_with_path(self):\n filename = '~/sometest'\n filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)\n self.assertNotEqual(filepath_or_buffer, filename)\n self.assertTrue(isabs(filepath_or_buffer))\n self.assertEqual(os.path.expanduser(filename), filepath_or_buffer)\n\n def test_get_filepath_or_buffer_with_buffer(self):\n input_buffer = StringIO()\n filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer)\n self.assertEqual(filepath_or_buffer, input_buffer)\n",
"\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import division\n# pylint: disable=E1101,E1103\n# pylint: disable=W0212,W0231,W0703,W0622\n\nimport functools\nimport collections\nimport itertools\nimport sys\nimport types\nimport warnings\n\nfrom numpy import nan as NA\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas.core.common import (isnull, notnull, PandasError, _try_sort, _not_none,\n _default_index, _maybe_upcast, is_sequence,\n _infer_dtype_from_scalar, _values_from_object,\n is_list_like, _maybe_box_datetimelike,\n is_categorical_dtype, is_object_dtype,\n is_internal_type, is_datetimetz,\n _possibly_infer_to_datetimelike, _dict_compat)\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.index import Index, MultiIndex, _ensure_index\nfrom pandas.core.indexing import (maybe_droplevels,\n convert_to_index_sliceable,\n check_bool_indexer)\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.categorical import Categorical\nimport pandas.computation.expressions as expressions\nfrom pandas.computation.eval import eval as _eval\nfrom numpy import percentile as _quantile\nfrom pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,\n OrderedDict, raise_with_traceback)\nfrom pandas import compat\nfrom pandas.sparse.array import SparseArray\nfrom pandas.util.decorators import (cache_readonly, deprecate, Appender,\n Substitution, deprecate_kwarg)\n\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tseries.tdi import TimedeltaIndex\n\n\nimport pandas.core.algorithms as algos\nimport pandas.core.base as base\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.nanops as nanops\nimport pandas.core.ops as ops\n\nimport pandas.lib as lib\nimport pandas.algos as _algos\n\nfrom pandas.core.config import get_option\n\n#----------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',\n axes_single_arg=\"{0, 1, 'index', 'columns'}\")\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame objects by performing a database-style join operation by\ncolumns or indexes.\n\nIf joining columns on columns, the DataFrame indexes *will be\nignored*. Otherwise if joining indexes on indexes or indexes on a column or\ncolumns, the index will be passed on.\n\nParameters\n----------%s\nright : DataFrame\nhow : {'left', 'right', 'outer', 'inner'}, default 'inner'\n * left: use only keys from left frame (SQL: left outer join)\n * right: use only keys from right frame (SQL: right outer join)\n * outer: use union of keys from both frames (SQL: full outer join)\n * inner: use intersection of keys from both frames (SQL: inner join)\non : label or list\n Field names to join on. Must be found in both DataFrames. 
If on is\n None and not merging on indexes, then it merges on the intersection of\n the columns by default.\nleft_on : label or list, or array-like\n Field names to join on in left DataFrame. Can be a vector or list of\n vectors of the length of the DataFrame to use a particular vector as\n the join key instead of columns\nright_on : label or list, or array-like\n Field names to join on in right DataFrame or vector/list of vectors per\n left_on docs\nleft_index : boolean, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels\nright_index : boolean, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index\nsort : boolean, default False\n Sort the join keys lexicographically in the result DataFrame\nsuffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and right\n side, respectively\ncopy : boolean, default True\n If False, do not copy data unnecessarily\nindicator : boolean or string, default False\n If True, adds a column to output DataFrame called \"_merge\" with\n information on the source of each row.\n If string, column with information on source of each row will be added to\n output DataFrame, and column will be named value of string.\n Information column is Categorical-type and takes on a value of \"left_only\"\n for observations whose merge key only appears in 'left' DataFrame,\n \"right_only\" for observations whose merge key only appears in 'right'\n DataFrame, and \"both\" if the observation's merge key is found in both.\n\n .. versionadded:: 0.17.0\n\nExamples\n--------\n\n>>> A >>> B\n lkey value rkey value\n0 foo 1 0 foo 5\n1 bar 2 1 bar 6\n2 baz 3 2 qux 7\n3 foo 4 3 bar 8\n\n>>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 4 foo 5\n2 bar 2 bar 6\n3 bar 2 bar 8\n4 baz 3 NaN NaN\n5 NaN NaN qux 7\n\nReturns\n-------\nmerged : DataFrame\n The output type will the be same as 'left', if it is a subclass\n of DataFrame.\n\"\"\"\n\n#----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame):\n\n \"\"\" Two-dimensional size-mutable, potentially heterogeneous tabular data\n structure with labeled axes (rows and columns). Arithmetic operations\n align on both row and column labels. Can be thought of as a dict-like\n container for Series objects. The primary pandas data structure\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, or DataFrame\n Dict can contain Series, arrays, constants, or list-like objects\n index : Index or array-like\n Index to use for resulting frame. Will default to np.arange(n) if\n no indexing information part of input data and no index provided\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n np.arange(n) if no column labels are provided\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n >>> d = {'col1': ts1, 'col2': ts2}\n >>> df = DataFrame(data=d, index=index)\n >>> df2 = DataFrame(np.random.randn(10, 5))\n >>> df3 = DataFrame(np.random.randn(10, 5),\n ... 
columns=['a', 'b', 'c', 'd', 'e'])\n\n See also\n --------\n DataFrame.from_records : constructor from tuples, also record arrays\n DataFrame.from_dict : from dicts of Series, arrays, or dicts\n DataFrame.from_items : from sequence of (key, value) pairs\n pandas.read_csv, pandas.read_table, pandas.read_clipboard\n \"\"\"\n\n @property\n def _constructor(self):\n return DataFrame\n\n _constructor_sliced = Series\n\n @property\n def _constructor_expanddim(self):\n from pandas.core.panel import Panel\n return Panel\n\n def __init__(self, data=None, index=None, columns=None, dtype=None,\n copy=False):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._data\n\n if isinstance(data, BlockManager):\n mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif isinstance(data, dict):\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,\n copy)\n\n # a masked array\n else:\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = _maybe_upcast(data, copy=True)\n data[mask] = fill_value\n else:\n data = data.copy()\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = dict((k, data[k]) for k in data_columns)\n if columns is None:\n columns = data_columns\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, 'name', None):\n mgr = self._init_dict({data.name: data}, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n elif isinstance(data, (list, types.GeneratorType)):\n if isinstance(data, types.GeneratorType):\n data = list(data)\n if len(data) > 0:\n if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:\n arrays, columns = _to_arrays(data, columns, dtype=dtype)\n columns = _ensure_index(columns)\n\n # set the index\n if index is None:\n if isinstance(data[0], Series):\n index = _get_names_from_index(data)\n elif isinstance(data[0], Categorical):\n index = _default_index(len(data[0]))\n else:\n index = _default_index(len(data))\n\n mgr = _arrays_to_mgr(arrays, columns, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n else:\n mgr = self._init_dict({}, index, columns, dtype=dtype)\n elif isinstance(data, collections.Iterator):\n raise TypeError(\"data argument can't be an iterator\")\n else:\n try:\n arr = np.array(data, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as e:\n exc = TypeError('DataFrame constructor called with '\n 'incompatible data and dtype: %s' % e)\n raise_with_traceback(exc)\n\n if arr.ndim == 0 and index is not None and columns is not None:\n if isinstance(data, compat.string_types) and dtype is None:\n dtype = np.object_\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n\n values = np.empty((len(index), len(columns)), dtype=dtype)\n values.fill(data)\n mgr = self._init_ndarray(values, index, columns, dtype=dtype,\n copy=False)\n else:\n raise PandasError('DataFrame constructor not properly called!')\n\n NDFrame.__init__(self, mgr, fastpath=True)\n\n def _init_dict(self, data, index, columns, 
dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a lot of exceptional cases.\n \"\"\"\n if columns is not None:\n columns = _ensure_index(columns)\n\n # GH10856\n # raise ValueError if only scalars in dict\n if index is None:\n extract_index(list(data.values()))\n\n # prefilter if columns passed\n data = dict((k, v) for k, v in compat.iteritems(data)\n if k in columns)\n\n if index is None:\n index = extract_index(list(data.values()))\n\n else:\n index = _ensure_index(index)\n\n arrays = []\n data_names = []\n for k in columns:\n if k not in data:\n # no obvious \"empty\" int column\n if dtype is not None and issubclass(dtype.type,\n np.integer):\n continue\n\n if dtype is None:\n # 1783\n v = np.empty(len(index), dtype=object)\n elif np.issubdtype(dtype, np.flexible):\n v = np.empty(len(index), dtype=object)\n else:\n v = np.empty(len(index), dtype=dtype)\n\n v.fill(NA)\n else:\n v = data[k]\n data_names.append(k)\n arrays.append(v)\n\n else:\n keys = list(data.keys())\n if not isinstance(data, OrderedDict):\n keys = _try_sort(keys)\n columns = data_names = Index(keys)\n arrays = [data[k] for k in keys]\n\n return _arrays_to_mgr(arrays, data_names, index, columns,\n dtype=dtype)\n\n def _init_ndarray(self, values, index, columns, dtype=None,\n copy=False):\n # input must be a ndarray, list, Series, index\n\n if isinstance(values, Series):\n if columns is None:\n if values.name is not None:\n columns = [values.name]\n if index is None:\n index = values.index\n else:\n values = values.reindex(index)\n\n # zero len case (GH #2234)\n if not len(values) and columns is not None and len(columns):\n values = np.empty((0, 1), dtype=object)\n\n # helper to create the axes as indexes\n def _get_axes(N, K, index=index, columns=columns):\n # return axes or defaults\n\n if index is None:\n index = _default_index(N)\n else:\n index = _ensure_index(index)\n\n if columns is None:\n columns = _default_index(K)\n else:\n columns = _ensure_index(columns)\n return index, columns\n\n # we could have a categorical type passed or coerced to 'category'\n # recast this to an _arrays_to_mgr\n if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype):\n\n if not hasattr(values,'dtype'):\n values = _prep_ndarray(values, copy=copy)\n values = values.ravel()\n elif copy:\n values = values.copy()\n\n index, columns = _get_axes(len(values),1)\n return _arrays_to_mgr([ values ], columns, index, columns,\n dtype=dtype)\n elif is_datetimetz(values):\n return self._init_dict({ 0 : values }, index, columns,\n dtype=dtype)\n\n # by definition an array here\n # the dtypes will be coerced to a single dtype\n values = _prep_ndarray(values, copy=copy)\n\n if dtype is not None:\n\n if values.dtype != dtype:\n try:\n values = values.astype(dtype)\n except Exception as orig:\n e = ValueError(\"failed to cast to '%s' (Exception was: %s)\"\n % (dtype, orig))\n raise_with_traceback(e)\n\n index, columns = _get_axes(*values.shape)\n values = values.T\n\n # if we don't have a dtype specified, then try to convert objects\n # on the entire block; this is to convert if we have datetimelike's\n # embedded in an object type\n if dtype is None and is_object_dtype(values):\n values = _possibly_infer_to_datetimelike(values)\n\n return create_block_manager_from_blocks([values], [columns, index])\n\n @property\n def axes(self):\n \"\"\"\n Return a list with the row axis labels and column axis labels as the\n only members. 
They are returned in that order.\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n \"\"\"\n return (len(self.index), len(self.columns))\n\n def _repr_fits_vertical_(self):\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width=False):\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns. In case off non-interactive session, no\n boundaries apply.\n\n ignore_width is here so ipnb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n\n width, height = fmt.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if ((max_columns and nb_columns > max_columns) or\n ((not ignore_width) and width and nb_columns > (width // 2))):\n return False\n\n if (ignore_width # used by repr_html under IPython notebook\n # scripts ignore terminal dims\n or not com.in_interactive_session()):\n return True\n\n if (get_option('display.width') is not None or\n com.in_ipython_frontend()):\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actualy checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[:min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max([len(l) for l in value.split('\\n')])\n\n return repr_width < width\n\n def _info_repr(self):\n \"\"\"True if the repr should show the info view.\"\"\"\n info_repr_option = (get_option(\"display.large_repr\") == \"info\")\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular DataFrame\n\n Invoked by unicode(df) in py2 only. 
Yields a Unicode String in both\n py2/py3.\n \"\"\"\n buf = StringIO(u(\"\"))\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = fmt.get_console_size()\n else:\n width = None\n self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,\n line_width=width, show_dimensions=show_dimensions)\n\n return buf.getvalue()\n\n def _repr_html_(self):\n \"\"\"\n Return a html representation for a particular DataFrame.\n Mainly for IPython notebook.\n \"\"\"\n # qtconsole doesn't report its line width, and also\n # behaves badly when outputting an HTML table\n # that doesn't fit the window, so disable it.\n # XXX: In IPython 3.x and above, the Qt console will not attempt to\n # display HTML, so this check can be removed when support for IPython 2.x\n # is no longer needed.\n if com.in_qtconsole():\n # 'HTML output is disabled in QtConsole'\n return None\n\n if self._info_repr():\n buf = StringIO(u(\"\"))\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace('<', r'<', 1).replace('>',\n r'>', 1)\n return '<pre>' + val + '</pre>'\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n return self.to_html(max_rows=max_rows, max_cols=max_cols,\n show_dimensions=show_dimensions,\n notebook=True)\n else:\n return None\n\n def iteritems(self):\n \"\"\"\n Iterator over (column name, Series) pairs.\n\n See also\n --------\n iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.\n itertuples : Iterate over the rows of a DataFrame as tuples of the values.\n\n \"\"\"\n if self.columns.is_unique and hasattr(self, '_item_cache'):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self._ixs(i,axis=1)\n\n def iterrows(self):\n \"\"\"\n Iterate over the rows of a DataFrame as (index, Series) pairs.\n\n Notes\n -----\n\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). For example,\n\n >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns tuples of the values\n and which is generally faster as ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. 
Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n\n Returns\n -------\n it : generator\n A generator that iterates over the rows of the frame.\n\n See also\n --------\n itertuples : Iterate over the rows of a DataFrame as tuples of the values.\n iteritems : Iterate over (column name, Series) pairs.\n\n \"\"\"\n columns = self.columns\n for k, v in zip(self.index, self.values):\n s = Series(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index=True):\n \"\"\"\n Iterate over the rows of DataFrame as tuples, with index value\n as first element of the tuple.\n\n Parameters\n ----------\n index : boolean, default True\n If True, return the index as the first element of the tuple.\n\n See also\n --------\n iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.\n iteritems : Iterate over (column name, Series) pairs.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b'])\n >>> df\n col1 col2\n a 1 0.1\n b 2 0.2\n >>> for row in df.itertuples():\n ... print(row)\n ('a', 1, 0.10000000000000001)\n ('b', 2, 0.20000000000000001)\n\n \"\"\"\n arrays = []\n if index:\n arrays.append(self.index)\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n return zip(*arrays)\n\n if compat.PY3: # pragma: no cover\n items = iteritems\n\n def __len__(self):\n \"\"\"Returns length of info axis, but here we use the index \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Matrix multiplication with DataFrame or Series objects\n\n Parameters\n ----------\n other : DataFrame or Series\n\n Returns\n -------\n dot_product : DataFrame or Series\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if (len(common) > len(self.columns) or\n len(common) > len(other.index)):\n raise ValueError('matrices are not aligned')\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError('Dot product shape mismatch, %s vs %s' %\n (lvals.shape, rvals.shape))\n\n if isinstance(other, DataFrame):\n return self._constructor(np.dot(lvals, rvals),\n index=left.index,\n columns=other.columns)\n elif isinstance(other, Series):\n return Series(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, (np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return Series(result, index=left.index)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n #----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient='columns', dtype=None):\n \"\"\"\n Construct DataFrame from dict of array-like or dicts\n\n Parameters\n ----------\n data : dict\n {field : array-like} or {field : dict}\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). 
Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer\n\n Returns\n -------\n DataFrame\n \"\"\"\n index, columns = None, None\n orient = orient.lower()\n if orient == 'index':\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient != 'columns': # pragma: no cover\n raise ValueError('only recognize index or columns for orient')\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n @deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient')\n def to_dict(self, orient='dict'):\n \"\"\"Convert DataFrame to dictionary.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - dict (default) : dict like {column -> {index -> value}}\n - list : dict like {column -> [values]}\n - series : dict like {column -> Series(values)}\n - split : dict like\n {index -> [index], columns -> [columns], data -> [values]}\n - records : list like\n [{column -> value}, ... , {column -> value}]\n - index : dict like {index -> {column -> value}}\n\n .. versionadded:: 0.17.0\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n Returns\n -------\n result : dict like {column -> {index -> value}}\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\"DataFrame columns are not unique, some \"\n \"columns will be omitted.\", UserWarning)\n if orient.lower().startswith('d'):\n return dict((k, v.to_dict()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('l'):\n return dict((k, v.tolist()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('sp'):\n return {'index': self.index.tolist(),\n 'columns': self.columns.tolist(),\n 'data': self.values.tolist()}\n elif orient.lower().startswith('s'):\n return dict((k, v) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('r'):\n return [dict((k, v) for k, v in zip(self.columns, row))\n for row in self.values]\n elif orient.lower().startswith('i'):\n return dict((k, v.to_dict()) for k, v in self.iterrows())\n else:\n raise ValueError(\"orient '%s' not understood\" % orient)\n\n def to_gbq(self, destination_table, project_id, chunksize=10000,\n verbose=True, reauth=False, if_exists='fail'):\n \"\"\"Write a DataFrame to a Google BigQuery table.\n\n THIS IS AN EXPERIMENTAL LIBRARY\n\n Parameters\n ----------\n dataframe : DataFrame\n DataFrame to be written\n destination_table : string\n Name of table to be written, in the form 'dataset.tablename'\n project_id : str\n Google BigQuery Account project ID.\n chunksize : int (default 10000)\n Number of rows to be inserted in each chunk from the dataframe.\n verbose : boolean (default True)\n Show percentage complete\n reauth : boolean (default False)\n Force Google BigQuery to reauthenticate the user. This is useful\n if multiple accounts are used.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n 'fail': If table exists, do nothing.\n 'replace': If table exists, drop it, recreate it, and insert data.\n 'append': If table exists, insert data. Create if does not exist.\n\n .. 
versionadded:: 0.17.0\n \"\"\"\n\n from pandas.io import gbq\n return gbq.to_gbq(self, destination_table, project_id=project_id,\n chunksize=chunksize, verbose=verbose,\n reauth=reauth, if_exists=if_exists)\n\n @classmethod\n def from_records(cls, data, index=None, exclude=None, columns=None,\n coerce_float=False, nrows=None):\n \"\"\"\n Convert structured or record ndarray to DataFrame\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns)\n coerce_float : boolean, default False\n Attempt to convert values to non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets\n\n Returns\n -------\n df : DataFrame\n \"\"\"\n\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = _ensure_index(columns)\n\n if com.is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, 'dtype') and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = _ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns = []\n for k, v in compat.iteritems(data):\n if k in columns:\n arr_columns.append(k)\n arrays.append(v)\n\n arrays, arr_columns = _reorder_arrays(arrays, arr_columns,\n columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = _to_arrays(data, columns)\n if columns is not None:\n columns = _ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = _to_arrays(data, columns,\n coerce_float=coerce_float)\n\n arr_columns = _ensure_index(arr_columns)\n if columns is not None:\n columns = _ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if (isinstance(index, compat.string_types) or\n not hasattr(index, \"__iter__\")):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n to_remove = [arr_columns.get_loc(field) for field in index]\n\n result_index = MultiIndex.from_arrays(\n [arrays[i] for i in to_remove], names=index)\n\n exclude.update(index)\n except Exception:\n result_index = index\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = _arrays_to_mgr(arrays, arr_columns, result_index,\n 
columns)\n\n return cls(mgr)\n\n def to_records(self, index=True, convert_datetime64=True):\n \"\"\"\n Convert DataFrame to record array. Index will be put in the\n 'index' field of the record array if requested\n\n Parameters\n ----------\n index : boolean, default True\n Include index in resulting record array, stored in 'index' field\n convert_datetime64 : boolean, default True\n Whether to convert the index to datetime.datetime if it is a\n DatetimeIndex\n\n Returns\n -------\n y : recarray\n \"\"\"\n if index:\n if com.is_datetime64_dtype(self.index) and convert_datetime64:\n ix_vals = [self.index.to_pydatetime()]\n else:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. copy copy copy\n ix_vals = lmap(np.array, zip(*self.index.values))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [self[c].get_values() for c in self.columns]\n\n count = 0\n index_names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = 'level_%d' % count\n count += 1\n elif index_names[0] is None:\n index_names = ['index']\n names = index_names + lmap(str, self.columns)\n else:\n arrays = [self[c].get_values() for c in self.columns]\n names = lmap(str, self.columns)\n\n dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])\n return np.rec.fromarrays(arrays, dtype=dtype, names=names)\n\n @classmethod\n def from_items(cls, items, columns=None, orient='columns'):\n \"\"\"\n Convert (key, value) pairs to DataFrame. The keys will be the axis\n index (usually the columns, but depends on the specified\n orientation). The values should be arrays or Series.\n\n Parameters\n ----------\n items : sequence of (key, value) pairs\n Values should be arrays or Series.\n columns : sequence of column labels, optional\n Must be passed if orient='index'.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the\n input correspond to column labels, pass 'columns'\n (default). 
Otherwise if the keys correspond to the index,\n pass 'index'.\n\n Returns\n -------\n frame : DataFrame\n \"\"\"\n keys, values = lzip(*items)\n\n if orient == 'columns':\n if columns is not None:\n columns = _ensure_index(columns)\n\n idict = dict(items)\n if len(idict) < len(items):\n if not columns.equals(_ensure_index(keys)):\n raise ValueError('With non-unique item names, passed '\n 'columns must be identical')\n arrays = values\n else:\n arrays = [idict[k] for k in columns if k in idict]\n else:\n columns = _ensure_index(keys)\n arrays = values\n\n return cls._from_arrays(arrays, columns, None)\n elif orient == 'index':\n if columns is None:\n raise TypeError(\"Must pass columns with orient='index'\")\n\n keys = _ensure_index(keys)\n\n arr = np.array(values, dtype=object).T\n data = [lib.maybe_convert_objects(v) for v in arr]\n return cls._from_arrays(data, columns, keys)\n else: # pragma: no cover\n raise ValueError(\"'orient' must be either 'columns' or 'index'\")\n\n @classmethod\n def _from_arrays(cls, arrays, columns, index, dtype=None):\n mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n return cls(mgr)\n\n @classmethod\n def from_csv(cls, path, header=0, sep=',', index_col=0,\n parse_dates=True, encoding=None, tupleize_cols=False,\n infer_datetime_format=False):\n \"\"\"\n Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv` instead).\n\n It is preferable to use the more powerful :func:`pandas.read_csv`\n for most general purposes, but ``from_csv`` makes for an easy\n roundtrip to and from a file (the exact counterpart of\n ``to_csv``), especially with a DataFrame of time series data.\n\n This method only differs from the preferred :func:`pandas.read_csv`\n in some defaults:\n\n - `index_col` is ``0`` instead of ``None`` (take first column as index\n by default)\n - `parse_dates` is ``True`` instead of ``False`` (try parsing the index\n as datetime by default)\n\n So a ``pd.DataFrame.from_csv(path)`` can be replaced by\n ``pd.read_csv(path, index_col=0, parse_dates=True)``.\n\n Parameters\n ----------\n path : string file path or file handle / StringIO\n header : int, default 0\n Row to use as header (skip prior rows)\n sep : string, default ','\n Field delimiter\n index_col : int or sequence, default 0\n Column to use for index. If a sequence is given, a MultiIndex\n is used. Different default from read_table\n parse_dates : boolean, default True\n Parse dates. Different default from read_table\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. 
If the format\n can be inferred, there often will be a large parsing speed-up.\n\n See also\n --------\n pandas.read_csv\n\n Returns\n -------\n y : DataFrame\n\n \"\"\"\n from pandas.io.parsers import read_table\n return read_table(path, header=header, sep=sep,\n parse_dates=parse_dates, index_col=index_col,\n encoding=encoding, tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format)\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparseDataFrame\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparseDataFrame\n return SparseDataFrame(self._series, index=self.index,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_panel(self):\n \"\"\"\n Transform long (stacked) format (DataFrame) into wide (3D, Panel)\n format.\n\n Currently the index of the DataFrame must be a 2-level MultiIndex. This\n may be generalized later\n\n Returns\n -------\n panel : Panel\n \"\"\"\n # only support this kind for now\n if (not isinstance(self.index, MultiIndex) or # pragma: no cover\n len(self.index.levels) != 2):\n raise NotImplementedError('Only 2-level MultiIndex are supported.')\n\n if not self.index.is_unique:\n raise ValueError(\"Can't convert non-uniquely indexed \"\n \"DataFrame to Panel\")\n\n self._consolidate_inplace()\n\n # minor axis must be sorted\n if self.index.lexsort_depth < 2:\n selfsorted = self.sortlevel(0)\n else:\n selfsorted = self\n\n major_axis, minor_axis = selfsorted.index.levels\n major_labels, minor_labels = selfsorted.index.labels\n shape = len(major_axis), len(minor_axis)\n\n # preserve names, if any\n major_axis = major_axis.copy()\n major_axis.name = self.index.names[0]\n\n minor_axis = minor_axis.copy()\n minor_axis.name = self.index.names[1]\n\n # create new axes\n new_axes = [selfsorted.columns, major_axis, minor_axis]\n\n # create new manager\n new_mgr = selfsorted._data.reshape_nd(axes=new_axes,\n labels=[major_labels, minor_labels],\n shape=shape,\n ref_items=selfsorted.columns)\n\n return self._constructor_expanddim(new_mgr)\n\n to_wide = deprecate('to_wide', to_panel)\n\n def to_csv(self, path_or_buf=None, sep=\",\", na_rep='', float_format=None,\n columns=None, header=True, index=True, index_label=None,\n mode='w', encoding=None, quoting=None,\n quotechar='\"', line_terminator='\\n', chunksize=None,\n tupleize_cols=False, date_format=None, doublequote=True,\n escapechar=None, decimal='.', **kwds):\n \"\"\"Write DataFrame to a comma-separated values (csv) file\n\n Parameters\n ----------\n path_or_buf : string or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n sep : character, default ','\n Field delimiter for the output file.\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is assumed\n to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex. 
If\n False do not print fields for index names. Use index_label=False\n for easier importing in R\n nanRep : None\n deprecated, use na_rep\n mode : str\n Python write mode, default 'w'\n encoding : string, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n line_terminator : string, default '\\\\n'\n The newline character or character sequence to use in the output\n file\n quoting : optional constant from csv module\n defaults to csv.QUOTE_MINIMAL\n quotechar : string (length 1), default '\\\"'\n character used to quote fields\n doublequote : boolean, default True\n Control quoting of `quotechar` inside a field\n escapechar : string (length 1), default None\n character used to escape `sep` and `quotechar` when appropriate\n chunksize : int or None\n rows to write at a time\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n date_format : string, default None\n Format string for datetime objects\n decimal: string, default '.'\n Character recognized as decimal separator. E.g. use ',' for European data\n\n .. versionadded:: 0.16.0\n\n \"\"\"\n\n formatter = fmt.CSVFormatter(self, path_or_buf,\n line_terminator=line_terminator,\n sep=sep, encoding=encoding,\n quoting=quoting, na_rep=na_rep,\n float_format=float_format, cols=columns,\n header=header, index=index,\n index_label=index_label, mode=mode,\n chunksize=chunksize, quotechar=quotechar,\n engine=kwds.get(\"engine\"),\n tupleize_cols=tupleize_cols,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal)\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep='inf',\n verbose=True):\n \"\"\"\n Write DataFrame to a excel sheet\n\n Parameters\n ----------\n excel_writer : string or ExcelWriter object\n File path or existing ExcelWriter\n sheet_name : string, default 'Sheet1'\n Name of sheet which will contain DataFrame\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow :\n upper left cell row to dump data frame\n startcol :\n upper left cell column to dump data frame\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : boolean, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding: string, default None\n encoding of the resulting excel file. 
Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : string, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel)\n\n Notes\n -----\n If passing an existing ExcelWriter object, then the sheet will be added\n to the existing workbook. This can be used to save different\n DataFrames to one workbook:\n\n >>> writer = ExcelWriter('output.xlsx')\n >>> df1.to_excel(writer,'Sheet1')\n >>> df2.to_excel(writer,'Sheet2')\n >>> writer.save()\n\n For compatibility with to_csv, to_excel serializes lists and dicts to\n strings before writing.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n need_save = False\n if encoding is None:\n encoding = 'ascii'\n\n if isinstance(excel_writer, compat.string_types):\n excel_writer = ExcelWriter(excel_writer, engine=engine)\n need_save = True\n\n formatter = fmt.ExcelFormatter(self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatted_cells = formatter.get_formatted_cells()\n excel_writer.write_cells(formatted_cells, sheet_name,\n startrow=startrow, startcol=startcol)\n if need_save:\n excel_writer.save()\n\n def to_stata(\n self, fname, convert_dates=None, write_index=True, encoding=\"latin-1\",\n byteorder=None, time_stamp=None, data_label=None):\n \"\"\"\n A class for writing Stata binary dta files from array-like objects\n\n Parameters\n ----------\n fname : file path or buffer\n Where to save the dta file.\n convert_dates : dict\n Dictionary mapping column of datetime types to the stata internal\n format that you want to use for the dates. Options are\n 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a\n number or a name.\n encoding : str\n Default is latin-1. Note that Stata does not support unicode.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". 
The default is None which uses\n `sys.byteorder`\n\n Examples\n --------\n >>> writer = StataWriter('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with dates\n\n >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})\n >>> writer.write_file()\n \"\"\"\n from pandas.io.stata import StataWriter\n writer = StataWriter(fname, self, convert_dates=convert_dates,\n encoding=encoding, byteorder=byteorder,\n time_stamp=time_stamp, data_label=data_label,\n write_index=write_index)\n writer.write_file()\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_string(self, buf=None, columns=None, col_space=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, line_width=None, max_rows=None, max_cols=None,\n show_dimensions=False):\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n \"\"\"\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n line_width=line_width,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_string()\n\n if buf is None:\n result = formatter.buf.getvalue()\n return result\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, bold_rows=True, classes=None, escape=True,\n max_rows=None, max_cols=None, show_dimensions=False,\n notebook=False):\n \"\"\"\n Render a DataFrame as an HTML table.\n\n `to_html`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table\n escape : boolean, default True\n Convert the characters <, >, and & to HTML-safe sequences.=\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n max_cols : int, optional\n Maximum number of columns to show before truncating. If None, show\n all.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning, stacklevel=2)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n bold_rows=bold_rows,\n escape=escape,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_html(classes=classes, notebook=notebook)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n @Appender(fmt.common_docstring + fmt.return_docstring, indents=1)\n def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n bold_rows=True, column_format=None,\n longtable=False, escape=True):\n \"\"\"\n Render a DataFrame to a tabular environment table. You can splice\n this into a LaTeX document. 
Requires \\\\usepackage{booktabs}.\n\n `to_latex`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n column_format : str, default None\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns\n longtable : boolean, default False\n Use a longtable environment instead of tabular. Requires adding\n a \\\\usepackage{longtable} to your LaTeX preamble.\n escape : boolean, default True\n When set to False prevents from escaping latex special\n characters in column names.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning, stacklevel=2)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n header=header, index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape)\n formatter.to_latex(column_format=column_format, longtable=longtable)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):\n \"\"\"\n Concise summary of a DataFrame.\n\n Parameters\n ----------\n verbose : {None, True, False}, optional\n Whether to print the full summary.\n None follows the `display.max_info_columns` setting.\n True or False overrides the `display.max_info_columns` setting.\n buf : writable buffer, defaults to sys.stdout\n max_cols : int, default None\n Determines whether full summary or short summary is printed.\n None follows the `display.max_info_columns` setting.\n memory_usage : boolean, default None\n Specifies whether total memory usage of the DataFrame\n elements (including index) should be displayed. None follows\n the `display.memory_usage` setting. True or False overrides\n the `display.memory_usage` setting. 
Memory usage is shown in\n human-readable units (base-2 representation).\n null_counts : boolean, default None\n Whether to show the non-null counts\n If None, then only show if the frame is smaller than max_info_rows and max_info_columns.\n If True, always show counts.\n If False, never show counts.\n\n \"\"\"\n from pandas.core.format import _put_lines\n\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n\n lines.append(str(type(self)))\n lines.append(self.index.summary())\n\n if len(self.columns) == 0:\n lines.append('Empty %s' % type(self).__name__)\n _put_lines(buf, lines)\n return\n\n cols = self.columns\n\n # hack\n if max_cols is None:\n max_cols = get_option(\n 'display.max_info_columns', len(self.columns) + 1)\n\n max_rows = get_option('display.max_info_rows', len(self) + 1)\n\n if null_counts is None:\n show_counts = ((len(self.columns) <= max_cols) and\n (len(self) < max_rows))\n else:\n show_counts = null_counts\n exceeds_info_cols = len(self.columns) > max_cols\n\n def _verbose_repr():\n lines.append('Data columns (total %d columns):' %\n len(self.columns))\n space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4\n counts = None\n\n tmpl = \"%s%s\"\n if show_counts:\n counts = self.count()\n if len(cols) != len(counts): # pragma: no cover\n raise AssertionError('Columns must equal counts (%d != %d)' %\n (len(cols), len(counts)))\n tmpl = \"%s non-null %s\"\n\n dtypes = self.dtypes\n for i, col in enumerate(self.columns):\n dtype = dtypes[col]\n col = com.pprint_thing(col)\n\n count = \"\"\n if show_counts:\n count = counts.iloc[i]\n\n lines.append(_put_str(col, space) +\n tmpl % (count, dtype))\n\n def _non_verbose_repr():\n lines.append(self.columns.summary(name='Columns'))\n\n def _sizeof_fmt(num, size_qualifier):\n # returns size in human readable format\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f%s %s\" % (num, size_qualifier, x)\n num /= 1024.0\n return \"%3.1f%s %s\" % (num, size_qualifier, 'PB')\n\n if verbose:\n _verbose_repr()\n elif verbose is False: # specifically set to False, not nesc None\n _non_verbose_repr()\n else:\n if exceeds_info_cols:\n _non_verbose_repr()\n else:\n _verbose_repr()\n\n counts = self.get_dtype_counts()\n dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]\n lines.append('dtypes: %s' % ', '.join(dtypes))\n if memory_usage is None:\n memory_usage = get_option('display.memory_usage')\n if memory_usage: # append memory usage of df to display\n # size_qualifier is just a best effort; not guaranteed to catch all\n # cases (e.g., it misses categorical data even with object\n # categories)\n size_qualifier = ('+' if 'object' in counts\n or is_object_dtype(self.index) else '')\n mem_usage = self.memory_usage(index=True).sum()\n lines.append(\"memory usage: %s\\n\" %\n _sizeof_fmt(mem_usage, size_qualifier))\n _put_lines(buf, lines)\n\n def memory_usage(self, index=False):\n \"\"\"Memory usage of DataFrame columns.\n\n Parameters\n ----------\n index : bool\n Specifies whether to include memory usage of DataFrame's\n index in returned Series. 
If `index=True` (default is False)\n the first index of the Series is `Index`.\n\n Returns\n -------\n sizes : Series\n A series with column names as index and memory usage of\n columns with units of bytes.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array.\n\n See Also\n --------\n numpy.ndarray.nbytes\n \"\"\"\n result = Series([ c.values.nbytes for col, c in self.iteritems() ],\n index=self.columns)\n if index:\n result = Series(self.index.nbytes,\n index=['Index']).append(result)\n return result\n\n def transpose(self):\n \"\"\"Transpose index and columns\"\"\"\n return super(DataFrame, self).transpose(1, 0)\n\n T = property(transpose)\n\n #----------------------------------------------------------------------\n # Picklability\n\n # legacy pickle formats\n def _unpickle_frame_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n if len(state) == 2: # pragma: no cover\n series, idx = state\n columns = sorted(series)\n else:\n series, cols, idx = state\n columns = _unpickle_array(cols)\n\n index = _unpickle_array(idx)\n self._data = self._init_dict(series, index, columns, None)\n\n def _unpickle_matrix_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n # old unpickling\n (vals, idx, cols), object_state = state\n\n index = _unpickle_array(idx)\n dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),\n copy=False)\n\n if object_state is not None:\n ovals, _, ocols = object_state\n objects = DataFrame(ovals, index=index,\n columns=_unpickle_array(ocols),\n copy=False)\n\n dm = dm.join(objects)\n\n self._data = dm._data\n\n #----------------------------------------------------------------------\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, index, col, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n\n if takeable:\n series = self._iget_item_cache(col)\n return _maybe_box_datetimelike(series._values[index])\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n return engine.get_value(series.get_values(), index)\n\n def set_value(self, index, col, value, takeable=False):\n \"\"\"\n Put single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar value\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n frame : DataFrame\n If label pair is contained, will be reference to calling DataFrame,\n otherwise a new object\n \"\"\"\n try:\n if takeable is True:\n series = self._iget_item_cache(col)\n return series.set_value(index, value, takeable=True)\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n engine.set_value(series._values, index, value)\n return self\n except (KeyError, TypeError):\n\n # set using a non-recursive method & reset the cache\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n return self\n\n def irow(self, i, copy=False):\n \"\"\"\n DEPRECATED. Use ``.iloc[i]`` instead\n \"\"\"\n\n warnings.warn(\"irow(i) is deprecated. Please use .iloc[i]\",\n FutureWarning, stacklevel=2)\n return self._ixs(i, axis=0)\n\n def icol(self, i):\n \"\"\"\n DEPRECATED. 
Use ``.iloc[:, i]`` instead\n \"\"\"\n warnings.warn(\"icol(i) is deprecated. Please use .iloc[:,i]\",\n FutureWarning, stacklevel=2)\n return self._ixs(i, axis=1)\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n # irow\n if axis == 0:\n\n \"\"\"\n Notes\n -----\n If slice passed, the resulting data will be a view\n \"\"\"\n\n if isinstance(i, slice):\n return self[i]\n else:\n label = self.index[i]\n if isinstance(label, Index):\n # a location index by definition\n result = self.take(i, axis=axis)\n copy=True\n else:\n new_values = self._data.fast_xs(i)\n if lib.isscalar(new_values):\n return new_values\n\n # if we are a copy, mark as such\n copy = isinstance(new_values,np.ndarray) and new_values.base is None\n result = Series(new_values, index=self.columns,\n name=self.index[i], dtype=new_values.dtype)\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n\n \"\"\"\n Notes\n -----\n If slice passed, the resulting data will be a view\n \"\"\"\n\n label = self.columns[i]\n if isinstance(i, slice):\n # need to return view\n lab_slice = slice(label[0], label[-1])\n return self.ix[:, lab_slice]\n else:\n if isinstance(label, Index):\n return self.take(i, axis=1, convert=True)\n\n index_len = len(self.index)\n\n # if the values returned are not the same length\n # as the index (iow a not found value), iget returns\n # a 0-len ndarray. This is effectively catching\n # a numpy error (as numpy should really raise)\n values = self._data.iget(i)\n\n if index_len and not len(values):\n values = np.array([np.nan] * index_len, dtype=object)\n result = self._constructor_sliced.from_array(\n values, index=self.index,\n name=label, fastpath=True)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def iget_value(self, i, j):\n \"\"\"\n DEPRECATED. Use ``.iat[i, j]`` instead\n \"\"\"\n warnings.warn(\"iget_value(i, j) is deprecated. 
Please use .iat[i, j]\",\n FutureWarning, stacklevel=2)\n return self.iat[i, j]\n\n def __getitem__(self, key):\n\n # shortcut if we are an actual column\n is_mi_columns = isinstance(self.columns, MultiIndex)\n try:\n if key in self.columns and not is_mi_columns:\n return self._getitem_column(key)\n except:\n pass\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._getitem_slice(indexer)\n\n if isinstance(key, (Series, np.ndarray, Index, list)):\n # either boolean or fancy integer index\n return self._getitem_array(key)\n elif isinstance(key, DataFrame):\n return self._getitem_frame(key)\n elif is_mi_columns:\n return self._getitem_multilevel(key)\n else:\n return self._getitem_column(key)\n\n def _getitem_column(self, key):\n \"\"\" return the actual column \"\"\"\n\n # get column\n if self.columns.is_unique:\n return self._get_item_cache(key)\n\n # duplicate columns & possible reduce dimensionaility\n result = self._constructor(self._data.get(key))\n if result.columns.is_unique:\n result = result[key]\n\n return result\n\n def _getitem_slice(self, key):\n return self._slice(key, axis=0)\n\n def _getitem_array(self, key):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\"Boolean Series key will be reindexed to match \"\n \"DataFrame index.\", UserWarning)\n elif len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d.' %\n (len(key), len(self.index)))\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self.take(indexer, axis=0, convert=False)\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n return self.take(indexer, axis=1, convert=True)\n\n def _getitem_multilevel(self, key):\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = self._constructor(new_values, index=self.index,\n columns=result_columns).__finalize__(self)\n if len(result.columns) == 1:\n top = result.columns[0]\n if ((type(top) == str and top == '') or\n (type(top) == tuple and top[0] == '')):\n result = result['']\n if isinstance(result, Series):\n result = self._constructor_sliced(result, index=self.index, name=key)\n\n result._set_is_copy(self)\n return result\n else:\n return self._get_item_cache(key)\n\n def _getitem_frame(self, key):\n if key.values.dtype != np.bool_:\n raise ValueError('Must pass DataFrame with boolean values only')\n return self.where(key)\n\n def query(self, expr, **kwargs):\n \"\"\"Query the columns of a frame with a boolean expression.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n expr : string\n The query string to evaluate. 
You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n kwargs : dict\n See the documentation for :func:`pandas.eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n q : DataFrame\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`pandas.eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n See Also\n --------\n pandas.eval\n DataFrame.eval\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.query('a > b')\n >>> df[df.a > df.b] # same result as the previous expression\n \"\"\"\n kwargs['level'] = kwargs.pop('level', 0) + 1\n res = self.eval(expr, **kwargs)\n\n try:\n return self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n return self[res]\n\n def eval(self, expr, **kwargs):\n \"\"\"Evaluate an expression in the context of the calling DataFrame\n instance.\n\n Parameters\n ----------\n expr : string\n The expression string to evaluate.\n kwargs : dict\n See the documentation for :func:`~pandas.eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ret : ndarray, scalar, or pandas object\n\n See Also\n --------\n pandas.DataFrame.query\n pandas.eval\n\n Notes\n -----\n For more details see the API documentation for :func:`~pandas.eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.eval('a + b')\n >>> df.eval('c = a + b')\n \"\"\"\n resolvers = kwargs.pop('resolvers', None)\n kwargs['level'] = kwargs.pop('level', 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n resolvers = dict(self.iteritems()), index_resolvers\n kwargs['target'] = self\n kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers\n return 
_eval(expr, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None):\n \"\"\"Return a subset of a DataFrame including/excluding columns based on\n their ``dtype``.\n\n Parameters\n ----------\n include, exclude : list-like\n A list of dtypes or strings to be included/excluded. You must pass\n in a non-empty sequence for at least one of these.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n TypeError\n * If either of ``include`` or ``exclude`` is not a sequence\n\n Returns\n -------\n subset : DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Notes\n -----\n * To select all *numeric* types use the numpy dtype ``numpy.number``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__\n * To select Pandas categorical dtypes, use 'category'\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 0.3962 True 1\n 1 0.1459 False 2\n 2 0.2623 True 1\n 3 0.0764 False 2\n 4 -0.9703 True 1\n 5 -1.2094 False 2\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1\n 1 2\n 2 1\n 3 2\n 4 1\n 5 2\n >>> df.select_dtypes(exclude=['floating'])\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n \"\"\"\n include, exclude = include or (), exclude or ()\n if not (com.is_list_like(include) and com.is_list_like(exclude)):\n raise TypeError('include and exclude must both be non-string'\n ' sequences')\n selection = tuple(map(frozenset, (include, exclude)))\n\n if not any(selection):\n raise ValueError('at least one of include or exclude must be '\n 'nonempty')\n\n # convert the myriad valid dtypes object to a single representation\n include, exclude = map(lambda x:\n frozenset(map(com._get_dtype_from_object, x)),\n selection)\n for dtypes in (include, exclude):\n com._invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError('include and exclude overlap on %s'\n % (include & exclude))\n\n # empty include/exclude -> defaults to True\n # three cases (we've already raised if both are empty)\n # case 1: empty include, nonempty exclude\n # we have True, True, ... 
True for include, same for exclude\n # in the loop below we get the excluded\n # and when we call '&' below we get only the excluded\n # case 2: nonempty include, empty exclude\n # same as case 1, but with include\n # case 3: both nonempty\n # the \"union\" of the logic of case 1 and case 2:\n # we get the included and excluded, and return their logical and\n include_these = Series(not bool(include), index=self.columns)\n exclude_these = Series(not bool(exclude), index=self.columns)\n\n def is_dtype_instance_mapper(column, dtype):\n return column, functools.partial(issubclass, dtype.type)\n\n for column, f in itertools.starmap(is_dtype_instance_mapper,\n self.dtypes.iteritems()):\n if include: # checks for the case of empty include or exclude\n include_these[column] = any(map(f, include))\n if exclude:\n exclude_these[column] = not any(map(f, exclude))\n\n dtype_indexer = include_these & exclude_these\n return self.loc[com._get_info_slice(self, dtype_indexer)]\n\n def _box_item_values(self, key, values):\n items = self.columns[self.columns.get_loc(key)]\n if values.ndim == 2:\n return self._constructor(values.T, columns=items, index=self.index)\n else:\n return self._box_col_values(values, items)\n\n def _box_col_values(self, values, items):\n \"\"\" provide boxed values for a column \"\"\"\n return self._constructor_sliced.from_array(values, index=self.index,\n name=items, fastpath=True)\n\n def __setitem__(self, key, value):\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(key, DataFrame):\n self._setitem_frame(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key, value):\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(key, value)\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d!' 
%\n (len(key), len(self.index)))\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(indexer, value)\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError('Columns must be same length as key')\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n self._check_setitem_copy()\n self.ix._setitem_with_indexer((slice(None), indexer), value)\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if key.values.size and not com.is_bool_dtype(key.values):\n raise TypeError('Must pass DataFrame with boolean values only')\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n self.where(-key, value, inplace=True)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n ensure that if we don't have an index, that we can create one from the\n passed value\n \"\"\"\n # GH5632, make sure that we are a Series convertible\n if not len(self.index) and is_list_like(value):\n try:\n value = Series(value)\n except:\n raise ValueError('Cannot set a frame with no defined index '\n 'and a value that cannot be converted to a '\n 'Series')\n\n self._data = self._data.reindex_axis(value.index.copy(), axis=1,\n fill_value=np.nan)\n\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n\n self._ensure_valid_index(value)\n value = self._sanitize_column(key, value)\n NDFrame._set_item(self, key, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exeption to occur first\n if len(self):\n self._check_setitem_copy()\n\n def insert(self, loc, column, value, allow_duplicates=False):\n \"\"\"\n Insert column into DataFrame at specified location.\n\n If `allow_duplicates` is False, raises Exception if column\n is already contained in the DataFrame.\n\n Parameters\n ----------\n loc : int\n Must have 0 <= loc <= len(columns)\n column : object\n value : int, Series, or array-like\n \"\"\"\n self._ensure_valid_index(value)\n value = self._sanitize_column(column, value)\n self._data.insert(\n loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs):\n \"\"\"\n Assign new columns to a DataFrame, returning a new object\n (a copy) with all the original columns in addition to the new ones.\n\n .. versionadded:: 0.16.0\n\n Parameters\n ----------\n kwargs : keyword, value pairs\n keywords are the column names. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. If the values are\n not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n df : DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your\n arguments may not be preserved. The make things predicatable,\n the columns are inserted in alphabetical order, at the end of\n your DataFrame. 
Assigning multiple columns within the same\n ``assign`` is possible, but you cannot reference other columns\n created within the same ``assign`` call.\n\n Examples\n --------\n >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(ln_A = lambda x: np.log(x.A))\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n\n Where the value already exists and is inserted:\n\n >>> newcol = np.log(df['A'])\n >>> df.assign(ln_A=newcol)\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n \"\"\"\n data = self.copy()\n\n # do all calculations first...\n results = {}\n for k, v in kwargs.items():\n\n if callable(v):\n results[k] = v(data)\n else:\n results[k] = v\n\n # ... and then assign\n for k, v in sorted(results.items()):\n data[k] = v\n\n return data\n\n def _sanitize_column(self, key, value):\n # Need to make sure new columns (which go into the BlockManager as new\n # blocks) are always copied\n\n def reindexer(value):\n # reindex if necessary\n\n if value.index.equals(self.index) or not len(self.index):\n value = value._values.copy()\n else:\n\n # GH 4107\n try:\n value = value.reindex(self.index).values\n except Exception as e:\n\n # duplicate axis\n if not value.index.is_unique:\n raise e\n\n # other\n raise TypeError('incompatible index of inserted column '\n 'with frame index')\n return value\n\n if isinstance(value, Series):\n value = reindexer(value)\n\n elif isinstance(value, DataFrame):\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex_axis(cols, axis=1)\n # now align rows\n value = reindexer(value).T\n\n elif isinstance(value, Categorical):\n value = value.copy()\n\n elif (isinstance(value, Index) or is_sequence(value)):\n from pandas.core.series import _sanitize_index\n\n # turn me into an ndarray\n value = _sanitize_index(value, self.index, copy=False)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = com._possibly_convert_platform(value)\n else:\n value = com._asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = _possibly_infer_to_datetimelike(value)\n\n else:\n # upcast the scalar\n dtype, value = _infer_dtype_from_scalar(value)\n value = np.repeat(value, len(self.index)).astype(dtype)\n value = com._possibly_cast_to_datetime(value, dtype)\n\n # return internal types directly\n if is_internal_type(value):\n return value\n\n # broadcast across multiple columns if necessary\n if key in self.columns and value.ndim == 1:\n if not self.columns.is_unique or isinstance(self.columns,\n MultiIndex):\n existing_piece = self[key]\n if isinstance(existing_piece, DataFrame):\n value = 
np.tile(value, (len(existing_piece.columns), 1))\n\n return np.atleast_2d(np.asarray(value))\n\n @property\n def _series(self):\n result = {}\n for idx, item in enumerate(self.columns):\n result[item] = Series(self._data.iget(idx), index=self.index,\n name=item)\n return result\n\n def lookup(self, row_labels, col_labels):\n \"\"\"Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup\n col_labels : sequence\n The column labels to use for lookup\n\n Notes\n -----\n Akin to::\n\n result = []\n for row, col in zip(row_labels, col_labels):\n result.append(df.get_value(row, col))\n\n Examples\n --------\n values : ndarray\n The found values\n\n \"\"\"\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError('Row labels must have same size as column labels')\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError('One or more row labels was not found')\n if (cidx == -1).any():\n raise KeyError('One or more column labels was not found')\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype='O')\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self.get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n #----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, tolerance, method,\n fill_value, copy):\n frame = self\n\n columns = axes['columns']\n if columns is not None:\n frame = frame._reindex_columns(columns, copy, level, fill_value,\n limit, tolerance)\n\n index = axes['index']\n if index is not None:\n frame = frame._reindex_index(index, method, copy, level,\n fill_value, limit, tolerance)\n\n return frame\n\n def _reindex_index(self, new_index, method, copy, level, fill_value=NA,\n limit=None, tolerance=None):\n new_index, indexer = self.index.reindex(new_index, method, level,\n limit=limit,\n tolerance=tolerance)\n return self._reindex_with_indexers({0: [new_index, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_columns(self, new_columns, copy, level, fill_value=NA,\n limit=None, tolerance=None):\n new_columns, indexer = self.columns.reindex(new_columns, level=level,\n limit=limit,\n tolerance=tolerance)\n return self._reindex_with_indexers({1: [new_columns, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_multi(self, axes, copy, fill_value):\n \"\"\" we are guaranteed non-Nones in the axes! 
\"\"\"\n\n new_index, row_indexer = self.index.reindex(axes['index'])\n new_columns, col_indexer = self.columns.reindex(axes['columns'])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = com.take_2d_multi(self.values, indexer,\n fill_value=fill_value)\n return self._constructor(new_values, index=new_index,\n columns=new_columns)\n else:\n return self._reindex_with_indexers({0: [new_index, row_indexer],\n 1: [new_columns, col_indexer]},\n copy=copy,\n fill_value=fill_value)\n\n @Appender(_shared_docs['align'] % _shared_doc_kwargs)\n def align(self, other, join='outer', axis=None, level=None, copy=True,\n fill_value=None, method=None, limit=None, fill_axis=0,\n broadcast_axis=None):\n return super(DataFrame, self).align(other, join=join, axis=axis, level=level, copy=copy,\n fill_value=fill_value, method=method, limit=limit,\n fill_axis=fill_axis, broadcast_axis=broadcast_axis)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).reindex(index=index, columns=columns,\n **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).rename(index=index, columns=columns,\n **kwargs)\n\n @Appender(_shared_docs['fillna'] % _shared_doc_kwargs)\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None, **kwargs):\n return super(DataFrame, self).fillna(value=value, method=method,\n axis=axis, inplace=inplace,\n limit=limit, downcast=downcast,\n **kwargs)\n\n @Appender(_shared_docs['shift'] % _shared_doc_kwargs)\n def shift(self, periods=1, freq=None, axis=0):\n return super(DataFrame, self).shift(periods=periods, freq=freq,\n axis=axis)\n\n def set_index(self, keys, drop=True, append=False, inplace=False,\n verify_integrity=False):\n \"\"\"\n Set the DataFrame index (row labels) using one or more existing\n columns. By default yields a new object.\n\n Parameters\n ----------\n keys : column label or list of column labels / arrays\n drop : boolean, default True\n Delete columns to be used as the new index\n append : boolean, default False\n Whether to append columns to existing index\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n verify_integrity : boolean, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. 
Setting to False will improve the performance of this\n method\n\n Examples\n --------\n >>> indexed_df = df.set_index(['A', 'B'])\n >>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])\n >>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])\n\n Returns\n -------\n dataframe : DataFrame\n \"\"\"\n if not isinstance(keys, list):\n keys = [keys]\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names = []\n if append:\n names = [x for x in self.index.names]\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index.get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove = []\n for col in keys:\n if isinstance(col, MultiIndex):\n # append all but the last column so we don't have to modify\n # the end of this loop\n for n in range(col.nlevels - 1):\n arrays.append(col.get_level_values(n))\n\n level = col.get_level_values(col.nlevels - 1)\n names.extend(col.names)\n elif isinstance(col, Series):\n level = col.values\n names.append(col.name)\n elif isinstance(col, Index):\n level = col\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray, Index)):\n level = col\n names.append(None)\n else:\n level = frame[col]._values\n names.append(col)\n if drop:\n to_remove.append(col)\n arrays.append(level)\n\n index = MultiIndex.from_arrays(arrays, names=names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index.get_duplicates()\n raise ValueError('Index has duplicate keys: %s' % duplicates)\n\n for c in to_remove:\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n def reset_index(self, level=None, drop=False, inplace=False, col_level=0,\n col_fill=''):\n \"\"\"\n For DataFrame with multi-level index, return new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default\n drop : boolean, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. 
If None then the index name is repeated.\n\n Returns\n -------\n resetted : DataFrame\n \"\"\"\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n def _maybe_casted_values(index, labels=None):\n if isinstance(index, PeriodIndex):\n values = index.asobject.values\n elif (isinstance(index, DatetimeIndex) and\n index.tz is not None):\n values = index\n else:\n values = index.values\n if values.dtype == np.object_:\n values = lib.maybe_convert_objects(values)\n\n # if we have the labels, extract the values with a mask\n if labels is not None:\n mask = labels == -1\n values = values.take(labels)\n if mask.any():\n values, changed = com._maybe_upcast_putmask(values,\n mask, np.nan)\n return values\n\n new_index = np.arange(len(new_obj),dtype='int64')\n if isinstance(self.index, MultiIndex):\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < len(self.index.levels):\n new_index = self.index.droplevel(level)\n\n if not drop:\n names = self.index.names\n zipped = lzip(self.index.levels, self.index.labels)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(zipped))):\n col_name = names[i]\n if col_name is None:\n col_name = 'level_%d' % i\n\n if multi_col:\n if col_fill is None:\n col_name = tuple([col_name] *\n self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = col_name\n col_name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = _maybe_casted_values(lev, lab)\n if level is None or i in level:\n new_obj.insert(0, col_name, level_values)\n\n elif not drop:\n name = self.index.name\n if name is None or name == 'index':\n name = 'index' if 'index' not in self else 'level_0'\n if isinstance(self.columns, MultiIndex):\n if col_fill is None:\n name = tuple([name] * self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = name\n name = tuple(name_lst)\n values = _maybe_casted_values(self.index)\n new_obj.insert(0, name, values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n\n #----------------------------------------------------------------------\n # Reindex-based selection methods\n\n def dropna(self, axis=0, how='any', thresh=None, subset=None,\n inplace=False):\n \"\"\"\n Return object with labels on given axis omitted where alternately any\n or all of the data are missing\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof\n Pass tuple or list to drop on multiple axes\n how : {'any', 'all'}\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n int value : require that many non-NA values\n subset : array-like\n Labels along other axis to consider, e.g. 
if you are dropping rows\n these would be a list of columns to include\n inplace : boolean, defalt False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : DataFrame\n \"\"\"\n if isinstance(axis, (tuple, list)):\n result = self\n for ax in axis:\n result = result.dropna(how=how, thresh=thresh,\n subset=subset, axis=ax)\n else:\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check,subset)))\n agg_obj = self.take(indices,axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == 'any':\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == 'all':\n mask = count > 0\n else:\n if how is not None:\n raise ValueError('invalid how option: %s' % how)\n else:\n raise TypeError('must specify how or thresh')\n\n result = self.take(mask.nonzero()[0], axis=axis, convert=False)\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)\n def drop_duplicates(self, subset=None, keep='first', inplace=False):\n \"\"\"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n take_last : deprecated\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n deduplicated : DataFrame\n \"\"\"\n duplicated = self.duplicated(subset, keep=keep)\n\n if inplace:\n inds, = (-duplicated).nonzero()\n new_data = self._data.take(inds)\n self._update_inplace(new_data)\n else:\n return self[-duplicated]\n\n @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)\n def duplicated(self, subset=None, keep='first'):\n \"\"\"\n Return boolean Series denoting duplicate rows, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the\n first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the\n last occurrence.\n - False : Mark all duplicates as ``True``.\n take_last : deprecated\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n duplicated : Series\n \"\"\"\n from pandas.core.groupby import get_group_index\n from pandas.core.algorithms import factorize\n from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT\n\n def f(vals):\n\n # if we have integers we can directly index with these\n if com.is_integer_dtype(vals):\n from pandas.core.nanops import unique1d\n labels, shape = vals, unique1d(vals)\n else:\n 
labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))\n return labels.astype('i8',copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif not np.iterable(subset) or \\\n isinstance(subset, compat.string_types) or \\\n isinstance(subset, tuple) and subset in self.columns:\n subset = subset,\n\n vals = (self[col].values for col in subset)\n labels, shape = map(list, zip( * map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n return Series(duplicated_int64(ids, keep), index=self.index)\n\n #----------------------------------------------------------------------\n # Sorting\n\n @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)\n def sort_values(self, by, axis=0, ascending=True, inplace=False,\n kind='quicksort', na_position='last'):\n\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n\n if axis != 0:\n raise ValueError('When sorting by column, axis must be 0 '\n '(rows)')\n if not isinstance(by, list):\n by = [by]\n if com.is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError('Length of ascending (%d) != length of by'\n ' (%d)' % (len(ascending), len(by)))\n if len(by) > 1:\n from pandas.core.groupby import _lexsort_indexer\n\n def trans(v):\n if com.needs_i8_conversion(v):\n return v.view('i8')\n return v\n keys = []\n for x in by:\n k = self[x].values\n if k.ndim == 2:\n raise ValueError('Cannot sort by duplicate column %s' % str(x))\n keys.append(trans(k))\n indexer = _lexsort_indexer(keys, orders=ascending,\n na_position=na_position)\n indexer = com._ensure_platform_int(indexer)\n else:\n from pandas.core.groupby import _nargsort\n\n by = by[0]\n k = self[by].values\n if k.ndim == 2:\n\n # try to be helpful\n if isinstance(self.columns, MultiIndex):\n raise ValueError('Cannot sort by column %s in a multi-index'\n ' you need to explicity provide all the levels'\n % str(by))\n\n raise ValueError('Cannot sort by duplicate column %s'\n % str(by))\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = _nargsort(k, kind=kind, ascending=ascending,\n na_position=na_position)\n\n new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),\n convert=False, verify=False)\n\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def sort(self, columns=None, axis=0, ascending=True,\n inplace=False, kind='quicksort', na_position='last'):\n \"\"\"\n DEPRECATED: use :meth:`DataFrame.sort_values`\n\n Sort DataFrame either by labels (along either axis) or by the values in\n column(s)\n\n Parameters\n ----------\n columns : object\n Column name(s) in frame. Accepts a column name or a list\n for a nested sort. A tuple will be interpreted as the\n levels of a multi-index.\n ascending : boolean or list, default True\n Sort ascending vs. descending. 
Specify list for multiple sort\n orders\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Sort index/rows versus columns\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n This option is only applied when sorting on a single column or label.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Examples\n --------\n >>> result = df.sort(['A', 'B'], ascending=[1, 0])\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n\n if columns is None:\n warnings.warn(\"sort(....) is deprecated, use sort_index(.....)\",\n FutureWarning, stacklevel=2)\n return self.sort_index(axis=axis, ascending=ascending, inplace=inplace)\n\n warnings.warn(\"sort(columns=....) is deprecated, use sort_values(by=.....)\",\n FutureWarning, stacklevel=2)\n return self.sort_values(by=columns, axis=axis, ascending=ascending,\n inplace=inplace, kind=kind, na_position=na_position)\n\n @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)\n def sort_index(self, axis=0, level=None, ascending=True, inplace=False,\n kind='quicksort', na_position='last', sort_remaining=True, by=None):\n\n # 10726\n if by is not None:\n warnings.warn(\"by argument to sort_index is deprecated, pls use .sort_values(by=...)\",\n FutureWarning, stacklevel=2)\n if level is not None:\n raise ValueError(\"unable to simultaneously sort by and level\")\n return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)\n\n\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n\n # sort by the index\n if level is not None:\n\n new_axis, indexer = labels.sortlevel(level, ascending=ascending,\n sort_remaining=sort_remaining)\n\n elif isinstance(labels, MultiIndex):\n from pandas.core.groupby import _lexsort_indexer\n\n # make sure that the axis is lexsorted to start\n # if not we need to reconstruct to get the correct indexer\n if not labels.is_lexsorted():\n labels = MultiIndex.from_tuples(labels.values)\n\n indexer = _lexsort_indexer(labels.labels, orders=ascending,\n na_position=na_position)\n else:\n from pandas.core.groupby import _nargsort\n\n indexer = _nargsort(labels, kind=kind, ascending=ascending,\n na_position=na_position)\n\n new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis),\n convert=False, verify=False)\n\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def sortlevel(self, level=0, axis=0, ascending=True,\n inplace=False, sort_remaining=True):\n \"\"\"\n Sort multilevel index by chosen axis and primary level. 
Data will be\n lexicographically sorted by the chosen level followed by the other\n levels (in order)\n\n Parameters\n ----------\n level : int\n axis : {0 or 'index', 1 or 'columns'}, default 0\n ascending : boolean, default True\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n sort_remaining : boolean, default True\n Sort by the other levels too.\n\n Returns\n -------\n sorted : DataFrame\n\n See Also\n --------\n DataFrame.sort_index(level=...)\n\n \"\"\"\n return self.sort_index(level=level, axis=axis, ascending=ascending,\n inplace=inplace, sort_remaining=sort_remaining)\n\n\n def _nsorted(self, columns, n, method, keep):\n if not com.is_list_like(columns):\n columns = [columns]\n columns = list(columns)\n ser = getattr(self[columns[0]], method)(n, keep=keep)\n ascending = dict(nlargest=False, nsmallest=True)[method]\n return self.loc[ser.index].sort_values(columns, ascending=ascending,\n kind='mergesort')\n\n def nlargest(self, n, columns, keep='first'):\n \"\"\"Get the rows of a DataFrame sorted by the `n` largest\n values of `columns`.\n\n .. versionadded:: 0.17.0\n\n Parameters\n ----------\n n : int\n Number of items to retrieve\n columns : list or str\n Column name or names to order by\n keep : {'first', 'last', False}, default 'first'\n Where there are duplicate values:\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> df = DataFrame({'a': [1, 10, 8, 11, -1],\n ... 'b': list('abdce'),\n ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})\n >>> df.nlargest(3, 'a')\n a b c\n 3 11 c 3\n 1 10 b 2\n 2 8 d NaN\n \"\"\"\n return self._nsorted(columns, n, 'nlargest', keep)\n\n def nsmallest(self, n, columns, keep='first'):\n \"\"\"Get the rows of a DataFrame sorted by the `n` smallest\n values of `columns`.\n\n .. versionadded:: 0.17.0\n\n Parameters\n ----------\n n : int\n Number of items to retrieve\n columns : list or str\n Column name or names to order by\n keep : {'first', 'last', False}, default 'first'\n Where there are duplicate values:\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> df = DataFrame({'a': [1, 10, 8, 11, -1],\n ... 'b': list('abdce'),\n ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})\n >>> df.nsmallest(3, 'a')\n a b c\n 4 -1 e 4\n 0 1 a 1\n 2 8 d NaN\n \"\"\"\n return self._nsorted(columns, n, 'nsmallest', keep)\n\n def swaplevel(self, i, j, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : type of caller (new object)\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n result.index = result.index.swaplevel(i, j)\n else:\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order, axis=0):\n \"\"\"\n Rearrange index levels using input order.\n May not drop or duplicate levels\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : int\n Where to reorder levels.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis),\n MultiIndex): # pragma: no cover\n raise TypeError('Can only reorder levels on a hierarchical axis.')\n\n result = self.copy()\n\n if axis == 0:\n result.index = result.index.reorder_levels(order)\n else:\n result.columns = result.columns.reorder_levels(order)\n return result\n\n #----------------------------------------------------------------------\n # Arithmetic / combination related\n\n def _combine_frame(self, other, func, fill_value=None, level=None):\n this, other = self.align(other, join='outer', level=level, copy=False)\n new_index, new_columns = this.index, this.columns\n\n def _arith_op(left, right):\n if fill_value is not None:\n left_mask = isnull(left)\n right_mask = isnull(right)\n left = left.copy()\n right = right.copy()\n\n # one but not both\n mask = left_mask ^ right_mask\n left[left_mask & mask] = fill_value\n right[right_mask & mask] = fill_value\n\n return func(left, right)\n\n if this._is_mixed_type or other._is_mixed_type:\n\n # unique\n if this.columns.is_unique:\n\n def f(col):\n r = _arith_op(this[col].values, other[col].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([(col, f(col)) for col in this])\n\n # non-unique\n else:\n\n def f(i):\n r = _arith_op(this.iloc[:, i].values,\n other.iloc[:, i].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([\n (i, f(i)) for i, col in enumerate(this.columns)\n ])\n result = self._constructor(result, index=new_index, copy=False)\n result.columns = new_columns\n return result\n\n else:\n result = _arith_op(this.values, other.values)\n\n return self._constructor(result, index=new_index,\n columns=new_columns, copy=False)\n\n def _combine_series(self, other, func, fill_value=None, axis=None,\n level=None):\n if axis is not None:\n axis = self._get_axis_name(axis)\n if axis == 'index':\n return self._combine_match_index(other, func, level=level, fill_value=fill_value)\n else:\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n return self._combine_series_infer(other, func, level=level, fill_value=fill_value)\n\n def _combine_series_infer(self, other, func, level=None, fill_value=None):\n if len(other) == 0:\n return self * NA\n\n if len(self) == 0:\n # Ambiguous case, use _series so works with DataFrame\n return self._constructor(data=self._series, index=self.index,\n columns=self.columns)\n\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n\n def _combine_match_index(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=0, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported.\" %\n fill_value)\n return self._constructor(func(left.values.T, right.values).T,\n index=left.index,\n columns=self.columns, copy=False)\n\n def _combine_match_columns(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=1, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported\" %\n fill_value)\n\n new_data = left._data.eval(\n func=func, other=right, axes=[left.columns, self.index])\n return self._constructor(new_data)\n\n def 
_combine_const(self, other, func, raise_on_error=True):\n if self.empty:\n return self\n\n new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)\n return self._constructor(new_data)\n\n def _compare_frame_evaluate(self, other, func, str_rep):\n\n # unique\n if self.columns.is_unique:\n def _compare(a, b):\n return dict([(col, func(a[col], b[col])) for col in a.columns])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n return self._constructor(data=new_data, index=self.index,\n columns=self.columns, copy=False)\n # non-unique\n else:\n def _compare(a, b):\n return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))\n for i, col in enumerate(a.columns)])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n result = self._constructor(data=new_data, index=self.index,\n copy=False)\n result.columns = self.columns\n return result\n\n def _compare_frame(self, other, func, str_rep):\n if not self._indexed_same(other):\n raise ValueError('Can only compare identically-labeled '\n 'DataFrame objects')\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def _flex_compare_frame(self, other, func, str_rep, level):\n if not self._indexed_same(other):\n self, other = self.align(other, 'outer', level=level, copy=False)\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def combine(self, other, func, fill_value=None, overwrite=True):\n \"\"\"\n Add two DataFrame objects and do not propagate NaN values, so if for a\n (column, time) one frame is missing a value, it will default to the\n other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n func : function\n fill_value : scalar value\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n\n Returns\n -------\n result : DataFrame\n \"\"\"\n\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isnull(series)\n other_mask = isnull(otherSeries)\n\n # don't overwrite columns unecessarily\n # DO propogate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n # if we have different dtypes, possibily promote\n new_dtype = this_dtype\n if this_dtype != other_dtype:\n new_dtype = com._lcd_dtypes(this_dtype, other_dtype)\n series = series.astype(new_dtype)\n otherSeries = otherSeries.astype(new_dtype)\n\n # see if we need to be represented as i8 (datetimelike)\n # try to keep us at this dtype\n needs_i8_conversion = com.needs_i8_conversion(new_dtype)\n if needs_i8_conversion:\n this_dtype = new_dtype\n arr = func(series, otherSeries, True)\n else:\n arr = func(series, otherSeries)\n\n if do_fill:\n arr = com.ensure_float(arr)\n arr[this_mask & other_mask] = NA\n\n # try to downcast back to the original dtype\n if needs_i8_conversion:\n arr = com._possibly_cast_to_datetime(arr, 
this_dtype)\n else:\n arr = com._possibly_downcast_to_dtype(arr, this_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result,\n index=new_index,\n columns=new_columns)._convert(datetime=True,\n copy=False)\n\n def combine_first(self, other):\n \"\"\"\n Combine two DataFrame objects and default to non-null values in frame\n calling the method. Result index columns will be the union of the\n respective indexes and columns\n\n Parameters\n ----------\n other : DataFrame\n\n Examples\n --------\n a's values prioritized, use values from b to fill holes:\n\n >>> a.combine_first(b)\n\n\n Returns\n -------\n combined : DataFrame\n \"\"\"\n def combiner(x, y, needs_i8_conversion=False):\n x_values = x.values if hasattr(x, 'values') else x\n y_values = y.values if hasattr(y, 'values') else y\n if needs_i8_conversion:\n mask = isnull(x)\n x_values = x_values.view('i8')\n y_values = y_values.view('i8')\n else:\n mask = isnull(x_values)\n\n return expressions.where(mask, y_values, x_values,\n raise_on_error=True)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify DataFrame in place using non-NA values from passed\n DataFrame. Aligns on indices\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n join : {'left'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. Return True for values\n that should be updated\n raise_conflict : boolean\n If True, will raise an error if the DataFrame and other both\n contain data in the same place.\n \"\"\"\n # TODO: Support other joins\n if join != 'left': # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col].values\n that = other[col].values\n if filter_func is not None:\n mask = ~filter_func(this) | isnull(that)\n else:\n if raise_conflict:\n mask_this = notnull(that)\n mask_that = notnull(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isnull(that)\n\n # don't overwrite columns unecessarily\n if mask.all():\n continue\n else:\n mask = notnull(this)\n\n self[col] = expressions.where(\n mask, this, that, raise_on_error=True)\n\n #----------------------------------------------------------------------\n # Misc methods\n\n def first_valid_index(self):\n \"\"\"\n Return label for first non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][0]\n\n def last_valid_index(self):\n \"\"\"\n Return label for last non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][-1]\n\n #----------------------------------------------------------------------\n # Data reshaping\n\n def pivot(self, index=None, columns=None, values=None):\n \"\"\"\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from index / columns to form axes and return either\n DataFrame or Panel, depending on whether you request a single value\n column (DataFrame) or all columns (Panel)\n\n Parameters\n ----------\n index : string or object, optional\n Column name to use to make new frame's index. 
If None, uses\n existing index.\n columns : string or object\n Column name to use to make new frame's columns\n values : string or object, optional\n Column name to use for populating new frame's values\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods\n\n Examples\n --------\n >>> df\n foo bar baz\n 0 one A 1.\n 1 one B 2.\n 2 one C 3.\n 3 two A 4.\n 4 two B 5.\n 5 two C 6.\n\n >>> df.pivot('foo', 'bar', 'baz')\n A B C\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot('foo', 'bar')['baz']\n A B C\n one 1 2 3\n two 4 5 6\n\n Returns\n -------\n pivoted : DataFrame\n If no values column specified, will have hierarchically indexed\n columns\n \"\"\"\n from pandas.core.reshape import pivot\n return pivot(self, index=index, columns=columns, values=values)\n\n def stack(self, level=-1, dropna=True):\n \"\"\"\n Pivot a level of the (possibly hierarchical) column labels, returning a\n DataFrame (or Series in the case of an object with a single level of\n column labels) having a hierarchical index with a new inner-most level\n of row labels.\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default last level\n Level(s) to stack, can pass level name\n dropna : boolean, default True\n Whether to drop rows in the resulting Frame/Series with no valid\n values\n\n Examples\n ----------\n >>> s\n a b\n one 1. 2.\n two 3. 4.\n\n >>> s.stack()\n one a 1\n b 2\n two a 3\n b 4\n\n Returns\n -------\n stacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n return stack_multiple(self, level, dropna=dropna)\n else:\n return stack(self, level, dropna=dropna)\n\n def unstack(self, level=-1):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels, returning\n a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels. If the index is not a MultiIndex,\n the output will be a Series (the analogue of stack when the columns are\n not a MultiIndex).\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name\n\n See also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.\n b 3.\n two a 2.\n b 4.\n\n Returns\n -------\n unstacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import unstack\n return unstack(self, level)\n\n #----------------------------------------------------------------------\n # Time series-related\n\n def diff(self, periods=1, axis=0):\n \"\"\"\n 1st discrete difference of object\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming difference\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Take difference over rows (0) or columns (1).\n\n .. 
versionadded: 0.16.1\n\n Returns\n -------\n diffed : DataFrame\n \"\"\"\n bm_axis = self._get_block_manager_axis(axis)\n new_data = self._data.diff(n=periods, axis=bm_axis)\n return self._constructor(new_data)\n\n #----------------------------------------------------------------------\n # Function application\n\n def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,\n args=(), **kwds):\n \"\"\"\n Applies function along input axis of DataFrame.\n\n Objects passed to functions are Series objects having index\n either the DataFrame's index (axis=0) or the columns (axis=1).\n Return type depends on whether passed function aggregates, or the\n reduce argument if the DataFrame is empty.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0 or 'index', 1 or 'columns'}, default 0\n * 0 or 'index': apply function to each column\n * 1 or 'columns': apply function to each row\n broadcast : boolean, default False\n For aggregation functions, return object of same size with values\n propagated\n reduce : boolean or None, default None\n Try to apply reduction procedures. If the DataFrame is empty,\n apply will use reduce to determine whether the result should be a\n Series or a DataFrame. If reduce is None (the default), apply's\n return value will be guessed by calling func an empty Series (note:\n while guessing, exceptions raised by func will be ignored). If\n reduce is True a Series will always be returned, and if False a\n DataFrame will always be returned.\n raw : boolean, default False\n If False, convert each row or column into a Series. If raw=True the\n passed function will receive ndarray objects instead. If you are\n just applying a NumPy reduction function this will achieve much\n better performance\n args : tuple\n Positional arguments to pass to function in addition to the\n array/series\n Additional keyword arguments will be passed as keywords to the function\n\n Notes\n -----\n In the current implementation apply calls func twice on the\n first column/row to decide whether it can take a fast or slow\n code path. 
This can lead to unexpected behavior if func has\n side-effects, as they will take effect twice for the first\n column/row.\n\n Examples\n --------\n >>> df.apply(numpy.sqrt) # returns DataFrame\n >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)\n >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)\n\n See also\n --------\n DataFrame.applymap: For elementwise operations\n\n Returns\n -------\n applied : Series or DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if kwds or args and not isinstance(func, np.ufunc):\n f = lambda x: func(x, *args, **kwds)\n else:\n f = func\n\n if len(self.columns) == 0 and len(self.index) == 0:\n return self._apply_empty_result(func, axis, reduce, *args, **kwds)\n\n if isinstance(f, np.ufunc):\n results = f(self.values)\n return self._constructor(data=results, index=self.index,\n columns=self.columns, copy=False)\n else:\n if not broadcast:\n if not all(self.shape):\n return self._apply_empty_result(func, axis, reduce, *args,\n **kwds)\n\n if raw and not self._is_mixed_type:\n return self._apply_raw(f, axis)\n else:\n if reduce is None:\n reduce = True\n return self._apply_standard(f, axis, reduce=reduce)\n else:\n return self._apply_broadcast(f, axis)\n\n def _apply_empty_result(self, func, axis, reduce, *args, **kwds):\n if reduce is None:\n reduce = False\n try:\n reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),\n Series)\n except Exception:\n pass\n\n if reduce:\n return Series(NA, index=self._get_agg_axis(axis))\n else:\n return self.copy()\n\n def _apply_raw(self, func, axis):\n try:\n result = lib.reduce(self.values, func, axis=axis)\n except Exception:\n result = np.apply_along_axis(func, axis, self.values)\n\n # TODO: mixed type case\n if result.ndim == 2:\n return DataFrame(result, index=self.index,\n columns=self.columns)\n else:\n return Series(result, index=self._get_agg_axis(axis))\n\n def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):\n\n # skip if we are mixed datelike and trying reduce across axes\n # GH6125\n if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type:\n reduce=False\n\n # try to reduce first (by default)\n # this only matters if the reduction in values is of different dtype\n # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce\n if reduce:\n\n values = self.values\n\n # Create a dummy Series from an empty array\n index = self._get_axis(axis)\n empty_arr = np.empty(len(index), dtype=values.dtype)\n dummy = Series(empty_arr, index=self._get_axis(axis),\n dtype=values.dtype)\n\n try:\n labels = self._get_agg_axis(axis)\n result = lib.reduce(values, func, axis=axis, dummy=dummy,\n labels=labels)\n return Series(result, index=labels)\n except Exception:\n pass\n\n dtype = object if self._is_mixed_type else None\n if axis == 0:\n series_gen = (self._ixs(i,axis=1) for i in range(len(self.columns)))\n res_index = self.columns\n res_columns = self.index\n elif axis == 1:\n res_index = self.index\n res_columns = self.columns\n values = self.values\n series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)\n for i, (arr, name) in\n enumerate(zip(values, res_index)))\n else: # pragma : no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))\n\n i = None\n keys = []\n results = {}\n if ignore_failures:\n successes = []\n for i, v in enumerate(series_gen):\n try:\n results[i] = func(v)\n keys.append(v.name)\n successes.append(i)\n except Exception:\n pass\n # so will work with MultiIndex\n if len(successes) < len(res_index):\n res_index = res_index.take(successes)\n else:\n try:\n for i, v in enumerate(series_gen):\n results[i] = func(v)\n keys.append(v.name)\n except Exception as e:\n if hasattr(e, 'args'):\n # make sure i is defined\n if i is not None:\n k = res_index[i]\n e.args = e.args + ('occurred at index %s' %\n com.pprint_thing(k),)\n raise\n\n if len(results) > 0 and is_sequence(results[0]):\n if not isinstance(results[0], Series):\n index = res_columns\n else:\n index = None\n\n result = self._constructor(data=results, index=index)\n result.columns = res_index\n\n if axis == 1:\n result = result.T\n result = result._convert(datetime=True, timedelta=True, copy=False)\n\n else:\n\n result = Series(results)\n result.index = res_index\n\n return result\n\n def _apply_broadcast(self, func, axis):\n if axis == 0:\n target = self\n elif axis == 1:\n target = self.T\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % axis)\n\n result_values = np.empty_like(target.values)\n columns = target.columns\n for i, col in enumerate(columns):\n result_values[:, i] = func(target[col])\n\n result = self._constructor(result_values, index=target.index,\n columns=target.columns)\n\n if axis == 1:\n result = result.T\n\n return result\n\n def applymap(self, func):\n \"\"\"\n Apply a function to a DataFrame that is intended to operate\n elementwise, i.e. like doing map(func, series) for each series in the\n DataFrame\n\n Parameters\n ----------\n func : function\n Python function, returns a single value from a single value\n\n Returns\n -------\n applied : DataFrame\n\n See also\n --------\n DataFrame.apply : For operations on rows/columns\n\n \"\"\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if com.needs_i8_conversion(x):\n f = com.i8_boxer(x)\n x = lib.map_infer(_values_from_object(x), f)\n return lib.map_infer(_values_from_object(x), func)\n return self.apply(infer)\n\n #----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(self, other, ignore_index=False, verify_integrity=False):\n \"\"\"\n Append rows of `other` to the end of this frame, returning a new\n object. 
Columns not in this frame are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : boolean, default False\n If True, do not use the index labels.\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n Returns\n -------\n appended : DataFrame\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in the\n DataFrame's index, the order of the columns in the resulting DataFrame\n will be unchanged.\n\n See also\n --------\n pandas.concat : General function to concatenate DataFrame, Series\n or Panel objects\n\n Examples\n --------\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError('Can only append a Series if ignore_index=True'\n ' or if the Series has a name')\n\n index = None if other.name is None else [other.name]\n combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()\n other = other.reindex(combined_columns, copy=False)\n other = DataFrame(other.values.reshape((1, len(other))),\n index=index,\n columns=combined_columns)\n other = other._convert(datetime=True, timedelta=True)\n\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list) and not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.ix[:, self.columns]\n\n from pandas.tools.merge import concat\n if isinstance(other, (list, tuple)):\n to_concat = [self] + other\n else:\n to_concat = [self, other]\n return concat(to_concat, ignore_index=ignore_index,\n verify_integrity=verify_integrity)\n\n def join(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n \"\"\"\n Join columns with other DataFrame either on index or on a key\n column. Efficiently Join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series with name field set, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame\n on : column name, tuple/list of column names, or array-like\n Column(s) to use for joining, otherwise join on index. If multiples\n columns given, the passed DataFrame must have a MultiIndex. Can\n pass an array as the join key if not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n sort : boolean, default False\n Order result DataFrame lexicographically by the join key. If False,\n preserves the index order of the calling (left) DataFrame\n\n Notes\n -----\n on, lsuffix, and rsuffix options are not supported when passing a list\n of DataFrame objects\n\n Returns\n -------\n joined : DataFrame\n \"\"\"\n # For SparseDataFrame's benefit\n return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,\n rsuffix=rsuffix, sort=sort)\n\n def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n from pandas.tools.merge import merge, concat\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError('Other Series must have a name')\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n return merge(self, other, left_on=on, how=how,\n left_index=on is None, right_index=True,\n suffixes=(lsuffix, rsuffix), sort=sort)\n else:\n if on is not None:\n raise ValueError('Joining multiple DataFrames only supported'\n ' for joining on index')\n\n # join indexes only using concat\n if how == 'left':\n how = 'outer'\n join_axes = [self.index]\n else:\n join_axes = None\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n if can_concat:\n return concat(frames, axis=1, join=how, join_axes=join_axes,\n verify_integrity=True)\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(joined, frame, how=how,\n left_index=True, right_index=True)\n\n return joined\n\n @Substitution('')\n @Appender(_merge_doc, indents=2)\n def merge(self, right, how='inner', on=None, left_on=None, right_on=None,\n left_index=False, right_index=False, sort=False,\n suffixes=('_x', '_y'), copy=True, indicator=False):\n from pandas.tools.merge import merge\n return merge(self, right, how=how, on=on,\n left_on=left_on, right_on=right_on,\n left_index=left_index, right_index=right_index, sort=sort,\n suffixes=suffixes, copy=copy, indicator=indicator)\n\n def round(self, decimals=0, out=None):\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n .. versionadded:: 0.17.0\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.random([3, 3]),\n ... 
columns=['A', 'B', 'C'], index=['first', 'second', 'third'])\n >>> df\n A B C\n first 0.028208 0.992815 0.173891\n second 0.038683 0.645646 0.577595\n third 0.877076 0.149370 0.491027\n >>> df.round(2)\n A B C\n first 0.03 0.99 0.17\n second 0.04 0.65 0.58\n third 0.88 0.15 0.49\n >>> df.round({'A': 1, 'C': 2})\n A B C\n first 0.0 0.992815 0.17\n second 0.0 0.645646 0.58\n third 0.9 0.149370 0.49\n >>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])\n >>> df.round(decimals)\n A B C\n first 0.0 1 0.17\n second 0.0 1 0.58\n third 0.9 0 0.49\n\n Returns\n -------\n DataFrame object\n \"\"\"\n from pandas.tools.merge import concat\n\n def _dict_round(df, decimals):\n for col in df:\n try:\n yield np.round(df[col], decimals[col])\n except KeyError:\n yield df[col]\n\n if isinstance(decimals, (dict, Series)):\n new_cols = [col for col in _dict_round(self, decimals)]\n elif com.is_integer(decimals):\n # Dispatch to numpy.round\n new_cols = [np.round(self[col], decimals) for col in self]\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n if len(new_cols) > 0:\n return concat(new_cols, axis=1)\n else:\n return self\n\n #----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method='pearson', min_periods=1):\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'}\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. Currently only available for pearson\n and spearman correlation\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if method == 'pearson':\n correl = _algos.nancorr(com._ensure_float64(mat),\n minp=min_periods)\n elif method == 'spearman':\n correl = _algos.nancorr_spearman(com._ensure_float64(mat),\n minp=min_periods)\n else:\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = NA\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n\n return self._constructor(correl, index=cols, columns=cols)\n\n def cov(self, min_periods=None):\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n y : DataFrame\n\n Notes\n -----\n `y` contains the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-1 (unbiased estimator).\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if notnull(mat).all():\n if min_periods is not None and min_periods > len(mat):\n baseCov = np.empty((mat.shape[1], mat.shape[1]))\n baseCov.fill(np.nan)\n else:\n baseCov = np.cov(mat.T)\n baseCov = baseCov.reshape((len(cols), len(cols)))\n else:\n baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True,\n minp=min_periods)\n\n return 
self._constructor(baseCov, index=cols, columns=cols)\n\n def corrwith(self, other, axis=0, drop=False):\n \"\"\"\n Compute pairwise correlation between rows or columns of two DataFrame\n objects.\n\n Parameters\n ----------\n other : DataFrame\n axis : {0 or 'index', 1 or 'columns'}, default 0\n 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise\n drop : boolean, default False\n Drop missing indices from result, default returns union of all\n\n Returns\n -------\n correls : Series\n \"\"\"\n axis = self._get_axis_number(axis)\n if isinstance(other, Series):\n return self.apply(other.corr, axis=axis)\n\n this = self._get_numeric_data()\n other = other._get_numeric_data()\n\n left, right = this.align(other, join='inner', copy=False)\n\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n if axis == 1:\n left = left.T\n right = right.T\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n if not drop:\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n correl = correl.reindex(result_index)\n\n return correl\n\n #----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(self, axis=0, level=None, numeric_only=False):\n \"\"\"\n Return Series with number of non-NA/null observations over requested\n axis. Works with non-floating point data as well (detects NaN and None)\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a DataFrame\n numeric_only : boolean, default False\n Include only float, int, boolean data\n\n Returns\n -------\n count : Series (or DataFrame if level specified)\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis,\n numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = Series(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type:\n result = notnull(frame).sum(axis=axis)\n else:\n counts = notnull(frame.values).sum(axis=axis)\n result = Series(counts, index=frame._get_agg_axis(axis))\n\n return result.astype('int64')\n\n def _count_level(self, level, axis=0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\"Can only count levels on hierarchical %s.\" %\n self._get_axis_name(axis))\n\n if frame._is_mixed_type:\n # Since we have mixed types, calling notnull(frame.values) might\n # upcast everything to object\n mask = notnull(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n mask = notnull(frame.values)\n\n if axis == 1:\n # We're transposing the mask rather than frame to avoid potential\n # upcasts to object, which induces a ~20x slowdown\n mask = mask.T\n\n if isinstance(level, compat.string_types):\n level = count_axis._get_level_number(level)\n\n level_index = count_axis.levels[level]\n labels = com._ensure_int64(count_axis.labels[level])\n counts = 
lib.count_level_2d(mask, labels, len(level_index), axis=0)\n\n result = DataFrame(counts, index=level_index,\n columns=agg_axis)\n\n if axis == 1:\n # Undo our earlier transpose\n return result.T\n else:\n return result\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n axis = self._get_axis_number(axis)\n f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)\n labels = self._get_agg_axis(axis)\n\n # exclude timedelta/datetime unless we are uniform types\n if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:\n numeric_only = True\n\n if numeric_only is None:\n try:\n values = self.values\n result = f(values)\n except Exception as e:\n\n # try by-column first\n if filter_type is None and axis == 0:\n try:\n\n # this can end up with a non-reduction\n # but not always. if the types are mixed\n # with datelike then need to make sure a series\n result = self.apply(f,reduce=False)\n if result.ndim == self.ndim:\n result = result.iloc[0]\n return result\n except:\n pass\n\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n e = NotImplementedError(\"Handling exception with filter_\"\n \"type %s not implemented.\"\n % filter_type)\n raise_with_traceback(e)\n result = f(data.values)\n labels = data._get_agg_axis(axis)\n else:\n if numeric_only:\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n msg = (\"Generating numeric_only data with filter_type %s\"\n \"not supported.\" % filter_type)\n raise NotImplementedError(msg)\n values = data.values\n labels = data._get_agg_axis(axis)\n else:\n values = self.values\n result = f(values)\n\n if is_object_dtype(result.dtype):\n try:\n if filter_type is None or filter_type == 'numeric':\n result = result.astype(np.float64)\n elif filter_type == 'bool' and notnull(result).all():\n result = result.astype(np.bool_)\n except (ValueError, TypeError):\n\n # try to coerce to the original dtypes item by item if we can\n if axis == 0:\n result = com._coerce_to_dtypes(result, self.dtypes)\n\n return Series(result, index=labels)\n\n def idxmin(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA\n\n Returns\n -------\n idxmin : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n See Also\n --------\n Series.idxmin\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n skipna : boolean, default True\n Exclude NA/null values. 
If an entire row/column is NA, the result\n will be first index.\n\n Returns\n -------\n idxmax : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n See Also\n --------\n Series.idxmax\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num):\n \"\"\" let's be explict about this \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)\n\n def mode(self, axis=0, numeric_only=False):\n \"\"\"\n Gets the mode(s) of each element along the axis selected. Empty if nothing\n has 2+ occurrences. Adds a row for each mode per label, fills in gaps\n with nan.\n\n Note that there could be multiple values returned for the selected\n axis (when more than one item share the maximum frequency), which is the\n reason why a dataframe is returned. If you want to impute missing values\n with the mode in a dataframe ``df``, you can just do this:\n ``df.fillna(df.mode().iloc[0])``\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n * 0 or 'index' : get mode of each column\n * 1 or 'columns' : get mode of each row\n numeric_only : boolean, default False\n if True, only apply to numeric columns\n\n Returns\n -------\n modes : DataFrame (sorted)\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})\n >>> df.mode()\n A\n 0 1\n 1 2\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n f = lambda s: s.mode()\n return data.apply(f, axis=axis)\n\n def quantile(self, q=0.5, axis=0, numeric_only=True):\n \"\"\"\n Return values at the given quantile over requested axis, a la\n numpy.percentile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n axis : {0, 1, 'index', 'columns'} (default 0)\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n\n\n Returns\n -------\n quantiles : Series or DataFrame\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n\n >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n \"\"\"\n self._check_percentile(q)\n per = np.asarray(q) * 100\n\n if not com.is_list_like(per):\n per = [per]\n q = [q]\n squeeze = True\n else:\n squeeze = False\n\n def f(arr, per):\n if arr._is_datelike_mixed_type:\n values = _values_from_object(arr).view('i8')\n else:\n values = arr.astype(float)\n values = values[notnull(values)]\n if len(values) == 0:\n return NA\n else:\n return _quantile(values, per)\n\n data = self._get_numeric_data() if numeric_only else self\n\n axis = self._get_axis_number(axis)\n\n if axis == 1:\n data = data.T\n\n # need to know which cols are timestamp going in so that we can\n # map timestamp over them after getting the quantile.\n is_dt_col = data.dtypes.map(com.is_datetime64_dtype)\n is_dt_col = is_dt_col[is_dt_col].index\n\n quantiles = [[f(vals, x) for x 
in per]\n for (_, vals) in data.iteritems()]\n\n result = self._constructor(quantiles, index=data._info_axis,\n columns=q).T\n if len(is_dt_col) > 0:\n result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp)\n if squeeze:\n if result.shape == (1, 1):\n result = result.T.iloc[:, 0] # don't want scalar\n else:\n result = result.T.squeeze()\n result.name = None # For groupby, so it can set an index name\n return result\n\n def rank(self, axis=0, numeric_only=None, method='average',\n na_option='keep', ascending=True, pct=False):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Ranks over columns (0) or rows (1)\n numeric_only : boolean, default None\n Include only float, int, boolean data\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if numeric_only is None:\n try:\n ranks = algos.rank(self.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option,\n pct=pct)\n return self._constructor(ranks, index=self.index,\n columns=self.columns)\n except TypeError:\n numeric_only = True\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n ranks = algos.rank(data.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option, pct=pct)\n return self._constructor(ranks, index=data.index, columns=data.columns)\n\n def to_timestamp(self, freq=None, how='start', axis=0, copy=True):\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period\n\n Parameters\n ----------\n freq : string, default frequency of PeriodIndex\n Desired frequency\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If false then underlying input data is not copied\n\n Returns\n -------\n df : DataFrame with DatetimeIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. 
Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def to_period(self, freq=None, axis=0, copy=True):\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed)\n\n Parameters\n ----------\n freq : string, default\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If False then underlying input data is not copied\n\n Returns\n -------\n ts : TimeSeries with PeriodIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_period(freq=freq))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_period(freq=freq))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def isin(self, values):\n \"\"\"\n Return boolean DataFrame showing whether each element in the\n DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dictionary\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dictionary, the keys must be the column names,\n which must match. If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n\n DataFrame of booleans\n\n Examples\n --------\n When ``values`` is a list:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> df.isin([1, 3, 12, 'a'])\n A B\n 0 True True\n 1 False False\n 2 True False\n\n When ``values`` is a dict:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})\n >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})\n A B\n 0 True False # Note that B didn't match the 1 here.\n 1 False True\n 2 True True\n\n When ``values`` is a Series or DataFrame:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})\n >>> df.isin(other)\n A B\n 0 True False\n 1 False False # Column A in `other` has a 3, but not at index 1.\n 2 True True\n \"\"\"\n if isinstance(values, dict):\n from collections import defaultdict\n from pandas.tools.merge import concat\n values = defaultdict(list, values)\n return concat((self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)), axis=1)\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis='index')\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\"only list-like or dict-like objects are\"\n \" allowed to be passed to DataFrame.isin(), \"\n \"you passed a \"\n \"{0!r}\".format(type(values).__name__))\n return DataFrame(lib.ismember(self.values.ravel(),\n set(values)).reshape(self.shape),\n self.index,\n self.columns)\n\n #----------------------------------------------------------------------\n # Deprecated stuff\n\n def combineAdd(self, other):\n \"\"\"\n DEPRECATED. 
Use ``DataFrame.add(other, fill_value=0.)`` instead.\n\n Add two DataFrame objects and do not propagate\n NaN values, so if for a (column, time) one frame is missing a\n value, it will default to the other frame's value (which might\n be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n\n See also\n --------\n DataFrame.add\n\n \"\"\"\n warnings.warn(\"'combineAdd' is deprecated. Use \"\n \"'DataFrame.add(other, fill_value=0.)' instead\",\n FutureWarning, stacklevel=2)\n return self.add(other, fill_value=0.)\n\n def combineMult(self, other):\n \"\"\"\n DEPRECATED. Use ``DataFrame.mul(other, fill_value=1.)`` instead.\n\n Multiply two DataFrame objects and do not propagate NaN values, so if\n for a (column, time) one frame is missing a value, it will default to\n the other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n\n See also\n --------\n DataFrame.mul\n\n \"\"\"\n warnings.warn(\"'combineMult' is deprecated. Use \"\n \"'DataFrame.mul(other, fill_value=1.)' instead\",\n FutureWarning, stacklevel=2)\n return self.mul(other, fill_value=1.)\n\n\nDataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,\n axes_are_reversed=True, aliases={'rows': 0})\nDataFrame._add_numeric_operations()\n\n_EMPTY_SERIES = Series([])\n\ndef _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a lot of exceptional cases.\n \"\"\"\n # figure out the index, if necessary\n if index is None:\n index = extract_index(arrays)\n else:\n index = _ensure_index(index)\n\n # don't force copy because getting jammed in an ndarray anyway\n arrays = _homogenize(arrays, index, dtype)\n\n # from BlockManager perspective\n axes = [_ensure_index(columns), _ensure_index(index)]\n\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n\ndef extract_index(data):\n from pandas.core.index import _union_indexes\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_series = False\n have_dicts = False\n\n for v in data:\n if isinstance(v, Series):\n have_series = True\n indexes.append(v.index)\n elif isinstance(v, dict):\n have_dicts = True\n indexes.append(list(v.keys()))\n elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:\n have_raw_arrays = True\n raw_lengths.append(len(v))\n\n if not indexes and not raw_lengths:\n raise ValueError('If using all scalar values, you must pass'\n ' an index')\n\n if have_series or have_dicts:\n index = _union_indexes(indexes)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('arrays must all be same length')\n\n if have_dicts:\n raise ValueError('Mixing dicts with non-Series may lead to '\n 'ambiguous ordering.')\n\n if have_series:\n if lengths[0] != len(index):\n msg = ('array length %d does not match index length %d'\n % (lengths[0], len(index)))\n raise ValueError(msg)\n else:\n index = Index(np.arange(lengths[0]))\n\n return _ensure_index(index)\n\n\ndef _prep_ndarray(values, copy=True):\n if not isinstance(values, (np.ndarray, Series, Index)):\n if len(values) == 0:\n return np.empty((0, 0), dtype=object)\n\n def convert(v):\n return com._possibly_convert_platform(v)\n\n # we could have a 1-dim or 2-dim list here\n # this is equiv of np.asarray, but does object conversion\n # and platform dtype 
preservation\n try:\n if com.is_list_like(values[0]) or hasattr(values[0], 'len'):\n values = np.array([convert(v) for v in values])\n else:\n values = convert(values)\n except:\n values = convert(values)\n\n else:\n\n # drop subclass info, do not copy data\n values = np.asarray(values)\n if copy:\n values = values.copy()\n\n if values.ndim == 1:\n values = values.reshape((values.shape[0], 1))\n elif values.ndim != 2:\n raise ValueError('Must pass 2-d input')\n\n return values\n\n\ndef _to_arrays(data, columns, coerce_float=False, dtype=None):\n \"\"\"\n Return list of arrays, columns\n \"\"\"\n if isinstance(data, DataFrame):\n if columns is not None:\n arrays = [data._ixs(i,axis=1).values for i, col in enumerate(data.columns)\n if col in columns]\n else:\n columns = data.columns\n arrays = [data._ixs(i,axis=1).values for i in range(len(columns))]\n\n return arrays, columns\n\n if not len(data):\n if isinstance(data, np.ndarray):\n columns = data.dtype.names\n if columns is not None:\n return [[]] * len(columns), columns\n return [], [] # columns if columns is not None else []\n if isinstance(data[0], (list, tuple)):\n return _list_to_arrays(data, columns, coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], collections.Mapping):\n return _list_of_dict_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Series):\n return _list_of_series_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Categorical):\n if columns is None:\n columns = _default_index(len(data))\n return data, columns\n elif (isinstance(data, (np.ndarray, Series, Index))\n and data.dtype.names is not None):\n\n columns = list(data.dtype.names)\n arrays = [data[k] for k in columns]\n return arrays, columns\n else:\n # last ditch effort\n data = lmap(tuple, data)\n return _list_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n\n\ndef _masked_rec_array_to_mgr(data, index, columns, dtype, copy):\n \"\"\" extract from a masked rec array and create the manager \"\"\"\n\n # essentially process a record array then fill it\n fill_value = data.fill_value\n fdata = ma.getdata(data)\n if index is None:\n index = _get_names_from_index(fdata)\n if index is None:\n index = _default_index(len(data))\n index = _ensure_index(index)\n\n if columns is not None:\n columns = _ensure_index(columns)\n arrays, arr_columns = _to_arrays(fdata, columns)\n\n # fill if needed\n new_arrays = []\n for fv, arr, col in zip(fill_value, arrays, arr_columns):\n mask = ma.getmaskarray(data[col])\n if mask.any():\n arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)\n arr[mask] = fv\n new_arrays.append(arr)\n\n # create the manager\n arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)\n if columns is None:\n columns = arr_columns\n\n mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)\n\n if copy:\n mgr = mgr.copy()\n return mgr\n\n\ndef _reorder_arrays(arrays, arr_columns, columns):\n # reorder according to the columns\n if (columns is not None and len(columns) and arr_columns is not None and\n len(arr_columns)):\n indexer = _ensure_index(\n arr_columns).get_indexer(columns)\n arr_columns = _ensure_index(\n [arr_columns[i] for i in indexer])\n arrays = [arrays[i] for i in indexer]\n return arrays, arr_columns\n\n\ndef _list_to_arrays(data, columns, coerce_float=False, dtype=None):\n if len(data) > 0 and isinstance(data[0], tuple):\n content = list(lib.to_object_array_tuples(data).T)\n else:\n # list of lists\n 
content = list(lib.to_object_array(data).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):\n from pandas.core.index import _get_combined_index\n\n if columns is None:\n columns = _get_combined_index([\n s.index for s in data if getattr(s, 'index', None) is not None\n ])\n\n indexer_cache = {}\n\n aligned_values = []\n for s in data:\n index = getattr(s, 'index', None)\n if index is None:\n index = _default_index(len(s))\n\n if id(index) in indexer_cache:\n indexer = indexer_cache[id(index)]\n else:\n indexer = indexer_cache[id(index)] = index.get_indexer(columns)\n\n values = _values_from_object(s)\n aligned_values.append(com.take_1d(values, indexer))\n\n values = np.vstack(aligned_values)\n\n if values.dtype == np.object_:\n content = list(values.T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n else:\n return values.T, columns\n\n\ndef _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):\n if columns is None:\n gen = (list(x.keys()) for x in data)\n columns = lib.fast_unique_multiple_list_gen(gen)\n\n # assure that they are of the base dict class and not of derived\n # classes\n data = [(type(d) is dict) and d or dict(d) for d in data]\n\n content = list(lib.dicts_to_array(data, list(columns)).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _convert_object_array(content, columns, coerce_float=False, dtype=None):\n if columns is None:\n columns = _default_index(len(content))\n else:\n if len(columns) != len(content): # pragma: no cover\n # caller's responsibility to check for this...\n raise AssertionError('%d columns passed, passed data had %s '\n 'columns' % (len(columns), len(content)))\n\n # provide soft conversion of object dtypes\n def convert(arr):\n if dtype != object and dtype != np.object:\n arr = lib.maybe_convert_objects(arr, try_float=coerce_float)\n arr = com._possibly_cast_to_datetime(arr, dtype)\n return arr\n\n arrays = [ convert(arr) for arr in content ]\n\n return arrays, columns\n\n\ndef _get_names_from_index(data):\n index = lrange(len(data))\n has_some_name = any([getattr(s, 'name', None) is not None for s in data])\n if not has_some_name:\n return index\n\n count = 0\n for i, s in enumerate(data):\n n = getattr(s, 'name', None)\n if n is not None:\n index[i] = n\n else:\n index[i] = 'Unnamed %d' % count\n count += 1\n\n return index\n\n\ndef _homogenize(data, index, dtype=None):\n from pandas.core.series import _sanitize_array\n\n oindex = None\n homogenized = []\n\n for v in data:\n if isinstance(v, Series):\n if dtype is not None:\n v = v.astype(dtype)\n if v.index is not index:\n # Forces alignment. 
No need to copy data since we\n # are putting it into an ndarray later\n v = v.reindex(index, copy=False)\n else:\n if isinstance(v, dict):\n if oindex is None:\n oindex = index.astype('O')\n\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n v = _dict_compat(v)\n else:\n v = dict(v)\n v = lib.fast_multiget(v, oindex.values, default=NA)\n v = _sanitize_array(v, index, dtype=dtype, copy=False,\n raise_cast_failure=False)\n\n homogenized.append(v)\n\n return homogenized\n\n\ndef _from_nested_dict(data):\n # TODO: this should be seriously cythonized\n new_data = OrderedDict()\n for index, s in compat.iteritems(data):\n for col, v in compat.iteritems(s):\n new_data[col] = new_data.get(col, OrderedDict())\n new_data[col][index] = v\n return new_data\n\n\ndef _put_str(s, space):\n return ('%s' % s)[:space].ljust(space)\n\n\n#----------------------------------------------------------------------\n# Add plotting methods to DataFrame\n\nimport pandas.tools.plotting as gfx\n\nDataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods)\nDataFrame.hist = gfx.hist_frame\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot(self, column=None, by=None, ax=None, fontsize=None,\n rot=0, grid=True, figsize=None, layout=None, return_type=None,\n **kwds):\n import pandas.tools.plotting as plots\n import matplotlib.pyplot as plt\n ax = plots.boxplot(self, column=column, by=by, ax=ax,\n fontsize=fontsize, grid=grid, rot=rot,\n figsize=figsize, layout=layout, return_type=return_type,\n **kwds)\n plt.draw_if_interactive()\n return ax\n\nDataFrame.boxplot = boxplot\n\nops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)\nops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
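The frame.py excerpt above defines, among other methods, DataFrame.pivot, stack/unstack, combine_first and apply. As a minimal usage sketch only (this code is not part of the dumped pandas source; the frame contents and values below are invented for illustration), here is how those methods fit together:

import numpy as np
import pandas as pd

# Illustrative data only (not taken from the source above).
df = pd.DataFrame({'foo': ['one', 'one', 'two', 'two'],
                   'bar': ['A', 'B', 'A', 'B'],
                   'baz': [1.0, 2.0, 3.0, 4.0]})

# pivot: unique 'foo' values become the index, 'bar' values the columns.
wide = df.pivot(index='foo', columns='bar', values='baz')

# stack pivots the column labels into an inner row-index level; unstack inverts it.
long_form = wide.stack()          # Series with a (foo, bar) MultiIndex
round_trip = long_form.unstack()  # back to the wide layout

# combine_first: the calling frame's non-null values win; holes are filled from the argument.
a = pd.DataFrame({'A': [1.0, np.nan], 'B': [np.nan, 4.0]}, index=['x', 'y'])
b = pd.DataFrame({'A': [10.0, 20.0], 'B': [30.0, 40.0]}, index=['x', 'y'])
filled = a.combine_first(b)       # A -> [1.0, 20.0], B -> [30.0, 4.0]

# apply: column-wise (axis=0) aggregation, equivalent here to wide.sum(axis=0).
col_sums = wide.apply(np.sum, axis=0)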
"\"\"\"\nContains data structures designed for manipulating panel (3-dimensional) data\n\"\"\"\n# pylint: disable=E1103,W0231,W0212,W0621\nfrom __future__ import division\nfrom pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict,\n OrderedDefaultdict)\nfrom pandas import compat\nimport warnings\nimport numpy as np\nfrom pandas.core.common import (PandasError, _try_sort, _default_index,\n _infer_dtype_from_scalar, notnull, is_list_like)\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.index import (Index, MultiIndex, _ensure_index,\n _get_combined_index)\nfrom pandas.core.indexing import maybe_droplevels\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.tools.util import cartesian_product\nfrom pandas import compat\nfrom pandas.util.decorators import (deprecate, Appender, Substitution,\n deprecate_kwarg)\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.computation.expressions as expressions\nfrom pandas import lib\nfrom pandas.core.ops import _op_descriptions\n\n\n_shared_doc_kwargs = dict(\n axes='items, major_axis, minor_axis',\n klass=\"Panel\",\n axes_single_arg=\"{0, 1, 2, 'items', 'major_axis', 'minor_axis'}\")\n_shared_doc_kwargs['args_transpose'] = (\"three positional arguments: each one\"\n \"of\\n %s\" %\n _shared_doc_kwargs['axes_single_arg'])\n\n\ndef _ensure_like_indices(time, panels):\n \"\"\"\n Makes sure that time and panels are conformable\n \"\"\"\n n_time = len(time)\n n_panel = len(panels)\n u_panels = np.unique(panels) # this sorts!\n u_time = np.unique(time)\n if len(u_time) == n_time:\n time = np.tile(u_time, len(u_panels))\n if len(u_panels) == n_panel:\n panels = np.repeat(u_panels, len(u_time))\n return time, panels\n\n\ndef panel_index(time, panels, names=['time', 'panel']):\n \"\"\"\n Returns a multi-index suitable for a panel-like DataFrame\n\n Parameters\n ----------\n time : array-like\n Time index, does not have to repeat\n panels : array-like\n Panel index, does not have to repeat\n names : list, optional\n List containing the names of the indices\n\n Returns\n -------\n multi_index : MultiIndex\n Time index is the first level, the panels are the second level.\n\n Examples\n --------\n >>> years = range(1960,1963)\n >>> panels = ['A', 'B', 'C']\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),\n (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),\n (1962, 'C')], dtype=object)\n\n or\n\n >>> import numpy as np\n >>> years = np.repeat(range(1960,1963), 3)\n >>> panels = np.tile(['A', 'B', 'C'], 3)\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),\n (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),\n (1962, 'C')], dtype=object)\n \"\"\"\n time, panels = _ensure_like_indices(time, panels)\n time_factor = Categorical.from_array(time, ordered=True)\n panel_factor = Categorical.from_array(panels, ordered=True)\n\n labels = [time_factor.codes, panel_factor.codes]\n levels = [time_factor.categories, panel_factor.categories]\n return MultiIndex(levels, labels, sortorder=None, names=names,\n verify_integrity=False)\n\n\nclass Panel(NDFrame):\n\n \"\"\"\n Represents wide format panel data, stored as 3-dimensional array\n\n Parameters\n 
----------\n data : ndarray (items x major x minor), or dict of DataFrames\n items : Index or array-like\n axis=0\n major_axis : Index or array-like\n axis=1\n minor_axis : Index or array-like\n axis=2\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n \"\"\"\n\n @property\n def _constructor(self):\n return type(self)\n\n _constructor_sliced = DataFrame\n\n def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,\n copy=False, dtype=None):\n self._init_data(data=data, items=items, major_axis=major_axis,\n minor_axis=minor_axis, copy=copy, dtype=dtype)\n\n def _init_data(self, data, copy, dtype, **kwargs):\n \"\"\"\n Generate ND initialization; axes are passed\n as required objects to __init__\n \"\"\"\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]\n\n if kwargs:\n raise TypeError('_init_data() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n axes = None\n if isinstance(data, BlockManager):\n if any(x is not None for x in passed_axes):\n axes = [x if x is not None else y\n for x, y in zip(passed_axes, data.axes)]\n mgr = data\n elif isinstance(data, dict):\n mgr = self._init_dict(data, passed_axes, dtype=dtype)\n copy = False\n dtype = None\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)\n copy = False\n dtype = None\n elif lib.isscalar(data) and all(x is not None for x in passed_axes):\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n values = np.empty([len(x) for x in passed_axes], dtype=dtype)\n values.fill(data)\n mgr = self._init_matrix(values, passed_axes, dtype=dtype, copy=False)\n copy = False\n else: # pragma: no cover\n raise PandasError('Panel constructor not properly called!')\n\n NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)\n\n def _init_dict(self, data, axes, dtype=None):\n haxis = axes.pop(self._info_axis_number)\n\n # prefilter if haxis passed\n if haxis is not None:\n haxis = _ensure_index(haxis)\n data = OrderedDict((k, v) for k, v\n in compat.iteritems(data) if k in haxis)\n else:\n ks = list(data.keys())\n if not isinstance(data, OrderedDict):\n ks = _try_sort(ks)\n haxis = Index(ks)\n\n for k, v in compat.iteritems(data):\n if isinstance(v, dict):\n data[k] = self._constructor_sliced(v)\n\n # extract axis for remaining axes & create the slicemap\n raxes = [self._extract_axis(self, data, axis=i)\n if a is None else a for i, a in enumerate(axes)]\n raxes_sm = self._extract_axes_for_slice(self, raxes)\n\n # shallow copy\n arrays = []\n haxis_shape = [len(a) for a in raxes]\n for h in haxis:\n v = values = data.get(h)\n if v is None:\n values = np.empty(haxis_shape, dtype=dtype)\n values.fill(np.nan)\n elif isinstance(v, self._constructor_sliced):\n d = raxes_sm.copy()\n d['copy'] = False\n v = v.reindex(**d)\n if dtype is not None:\n v = v.astype(dtype)\n values = v.values\n arrays.append(values)\n\n return self._init_arrays(arrays, haxis, [haxis] + raxes)\n\n def _init_arrays(self, arrays, arr_names, axes):\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n @classmethod\n def from_dict(cls, data, intersect=False, orient='items', dtype=None):\n \"\"\"\n Construct Panel from dict of DataFrame objects\n\n Parameters\n ----------\n data : dict\n {field : DataFrame}\n 
intersect : boolean\n Intersect indexes of input DataFrames\n orient : {'items', 'minor'}, default 'items'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the items of the result panel, pass 'items'\n (default). Otherwise if the columns of the values of the passed\n DataFrame objects should be the items (which in the case of\n mixed-dtype data you should do), instead pass 'minor'\n dtype : dtype, default None\n Data type to force, otherwise infer\n\n Returns\n -------\n Panel\n \"\"\"\n orient = orient.lower()\n if orient == 'minor':\n new_data = OrderedDefaultdict(dict)\n for col, df in compat.iteritems(data):\n for item, s in compat.iteritems(df):\n new_data[item][col] = s\n data = new_data\n elif orient != 'items': # pragma: no cover\n raise ValueError('Orientation must be one of {items, minor}.')\n\n d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)\n ks = list(d['data'].keys())\n if not isinstance(d['data'], OrderedDict):\n ks = list(sorted(ks))\n d[cls._info_axis_name] = Index(ks)\n return cls(**d)\n\n def __getitem__(self, key):\n if isinstance(self._info_axis, MultiIndex):\n return self._getitem_multilevel(key)\n if not (is_list_like(key) or isinstance(key, slice)):\n return super(Panel, self).__getitem__(key)\n return self.ix[key]\n\n def _getitem_multilevel(self, key):\n info = self._info_axis\n loc = info.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_index = info[loc]\n result_index = maybe_droplevels(new_index, key)\n slices = [loc] + [slice(None) for x in range(\n self._AXIS_LEN - 1)]\n new_values = self.values[slices]\n\n d = self._construct_axes_dict(self._AXIS_ORDERS[1:])\n d[self._info_axis_name] = result_index\n result = self._constructor(new_values, **d)\n return result\n else:\n return self._get_item_cache(key)\n\n def _init_matrix(self, data, axes, dtype=None, copy=False):\n values = self._prep_ndarray(self, data, copy=copy)\n\n if dtype is not None:\n try:\n values = values.astype(dtype)\n except Exception:\n raise ValueError('failed to cast to %s' % dtype)\n\n shape = values.shape\n fixed_axes = []\n for i, ax in enumerate(axes):\n if ax is None:\n ax = _default_index(shape[i])\n else:\n ax = _ensure_index(ax)\n fixed_axes.append(ax)\n\n return create_block_manager_from_blocks([values], fixed_axes)\n\n #----------------------------------------------------------------------\n # Comparison methods\n\n def _compare_constructor(self, other, func):\n if not self._indexed_same(other):\n raise Exception('Can only compare identically-labeled '\n 'same type objects')\n\n new_data = {}\n for col in self._info_axis:\n new_data[col] = func(self[col], other[col])\n\n d = self._construct_axes_dict(copy=False)\n return self._constructor(data=new_data, **d)\n\n #----------------------------------------------------------------------\n # Magic methods\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular Panel\n\n Invoked by unicode(df) in py2 only.\n Yields a Unicode String in both py2/py3.\n \"\"\"\n\n class_name = str(self.__class__)\n\n shape = self.shape\n dims = u('Dimensions: %s') % ' x '.join(\n [\"%d (%s)\" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])\n\n def axis_pretty(a):\n v = getattr(self, a)\n if len(v) > 0:\n return u('%s axis: %s to %s') % (a.capitalize(),\n com.pprint_thing(v[0]),\n com.pprint_thing(v[-1]))\n else:\n return u('%s axis: None') % a.capitalize()\n\n output = '\\n'.join(\n [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])\n return 
output\n\n def _get_plane_axes_index(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes\n \"\"\"\n axis_name = self._get_axis_name(axis)\n\n if axis_name == 'major_axis':\n index = 'minor_axis'\n columns = 'items'\n if axis_name == 'minor_axis':\n index = 'major_axis'\n columns = 'items'\n elif axis_name == 'items':\n index = 'major_axis'\n columns = 'minor_axis'\n\n return index, columns\n\n def _get_plane_axes(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes\n \"\"\"\n return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]\n\n fromDict = from_dict\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparsePanel\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparsePanel\n frames = dict(compat.iteritems(self))\n return SparsePanel(frames, items=self.items,\n major_axis=self.major_axis,\n minor_axis=self.minor_axis,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_excel(self, path, na_rep='', engine=None, **kwargs):\n \"\"\"\n Write each DataFrame in Panel to a separate excel sheet\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n if isinstance(path, compat.string_types):\n writer = ExcelWriter(path, engine=engine)\n else:\n writer = path\n kwargs['na_rep'] = na_rep\n\n for item, df in compat.iteritems(self):\n name = str(item)\n df.to_excel(writer, name, **kwargs)\n writer.save()\n\n def as_matrix(self):\n self._consolidate_inplace()\n return self._data.as_matrix()\n\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, *args, **kwargs):\n \"\"\"\n Quickly retrieve single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n nargs = len(args)\n nreq = self._AXIS_LEN\n\n # require an arg for each axis\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis, you gave'\n ' {0} args, but {1} are required'.format(nargs,\n nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('get_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n return lower.get_value(*args[1:], takeable=takeable)\n\n def set_value(self, *args, **kwargs):\n \"\"\"\n Quickly set single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object\n \"\"\"\n # require an arg for each axis and the value\n nargs = len(args)\n nreq = self._AXIS_LEN + 1\n\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis plus the '\n 'value provided, you gave {0} args, but {1} are '\n 'required'.format(nargs, nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('set_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n try:\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n lower.set_value(*args[1:], takeable=takeable)\n return self\n except KeyError:\n axes = self._expand_axes(args)\n d = self._construct_axes_dict_from(self, axes, copy=False)\n result = self.reindex(**d)\n args = list(args)\n likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])\n made_bigger = not np.array_equal(\n axes[0], self._info_axis)\n # how to make this logic simpler?\n if made_bigger:\n com._possibly_cast_item(result, args[0], likely_dtype)\n\n return result.set_value(*args)\n\n def _box_item_values(self, key, values):\n if self.ndim == values.ndim:\n result = self._constructor(values)\n\n # a dup selection will yield a full ndim\n if result._get_axis(0).is_unique:\n result = result[key]\n\n return result\n\n d = 
self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])\n return self._constructor_sliced(values, **d)\n\n def __setitem__(self, key, value):\n shape = tuple(self.shape)\n if isinstance(value, self._constructor_sliced):\n value = value.reindex(\n **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))\n mat = value.values\n elif isinstance(value, np.ndarray):\n if value.shape != shape[1:]:\n raise ValueError(\n 'shape of value must be {0}, shape of given object was '\n '{1}'.format(shape[1:], tuple(map(int, value.shape))))\n mat = np.asarray(value)\n elif np.isscalar(value):\n dtype, value = _infer_dtype_from_scalar(value)\n mat = np.empty(shape[1:], dtype=dtype)\n mat.fill(value)\n else:\n raise TypeError('Cannot set item of type: %s' % str(type(value)))\n\n mat = mat.reshape(tuple([1]) + shape[1:])\n NDFrame._set_item(self, key, mat)\n\n def _unpickle_panel_compat(self, state): # pragma: no cover\n \"Unpickle the panel\"\n _unpickle = com._unpickle_array\n vals, items, major, minor = state\n\n items = _unpickle(items)\n major = _unpickle(major)\n minor = _unpickle(minor)\n values = _unpickle(vals)\n wp = Panel(values, items, major, minor)\n self._data = wp._data\n\n def conform(self, frame, axis='items'):\n \"\"\"\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n \"\"\"\n axes = self._get_plane_axes(axis)\n return frame.reindex(**self._extract_axes_for_slice(self, axes))\n\n def head(self, n=5):\n raise NotImplementedError\n\n def tail(self, n=5):\n raise NotImplementedError\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" don't allow a multi reindex on Panel or above ndim \"\"\"\n return False\n\n def align(self, other, **kwargs):\n raise NotImplementedError\n\n def dropna(self, axis=0, how='any', inplace=False):\n \"\"\"\n Drop 2D from panel, holding passed axis constant\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. 
For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n \"\"\"\n axis = self._get_axis_number(axis)\n\n values = self.values\n mask = com.notnull(values)\n\n for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):\n mask = mask.sum(ax)\n\n per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])\n\n if how == 'all':\n cond = mask > 0\n else:\n cond = mask == per_slice\n\n new_ax = self._get_axis(axis)[cond]\n result = self.reindex_axis(new_ax, axis=axis)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, Panel):\n return self._combine_panel(other, func)\n elif isinstance(other, DataFrame):\n return self._combine_frame(other, func, axis=axis)\n elif np.isscalar(other):\n return self._combine_const(other, func)\n else:\n raise NotImplementedError(str(type(other)) + \n ' is not supported in combine operation with ' + \n str(type(self)))\n\n def _combine_const(self, other, func):\n new_values = func(self.values, other)\n d = self._construct_axes_dict()\n return self._constructor(new_values, **d)\n\n def _combine_frame(self, other, func, axis=0):\n index, columns = self._get_plane_axes(axis)\n axis = self._get_axis_number(axis)\n\n other = other.reindex(index=index, columns=columns)\n\n if axis == 0:\n new_values = func(self.values, other.values)\n elif axis == 1:\n new_values = func(self.values.swapaxes(0, 1), other.values.T)\n new_values = new_values.swapaxes(0, 1)\n elif axis == 2:\n new_values = func(self.values.swapaxes(0, 2), other.values)\n new_values = new_values.swapaxes(0, 2)\n\n return self._constructor(new_values, self.items, self.major_axis,\n self.minor_axis)\n\n def _combine_panel(self, other, func):\n items = self.items.union(other.items)\n major = self.major_axis.union(other.major_axis)\n minor = self.minor_axis.union(other.minor_axis)\n\n # could check that everything's the same size, but forget it\n this = self.reindex(items=items, major=major, minor=minor)\n other = other.reindex(items=items, major=major, minor=minor)\n\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, items, major, minor)\n\n def major_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along major axis\n\n Parameters\n ----------\n key : object\n Major axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> minor axis, columns -> items\n\n Notes\n -----\n major_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 2)\n\n def minor_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along minor axis\n\n Parameters\n ----------\n key : object\n Minor axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> major axis, columns -> items\n\n Notes\n -----\n minor_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers 
<advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 1)\n\n def xs(self, key, axis=1, copy=None):\n \"\"\"\n Return slice of panel along selected axis\n\n Parameters\n ----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self[key]\n\n self._consolidate_inplace()\n axis_number = self._get_axis_number(axis)\n new_data = self._data.xs(key, axis=axis_number, copy=False)\n result = self._construct_return_type(new_data)\n copy = new_data.is_mixed_type\n result._set_is_copy(self, copy=copy)\n return result\n\n _xs = xs\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n ax = self._get_axis(axis)\n key = ax[i]\n\n # xs cannot handle a non-scalar key, so just reindex here\n # if we have a multi-index and a single tuple, then its a reduction (GH 7516)\n if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):\n if is_list_like(key):\n indexer = {self._get_axis_name(axis): key}\n return self.reindex(**indexer)\n\n # a reduction\n if axis == 0:\n values = self._data.iget(i)\n return self._box_item_values(key, values)\n\n # xs by position\n self._consolidate_inplace()\n new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)\n return self._construct_return_type(new_data)\n\n def groupby(self, function, axis='major'):\n \"\"\"\n Group data on given axis, returning GroupBy object\n\n Parameters\n ----------\n function : callable\n Mapping function for chosen access\n axis : {'major', 'minor', 'items'}, default 'major'\n\n Returns\n -------\n grouped : PanelGroupBy\n \"\"\"\n from pandas.core.groupby import PanelGroupBy\n axis = self._get_axis_number(axis)\n return PanelGroupBy(self, function, axis=axis)\n\n def to_frame(self, filter_observations=True):\n \"\"\"\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n _, N, K = self.shape\n\n if filter_observations:\n # shaped like the return DataFrame\n mask = com.notnull(self.values).all(axis=0)\n # size = mask.sum()\n selector = mask.ravel()\n else:\n # size = N * K\n selector = slice(None, None)\n\n data = {}\n for item in self.items:\n data[item] = self[item].values.ravel()[selector]\n\n def construct_multi_parts(idx, n_repeat, n_shuffle=1):\n axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)\n labels = [x[selector] for x in axis_idx.labels]\n levels = axis_idx.levels\n names = axis_idx.names\n return labels, levels, names\n\n def construct_index_parts(idx, major=True):\n levels = [idx]\n if major:\n labels = 
[np.arange(N).repeat(K)[selector]]\n names = idx.name or 'major'\n else:\n labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]\n labels = [labels.ravel()[selector]]\n names = idx.name or 'minor'\n names = [names]\n return labels, levels, names\n\n if isinstance(self.major_axis, MultiIndex):\n major_labels, major_levels, major_names = construct_multi_parts(\n self.major_axis, n_repeat=K)\n else:\n major_labels, major_levels, major_names = construct_index_parts(\n self.major_axis)\n\n if isinstance(self.minor_axis, MultiIndex):\n minor_labels, minor_levels, minor_names = construct_multi_parts(\n self.minor_axis, n_repeat=N, n_shuffle=K)\n else:\n minor_labels, minor_levels, minor_names = construct_index_parts(\n self.minor_axis, major=False)\n\n levels = major_levels + minor_levels\n labels = major_labels + minor_labels\n names = major_names + minor_names\n\n index = MultiIndex(levels=levels, labels=labels,\n names=names, verify_integrity=False)\n\n return DataFrame(data, index=index, columns=self.items)\n\n to_long = deprecate('to_long', to_frame)\n toLong = deprecate('toLong', to_frame)\n\n def apply(self, func, axis='major', **kwargs):\n \"\"\"\n Applies function along input axis of the Panel\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', then the combination of major_axis/minor_axis\n will be passed a Series\n axis : {'major', 'minor', 'items'}\n Additional keyword arguments will be passed as keywords to the function\n\n Examples\n --------\n >>> p.apply(numpy.sqrt) # returns a Panel\n >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)\n >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)\n >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)\n\n Returns\n -------\n result : Pandas Object\n \"\"\"\n\n if kwargs and not isinstance(func, np.ufunc):\n f = lambda x: func(x, **kwargs)\n else:\n f = func\n\n # 2d-slabs\n if isinstance(axis, (tuple,list)) and len(axis) == 2:\n return self._apply_2d(f, axis=axis)\n\n axis = self._get_axis_number(axis)\n\n # try ufunc like\n if isinstance(f, np.ufunc):\n try:\n result = np.apply_along_axis(func, axis, self.values)\n return self._wrap_result(result, axis=axis)\n except (AttributeError):\n pass\n\n # 1d\n return self._apply_1d(f, axis=axis)\n\n def _apply_1d(self, func, axis):\n\n axis_name = self._get_axis_name(axis)\n ax = self._get_axis(axis)\n ndim = self.ndim\n values = self.values\n\n # iter thru the axes\n slice_axis = self._get_axis(axis)\n slice_indexer = [0]*(ndim-1)\n indexer = np.zeros(ndim, 'O')\n indlist = list(range(ndim))\n indlist.remove(axis)\n indexer[axis] = slice(None, None)\n indexer.put(indlist, slice_indexer)\n planes = [ self._get_axis(axi) for axi in indlist ]\n shape = np.array(self.shape).take(indlist)\n\n # all the iteration points\n points = cartesian_product(planes)\n\n results = []\n for i in range(np.prod(shape)):\n\n # construct the object\n pts = tuple([ p[i] for p in points ])\n indexer.put(indlist, slice_indexer)\n\n obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)\n result = func(obj)\n\n results.append(result)\n\n # increment the indexer\n slice_indexer[-1] += 1\n n = -1\n while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):\n slice_indexer[n-1] += 1\n slice_indexer[n] = 0\n n -= 1\n\n # empty object\n if not len(results):\n return self._constructor(**self._construct_axes_dict())\n\n # same ndim as current\n if isinstance(results[0],Series):\n arr = np.vstack([ r.values for r in 
results ])\n arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))\n tranp = np.array([axis]+indlist).argsort()\n arr = arr.transpose(tuple(list(tranp)))\n return self._constructor(arr,**self._construct_axes_dict())\n\n # ndim-1 shape\n results = np.array(results).reshape(shape)\n if results.ndim == 2 and axis_name != self._info_axis_name:\n results = results.T\n planes = planes[::-1]\n return self._construct_return_type(results,planes)\n\n def _apply_2d(self, func, axis):\n \"\"\" handle 2-d slices, equiv to iterating over the other axis \"\"\"\n\n ndim = self.ndim\n axis = [ self._get_axis_number(a) for a in axis ]\n\n # construct slabs, in 2-d this is a DataFrame result\n indexer_axis = list(range(ndim))\n for a in axis:\n indexer_axis.remove(a)\n indexer_axis = indexer_axis[0]\n\n slicer = [ slice(None,None) ] * ndim\n ax = self._get_axis(indexer_axis)\n\n results = []\n for i, e in enumerate(ax):\n\n slicer[indexer_axis] = i\n sliced = self.iloc[tuple(slicer)]\n\n obj = func(sliced)\n results.append((e,obj))\n\n return self._construct_return_type(dict(results))\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n if numeric_only:\n raise NotImplementedError(\n 'Panel.{0} does not implement numeric_only.'.format(name))\n\n axis_name = self._get_axis_name(axis)\n axis_number = self._get_axis_number(axis_name)\n f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)\n\n result = f(self.values)\n\n axes = self._get_plane_axes(axis_name)\n if result.ndim == 2 and axis_name != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n def _construct_return_type(self, result, axes=None):\n \"\"\" return the type for the ndim of the result \"\"\"\n ndim = getattr(result,'ndim',None)\n\n # need to assume they are the same\n if ndim is None:\n if isinstance(result,dict):\n ndim = getattr(list(compat.itervalues(result))[0],'ndim',0)\n\n # have a dict, so top-level is +1 dim\n if ndim != 0:\n ndim += 1\n\n # scalar\n if ndim == 0:\n return Series(result)\n\n # same as self\n elif self.ndim == ndim:\n \"\"\" return the construction dictionary for these axes \"\"\"\n if axes is None:\n return self._constructor(result)\n return self._constructor(result, **self._construct_axes_dict())\n\n # sliced\n elif self.ndim == ndim + 1:\n if axes is None:\n return self._constructor_sliced(result)\n return self._constructor_sliced(\n result, **self._extract_axes_for_slice(self, axes))\n\n raise PandasError('invalid _construct_return_type [self->%s] '\n '[result->%s]' % (self, result))\n\n def _wrap_result(self, result, axis):\n axis = self._get_axis_name(axis)\n axes = self._get_plane_axes(axis)\n if result.ndim == 2 and axis != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else kwargs.pop('minor', None))\n return super(Panel, self).reindex(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else 
kwargs.pop('minor', None))\n return super(Panel, self).rename(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(Panel, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)\n def transpose(self, *args, **kwargs):\n return super(Panel, self).transpose(*args, **kwargs)\n\n @Appender(_shared_docs['fillna'] % _shared_doc_kwargs)\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None, **kwargs):\n return super(Panel, self).fillna(value=value, method=method,\n axis=axis, inplace=inplace,\n limit=limit, downcast=downcast,\n **kwargs)\n\n def count(self, axis='major'):\n \"\"\"\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n \"\"\"\n i = self._get_axis_number(axis)\n\n values = self.values\n mask = np.isfinite(values)\n result = mask.sum(axis=i,dtype='int64')\n\n return self._wrap_result(result, axis)\n\n @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods')\n def shift(self, periods=1, freq=None, axis='major'):\n \"\"\"\n Shift index by desired number of periods with an optional time freq.\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original. This is different\n from the behavior of DataFrame.shift()\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, optional\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n shifted : Panel\n \"\"\"\n if freq:\n return self.tshift(periods, freq, axis=axis)\n\n return super(Panel, self).slice_shift(periods, axis=axis)\n\n def tshift(self, periods=1, freq=None, axis='major'):\n return super(Panel, self).tshift(periods, freq, axis)\n\n def join(self, other, how='left', lsuffix='', rsuffix=''):\n \"\"\"\n Join items with other Panel either on major and minor axes column\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(other, Panel):\n join_major, join_minor = self._get_join_index(other, how)\n this = self.reindex(major=join_major, minor=join_minor)\n other = other.reindex(major=join_major, minor=join_minor)\n merged_data = this._data.merge(other._data, lsuffix, rsuffix)\n return self._constructor(merged_data)\n else:\n if lsuffix or rsuffix:\n raise ValueError('Suffixes not supported when passing '\n 'multiple panels')\n\n if how == 'left':\n how = 'outer'\n join_axes = [self.major_axis, self.minor_axis]\n elif how == 'right':\n raise ValueError('Right join not supported with multiple '\n 'panels')\n else:\n join_axes = None\n\n return concat([self] + list(other), axis=0, join=how,\n join_axes=join_axes, verify_integrity=True)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify Panel in place using non-NA values from passed\n Panel, or object coercible to Panel. Aligns on items\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n join : How to join individual DataFrames\n {'left', 'right', 'outer', 'inner'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling panel\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. 
Return True for values\n that should be updated\n raise_conflict : bool\n If True, will raise an error if a DataFrame and other both\n contain data in the same place.\n \"\"\"\n\n if not isinstance(other, self._constructor):\n other = self._constructor(other)\n\n axis_name = self._info_axis_name\n axis_values = self._info_axis\n other = other.reindex(**{axis_name: axis_values})\n\n for frame in axis_values:\n self[frame].update(other[frame], join, overwrite, filter_func,\n raise_conflict)\n\n def _get_join_index(self, other, how):\n if how == 'left':\n join_major, join_minor = self.major_axis, self.minor_axis\n elif how == 'right':\n join_major, join_minor = other.major_axis, other.minor_axis\n elif how == 'inner':\n join_major = self.major_axis.intersection(other.major_axis)\n join_minor = self.minor_axis.intersection(other.minor_axis)\n elif how == 'outer':\n join_major = self.major_axis.union(other.major_axis)\n join_minor = self.minor_axis.union(other.minor_axis)\n return join_major, join_minor\n\n # miscellaneous data creation\n @staticmethod\n def _extract_axes(self, data, axes, **kwargs):\n \"\"\" return a list of the axis indicies \"\"\"\n return [self._extract_axis(self, data, axis=i, **kwargs) for i, a\n in enumerate(axes)]\n\n @staticmethod\n def _extract_axes_for_slice(self, axes):\n \"\"\" return the slice dictionary for these axes \"\"\"\n return dict([(self._AXIS_SLICEMAP[i], a)\n for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -\n len(axes):], axes)])\n\n @staticmethod\n def _prep_ndarray(self, values, copy=True):\n if not isinstance(values, np.ndarray):\n values = np.asarray(values)\n # NumPy strings are a pain, convert to object\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object, copy=True)\n else:\n if copy:\n values = values.copy()\n if values.ndim != self._AXIS_LEN:\n raise ValueError(\"The number of dimensions required is {0}, \"\n \"but the number of dimensions of the \"\n \"ndarray given was {1}\".format(self._AXIS_LEN,\n values.ndim))\n return values\n\n @staticmethod\n def _homogenize_dict(self, frames, intersect=True, dtype=None):\n \"\"\"\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indicies\n \"\"\"\n\n result = dict()\n # caller differs dict/ODict, presered type\n if isinstance(frames, OrderedDict):\n result = OrderedDict()\n\n adj_frames = OrderedDict()\n for k, v in compat.iteritems(frames):\n if isinstance(v, dict):\n adj_frames[k] = self._constructor_sliced(v)\n else:\n adj_frames[k] = v\n\n axes = self._AXIS_ORDERS[1:]\n axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(\n self, adj_frames, axes, intersect=intersect))])\n\n reindex_dict = dict(\n [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])\n reindex_dict['copy'] = False\n for key, frame in compat.iteritems(adj_frames):\n if frame is not None:\n result[key] = frame.reindex(**reindex_dict)\n else:\n result[key] = None\n\n axes_dict['data'] = result\n axes_dict['dtype'] = dtype\n return axes_dict\n\n @staticmethod\n def _extract_axis(self, data, axis=0, intersect=False):\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_frames = False\n\n for v in data.values():\n if isinstance(v, self._constructor_sliced):\n have_frames = True\n 
indexes.append(v._get_axis(axis))\n elif v is not None:\n have_raw_arrays = True\n raw_lengths.append(v.shape[axis])\n\n if have_frames:\n index = _get_combined_index(indexes, intersect=intersect)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('ndarrays must match shape on axis %d' % axis)\n\n if have_frames:\n if lengths[0] != len(index):\n raise AssertionError('Length of data and index must match')\n else:\n index = Index(np.arange(lengths[0]))\n\n if index is None:\n index = Index([])\n\n return _ensure_index(index)\n\n @classmethod\n def _add_aggregate_operations(cls, use_numexpr=True):\n \"\"\" add the operations to the cls; evaluate the doc strings again \"\"\"\n\n # doc strings substitors\n _agg_doc = \"\"\"\nWrapper method for %%s\n\nParameters\n----------\nother : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\naxis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\nReturns\n-------\n\"\"\" + cls.__name__ + \"\\n\"\n\n def _panel_arith_method(op, name, str_rep=None, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True,\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n if name in _op_descriptions:\n op_name = name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' panel'\n else:\n equiv = 'panel ' + op_desc['op'] + ' other'\n\n _op_doc = \"\"\"\n %%s of series and other, element-wise (binary operator `%%s`).\n Equivalent to ``%%s``.\n\n Parameters\n ----------\n other : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\n axis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\n Returns\n -------\n \"\"\" + cls.__name__ + \"\"\"\n\n See also\n --------\n \"\"\" + cls.__name__ + \".%s\\n\"\n doc = _op_doc % (op_desc['desc'], op_name, equiv, op_desc['reverse'])\n else:\n doc = _agg_doc % name\n\n @Appender(doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n f.__name__ = name\n return f\n\n # add `div`, `mul`, `pow`, etc..\n ops.add_flex_arithmetic_methods(\n cls, _panel_arith_method, use_numexpr=use_numexpr,\n flex_comp_method=ops._comp_method_PANEL)\n\nPanel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],\n info_axis=0,\n stat_axis=1,\n aliases={'major': 'major_axis',\n 'minor': 'minor_axis'},\n slicers={'major_axis': 'index',\n 'minor_axis': 'columns'})\n\nops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)\nPanel._add_aggregate_operations()\nPanel._add_numeric_operations()\n\n# legacy\nclass WidePanel(Panel):\n\n def __init__(self, *args, **kwargs):\n\n # deprecation, #10892\n warnings.warn(\"WidePanel is deprecated. Please use Panel\",\n FutureWarning, stacklevel=2)\n\n super(WidePanel, self).__init__(*args, **kwargs)\n\nclass LongPanel(DataFrame):\n\n def __init__(self, *args, **kwargs):\n\n # deprecation, #10892\n warnings.warn(\"LongPanel is deprecated. Please use DataFrame\",\n FutureWarning, stacklevel=2)\n\n super(LongPanel, self).__init__(*args, **kwargs)\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.issubdtype",
"numpy.median",
"numpy.ones",
"numpy.concatenate",
"numpy.round",
"numpy.apply_along_axis",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.distutils.misc_util.msvc_runtime_library",
"numpy.distutils.log.debug",
"numpy.distutils.misc_util.get_build_architecture",
"numpy.distutils.log.warn",
"numpy.distutils.lib2def.getnm",
"numpy.distutils.lib2def.parse_nm",
"numpy.distutils.log.info"
],
[
"pandas.util.testing.assertIsInstance",
"pandas.Series",
"pandas.compat.BytesIO",
"numpy.linspace",
"pandas.util.testing.ensure_clean",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.io.parsers.TextParser",
"numpy.iinfo",
"pandas.DataFrame.from_records",
"pandas.core.datetools.to_datetime",
"pandas.read_csv",
"numpy.arange",
"pandas.compat.StringIO",
"pandas.compat.text_type",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.compat.lmap",
"pandas.tseries.tools.to_datetime",
"pandas.compat.long",
"pandas.util.testing._skip_if_python26",
"pandas.util.testing._skip_if_32bit",
"pandas.concat",
"pandas.compat.u",
"pandas.compat.is_platform_windows",
"pandas.MultiIndex",
"pandas.util.testing.assert_almost_equal",
"pandas.compat.parse_date",
"pandas.read_table",
"numpy.int64",
"numpy.random.rand",
"pandas.util.testing.assert_equal",
"pandas.io.parsers.read_csv",
"pandas.lib.convert_sql_column",
"numpy.array",
"pandas.util.testing.makeCustomDataframe",
"pandas.isnull",
"pandas.util.testing.assertRaisesRegexp",
"numpy.array_equal",
"pandas.io.parsers.read_table",
"pandas.util.testing.get_data_path",
"pandas.tseries.index.date_range",
"pandas.util.testing.assertRaises",
"pandas.MultiIndex.from_arrays",
"numpy.empty",
"pandas.lib.Timestamp",
"pandas.util.testing.rands",
"pandas.io.parsers._concat_date_cols",
"pandas.io.parsers.read_fwf",
"pandas.Timestamp",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"pandas.PeriodIndex",
"numpy.sqrt",
"pandas.util.testing.makeObjectSeries",
"pandas.util.testing.assert_contains_all",
"pandas.util.testing._skip_if_no_pytz",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"numpy.all",
"pandas.util.testing.assert_index_equal",
"numpy.exp",
"numpy.where",
"numpy.unique",
"pandas.compat.text_type",
"numpy.nansum",
"numpy.diff",
"pandas.util.testing.makePeriodSeries",
"pandas.concat",
"pandas.core.index.MultiIndex.from_arrays",
"scipy.stats.pearsonr",
"numpy.median",
"pandas.offsets.Milli",
"pandas.util.testing.getMixedTypeDict",
"pandas.core.common.is_integer",
"pandas.util.testing.assert_equal",
"pandas.date_range",
"scipy.stats.kurtosis",
"numpy.array",
"pandas.timedelta_range",
"pandas.CategoricalIndex",
"pandas.util.testing.is_sorted",
"pandas.period_range",
"pandas.core.common.is_datetime64tz_dtype",
"numpy.random.shuffle",
"numpy.datetime64",
"scipy.stats.kendalltau",
"numpy.isinf",
"pandas.compat.range",
"numpy.resize",
"pandas.Series",
"pandas.core.common.is_integer_dtype",
"numpy.asarray",
"numpy.var",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.may_share_memory",
"numpy.reshape",
"pandas.compat.StringIO",
"pandas.core.index.MultiIndex.from_tuples",
"numpy.std",
"pandas.core.config.option_context",
"pandas.set_option",
"pandas.util.testing.equalContents",
"scipy.stats.skew",
"pandas.tseries.index.Timestamp",
"pandas.util.testing.makeStringIndex",
"numpy.timedelta64",
"numpy.int64",
"numpy.random.rand",
"pandas.offsets.Second",
"numpy.array_equal",
"pandas.util.testing.assertRaisesRegexp",
"pandas.util.testing.makeFloatSeries",
"numpy.ptp",
"numpy.ones",
"pandas.offsets.Minute",
"pandas.lib.infer_dtype",
"pandas.util.testing.rands",
"numpy.isscalar",
"pandas.to_timedelta",
"pandas.util.testing.makePeriodIndex",
"pandas.util.testing.assert_dict_equal",
"numpy.empty",
"numpy.around",
"pandas.DataFrame",
"numpy.round",
"pandas.compat.iteritems",
"numpy.random.randint",
"numpy.clip",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.util.testing._skip_if_no_scipy",
"numpy.repeat",
"pandas.notnull",
"pandas.bdate_range",
"numpy.isnan",
"pandas.tseries.index.DatetimeIndex",
"pandas.util.testing.assert_almost_equal",
"pandas.Float64Index",
"numpy.argsort",
"pandas.tseries.tdi.Timedelta",
"pandas.util.testing._skip_if_no_dateutil",
"pandas.tslib._dateutil_gettz",
"pandas.isnull",
"pandas.util.testing.assertRaises",
"pandas.util.testing.makeStringSeries",
"pandas.util.testing.makeIntIndex",
"pandas.compat.zip",
"pandas.tseries.tdi.TimedeltaIndex",
"pandas.core.common.pprint_thing",
"pandas.read_pickle",
"pandas.offsets.Hour",
"numpy.loadtxt",
"numpy.dot",
"pandas.core.common.is_list_like",
"pandas.to_datetime",
"pandas.util.testing.assertIsInstance",
"pandas.util.testing.ensure_clean",
"numpy.minimum.accumulate",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"scipy.stats.spearmanr",
"numpy.fix",
"pandas.util.testing.makeDataFrame",
"pandas.util.testing.makeTimeDataFrame",
"numpy.ones_like",
"pandas.core.nanops.nansum",
"numpy.arange",
"numpy.finfo",
"pandas.Series.from_csv",
"pandas.util.testing.makeDateIndex",
"pandas.util.testing.rands_array",
"pandas.core.index.MultiIndex",
"pandas.Categorical",
"pandas.option_context",
"pandas.core.nanops.nangt",
"pandas.util.testing.randn",
"numpy.ma.masked_all",
"numpy.random.random",
"pandas.util.testing.makeTimeSeries",
"numpy.abs",
"scipy.stats.rankdata",
"numpy.isfinite",
"numpy.compress",
"pandas.MultiIndex.from_arrays",
"numpy.sort",
"numpy.maximum.accumulate",
"pandas.Timestamp",
"pandas.compat.lrange"
],
[
"pandas.concat"
],
[
"pandas.compat.StringIO",
"pandas.io.common._expand_user",
"pandas.io.common.get_filepath_or_buffer"
],
[
"pandas.core.common.is_categorical_dtype",
"pandas.core.common.is_bool_dtype",
"pandas.compat.lzip",
"pandas.core.common._default_index",
"pandas.core.common._possibly_infer_to_datetimelike",
"pandas.core.reshape.stack",
"pandas.compat.OrderedDict",
"pandas.compat.raise_with_traceback",
"pandas.core.common._infer_dtype_from_scalar",
"pandas.computation.eval.eval",
"pandas.core.common._dict_compat",
"pandas.util.decorators.Appender",
"pandas.core.common.is_object_dtype",
"pandas.lib.to_object_array_tuples",
"pandas.core.common._try_sort",
"pandas.core.index.MultiIndex.from_arrays",
"pandas.tools.merge.concat",
"pandas.core.reshape.stack_multiple",
"pandas.core.common.is_integer",
"pandas.core.nanops.nanargmax",
"numpy.array",
"pandas.core.common._lcd_dtypes",
"pandas.io.parsers.read_table",
"pandas.core.groupby._lexsort_indexer",
"pandas.core.groupby.get_group_index",
"pandas.core.index._union_indexes",
"pandas.core.common.in_interactive_session",
"numpy.vstack",
"pandas.compat.range",
"pandas.core.generic.NDFrame.__init__",
"pandas.core.common._maybe_upcast",
"pandas.core.common.is_integer_dtype",
"pandas.computation.expressions.where",
"numpy.asarray",
"numpy.ma.getdata",
"pandas.io.stata.StataWriter",
"pandas.core.common._ensure_platform_int",
"pandas.io.gbq.to_gbq",
"pandas.core.common._maybe_upcast_putmask",
"pandas.core.series._sanitize_index",
"numpy.ma.getmaskarray",
"pandas.compat.StringIO",
"pandas.core.common.is_iterator",
"pandas.core.index._ensure_index",
"pandas.core.index.MultiIndex.from_tuples",
"pandas.util.decorators.deprecate",
"pandas.core.common.take_1d",
"pandas.compat.u",
"pandas.core.common._possibly_downcast_to_dtype",
"pandas.tools.merge.merge",
"pandas.computation.expressions.evaluate",
"pandas.core.sparse.SparseDataFrame",
"numpy.rec.fromarrays",
"pandas.core.nanops.get_corr_func",
"pandas.core.common.take_2d_multi",
"numpy.empty",
"pandas.core.index.Index",
"pandas.util.decorators.deprecate_kwarg",
"numpy.issubdtype",
"pandas.core.common._unpickle_array",
"pandas.core.common._get_info_slice",
"numpy.round",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.compat.iteritems",
"pandas.core.common.in_ipython_frontend",
"pandas.core.series.Series",
"pandas.core.common.needs_i8_conversion",
"pandas.core.ops.add_special_arithmetic_methods",
"pandas.core.common._possibly_cast_to_datetime",
"pandas.core.nanops.nanargmin",
"pandas.core.common._ensure_float64",
"pandas.core.common.in_qtconsole",
"pandas.core.common._ensure_int64",
"pandas.core.common.is_datetimetz",
"pandas.core.common.ensure_float",
"pandas.core.reshape.pivot",
"pandas.core.common.PandasError",
"pandas.core.common._possibly_convert_platform",
"pandas.core.base.AccessorProperty",
"pandas.core.algorithms.rank",
"numpy.cov",
"pandas.lib.fast_multiget",
"pandas.tools.plotting.boxplot",
"pandas.core.nanops.unique1d",
"pandas.core.format.ExcelFormatter",
"pandas.lib.fast_unique_multiple_list_gen",
"numpy.iterable",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.io.excel.ExcelWriter",
"pandas.core.common._asarray_tuplesafe",
"pandas.lib.to_object_array",
"pandas.core.internals.create_block_manager_from_arrays",
"pandas.core.common.is_bool_indexer",
"pandas.core.common._maybe_box_datetimelike",
"numpy.percentile",
"matplotlib.pyplot.draw_if_interactive",
"pandas.compat.zip",
"pandas.core.common.pprint_thing",
"pandas.core.format.DataFrameFormatter",
"pandas.core.series.Series.from_array",
"pandas.core.common.is_list_like",
"numpy.dot",
"pandas.lib.reduce",
"pandas.core.common.is_sequence",
"pandas.lib.maybe_convert_objects",
"pandas.core.generic.NDFrame._set_item",
"pandas.core.format._put_lines",
"pandas.core.common.i8_boxer",
"pandas.core.common.notnull",
"pandas.core.indexing.check_bool_indexer",
"pandas.core.config.get_option",
"numpy.empty_like",
"numpy.arange",
"pandas.compat.lmap",
"numpy.apply_along_axis",
"pandas.lib.isscalar",
"pandas.core.common.is_internal_type",
"pandas.core.indexing.convert_to_index_sliceable",
"pandas.core.common._coerce_to_dtypes",
"pandas.core.series._sanitize_array",
"pandas.hashtable.duplicated_int64",
"pandas.core.format.get_console_size",
"pandas.util.decorators.Substitution",
"numpy.isfinite",
"pandas.core.groupby._nargsort",
"pandas.core.reshape.unstack",
"numpy.compress",
"pandas.core.common._values_from_object",
"pandas.core.common.isnull",
"pandas.core.common.is_datetime64_dtype",
"pandas.core.common._invalidate_string_dtypes",
"pandas.core.indexing.maybe_droplevels"
],
[
"pandas.core.generic.NDFrame.__init__",
"pandas.core.common.is_list_like",
"pandas.util.decorators.deprecate_kwarg",
"pandas.core.categorical.Categorical.from_array",
"numpy.asarray",
"pandas.compat.range",
"pandas.compat.map",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.core.generic.NDFrame._set_item",
"pandas.compat.iteritems",
"pandas.core.frame.DataFrame",
"pandas.core.common._default_index",
"pandas.core.groupby.PanelGroupBy",
"pandas.core.common.notnull",
"pandas.compat.OrderedDefaultdict",
"pandas.compat.OrderedDict",
"pandas.core.series.Series",
"numpy.unique",
"pandas.core.ops.add_special_arithmetic_methods",
"numpy.arange",
"pandas.core.common._infer_dtype_from_scalar",
"pandas.core.index._ensure_index",
"pandas.util.decorators.deprecate",
"numpy.apply_along_axis",
"pandas.lib.isscalar",
"numpy.zeros",
"pandas.core.common._possibly_cast_item",
"pandas.util.decorators.Appender",
"pandas.core.index.MultiIndex",
"pandas.core.common.pprint_thing",
"pandas.core.index._get_combined_index",
"pandas.core.common._try_sort",
"pandas.core.common.PandasError",
"pandas.compat.u",
"pandas.core.common._fill_zeros",
"pandas.tools.util.cartesian_product",
"pandas.compat.itervalues",
"numpy.array",
"pandas.computation.expressions.evaluate",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.io.excel.ExcelWriter",
"numpy.isfinite",
"pandas.core.internals.create_block_manager_from_arrays",
"numpy.array_equal",
"pandas.core.sparse.SparsePanel",
"numpy.empty",
"pandas.compat.zip",
"numpy.prod",
"numpy.isscalar",
"pandas.core.indexing.maybe_droplevels",
"numpy.vstack",
"pandas.core.index.Index"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"0.17",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"1.1",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
arcada-uas/doccano | [
"c29aece3dd4504eeaaa3466af0663bfe18b90dc1"
] | [
"backend/data_export/tests/test_dataset.py"
] | [
"import unittest\nfrom unittest.mock import MagicMock\n\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\n\nfrom data_export.pipeline.dataset import Dataset\n\n\nclass TestDataset(unittest.TestCase):\n def setUp(self):\n example = MagicMock()\n example.to_dict.return_value = {\"data\": \"example\"}\n self.examples = MagicMock()\n self.examples.__iter__.return_value = [example]\n label = MagicMock()\n label.find_by.return_value = {\"labels\": [\"label\"]}\n self.labels = MagicMock()\n self.labels.__iter__.return_value = [label]\n\n def test_to_dataframe(self):\n dataset = Dataset(self.examples, self.labels)\n df = dataset.to_dataframe()\n expected = pd.DataFrame([{\"data\": \"example\", \"labels\": [\"label\"]}])\n assert_frame_equal(df, expected)\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
kiritigowda/Anakin | [
"4ba2329153163590e11875dc6b4150031066915d"
] | [
"tools/external_converter_v2/parser/kill_fluid/fluid_helper.py"
] | [
"from ..proto import *\nfrom ..graph_io import *\nimport paddle.fluid as fluid\nimport numpy as np\nfrom paddle.fluid.core import VarDesc, AttrType\n\n\ndef union(list_a, list_b):\n\treturn list(set(list_a).union(set(list_b)))\n\ndef difference(list_a, list_b):\n\treturn list(set(list_a).difference(set(list_b)))\n\n\nclass Edge_for_fluid:\n\n\tdef __init__(self, param, target, var):\n\t\tself.param = param\n\t\tself.target = target\n\t\tself.var = var\n\n\nclass Fluid_edger:\n\n\tdef __init__(self, param = None, target = None, var = None):\n\t\tself.edges = []\n\t\tif param is not None and target is not None:\n\t\t\tedge = Edge_for_fluid(param, target, var)\n\t\t\tself.edges.append(edge)\n\n\tdef __call__(self):\n\t\treturn self.all_targets()\n\n\tdef add(self, param, target, var = None):\n\t\tedge = Edge_for_fluid(param, target, var)\n\t\tself.edges.append(edge)\n\n\tdef rm_edges_by_param(self, param):\n\t\tfor edge in self.edges:\n\t\t\tif edge.param == param:\n\t\t\t\tedge_idx = self.edges.index(edge)\n\t\t\t\tdel self.edges[edge_idx]\n\n\tdef rm(self, target):\n\t\tres = -1\n\t\tfor edge in self.edges:\n\t\t\tif target == edge.target:\n\t\t\t\tedge_idx = self.edges.index(edge)\n\t\t\t\tdel self.edges[edge_idx]\n\t\t\t\tres = res + 1\n\t\tif res != 0:\n\t\t\tpass\n\n\tdef mv(self, old_target, new_target):\n\t\tres = -1\n\t\tfor edge in self.edges:\n\t\t\tif old_target == edge.target:\n\t\t\t\tedge.target = new_target\n\t\t\t\tres = res + 1\n\t\tif res != 0:\n\t\t\tpass\n\n\tdef all_params(self):\n\t\tparams = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.param not in params:\n\t\t\t\tparams.append(edge.param)\n\t\treturn params\n\n\tdef all_targets(self):\n\t\ttargets = []\n\t\tfor edge in self.edges:\n\t\t\ttargets.append(edge.target)\n\t\treturn targets\n\n\tdef targets(self, param):\n\t\ttargets = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.param == param:\n\t\t\t\ttargets.append(edge.target)\n\t\treturn targets\n\n\tdef target(self, param, idx = 0):\n\t\treturn self.targets(param)[idx]\n\n\tdef clear(self):\n\t\ttargets_list = self.all_targets()\n\t\tfor target in targets_list:\n\t\t\tself.rm(target)\n\n\tdef targets_with_params(self):\n\t\tlist_of_targets_and_params = []\n\t\tfor edge in self.edges:\n\t\t\ttarget_and_param = [edge.target, edge.param]\n\t\t\tlist_of_targets_and_params.append(target_and_param)\n\t\treturn list_of_targets_and_params\n\n\tdef vars_by_target(self, target):\n\t\tvars = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.target == target and edge.var is not None:\n\t\t\t\tvars.append(edge.var)\n\t\treturn vars\n\n\tdef __getitem__(self, idx):\n\t\tif idx < len(self.edges):\n\t\t\treturn self.edges[idx]\n\t\treturn None\n\n\nclass Fluid_helper:\n\n\tdef __init__(self, scope, block):\n\t\tself.scope = scope\n\t\tself.block = block\n\n\tdef args_by_input_param(self, op, param_name):\n\t\tif param_name in op.input_names:\n\t\t\treturn op.input(param_name)\n\t\telse:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' % ( param_name ) )\n\n\tdef args_by_output_param(self, op, param_name):\n\t\tif param_name in op.output_names:\n\t\t\treturn op.output(param_name)\n\t\telse:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' 
% ( param_name ) )\n\n\tdef var_by_input_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.args_by_input_param(op, param_name)[var_idx]\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef var_by_output_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.args_by_output_param(op, param_name)[var_idx]\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef var_name_by_param(self, op, param_name, var_idx = 0):\n\t\tif param_name not in op.input_names + op.output_names:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' % ( param_name ) )\n\t\telif param_name in op.input_names:\n\t\t\tif len(op.input(param_name)) > 0:\n\t\t\t\tvar_name_unicode = op.input(param_name)[var_idx]\n\t\t\telse:\n\t\t\t\traise NameError('ERROR: param %s has not var.' % ( param_name ) )\n\t\telif param_name in op.output_names:\n\t\t\tif len(op.output(param_name)) > 0:\n\t\t\t\tvar_name_unicode = op.output(param_name)[var_idx]\n\t\t\telse:\n\t\t\t\traise NameError('ERROR: param %s has not var.' % ( param_name ) )\n\t\tvar = self.block.var(var_name_unicode)\n\t\tvar_name = var.name\n\t\treturn var_name\n\n\tdef var_by_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef shape_by_var_name(self, var_name, layout = 'NCHW'):\n\t\tvar = self.block.var(var_name)\n\t\tlong_tuple = var.shape\n\t\tlong_list = list(long_tuple)\n\t\tif layout == 'NCHW':\n\t\t\tint_list_4d = map(int, [1]*(4-len(long_list)) + long_list)\n\t\t\treturn int_list_4d\n\t\telif layout == 'UNMODIFIED':\n\t\t\treturn long_list\n\t\telse:\n\t\t\traise NameError('ERROR: layout %s is not implemented yet.' % ( layout ) )\n\n\tdef np_data_by_var_name(self, var_name):\n\t\tnumpy_array = fluid.executor.fetch_var(var_name, self.scope, True)\n\t\treturn numpy_array\n\n\tdef dtype_by_var_name(self, var_name):\n\t\tvar = self.block.var(var_name)\n\t\tfluid_var_type = var.dtype\n\t\tdtype = ANAKIN_TENSOR_DTYPE[fluid_var_type]\n\t\treturn dtype\n\n\tdef is_persistable_param(self, op, param_name, var_idx = 0):\n\t\tvar = self.var_by_param(op, param_name, var_idx)\n\t\tis_persistable_var = var.persistable\n\t\treturn is_persistable_var\n\n\tdef var_shape_by_param(self, transpose, op, param_name, var_idx = 0, layout = 'NCHW'):\n\t\tif transpose is True:\n\t\t\traise NameError('ERROR: var_shape transpose is not implemented yet.')\n\t\telse:\n\t\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\t\tshape = self.shape_by_var_name(var_name, layout)\n\t\t\treturn shape\n\n\tdef data_with_shape_by_param(self,\n\t\t\t\t\t\t\t\t op,\n\t\t\t\t\t\t\t\t param_name,\n\t\t\t\t\t\t\t\t transpose = False,\n\t\t\t\t\t\t\t\t axes = None,\n\t\t\t\t\t\t\t\t var_idx = 0,\n\t\t\t\t\t\t\t\t is_flat_list = True,\n\t\t\t\t\t\t\t\t layout = 'NCHW'):\n\n\t\tnp.set_printoptions(threshold=np.inf, suppress=True)\n\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tnp_array = self.np_data_by_var_name(var_name)\n\t\tif transpose is True:\n\t\t\tnp_array = np.transpose(np_array, axes)\n\t\tnp_shape = np.shape(np_array)\n\t\tif layout == 'NCHW':\n\t\t\tnp_shape = map(int, [1]*(4-len(np_shape)) + list(np_shape))\n\t\tif is_flat_list is True:\n\t\t\tflat_list = list(np_array.flatten())\n\t\t\treturn [flat_list, np_shape]\n\t\telse:\n\t\t\treturn [np_array, np_shape]\n\n\tdef np_param(self,\n\t\t\t\t op,\n\t\t\t\t param_name,\n\t\t\t\t transpose = False,\n\t\t\t\t axes = None,\n\t\t\t\t var_idx = 0):\n\n\t\t[data, 
np_shape] = self.data_with_shape_by_param(op, param_name, transpose, \\\n\t\t\taxes, var_idx, False)\n\t\treturn data\n\n\tdef dtype_by_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tdtype = self.dtype_by_var_name(var_name)\n\t\treturn dtype\n\n\tdef is_list_type(self, op, attr_name):\n\t\tif op.has_attr(attr_name):\n\t\t\tfluid_attr_type = op.attr_type(attr_name)\n\t\t\tif fluid_attr_type in ANAKIN_ATTR_IS_LIST.keys():\n\t\t\t\treturn ANAKIN_ATTR_IS_LIST[fluid_attr_type]\n\t\t\telse:\n\t\t\t\treturn False # AttrType.LONG\n\t\telse:\n\t\t\traise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\n\tdef dtype_of_attr(self, op, attr_name):\n\t\tif op.has_attr(attr_name):\n\t\t\tfluid_attr_type = op.attr_type(attr_name)\n\t\t\tif fluid_attr_type in ANAKIN_ATTR_DTYPE.keys():\n\t\t\t\treturn ANAKIN_ATTR_DTYPE[fluid_attr_type]\n\t\t\telse:\n\t\t\t\treturn INT32 # AttrType.LONG\n\t\telse:\n\t\t\traise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\n\tdef attr_data_required(self, op, attr_name):\n\t\tdata = op.attr(attr_name)\n\t\tis_list = self.is_list_type(op, attr_name)\n\t\tdtype = self.dtype_of_attr(op, attr_name)\n\t\tif dtype not in [INT32, FLOAT, STR]:\n\t\t\treturn data\n\t\telif dtype == INT32:\n\t\t\treturn map(int, data) if is_list else int(data)\n\t\telif dtype == FLOAT:\n\t\t\treturn map(float, data) if is_list else float(data)\n\t\telif dtype == STR:\n\t\t\treturn bytes(data)\n\n\tdef attr_data(self, op, attr_name, default_value = 0, type = None):\n\t\tif op.has_attr(attr_name):\n\t\t\treturn self.attr_data_required(op, attr_name)\n\t\telse:\n\t\t\t#raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\t\t\treturn default_value\n\n\tdef param_tensor_sh(self,\n\t\t\t\t\t\top,\n\t\t\t\t\t\tparam_name,\n\t\t\t\t\t\ttranspose = False,\n\t\t\t\t\t\taxes = None,\n\t\t\t\t\t\treshape = None,\n\t\t\t\t\t\tvar_idx = 0,\n\t\t\t\t\t\tlayout = 'NCHW'):\n\n\t\ttensor = TensorProtoIO()\n\t\t[flat_data, shape] = self.data_with_shape_by_param(op, param_name, transpose, \\\n\t\t\taxes, var_idx, True, layout)\n\t\tdtype = self.dtype_by_param(op, param_name, var_idx)\n\t\ttensor.set_data_type(dtype)\n\t\tif dtype in ANAKIN_TENSOR_DTYPESTR.keys():\n\t\t\ttensor.set_data(flat_data, ANAKIN_TENSOR_DTYPESTR[dtype])\n\t\t\t#pass #debug\n\t\telse:\n\t\t\traise NameError('ERROR: Unknown data type (%s)' % ( dtype ) )\n\t\tif reshape is not None:\n\t\t\ttensor.set_shape(reshape)\n\t\telse:\n\t\t\ttensor.set_shape(shape)\n\t\treturn [tensor, shape]\n\n\tdef param_tensor(self,\n\t\t\t\t\t op,\n\t\t\t\t\t param_name,\n\t\t\t\t\t transpose = False,\n\t\t\t\t\t axes = None,\n\t\t\t\t\t reshape = None,\n\t\t\t\t\t var_idx = 0,\n\t\t\t\t\t layout = 'NCHW'):\n\n\t\t[tensor, shape] = self.param_tensor_sh(op, param_name, transpose, axes, \\\n\t\t\treshape, var_idx, layout)\n\t\treturn tensor\n\n\tdef create_tensor(self, data_list, data_shape, dtype):\n\t\ttensor = TensorProtoIO()\n\t\ttensor.set_data_type(dtype)\n\t\ttensor.set_data(data_list, ANAKIN_TENSOR_DTYPESTR[dtype])\n\t\ttensor.set_shape(data_shape)\n\t\treturn tensor\n\n\tdef gru_tensor_convert(self, origin_h2h, origin_i2h, origin_b, offset=[2, 1, 0]):\n\t\thidden_size = int(origin_b.size // 3)\n\t\tword_size = int(origin_i2h.size // hidden_size // 
3)\n\t\ttar_h2h=np.array(origin_h2h.flatten().tolist()[2*hidden_size*hidden_size:]\\\n\t\t\t+np.array(origin_h2h.flatten().tolist()[:2*hidden_size*hidden_size])\\\n\t\t\t.reshape(hidden_size,2,hidden_size)[:,[1,0],:].flatten().tolist())\\\n\t\t.reshape(1,1,hidden_size,3*hidden_size)\n\t\ttar_i2h=origin_i2h.reshape(word_size,3,hidden_size)[:,offset,:]\\\n\t\t.reshape(1,1,word_size,3*hidden_size)\n\t\ttar_b=origin_b.reshape(3, hidden_size)[offset, :].reshape(1,1,1,3 * hidden_size)\n\t\ttar_i2h_h2h=np.concatenate([tar_i2h.flatten(),tar_h2h.flatten()])\\\n\t\t.reshape(1,1,1,3*hidden_size*hidden_size+3*word_size*hidden_size)\n\t\treturn tar_i2h_h2h, tar_b\n\n\tdef lstm_fc_tensor_merge_convert(self, origin_hidden_size, origin_lstm_w, origin_lstm_b, origin_fc_w, origin_fc_b):\n\n\t\tlayer_size = int (origin_hidden_size // 4)\n\t\tinput_size = int (origin_fc_w.size // origin_hidden_size)\n\t\tlstm_bias_num = int (origin_lstm_b.size // layer_size)\n\t\ttar_w = np.vstack((np.hstack((origin_fc_w[:, 1 * layer_size : 2 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, 2 * layer_size : 3 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, : 1 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, 3 * layer_size :])),\n\t\t\t\t\t\t np.hstack((origin_lstm_w[:, 1 * layer_size : 2 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, 2 * layer_size : 3 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, : 1 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, 3 * layer_size : ]))))\n\n\t\tif origin_fc_b is not None:\n\t\t\tsplit_fc_bc = origin_fc_b.flatten()[: 1 * layer_size]\n\t\t\tsplit_fc_bi = origin_fc_b.flatten()[1 * layer_size : 2 * layer_size]\n\t\t\tsplit_fc_bf = origin_fc_b.flatten()[2 * layer_size : 3 * layer_size]\n\t\t\tsplit_fc_bo = origin_fc_b.flatten()[3 * layer_size : 4 * layer_size]\n\t\telse:\n\t\t\tsplit_fc_bc = np.zeros(layer_size)\n\t\t\tsplit_fc_bi = np.zeros(layer_size)\n\t\t\tsplit_fc_bf = np.zeros(layer_size)\n\t\t\tsplit_fc_bo = np.zeros(layer_size)\n\n\t\tsplit_lstm_bc = origin_lstm_b.flatten()[: 1 * layer_size]\n\t\tsplit_lstm_bi = origin_lstm_b.flatten()[1 * layer_size: 2 * layer_size]\n\t\tsplit_lstm_bf = origin_lstm_b.flatten()[2 * layer_size: 3 * layer_size]\n\t\tsplit_lstm_bo = origin_lstm_b.flatten()[3 * layer_size: 4 * layer_size]\n\t\tsplit_lstm_bc = np.add(split_lstm_bc, split_fc_bc)\n\t\tsplit_lstm_bi = np.add(split_lstm_bi, split_fc_bi)\n\t\tsplit_lstm_bf = np.add(split_lstm_bf, split_fc_bf)\n\t\tsplit_lstm_bo = np.add(split_lstm_bo, split_fc_bo)\n\n\t\tif lstm_bias_num == 4:\n\t\t\ttar_b = np.array(split_lstm_bi.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bf.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bo.flatten().tolist())\n\t\telse:\n\t\t\tsplit_lstm_wic = origin_lstm_b.flatten()[4 * layer_size : 5 * layer_size]\n\t\t\tsplit_lstm_wfc = origin_lstm_b.flatten()[5 * layer_size : 6 * layer_size]\n\t\t\tsplit_lstm_woc = origin_lstm_b.flatten()[6 * layer_size :]\n\t\t\ttar_b = np.array(split_lstm_bi.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bf.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bo.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_wic.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_wfc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_woc.flatten().tolist())\n\t\treturn tar_w.reshape(input_size+ layer_size, 4 * layer_size, 1, 1),\\\n\t\t\t tar_b.reshape(1, origin_lstm_b.size, 1, 1)\n\n\nclass Fluid_comparator:\n\n\tdef __init__(self, helper):\n\t\tself.helper 
= helper\n\t\tself.only_list = ['feed', 'fetch']\n\n\tdef compare_by_param(self, op_a, op_b, param):\n\t\tis_weight_a = self.helper.is_persistable_param(op_a, param)\n\t\tis_weight_b = self.helper.is_persistable_param(op_b, param)\n\t\tif is_weight_a and is_weight_b:\n\t\t\tnp_a = self.helper.np_param(op_a, param)\n\t\t\tnp_b = self.helper.np_param(op_b, param)\n\t\t\tif (np_a == np_b).all() == True:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telif is_weight_a is is_weight_b:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef have_same_weights(self, op_a, op_b):\n\t\tis_same = True\n\t\tif op_a.input_names == op_b.input_names:\n\t\t\tparams = op_a.input_names\n\t\t\tfor param in params:\n\t\t\t\tif self.compare_by_param(op_a, op_b, param) is False:\n\t\t\t\t\tis_same = False\n\t\t\treturn is_same\n\t\telse:\n\t\t\treturn False\n\n\tdef compare_by_attr(self, op_a, op_b, attr_name):\n\t\tdata_a = self.helper.attr_data(op_a, attr_name)\n\t\tdata_b = self.helper.attr_data(op_b, attr_name)\n\t\treturn data_a == data_b\n\n\tdef have_same_attrs(self, op_a, op_b):\n\t\tis_same = True\n\t\tif op_a.attr_names == op_b.attr_names:\n\t\t\tattrs = op_a.attr_names\n\t\t\tfor attr in attrs:\n\t\t\t\tif self.compare_by_attr(op_a, op_b, attr) is False:\n\t\t\t\t\tis_same = False\n\t\t\treturn is_same\n\t\telse:\n\t\t\treturn False\n\n\tdef brothers(self, op_list):\n\t\tis_same = True\n\t\tif len(op_list) > 1:\n\t\t\tidx = 0\n\t\t\tfor op_b in op_list[1:]:\n\t\t\t\tif op_b.type not in self.only_list:\n\t\t\t\t\tidx = op_list.index(op_b)\n\t\t\t\t\top_a = op_list[idx - 1]\n\t\t\t\t\tif op_a.type not in self.only_list:\n\t\t\t\t\t\tsame_weights = self.have_same_weights(op_a, op_b)\n\t\t\t\t\t\tsame_attrs = self.have_same_attrs(op_a, op_b)\n\t\t\t\t\t\tif (same_weights and same_attrs) is False:\n\t\t\t\t\t\t\tis_same = False\n\t\t\t\t\telse:\n\t\t\t\t\t\traise NameError('ERROR: %s is in only_list.' % ( op_a.type ))\n\t\t\t\telse:\n\t\t\t\t\traise NameError('ERROR: %s is in only_list.' % ( op_b.type ))\n\t\t\treturn is_same\n\t\telse:\n\t\t\traise NameError('ERROR: Members of op_list must be greater than 2.')\n\n\nANAKIN_TENSOR_DTYPE = {\n\tVarDesc.VarType.BOOL: BOOLEN,\n\tVarDesc.VarType.INT32: INT32,\n\tVarDesc.VarType.FP16: FLOAT16,\n\tVarDesc.VarType.FP32: FLOAT,\n\tVarDesc.VarType.FP64: DOUBLE,\n}\n\nANAKIN_TENSOR_DTYPESTR = {\n\tSTR: \"string\",\n\tINT32: \"int\",\n\tFLOAT: \"float\",\n\tBOOLEN: \"bool\",\n}\n\nANAKIN_ATTR_DTYPE = {\n\tAttrType.INT: INT32,\n\tAttrType.INTS: INT32,\n\tAttrType.FLOAT: FLOAT,\n\tAttrType.FLOATS: FLOAT,\n\tAttrType.STRING: STR,\n\tAttrType.STRINGS: STR,\n\tAttrType.BOOL: BOOLEN,\n\tAttrType.BOOLS: BOOLEN,\n}\n\nANAKIN_ATTR_IS_LIST = {\n\tAttrType.INT: False,\n\tAttrType.INTS: True,\n\tAttrType.FLOAT: False,\n\tAttrType.FLOATS: True,\n\tAttrType.STRING: False,\n\tAttrType.STRINGS: True,\n\tAttrType.BOOL: False,\n\tAttrType.BOOLS: True,\n}\n\nAPPEND_BIAS_OP_TYPE = [\n\t'FC',\n\t'mul',\n\t'sequence_conv',\n\t'conv2d',\n\t'conv2d_transpose',\n\t'depthwise_conv2d',\n\t'elementwise_mul',\n]\n\nAPPEND_ACT_OP_TYPE = [\n\t'FC',\n\t'mul',\n\t'sequence_conv',\n\t'conv2d',\n\t'conv2d_transpose',\n\t'batch_norm',\n\t'layer_norm',\n\t'row_conv',\n\t'reshape',\n]\n"
] | [
[
"numpy.hstack",
"numpy.set_printoptions",
"numpy.shape",
"numpy.transpose",
"numpy.add",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
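The converter code in the entry above repeatedly pads variable shapes to four NCHW dimensions and flattens weights after an optional transpose. A minimal stand-alone sketch of that step, using only the numpy calls listed in the apis column (the helper names here are invented for the example, not taken from the record):

import numpy as np

def pad_shape_to_nchw(shape):
    # Prepend leading 1s so the shape always has four (N, C, H, W) entries,
    # mirroring the layout handling in shape_by_var_name above.
    shape = list(shape)
    return [1] * (4 - len(shape)) + shape if len(shape) < 4 else shape

def flatten_with_shape(array, axes=None):
    # Optionally transpose, then return a flat value list plus the padded shape,
    # similar to data_with_shape_by_param in the record.
    if axes is not None:
        array = np.transpose(array, axes)
    return list(array.flatten()), pad_shape_to_nchw(np.shape(array))

weights = np.zeros((64, 3, 3))                 # e.g. an (out, h, w) parameter
values, nchw_shape = flatten_with_shape(weights)
print(nchw_shape)                              # [1, 64, 3, 3]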
Fengdalu/LEARN-AN-EFFECTIVE-LIP-READING-MODEL-WITHOUT-PAINS | [
"8d5eef415c19b4c5e161259b1222fbfec6a5edb0"
] | [
"model/video_cnn.py"
] | [
"# coding: utf-8\nimport math\nimport numpy as np\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=1)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, se=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n self.se = se\n \n if(self.se):\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.conv3 = conv1x1(planes, planes//16)\n self.conv4 = conv1x1(planes//16, planes)\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n \n if self.downsample is not None:\n residual = self.downsample(x)\n \n if(self.se):\n w = self.gap(out)\n w = self.conv3(w)\n w = self.relu(w)\n w = self.conv4(w).sigmoid()\n \n out = out * w\n \n out = out + residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, se=False):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.se = se\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n \n \n self.avgpool = nn.AdaptiveAvgPool2d(1)\n \n self.bn = nn.BatchNorm1d(512)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, se=self.se))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, se=self.se))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.bn(x)\n return x \n\n\nclass VideoCNN(nn.Module):\n def __init__(self, se=False):\n super(VideoCNN, self).__init__()\n \n # frontend3D\n self.frontend3D = nn.Sequential(\n nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),\n nn.BatchNorm3d(64),\n nn.ReLU(True),\n nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))\n )\n # resnet\n self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], se=se)\n self.dropout = nn.Dropout(p=0.5)\n\n # backend_gru\n # initialize\n self._initialize_weights()\n \n def visual_frontend_forward(self, x):\n x = x.transpose(1, 2)\n x = self.frontend3D(x)\n x = x.transpose(1, 2)\n x = x.contiguous()\n x = x.view(-1, 64, x.size(3), x.size(4))\n x = self.resnet18(x)\n return x \n \n def forward(self, x):\n b, t = x.size()[:2]\n\n x = self.visual_frontend_forward(x)\n \n #x = self.dropout(x)\n feat = x.view(b, -1, 512)\n\n x = x.view(b, -1, 512) \n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
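The video_cnn.py entry above feeds each clip through a 3D front-end built from the torch.nn modules listed in its apis column before the 2D ResNet. A minimal, self-contained sketch of that front-end with a dummy clip (the batch and frame sizes are example values, not taken from the repository):

import torch
import torch.nn as nn

frontend3d = nn.Sequential(
    nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),
    nn.BatchNorm3d(64),
    nn.ReLU(True),
    nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
)

clip = torch.randn(2, 1, 29, 88, 88)   # (batch, channel, frames, height, width)
feat = frontend3d(clip)
print(feat.shape)                      # torch.Size([2, 64, 29, 22, 22])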
DavidIbarr/nmma | [
"109fdd57add52cfea3553df8346981d6a117a7e7"
] | [
"nmma/em/create_injection_slurm.py"
] | [
"import os\nimport argparse\nimport json\nimport pandas as pd\n\nimport bilby\nfrom bilby_pipe.create_injections import InjectionCreator\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Slurm files from nmma injection file\")\n parser.add_argument(\n \"--prior-file\",\n type=str,\n required=True,\n help=\"The prior file from which to generate injections\",\n )\n parser.add_argument(\n \"--injection-file\",\n type=str,\n required=True,\n help=\"The bilby injection json file to be used\",\n )\n parser.add_argument(\n \"--analysis-file\",\n type=str,\n required=True,\n help=\"The analysis bash script to be replicated\",\n )\n parser.add_argument(\"-o\", \"--outdir\", type=str, default=\"outdir\")\n args = parser.parse_args()\n\n # load the injection json file\n if args.injection_file:\n if args.injection_file.endswith(\".json\"):\n with open(args.injection_file, \"rb\") as f:\n injection_data = json.load(f)\n datadict = injection_data[\"injections\"][\"content\"]\n dataframe_from_inj = pd.DataFrame.from_dict(datadict)\n else:\n print(\"Only json supported.\")\n exit(1)\n\n if len(dataframe_from_inj) > 0:\n args.n_injection = len(dataframe_from_inj)\n\n # create the injection dataframe from the prior_file\n injection_creator = InjectionCreator(\n prior_file=args.prior_file,\n prior_dict=None,\n n_injection=args.n_injection,\n default_prior=\"PriorDict\",\n gps_file=None,\n trigger_time=0,\n generation_seed=0,\n )\n dataframe_from_prior = injection_creator.get_injection_dataframe()\n\n # combine the dataframes\n dataframe = pd.DataFrame.merge(\n dataframe_from_inj,\n dataframe_from_prior,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n for index, row in dataframe.iterrows():\n with open(args.analysis_file, \"r\") as file:\n analysis = file.read()\n\n outdir = os.path.join(args.outdir, str(index))\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n priors = bilby.gw.prior.PriorDict(args.prior_file)\n priors.to_file(outdir, label=\"injection\")\n priorfile = os.path.join(outdir, \"injection.prior\")\n injfile = os.path.join(outdir, \"lc.csv\")\n\n analysis = analysis.replace(\"PRIOR\", priorfile)\n analysis = analysis.replace(\"OUTDIR\", outdir)\n analysis = analysis.replace(\"INJOUT\", injfile)\n analysis = analysis.replace(\"INJNUM\", str(index))\n analysis_file = os.path.join(outdir, \"inference.sh\")\n\n fid = open(analysis_file, \"w\")\n fid.write(analysis)\n fid.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.DataFrame.merge",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
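The nmma entry above builds its per-injection data by converting the injection JSON into a DataFrame and merging it with a prior-generated DataFrame on the row index. A toy sketch of that combination step (column names here are placeholders, not the actual nmma parameters):

import pandas as pd

injections = pd.DataFrame.from_dict({"luminosity_distance": [40.0, 120.0]})
priors = pd.DataFrame({"inclination": [0.4, 1.1]})

# Outer merge on the index, as in the record's pd.DataFrame.merge call.
combined = pd.merge(injections, priors, how="outer", left_index=True, right_index=True)
print(combined)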
dkswxd/unetpp_pytorch_qiu | [
"7f139d0c71110052399f0a93b55a39ba85897561"
] | [
"tool/metric.py"
] | [
"import numpy as np\nfrom sklearn import metrics\nfrom PIL import Image\n\ndef get_metrics(pred, logits, gt):\n if isinstance(logits, list):\n logits = logits[-1]\n result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0]),\n 'auc': roc(gt, logits)}\n return result\n\ndef get_metrics_without_roc(pred, gt):\n result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0])}\n return result\n\ndef show_metrics(metrics):\n con_mat = np.zeros((2,2))\n auc = 0.0\n for m in metrics:\n con_mat += m['confusion_matrix']\n auc += m['auc']\n auc /= len(metrics)\n result = {'confusion_matrix': con_mat.tolist(),\n 'accuracy': accuracy(con_mat),\n 'kappa': kappa(con_mat),\n 'precision': precision(con_mat),\n 'sensitivity': sensitivity(con_mat),\n 'specificity': specificity(con_mat),\n 'auc': auc,\n }\n return result\n\ndef show_metrics_without_roc(metrics):\n con_mat = np.zeros((2,2))\n for m in metrics:\n con_mat += m['confusion_matrix']\n result = {'confusion_matrix': con_mat,\n 'accuracy': accuracy(con_mat),\n 'kappa': kappa(con_mat),\n 'precision': precision(con_mat),\n 'sensitivity': sensitivity(con_mat),\n 'specificity': specificity(con_mat),\n }\n return result\n\ndef show_metrics_from_save_image(data):\n pred = data[:,:,0] // 255\n gt = data[:,:,1] // 255\n metrics = [get_metrics_without_roc(pred, gt)]\n return show_metrics_without_roc(metrics)\n\ndef kappa(matrix):\n matrix = np.array(matrix)\n n = np.sum(matrix)\n sum_po = 0\n sum_pe = 0\n for i in range(len(matrix[0])):\n sum_po += matrix[i][i]\n row = np.sum(matrix[i, :])\n col = np.sum(matrix[:, i])\n sum_pe += row * col\n po = sum_po / n\n pe = sum_pe / (n * n)\n # print(po, pe)\n return (po - pe) / (1 - pe)\n\n\ndef sensitivity(matrix):\n return matrix[0][0]/(matrix[0][0]+matrix[1][0])\n\n\ndef specificity(matrix):\n return matrix[1][1]/(matrix[1][1]+matrix[0][1])\n\n\ndef precision(matrix):\n return matrix[0][0]/(matrix[0][0]+matrix[0][1])\n\ndef roc(gt, logits):\n gtlist = gt.flatten()\n predlist = logits.detach().cpu().numpy()[0, 1, ...].flatten()\n\n fpr, tpr, thresholds = metrics.roc_curve(gtlist, predlist, pos_label=1)\n roc_auc = metrics.auc(fpr, tpr) # auc为Roc曲线下的面积\n return roc_auc\n\n\ndef accuracy(matrix):\n return (matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1])\n\ndef error_rate(predictions, labels):\n \"\"\"\n Return the error rate based on dense predictions and 1-hot labels.\n \"\"\"\n return 100.0 - (\n 100.0 *\n np.sum(np.argmin(predictions, 3) == np.argmin(labels, 3)) /\n (predictions.shape[0] * predictions.shape[1] * predictions.shape[2]))\n\ndef save_predict(filename, data, gt, pred):\n pred = pred * 255\n gt = gt[0, 1, :, :]\n gt = np.where(gt > 0.5, 255, 0)\n differ = np.stack([np.zeros_like(pred), gt, pred], -1)\n pred = np.stack([pred, pred, pred], -1)\n gt = np.stack([gt, gt, gt], -1)\n data = np.transpose(data, (0, 2, 3, 1))[0,...]\n if data.shape[2] == 60:\n data = data[:, :, 10:40:10]\n elif data.shape[2] == 1:\n data = np.concatenate([data, data, data], -1)\n elif data.shape[2] == 15:\n data = data[:, :, 0:15:5]\n data -= np.min(data, axis=(0,1))\n data /= (np.max(data, axis=(0,1))/255)\n data = data.astype(np.uint8)\n img = Image.fromarray(np.concatenate([data, pred, gt, differ], axis=1).astype(np.uint8))\n img.save(filename)\n\ndef save_logits(filename, pred):\n pred = pred * 255\n pred = np.stack([pred, pred, pred], -1)\n img = Image.fromarray(pred.astype(np.uint8))\n img.save(filename)\n"
] | [
[
"numpy.min",
"sklearn.metrics.roc_curve",
"numpy.stack",
"numpy.concatenate",
"numpy.max",
"numpy.where",
"numpy.zeros_like",
"numpy.argmin",
"numpy.transpose",
"sklearn.metrics.auc",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
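The metric.py entry above accumulates a 2x2 confusion matrix built with labels=[1, 0] and derives its summary statistics from it. The sketch below shows the same bookkeeping on toy masks using the textbook TP/FN/FP/TN definitions; the record's own helpers index the matrix slightly differently, so treat the formulas here as a reference rather than a copy of that code.

import numpy as np
from sklearn import metrics

pred = np.array([1, 1, 0, 0, 1, 0])    # example predicted mask
gt = np.array([1, 0, 0, 0, 1, 1])      # example ground-truth mask

# With labels=[1, 0], row/column 0 is the positive class:
# con_mat[0][0]=TP, con_mat[0][1]=FN, con_mat[1][0]=FP, con_mat[1][1]=TN.
con_mat = metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0])

sensitivity = con_mat[0][0] / (con_mat[0][0] + con_mat[0][1])   # TP / (TP + FN)
specificity = con_mat[1][1] / (con_mat[1][1] + con_mat[1][0])   # TN / (TN + FP)
accuracy = np.trace(con_mat) / con_mat.sum()
print(con_mat, sensitivity, specificity, accuracy)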
cnheider/pyro | [
"60bcab73ada30c2b3f05d525690c9664ff6fc22e",
"60bcab73ada30c2b3f05d525690c9664ff6fc22e",
"60bcab73ada30c2b3f05d525690c9664ff6fc22e"
] | [
"pyro/infer/trace_elbo.py",
"tests/distributions/test_sparse_multivariate_normal.py",
"examples/utils/mnist_cached.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport numbers\nimport warnings\n\nimport torch\nfrom torch.autograd import Variable\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.distributions.util import is_identically_zero\nfrom pyro.infer.elbo import ELBO\nfrom pyro.infer.enum import iter_discrete_traces\nfrom pyro.infer.util import torch_backward, torch_data_sum, torch_sum\nfrom pyro.poutine.util import prune_subsample_sites\nfrom pyro.util import check_model_guide_match, is_nan\n\n\ndef check_enum_discrete_can_run(model_trace, guide_trace):\n \"\"\"\n Checks whether `enum_discrete` is supported for the given (model, guide) pair.\n\n :param Trace model: A model trace.\n :param Trace guide: A guide trace.\n :raises: NotImplementedError\n \"\"\"\n # Check that all batch_log_pdf shapes are the same,\n # since we currently do not correctly handle broadcasting.\n model_trace.compute_batch_log_pdf()\n guide_trace.compute_batch_log_pdf()\n shapes = {}\n for source, trace in [(\"model\", model_trace), (\"guide\", guide_trace)]:\n for name, site in trace.nodes.items():\n if site[\"type\"] == \"sample\":\n shapes[site[\"batch_log_pdf\"].size()] = (source, name)\n if len(shapes) > 1:\n raise NotImplementedError(\n \"enum_discrete does not support mixture of batched and un-batched variables. \"\n \"Try rewriting your model to avoid batching or running with enum_discrete=False. \"\n \"Found the following variables of different batch shapes:\\n{}\".format(\n \"\\n\".join([\"{} {}: shape = {}\".format(source, name, tuple(shape))\n for shape, (source, name) in sorted(shapes.items())])))\n\n\nclass Trace_ELBO(ELBO):\n \"\"\"\n A trace implementation of ELBO-based SVI\n \"\"\"\n\n def _get_traces(self, model, guide, *args, **kwargs):\n \"\"\"\n runs the guide and runs the model against the guide with\n the result packaged as a trace generator\n \"\"\"\n\n for i in range(self.num_particles):\n if self.enum_discrete:\n # This iterates over a bag of traces, for each particle.\n for scale, guide_trace in iter_discrete_traces(\"flat\", guide, *args, **kwargs):\n model_trace = poutine.trace(poutine.replay(model, guide_trace),\n graph_type=\"flat\").get_trace(*args, **kwargs)\n\n check_model_guide_match(model_trace, guide_trace)\n guide_trace = prune_subsample_sites(guide_trace)\n model_trace = prune_subsample_sites(model_trace)\n check_enum_discrete_can_run(model_trace, guide_trace)\n\n guide_trace.compute_score_parts()\n log_r = model_trace.batch_log_pdf() - guide_trace.batch_log_pdf()\n weight = scale / self.num_particles\n yield weight, model_trace, guide_trace, log_r\n continue\n\n guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)\n model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace(*args, **kwargs)\n\n check_model_guide_match(model_trace, guide_trace)\n guide_trace = prune_subsample_sites(guide_trace)\n model_trace = prune_subsample_sites(model_trace)\n\n guide_trace.compute_score_parts()\n log_r = model_trace.log_pdf() - guide_trace.log_pdf()\n weight = 1.0 / self.num_particles\n yield weight, model_trace, guide_trace, log_r\n\n def _is_batched(self, weight):\n return self.enum_discrete and \\\n isinstance(weight, Variable) and \\\n weight.dim() > 0 and \\\n weight.size(0) > 1\n\n def loss(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the ELBO\n :rtype: float\n\n Evaluates the ELBO with an estimator that uses num_particles many samples/particles.\n \"\"\"\n elbo = 0.0\n for weight, model_trace, 
guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):\n elbo_particle = weight * 0\n\n if self._is_batched(weight):\n log_pdf = \"batch_log_pdf\"\n else:\n log_pdf = \"log_pdf\"\n for name in model_trace.nodes.keys():\n if model_trace.nodes[name][\"type\"] == \"sample\":\n if model_trace.nodes[name][\"is_observed\"]:\n elbo_particle += model_trace.nodes[name][log_pdf]\n else:\n elbo_particle += model_trace.nodes[name][log_pdf]\n elbo_particle -= guide_trace.nodes[name][log_pdf]\n\n # drop terms of weight zero to avoid nans\n if isinstance(weight, numbers.Number):\n if weight == 0.0:\n elbo_particle = torch.zeros_like(elbo_particle)\n else:\n elbo_particle[weight == 0] = 0.0\n\n elbo += torch_data_sum(weight * elbo_particle)\n\n loss = -elbo\n if is_nan(loss):\n warnings.warn('Encountered NAN loss')\n return loss\n\n def loss_and_grads(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the ELBO\n :rtype: float\n\n Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.\n Performs backward on the latter. Num_particle many samples are used to form the estimators.\n \"\"\"\n elbo = 0.0\n # grab a trace from the generator\n for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):\n elbo_particle = weight * 0\n surrogate_elbo_particle = weight * 0\n batched = self._is_batched(weight)\n # compute elbo and surrogate elbo\n if batched:\n log_pdf = \"batch_log_pdf\"\n else:\n log_pdf = \"log_pdf\"\n for name, model_site in model_trace.nodes.items():\n if model_site[\"type\"] == \"sample\":\n model_log_pdf = model_site[log_pdf]\n if model_site[\"is_observed\"]:\n elbo_particle += model_log_pdf\n surrogate_elbo_particle += model_log_pdf\n else:\n guide_site = guide_trace.nodes[name]\n guide_log_pdf, score_function_term, entropy_term = guide_site[\"score_parts\"]\n\n if not batched:\n guide_log_pdf = guide_log_pdf.sum()\n elbo_particle += model_log_pdf - guide_log_pdf\n surrogate_elbo_particle += model_log_pdf\n\n if not is_identically_zero(entropy_term):\n if not batched:\n entropy_term = entropy_term.sum()\n surrogate_elbo_particle -= entropy_term\n\n if not is_identically_zero(score_function_term):\n if not batched:\n score_function_term = score_function_term.sum()\n surrogate_elbo_particle += log_r.detach() * score_function_term\n\n # drop terms of weight zero to avoid nans\n if isinstance(weight, numbers.Number):\n if weight == 0.0:\n elbo_particle = torch.zeros_like(elbo_particle)\n surrogate_elbo_particle = torch.zeros_like(surrogate_elbo_particle)\n else:\n weight_eq_zero = (weight == 0)\n elbo_particle[weight_eq_zero] = 0.0\n surrogate_elbo_particle[weight_eq_zero] = 0.0\n\n elbo += torch_data_sum(weight * elbo_particle)\n surrogate_elbo_particle = torch_sum(weight * surrogate_elbo_particle)\n\n # collect parameters to train from model and guide\n trainable_params = set(site[\"value\"]\n for trace in (model_trace, guide_trace)\n for site in trace.nodes.values()\n if site[\"type\"] == \"param\")\n\n if trainable_params:\n surrogate_loss_particle = -surrogate_elbo_particle\n torch_backward(surrogate_loss_particle)\n pyro.get_param_store().mark_params_active(trainable_params)\n\n loss = -elbo\n if is_nan(loss):\n warnings.warn('Encountered NAN loss')\n return loss\n",
"from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions import MultivariateNormal, SparseMultivariateNormal\n\nfrom tests.common import assert_equal\n\n\ndef test_scale_tril():\n loc = Variable(torch.Tensor([1, 2, 1, 2, 0]))\n D = Variable(torch.Tensor([1, 2, 3, 4, 5]))\n W = Variable(torch.Tensor([[1, -1, 2, 3, 4], [2, 3, 1, 2, 4]]))\n cov = D.diag() + W.t().matmul(W)\n\n mvn = MultivariateNormal(loc, cov)\n sparse_mvn = SparseMultivariateNormal(loc, D, W)\n\n assert_equal(mvn.scale_tril, sparse_mvn.scale_tril)\n\n\ndef test_log_prob():\n loc = Variable(torch.Tensor([2, 1, 1, 2, 2]))\n D = Variable(torch.Tensor([1, 2, 3, 1, 3]))\n W = Variable(torch.Tensor([[1, -1, 2, 2, 4], [2, 1, 1, 2, 6]]))\n x = Variable(torch.Tensor([2, 3, 4, 1, 7]))\n cov = D.diag() + W.t().matmul(W)\n\n mvn = MultivariateNormal(loc, cov)\n sparse_mvn = SparseMultivariateNormal(loc, D, W)\n\n assert_equal(mvn.log_prob(x), sparse_mvn.log_prob(x))\n\n\ndef test_variance():\n loc = Variable(torch.Tensor([1, 1, 1, 2, 0]))\n D = Variable(torch.Tensor([1, 2, 2, 4, 5]))\n W = Variable(torch.Tensor([[3, -1, 3, 3, 4], [2, 3, 1, 3, 4]]))\n cov = D.diag() + W.t().matmul(W)\n\n mvn = MultivariateNormal(loc, cov)\n sparse_mvn = SparseMultivariateNormal(loc, D, W)\n\n assert_equal(mvn.variance, sparse_mvn.variance)\n",
"import torch\nfrom torchvision.datasets import MNIST\nimport numpy as np\nfrom functools import reduce\nfrom torch.utils.data import DataLoader\n# This file contains utilities for caching, transforming and splitting MNIST data\n# efficiently. By default, a Pytorch DataLoader will apply the transform every epoch\n# we avoid this by caching the data early on in MNISTCached class\n\n\n# transformations for MNIST data\ndef fn_x_mnist(x, use_cuda):\n # normalize pixel values of the image to be in [0,1] instead of [0,255]\n xp = x * (1. / 255)\n\n # transform x to a linear tensor from bx * a1 * a2 * ... --> bs * A\n xp_1d_size = reduce(lambda a, b: a * b, xp.size()[1:])\n xp = xp.view(-1, xp_1d_size)\n\n # send the data to GPU(s)\n if use_cuda:\n xp = xp.cuda()\n\n return xp\n\n\ndef fn_y_mnist(y, use_cuda):\n yp = torch.zeros(y.size(0), 10)\n\n # send the data to GPU(s)\n if use_cuda:\n yp = yp.cuda()\n y = y.cuda()\n\n # transform the label y (integer between 0 and 9) to a one-hot\n yp = yp.scatter_(1, y.view(-1, 1), 1.0)\n return yp\n\n\ndef get_ss_indices_per_class(y, sup_per_class):\n # number of indices to consider\n n_idxs = y.size()[0]\n\n # calculate the indices per class\n idxs_per_class = {j: [] for j in range(10)}\n\n # for each index identify the class and add the index to the right class\n for i in range(n_idxs):\n curr_y = y[i]\n for j in range(10):\n if curr_y[j] == 1:\n idxs_per_class[j].append(i)\n break\n\n idxs_sup = []\n idxs_unsup = []\n for j in range(10):\n np.random.shuffle(idxs_per_class[j])\n idxs_sup.extend(idxs_per_class[j][:sup_per_class])\n idxs_unsup.extend(idxs_per_class[j][sup_per_class:len(idxs_per_class[j])])\n\n return idxs_sup, idxs_unsup\n\n\ndef split_sup_unsup_valid(X, y, sup_num, validation_num=10000):\n \"\"\"\n helper function for splitting the data into supervised, un-supervised and validation parts\n :param X: images\n :param y: labels (digits)\n :param sup_num: what number of examples is supervised\n :param validation_num: what number of last examples to use for validation\n :return: splits of data by sup_num number of supervised examples\n \"\"\"\n\n # validation set is the last 10,000 examples\n X_valid = X[-validation_num:]\n y_valid = y[-validation_num:]\n\n X = X[0:-validation_num]\n y = y[0:-validation_num]\n\n assert sup_num % 10 == 0, \"unable to have equal number of images per class\"\n\n # number of supervised examples per class\n sup_per_class = int(sup_num / 10)\n\n idxs_sup, idxs_unsup = get_ss_indices_per_class(y, sup_per_class)\n X_sup = X[idxs_sup]\n y_sup = y[idxs_sup]\n X_unsup = X[idxs_unsup]\n y_unsup = y[idxs_unsup]\n\n return X_sup, y_sup, X_unsup, y_unsup, X_valid, y_valid\n\n\ndef print_distribution_labels(y):\n \"\"\"\n helper function for printing the distribution of class labels in a dataset\n :param y: tensor of class labels given as one-hots\n :return: a dictionary of counts for each label from y\n \"\"\"\n counts = {j: 0 for j in range(10)}\n for i in range(y.size()[0]):\n for j in range(10):\n if y[i][j] == 1:\n counts[j] += 1\n break\n print(counts)\n\n\nclass MNISTCached(MNIST):\n \"\"\"\n a wrapper around MNIST to load and cache the transformed data\n once at the beginning of the inference\n \"\"\"\n\n # static class variables for caching training data\n train_data_size = 50000\n train_data_sup, train_labels_sup = None, None\n train_data_unsup, train_labels_unsup = None, None\n validation_size = 10000\n data_valid, labels_valid = None, None\n test_size = 10000\n\n def __init__(self, mode, sup_num, use_cuda=True, 
*args, **kwargs):\n super(MNISTCached, self).__init__(train=mode in [\"sup\", \"unsup\", \"valid\"], *args, **kwargs)\n\n # transformations on MNIST data (normalization and one-hot conversion for labels)\n def transform(x):\n return fn_x_mnist(x, use_cuda)\n\n def target_transform(y):\n return fn_y_mnist(y, use_cuda)\n\n self.mode = mode\n\n assert mode in [\"sup\", \"unsup\", \"test\", \"valid\"], \"invalid train/test option values\"\n\n if mode in [\"sup\", \"unsup\", \"valid\"]:\n\n # transform the training data if transformations are provided\n if transform is not None:\n self.train_data = (transform(self.train_data.float()))\n if target_transform is not None:\n self.train_labels = (target_transform(self.train_labels))\n\n if MNISTCached.train_data_sup is None:\n if sup_num is None:\n assert mode == \"unsup\"\n MNISTCached.train_data_unsup, MNISTCached.train_labels_unsup = \\\n self.train_data, self.train_labels\n else:\n MNISTCached.train_data_sup, MNISTCached.train_labels_sup, \\\n MNISTCached.train_data_unsup, MNISTCached.train_labels_unsup, \\\n MNISTCached.data_valid, MNISTCached.labels_valid = \\\n split_sup_unsup_valid(self.train_data, self.train_labels, sup_num)\n\n if mode == \"sup\":\n self.train_data, self.train_labels = MNISTCached.train_data_sup, MNISTCached.train_labels_sup\n elif mode == \"unsup\":\n self.train_data = MNISTCached.train_data_unsup\n\n # making sure that the unsupervised labels are not available to inference\n self.train_labels = (torch.Tensor(\n MNISTCached.train_labels_unsup.shape[0]).view(-1, 1)) * np.nan\n else:\n self.train_data, self.train_labels = MNISTCached.data_valid, MNISTCached.labels_valid\n\n else:\n # transform the testing data if transformations are provided\n if transform is not None:\n self.test_data = (transform(self.test_data.float()))\n if target_transform is not None:\n self.test_labels = (target_transform(self.test_labels))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index or slice object\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.mode in [\"sup\", \"unsup\", \"valid\"]:\n img, target = self.train_data[index], self.train_labels[index]\n elif self.mode == \"test\":\n img, target = self.test_data[index], self.test_labels[index]\n else:\n assert False, \"invalid mode: {}\".format(self.mode)\n return img, target\n\n\ndef setup_data_loaders(dataset, use_cuda, batch_size, sup_num=None, root='./data', download=True, **kwargs):\n \"\"\"\n helper function for setting up pytorch data loaders for a semi-supervised dataset\n :param dataset: the data to use\n :param use_cuda: use GPU(s) for training\n :param batch_size: size of a batch of data to output when iterating over the data loaders\n :param sup_num: number of supervised data examples\n :param root: where on the filesystem should the dataset be\n :param download: download the dataset (if it doesn't exist already)\n :param kwargs: other params for the pytorch data loader\n :return: three data loaders: (supervised data for training, un-supervised data for training,\n supervised data for testing)\n \"\"\"\n # instantiate the dataset as training/testing sets\n if 'num_workers' not in kwargs:\n kwargs = {'num_workers': 0, 'pin_memory': False}\n\n cached_data = {}\n loaders = {}\n for mode in [\"unsup\", \"test\", \"sup\", \"valid\"]:\n if sup_num is None and mode == \"sup\":\n # in this special case, we do not want \"sup\" and \"valid\" data loaders\n return loaders[\"unsup\"], loaders[\"test\"]\n cached_data[mode] = 
dataset(root=root, mode=mode, download=download,\n sup_num=sup_num, use_cuda=use_cuda)\n loaders[mode] = DataLoader(cached_data[mode], batch_size=batch_size, shuffle=True, **kwargs)\n\n return loaders\n"
] | [
[
"torch.zeros_like"
],
[
"torch.Tensor"
],
[
"torch.Tensor",
"torch.utils.data.DataLoader",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
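Among the pyro files above, mnist_cached.py turns integer digit labels into one-hot vectors with torch.zeros plus scatter_. That transform in isolation, with made-up labels:

import torch

y = torch.tensor([3, 0, 7])               # example digit labels
yp = torch.zeros(y.size(0), 10)
yp = yp.scatter_(1, y.view(-1, 1), 1.0)   # put a 1.0 in each label's column
print(yp[0])                              # 1.0 at index 3, zeros elsewhere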
Sriram-Ravula/ncsnv2 | [
"f610b59441a34063fae1c02aa06837b7eec95c03"
] | [
"models/__init__.py"
] | [
"import torch\nimport numpy as np\n\ndef get_sigmas(config):\n if config.model.sigma_dist == 'geometric':\n sigmas = torch.tensor(\n np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),\n config.model.num_classes))).float().to(config.device)\n elif config.model.sigma_dist == 'uniform':\n sigmas = torch.tensor(\n np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)\n ).float().to(config.device)\n\n else:\n raise NotImplementedError('sigma distribution not supported')\n\n return sigmas\n\[email protected]_grad()\ndef anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False, denoise=True, add_noise=True):\n images = []\n\n with torch.no_grad():\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n for s in range(n_steps_each):\n grad = scorenet(x_mod, labels)\n\n #choose whether to add random noise during each gradient ascent step\n if add_noise:\n noise = torch.randn_like(x_mod) \n else:\n noise = torch.zeros_like(x_mod)\n\n #calculate l2 norms of gradient (score) and the additive noise for logging\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n\n #calc snr as scaled version of [||s(x, \\sigma_i)|| / ||z_t||] and mean of score for logging\n snr = np.sqrt(step_size / 2.) 
* grad_norm / noise_norm\n grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\"level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}\".format(\n c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))\n\n #final denoising step if desired - removes the very last additive z_L \n if denoise:\n last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)\n last_noise = last_noise.long()\n x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)\n images.append(x_mod.to('cpu'))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\[email protected]_grad()\ndef langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False, denoise=True, add_noise=True, \n decimate_sigma=None, mode=None, true_x=None):\n images = []\n\n #if desired, decimate the number of noise scales to speed up inference\n if decimate_sigma is not None:\n sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one\n sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list\n # num_sigmas = sigmas.shape[0] // decimate_sigma\n # sigmas_temp = []\n # for i in range(num_sigmas):\n # sigmas_temp.append(sigmas[-1])\n sigmas = sigmas_temp #swap the new decimated sigma list for the main one\n\n mse = torch.nn.MSELoss()\n\n N, C, H, W = x_mod.shape\n\n steps = np.geomspace(start=5, stop=1, num=len(sigmas))\n\n c2 = 1\n\n with torch.no_grad():\n #outer loop over noise scales\n for c, sigma in enumerate(sigmas):\n #dummy target 1...T depending on iteration\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c \n labels = labels.long()\n\n #step_size = step_lr * (sigma / sigmas[-1]) ** 2\n step_size = steps[c]\n\n #Inner loop over T\n for s in range(n_steps_each):\n #s(x_t) ~= \\grad_x log p(x) -- THE PRIOR\n grad = scorenet(x_mod, labels)\n\n prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n #prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2\n\n #calculate the maximum likelihood gradient - i.e. 
MSE gradient\n #A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]\n if mode=='denoising':\n Axt = x_mod \n mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x\n else:\n Axt = torch.matmul(A, x_mod.view(N, -1, 1))\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient\n #mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient\n\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n #likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2\n\n if c == 0 and s == 0:\n c2 = prior_norm.item() / likelihood_norm.item()\n mle_grad = mle_grad * c2 #MSE gradient\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n\n #The final gradient\n grad = grad - mle_grad\n\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n #grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2\n\n #choose whether to add random noise during each gradient ascent step\n if add_noise:\n noise = torch.randn_like(x_mod) \n else:\n noise = torch.zeros_like(x_mod)\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm\n mse_iter = mse(Axt, y)\n if true_x is not None:\n mse_true = mse(true_x, x_mod)\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\"\\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \\\n image_norm: {:.4f}, train_mse: {:.4f}\".format( \\\n c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \\\n mse_iter.item()))\n \n if true_x is not None:\n print(\"true_mse: {:.4f}\".format(mse_true.item()))\n\n #final denoising step if desired - removes the very last additive z_L \n if denoise:\n last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)\n last_noise = last_noise.long()\n x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)\n images.append(x_mod.to('cpu'))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\[email protected]_grad()\ndef inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,\n final_only=False, verbose=False, likelihood_every=1,\n decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type=\"l2\"):\n images = []\n\n #if desired, decimate the number of noise scales to speed up inference\n if decimate_sigma is not None:\n if sigma_type == 'subsample': #grab equally-spaced sigma values\n sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() \n sigmas_temp.append(sigmas[-1]) \n\n elif sigma_type == 'last': #grab just the last sigma value multiple times\n num_sigmas = sigmas.shape[0] // decimate_sigma\n sigmas_temp = []\n for i in range(num_sigmas):\n sigmas_temp.append(sigmas[-1])\n\n else:\n sigmas_temp = sigmas\n\n sigmas = sigmas_temp \n\n mse = torch.nn.MSELoss()\n\n N, C, H, W = x_mod.shape\n\n steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))\n\n likelihood_norm = 0\n\n with torch.no_grad():\n if sigma_type == 'last':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099 \n labels = labels.long()\n for c, sigma in 
enumerate(sigmas):\n if sigma_type == 'subsample':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c\n labels = labels.long()\n elif sigma_type != 'last':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n\n step_size = steps[c]\n\n #s(x_t) ~= \\grad_x log p(x) -- THE PRIOR\n grad = scorenet(x_mod, labels) * c1\n\n prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n\n if c % likelihood_every == 0:\n #\\grad_x log p(y | x) -- LIKELIHOOD\n if mode=='denoising':\n Axt = x_mod\n if likelihood_type == \"l2\":\n mle_grad = (Axt - y) * c2 \n elif likelihood_type == \"l1\":\n mle_grad = torch.sign(Axt - y) * c2 \n else:\n Axt = torch.matmul(A, x_mod.view(N, -1, 1)) \n if likelihood_type == \"l2\":\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 \n elif likelihood_type == \"l1\":\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2 \n\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n if auto_c2 and c == 0:\n c2 = prior_norm.item() / likelihood_norm.item()\n mle_grad = mle_grad * c2 #MSE gradient\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n grad = grad - mle_grad\n\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad\n #x_mod = torch.clamp(x_mod, 0.0, 1.0)\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n mse_iter = mse(Axt, y)\n if true_x is not None:\n mse_true = mse(true_x, x_mod)\n\n if not final_only:\n images.append(x_mod.cpu())\n if verbose:\n print(\"\\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \\\n image_norm: {:.4f}, train_mse: {:.4f}\".format( \\\n c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \\\n mse_iter.item()))\n \n if true_x is not None:\n print(\"true_mse: {:.4f}\".format(mse_true.item()))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\[email protected]_grad()\ndef anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,\n n_steps_each=100, step_lr=0.000008):\n \"\"\"\n Currently only good for 32x32 images. 
Assuming the right half is missing.\n \"\"\"\n\n images = []\n\n #refer_image is the untainted x (?)\n #right now this only works with 3-channel images\n refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)\n refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)\n\n \n x_mod = x_mod.view(-1, 3, image_size, image_size)\n cols = image_size // 2\n half_refer_image = refer_image[..., :cols]\n with torch.no_grad():\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n\n for s in range(n_steps_each):\n images.append(x_mod.to('cpu'))\n corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma\n x_mod[:, :, :, :cols] = corrupted_half_image\n noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)\n grad = scorenet(x_mod, labels)\n x_mod = x_mod + step_size * grad + noise\n print(\"class: {}, step_size: {}, mean {}, max {}\".format(c, step_size, grad.abs().mean(),\n grad.abs().max()))\n\n return images\n\[email protected]_grad()\ndef anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False):\n images = []\n\n n_rows = x_mod.shape[0]\n\n x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)\n x_mod = x_mod.reshape(-1, *x_mod.shape[2:])\n\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n for s in range(n_steps_each):\n grad = scorenet(x_mod, labels)\n\n noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n device=x_mod.device)\n noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n device=x_mod.device)\n angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)\n\n noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \\\n noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]\n\n noise = noise.reshape(-1, *noise.shape[2:])\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)\n\n snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\n \"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}\".format(\n c, step_size, image_norm.item(), grad_norm.item(), snr.item()))\n\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images"
] | [
[
"torch.randn_like",
"torch.linspace",
"numpy.log",
"torch.ones",
"numpy.sqrt",
"numpy.linspace",
"torch.sin",
"torch.sign",
"torch.randn",
"torch.transpose",
"torch.zeros_like",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
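The ncsnv2 entry above boils down to a geometric noise schedule (get_sigmas) and, per noise level, a Langevin step that adds a scaled score plus Gaussian noise. A minimal sketch of one such update with placeholder hyperparameters and a zero gradient standing in for the score network:

import numpy as np
import torch

sigma_begin, sigma_end, num_classes = 50.0, 0.01, 10   # example schedule values
sigmas = np.exp(np.linspace(np.log(sigma_begin), np.log(sigma_end), num_classes))

x = torch.randn(4, 3, 32, 32)              # dummy image batch
step_lr = 0.000008
step_size = step_lr * (sigmas[0] / sigmas[-1]) ** 2
grad = torch.zeros_like(x)                 # placeholder for scorenet(x, labels)
noise = torch.randn_like(x)
x = x + step_size * grad + noise * np.sqrt(step_size * 2)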
WBobby/pytorch | [
"655960460ccca936fa5c06df6bbafd25b5582115",
"655960460ccca936fa5c06df6bbafd25b5582115"
] | [
"torch/ao/quantization/fuse_modules.py",
"torch/fx/experimental/fx_acc/acc_normalizer.py"
] | [
"\nimport copy\n\nimport torch.nn as nn\n\nfrom torch.quantization.fuser_method_mappings import get_fuser_method\n# for backward compatiblity\nfrom torch.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401\nfrom torch.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401\n\nfrom typing import List, Optional\n\n# Generalization of getattr\ndef _get_module(model, submodule_key):\n tokens = submodule_key.split('.')\n cur_mod = model\n for s in tokens:\n cur_mod = getattr(cur_mod, s)\n return cur_mod\n\n# Generalization of setattr\ndef _set_module(model, submodule_key, module):\n tokens = submodule_key.split('.')\n sub_tokens = tokens[:-1]\n cur_mod = model\n for s in sub_tokens:\n cur_mod = getattr(cur_mod, s)\n\n setattr(cur_mod, tokens[-1], module)\n\ndef fuse_known_modules(mod_list, additional_fuser_method_mapping=None):\n r\"\"\"Returns a list of modules that fuses the operations specified\n in the input module list.\n\n Fuses only the following sequence of modules:\n conv, bn\n conv, bn, relu\n conv, relu\n linear, bn\n linear, relu\n For these sequences, the first element in the output module list performs\n the fused operation. The rest of the elements are set to nn.Identity()\n \"\"\"\n types = tuple(type(m) for m in mod_list)\n fuser_method = get_fuser_method(types, additional_fuser_method_mapping)\n if fuser_method is None:\n raise NotImplementedError(\"Cannot fuse modules: {}\".format(types))\n new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)\n fused = fuser_method(*mod_list)\n # NOTE: forward hooks not processed in the two following for loops will be lost after the fusion\n # Move pre forward hooks of the base module to resulting fused module\n for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():\n fused.register_forward_pre_hook(pre_hook_fn)\n del mod_list[0]._forward_pre_hooks[handle_id]\n # Move post forward hooks of the last module to resulting fused module\n for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():\n fused.register_forward_hook(hook_fn)\n del mod_list[-1]._forward_hooks[handle_id]\n new_mod[0] = fused\n\n for i in range(1, len(mod_list)):\n identity = nn.Identity()\n identity.training = mod_list[0].training\n new_mod[i] = identity\n\n return new_mod\n\ndef _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):\n if fuse_custom_config_dict is None:\n fuse_custom_config_dict = {}\n additional_fuser_method_mapping = fuse_custom_config_dict.get(\"additional_fuser_method_mapping\", {})\n mod_list = []\n for item in modules_to_fuse:\n mod_list.append(_get_module(model, item))\n\n # Fuse list of modules\n new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)\n\n # Replace original module list with fused module list\n for i, item in enumerate(modules_to_fuse):\n _set_module(model, item, new_mod_list[i])\n\ndef fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):\n r\"\"\"Fuses a list of modules into a single module\n\n Fuses only the following sequence of modules:\n conv, bn\n conv, bn, relu\n conv, relu\n linear, relu\n bn, relu\n All other sequences are left unchanged.\n For these sequences, replaces the first item in the list\n with the fused module, replacing the rest of the modules\n with identity.\n\n Args:\n model: Model containing the modules to be fused\n modules_to_fuse: list of list of module names to fuse. 
Can also be a list\n of strings if there is only a single list of modules to fuse.\n inplace: bool specifying if fusion happens in place on the model, by default\n a new model is returned\n fuser_func: Function that takes in a list of modules and outputs a list of fused modules\n of the same length. For example,\n fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]\n Defaults to torch.quantization.fuse_known_modules\n `fuse_custom_config_dict`: custom configuration for fusion\n\n .. code-block:: python\n\n # Example of fuse_custom_config_dict\n fuse_custom_config_dict = {\n # Additional fuser_method mapping\n \"additional_fuser_method_mapping\": {\n (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn\n },\n }\n\n Returns:\n model with fused modules. A new copy is created if inplace=True.\n\n Examples::\n\n >>> m = myModel()\n >>> # m is a module containing the sub-modules below\n >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]\n >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)\n >>> output = fused_m(input)\n\n >>> m = myModel()\n >>> # Alternately provide a single list of modules to fuse\n >>> modules_to_fuse = ['conv1', 'bn1', 'relu1']\n >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)\n >>> output = fused_m(input)\n\n \"\"\"\n if not inplace:\n model = copy.deepcopy(model)\n\n if all(isinstance(module_element, str) for module_element in modules_to_fuse):\n # Handle case of modules_to_fuse being a list\n _fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)\n else:\n # Handle case of modules_to_fuse being a list of lists\n for module_list in modules_to_fuse:\n _fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)\n return model\n",
"import inspect\nimport re\nfrom typing import NamedTuple, Optional, Callable, Dict, List, Tuple, Union, Any, Set\n\nimport torch.fx.experimental.fx_acc.acc_utils as acc_utils\nimport torch\nimport torch.fx\nfrom torch.fx.node import _get_qualified_name\n\n# Need to keep up-to-date with https://fburl.com/codesearch/7r2hhh53\nALIAS_MAP = {\n \"input\": (\"input\", \"x\", \"a\", \"x1\"),\n \"dim\": (\"dim\", \"axis\"),\n \"keepdim\": (\"keepdim\", \"keepdims\"),\n \"other\": (\"other\", \"x2\"),\n}\n\n# Type used for arg replacement tuples. The list represents the argument signature of\n# some callable. Each item in the list is a tuple, where for each member of a tuple:\n# - The first member is union of either:\n# - A tuple of all potential alias kwarg str names of the source signature, or\n# - A tuple of a single str representing the single kwarg name allowed.\n# - The second member is the str name of the kwarg to map it to. This is either from the\n# signature of the acc_op, or for custom mapped nodes from the original unnormalized op.\n# - The third member is a bool representing whether this arg is optional, i.e. whether it\n# is allowed to not be present in the original input args.\nArgReplacementTuplesType = List[Tuple[Tuple[str, ...], str, bool]]\n\n\nclass NormalizationInfo(NamedTuple):\n \"\"\"\n Holds normalization info for some FX node, where the FX node will be mapped either\n via new_fn_target and arg_replacement_tuples, or via custom_mapping_fn.\n\n If via new_fn_target and arg_replacement_tuples:\n - new_fn_target is the target function to replace the original node with\n (generally some function from acc_ops).\n\n - arg_replacement_tuples describes how to map the original FX node's args/kwargs to\n the new FX node. If set to None, then the kwargs are copied directly from the\n original FX node. Else, this is list of three-member tuples, where each tuple\n represents a mapping from either an arg or kwarg in the original FX node to the\n kwarg it should be mapped to. If for ops registered with `register_acc_op` then\n this is a mapping to the the new FX node for the acc_op. Otherwise it is for some\n op registered with `register_custom_acc_mapper_fn`, in which case this is a\n mapping for the original input node so its args are normalized to kwargs before\n being custom normalized to acc_ops. The third member of the tuple is a bool\n representing whether this argument is optional; if False and the arg is not\n present then an assertion will be thrown. The index of the tuple indicates where\n the original arg is in node.args and the string name indicates which original\n kwarg it is.\n\n If via custom_mapping_fn, then custom_mapping_fn is some function that takes the\n original FX node as input and returns the FX node that should replace it. 
This means\n it was registered via `register_custom_acc_mapper_fn`.\n \"\"\"\n\n new_fn_target: Callable\n arg_replacement_tuples: Optional[ArgReplacementTuplesType]\n custom_mapping_fn: Optional[Callable]\n kwargs_to_move_to_acc_out_ty: Optional[Optional[List[Tuple[str, str]]]]\n needs_shapes_for_normalization: bool\n\n\n# Dict from (op, target) to NormalizationInfo for that op.\n_normalization_dict: Dict[Tuple[str, Union[str, Callable]], NormalizationInfo] = {}\n\n# Set of all the acc ops.\n_acc_ops: Set[Callable] = set()\n\n\ndef _insert_fun(\n op_and_target: Tuple[str, Union[str, Callable]],\n arg_replacement_tuples: List[Tuple],\n new_fn_target: Optional[Callable] = None,\n custom_mapping_fn: Optional[Callable] = None,\n kwargs_to_move_to_acc_out_ty: Optional[Optional[List[Tuple[str, str]]]] = None,\n needs_shapes_for_normalization=False,\n allow_normalize_from_torch_package=False,\n):\n if op_and_target[0] == \"call_function\":\n assert callable(op_and_target[1])\n elif op_and_target[0] == \"call_method\":\n assert isinstance(op_and_target[1], str)\n elif op_and_target[0] == \"call_module\":\n assert isinstance(op_and_target[1], type)\n\n # Finalize arg replacement tuples.\n # 1. Check to see if they have the `is_optional` bool, and if not defaulting it to\n # False.\n # 2. Some kwargs might have aliases. e.g. \"a\", \"x\" and \"x1\" are aliases of \"input\".\n # Here we replace `orig_kwarg` with a tuple of all aliases if it has aliases.\n final_arg_replacement_tuples = []\n for arg_replacement_tuple in arg_replacement_tuples:\n if len(arg_replacement_tuple) == 2:\n orig_kwarg, new_kwarg, is_optional = *arg_replacement_tuple, False\n else:\n assert len(arg_replacement_tuple) == 3\n orig_kwarg, new_kwarg, is_optional = arg_replacement_tuple\n\n if not isinstance(orig_kwarg, tuple):\n orig_kwarg = (orig_kwarg,)\n\n # Use set to avoid duplicates.\n orig_kwarg_set = set(orig_kwarg)\n\n for k in orig_kwarg:\n if k in ALIAS_MAP:\n orig_kwarg_set.update(ALIAS_MAP[k])\n final_arg_replacement_tuples.append(\n (tuple(orig_kwarg_set), new_kwarg, is_optional)\n )\n\n assert op_and_target not in _normalization_dict.keys()\n norm_info = NormalizationInfo(\n new_fn_target=new_fn_target, # type: ignore[arg-type]\n arg_replacement_tuples=final_arg_replacement_tuples,\n custom_mapping_fn=custom_mapping_fn,\n kwargs_to_move_to_acc_out_ty=kwargs_to_move_to_acc_out_ty,\n needs_shapes_for_normalization=needs_shapes_for_normalization,\n )\n _normalization_dict[op_and_target] = norm_info\n\n # If allow_normalize_from_torch_package then add another entry to\n # _normalization_dict where we look for the qualified name of the target with the\n # torch_package module prefix. 
Note that we leave off any integer at the end of\n # \"<torch_package_>\" in order to allow for whatever mangling index is used.\n if allow_normalize_from_torch_package:\n torch_package_op_and_target = (\n op_and_target[0], # type: ignore[]\n f\"<torch_package_>.{_get_qualified_name(op_and_target[1])}\", # type: ignore[arg-type]\n )\n _normalization_dict[torch_package_op_and_target] = norm_info\n\n\ndef _get_dup_signature_tuples(fn: Callable) -> List[Tuple[str, str]]:\n \"\"\"\n Helper that inspects the arg signature of `fn` and returns a list of tuples, where\n each tuple is a pair of duplicated names which is used for arg_replacement_tuples.\n \"\"\"\n sig_tuples: List[Tuple[str, str]] = []\n for param in inspect.signature(inspect.unwrap(fn)).parameters:\n sig_tuples.append((param, param))\n return sig_tuples\n\n\ndef register_acc_op(acc_op: Callable):\n \"\"\"\n For a new acc op, add this as decorator to register it.\n \"\"\"\n _acc_ops.add(acc_op)\n return acc_op\n\n\ndef register_acc_op_mapping(\n op_and_target: Tuple[str, Union[str, Callable]],\n arg_replacement_tuples: Optional[\n List[Union[Tuple[Union[str, Tuple[str, ...]], str], Tuple[Union[str, Tuple[str, ...]], str, bool]]]\n ] = None,\n kwargs_to_move_to_acc_out_ty: Optional[List[Tuple[str, str]]] = None,\n):\n \"\"\"\n Use this decorator to map a non-acc operator to an acc operator.\n\n Args:\n op_and_target: A tuple that contains op and target of the node that represents the non-acc operator.\n arg_replacement_tuples: Please refer to the comment on above for `ArgReplacementTuplesType`.\n kwargs_to_move_to_acc_out_ty: The kwargs we want to move out from the non-acc op kwargs to acc_out_ty.\n \"\"\"\n\n def insert(new_fn_target: Callable):\n # If arg_replacement_tuples is None then assume we use the same signature for\n # the acc_op and the original op.\n if arg_replacement_tuples is None:\n final_arg_replacement_tuples = _get_dup_signature_tuples(new_fn_target)\n else:\n final_arg_replacement_tuples = arg_replacement_tuples # type: ignore[assignment]\n\n _insert_fun(\n op_and_target=op_and_target,\n new_fn_target=new_fn_target,\n arg_replacement_tuples=final_arg_replacement_tuples, # type: ignore[arg-type]\n kwargs_to_move_to_acc_out_ty=kwargs_to_move_to_acc_out_ty,\n )\n return new_fn_target\n\n return insert\n\n\ndef register_custom_acc_mapper_fn(\n op_and_target: Tuple[str, Union[str, Callable]],\n arg_replacement_tuples: List[Union[Tuple[Union[str, Tuple[str, ...]], str], Tuple[Union[str, Tuple[str, ...]], str, bool]]],\n needs_shapes_for_normalization=False,\n allow_normalize_from_torch_package=False,\n):\n def insert(custom_mapping_fn: Callable):\n _insert_fun(\n op_and_target=op_and_target,\n custom_mapping_fn=custom_mapping_fn,\n arg_replacement_tuples=arg_replacement_tuples, # type: ignore[arg-type]\n needs_shapes_for_normalization=needs_shapes_for_normalization,\n allow_normalize_from_torch_package=allow_normalize_from_torch_package,\n )\n return custom_mapping_fn\n\n return insert\n\n\ndef move_kwargs_to_acc_out_ty(\n node_or_normalization_info: Union[NormalizationInfo, torch.fx.Node],\n new_kwargs: Dict[str, Any],\n):\n \"\"\"\n Given `node_or_normalization_info` which is either NormalizationInfo for a node, or\n a node to fetch NormalizationInfo for, check if kwargs_to_move_to_acc_out_ty exists\n in the NormalizationInfo, and if so perform the move of kwargs to acc_out_ty.\n \"\"\"\n\n if isinstance(node_or_normalization_info, torch.fx.Node):\n node = node_or_normalization_info\n normalization_info = 
_normalization_dict.get((node.op, node.target))\n else:\n assert isinstance(node_or_normalization_info, NormalizationInfo)\n normalization_info = node_or_normalization_info\n\n assert normalization_info is not None\n if normalization_info.kwargs_to_move_to_acc_out_ty is None:\n return\n\n assert acc_utils.is_acc_op_with_kwarg(\n normalization_info.new_fn_target, \"acc_out_ty\"\n )\n\n # Build a dict representing the new TensorMetadata to use for acc_out_ty,\n # and then remove the kwarg from the new_kwargs since it's passed in via\n # acc_out_ty instead.\n tmd_dict: Dict[str, Any] = {}\n for (\n orig_kwarg_name,\n tmd_field_name,\n ) in normalization_info.kwargs_to_move_to_acc_out_ty:\n tmd_dict[tmd_field_name] = new_kwargs[orig_kwarg_name]\n del new_kwargs[orig_kwarg_name]\n # Note: allow_partial_spec here because we are only using the tensor metadata tuple\n # here to pass specific values into the function. For example, for quantization we\n # only need to provide dtype/q_scale/q_zero_point, but is_quantized and qscheme are\n # not passed in.\n new_kwargs[\"acc_out_ty\"] = acc_utils.build_raw_tensor_meta(**tmd_dict)\n\n\ndef get_normalized_kwargs(\n node: torch.fx.Node, arg_replacement_tuples: ArgReplacementTuplesType\n):\n new_kwargs = {}\n final_arg_is_varg = False\n for i, replacement_tuple in enumerate(arg_replacement_tuples):\n orig_kwargs_names, new_kwarg_name, is_optional = replacement_tuple\n\n # Check if this is a varg and if so break/process the rest outside the loop.\n if len(orig_kwargs_names) == 1 and orig_kwargs_names[0] == \"*\":\n assert i == len(arg_replacement_tuples) - 1\n final_arg_is_varg = True\n break\n\n # If nothing is found in node.kwargs it means the kwarg is in node.arg\n # or it's optional. In this case, we set orig_kwargs_name to None.\n assert isinstance(orig_kwargs_names, tuple)\n orig_kwargs_name = next(\n (key for key in orig_kwargs_names if key in node.kwargs),\n None,\n )\n\n # If can't find in node.kwargs then it should be in the i index\n # of node.args.\n if orig_kwargs_name is None:\n if i < len(node.args):\n new_kwargs[new_kwarg_name] = node.args[i]\n else:\n # Verify the arg we're trying to normalize was optional.\n assert is_optional\n else:\n new_kwargs[new_kwarg_name] = node.kwargs[orig_kwargs_name]\n\n # If using var args then process the rest of the args now.\n if final_arg_is_varg:\n var_arg_idx = len(arg_replacement_tuples) - 1\n new_kwarg_name = arg_replacement_tuples[var_arg_idx][1]\n rest_of_args = []\n for i in range(var_arg_idx, len(node.args)):\n rest_of_args.append(node.args[i])\n new_kwargs[new_kwarg_name] = rest_of_args\n\n return new_kwargs\n\n\ndef normalize(mod: torch.fx.GraphModule, expect_nodes_have_shapes: bool = False):\n assert len(_normalization_dict) > 0\n graph = mod.graph\n\n # For \"call_module\" node we return _base_class_origin if it's a\n # RewrittenModule, otherwise, return its type. 
For other nodes,\n # we return node.target.\n def get_target(mod: torch.fx.GraphModule, node: torch.fx.Node):\n if node.op != \"call_module\":\n return node.target\n\n # Find the module that node.target points to\n m = dict(mod.named_modules())[node.target]\n return getattr(m, \"_base_class_origin\", type(m))\n\n def normalize_to_acc_op(\n node: torch.fx.Node,\n normalization_info: NormalizationInfo,\n normalized_args: Tuple[Any, ...],\n normalized_kwargs: Dict[str, Any],\n ):\n # If there's a custom mapping function then use it.\n if normalization_info.custom_mapping_fn is not None:\n # For custom mapping, the normalized_kwargs are used for the original op,\n # i.e. *before* custom acc_ops normalization. Do that now.\n node.args = normalized_args\n node.kwargs = normalized_kwargs\n new_node = normalization_info.custom_mapping_fn(node, mod)\n # If a new node is returned then use it to replace the old node. Otherwise\n # the custom mapping function did its own replacement, so return early.\n if new_node is None:\n return\n else:\n # If there's kwargs_to_move_to_acc_out_ty then use it to setup acc_out_ty in\n # normalized_kwargs, and remove the kwarg from normalized_kwargs.\n move_kwargs_to_acc_out_ty(normalization_info, normalized_kwargs)\n\n # All acc ops are functions. Create a call to the correct acc_ops target using\n # the normalized kwargs provided.\n with graph.inserting_before(node):\n new_node = graph.create_node(\n \"call_function\",\n normalization_info.new_fn_target,\n args=normalized_args,\n kwargs=normalized_kwargs,\n name=node.name,\n )\n new_node.meta = node.meta.copy()\n\n # Finally replace the original node with the normalized node.\n node.replace_all_uses_with(new_node)\n graph.erase_node(node)\n\n for node in graph.nodes:\n if node.op in {\"placeholder\", \"get_attr\", \"output\"}:\n continue\n\n normalization_info = _normalization_dict.get((node.op, get_target(mod, node)))\n\n # Also check if the torch_packaged version of the op was specified to be normalized.\n if normalization_info is None and node.op == \"call_function\":\n # Strip off the mangle_index suffix here before checking the map.\n target = re.sub(\n r\"\\A<torch_package_\\d+>\",\n \"<torch_package_>\",\n _get_qualified_name(node.target),\n )\n torch_package_op_and_target = (node.op, target)\n normalization_info = _normalization_dict.get(torch_package_op_and_target)\n\n if normalization_info is None:\n continue\n\n # Get the normalized kwargs to be used by normalize_to_acc_op below. If\n # normalization_info.arg_replacement_tuples is empty then assume the function\n # signature must be left as is.\n assert normalization_info.arg_replacement_tuples is not None\n if len(normalization_info.arg_replacement_tuples) == 0:\n normalized_args = node.args\n normalized_kwargs = node.kwargs\n else:\n normalized_args = ()\n try:\n normalized_kwargs = get_normalized_kwargs(\n node, normalization_info.arg_replacement_tuples\n )\n except Exception:\n print(\n f\"Error during kwarg normalization for: {node.format_node()}; \"\n f\"arg_replacement_tuples={normalization_info.arg_replacement_tuples}\"\n )\n raise\n\n if (\n normalization_info.needs_shapes_for_normalization\n and not expect_nodes_have_shapes\n ):\n # All nodes needing shapes for normalization should be custom mapped.\n assert normalization_info.custom_mapping_fn is not None\n # For custom mapping, the normalized_kwargs are used for the original op,\n # i.e. *before* custom acc_ops normalization. Do that now so that whoever\n # consumes the graph next (e.g. 
shape inference) can use kwargs safely.\n node.args = normalized_args\n node.kwargs = normalized_kwargs\n continue\n\n try:\n normalize_to_acc_op(\n node, normalization_info, normalized_args, normalized_kwargs\n )\n except Exception:\n print(f\"Error during normalization for node: {node.format_node()}\")\n raise\n\n # If there are any dead nodes left after normalization, eliminate them now.\n mod.graph.eliminate_dead_code()\n"
] | [
[
"torch.nn.Identity",
"torch.quantization.fuser_method_mappings.get_fuser_method"
],
[
"torch.fx.node._get_qualified_name",
"torch.fx.experimental.fx_acc.acc_utils.is_acc_op_with_kwarg",
"torch.fx.experimental.fx_acc.acc_utils.build_raw_tensor_meta"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SmolakK/HuMobi | [
"67b40f839a843123093582935e89f91e16bc4374"
] | [
"tools/processing.py"
] | [
"import pandas as pd\nimport numpy as np\n\n\ndef top_time(ind=None, gs=None):\n\t\"\"\"\n\tSelects the location (by coordinates) which was visited for the longest period during given time interval\n\t:param ind: user id\n\t:param gs: GeoDataFrame from groupby execution containing all the data in the given time interval\n\t:return: user id (if given) and the data for the longest visited location\n\t\"\"\"\n\taggregated = []\n\tfor tstamp, g in gs: # for each record in the GeoDataFrame\n\t\tif len(g) > 1: # if there is more than one record\n\t\t\tdiff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1] # checks when coordinates change\n\t\t\tif diff_places.any(): # if there is change in locations\n\t\t\t\tg_res = g.reset_index() # drop index\n\t\t\t\tdiffs = g_res.shift(-1)['datetime'] - g_res['datetime'] # find time differences (spent in location)\n\t\t\t\tjoined_dfs = g_res.join(diffs, rsuffix='a') # add them to locations\n\t\t\t\tjoined_dfs['geometry'] = g_res['geometry'].astype(str) # copy geometry as string\n\t\t\t\tpoint_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax() # grouping locations find the longest time sum\n\t\t\t\tselected = g[g['geometry'].astype(str) == point_max] # select the location with the highest total time\n\t\t\telse:\n\t\t\t\tselected = g # if one location visited - copy GeoDataFrame\n\t\telse:\n\t\t\tselected = g\n\t\taggregated.append(selected)\n\tif ind is None:\n\t\treturn pd.concat(aggregated)\n\telse:\n\t\treturn ind, pd.concat(aggregated)\n\n\ndef mode_geoseries(ind, gs):\n\t\"\"\"\n\tCalculates mode for GeoSeries\n\t:param ind: identifier\n\t:param gs: GeoSeries\n\t:return: identifier and a mode for GeoSeries\n\t\"\"\"\n\taggregated = []\n\tfor g in gs:\n\t\tif g[1].empty:\n\t\t\taggregated.append(None)\n\t\telse:\n\t\t\tselected = g[1].mode()\n\t\t\tselected = selected.set_index(g[1].index)\n\t\t\taggregated.append(selected)\n\treturn ind, pd.concat(aggregated)\n\n\ndef rowwise_average(gs, row_count=None):\n\t\"\"\"\n\tCalculates an average for each row in each group - rowwise.\n\t:param gs: GeoSeries\n\t:param row_count: defines how much rows should be considered\n\t:return: averaged GeoSeries rowwise\n\t\"\"\"\n\tif row_count is None:\n\t\trow_count = gs.groupby(level=0).size().max()\n\treturn pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)])\n\n\ndef groupwise_average(gs):\n\t\"\"\"\n\tCalculates an average from each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: averaged GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).mean()\n\n\ndef groupwise_normalise(gs):\n\t\"\"\"\n\tNormalises each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: normalised GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).apply(lambda x: x / x.sum())\n\n\ndef groupwise_expansion(gs):\n\t\"\"\"\n\tCalculates expanding mean for each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: averaged GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).expanding().mean()\n\n\ndef total_normalise(gs):\n\t\"\"\"\n\tPerforms complete normalisation of GeoSeries\n\t:param gs: GeoSeries\n\t:return: normalised GeoSeries\n\t\"\"\"\n\treturn gs / gs.sum()\n\n\ndef start_end(trajectories_frame):\n\t\"\"\"\n\tCompresses stops in TrajectoriesFrame by adding start and end of visits in locations\n\t:param trajectories_frame: TrajectoriesFrame object class\n\t:return: compressed TrajectoriesFrame\n\t\"\"\"\n\tto_concat = []\n\tif 'date' not in trajectories_frame.columns:\n\t\ttrajectories_frame['date'] = 
trajectories_frame.index.get_level_values(1)\n\tfor gs in trajectories_frame.groupby(level=0):\n\t\tfirsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']]\n\t\tlasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']]\n\t\tfirsts.loc[:, 'start'] = firsts['date']\n\t\tlasts = lasts.set_index(firsts.index)\n\t\tfirsts.loc[:, 'end'] = lasts['date']\n\t\tfirsts = firsts[firsts['start'] != firsts['end']]\n\t\tto_concat.append(firsts)\n\treturn pd.concat(to_concat)\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
temper8/MatBench | [
"1ea24d18af35b57ef2d61148709eb6d49835fe97"
] | [
"show_config.py"
] | [
"import numpy as np \nnp.show_config()"
] | [
[
"numpy.show_config"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
clsteel/DeepPostures | [
"8a7bed8f1e47e4a502080bf6edd513b822ea0bdf"
] | [
"MSSE-2021/train_model.py"
] | [
"# Copyright 2021 Supun Nakandala. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport math\nimport argparse\n\nsys.path.append('./')\nfrom commons import cnn_bi_lstm_model, input_iterator\n\n# Setting random seeds\ntf.random.set_random_seed(2019)\nrandom.seed(2019)\nnp.random.seed(2019)\n\ndef get_train_ops(y, logits, learning_rate, n_classes, class_weights):\n y = tf.reshape(y, [-1])\n logits = tf.reshape(logits, [-1, n_classes])\n balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes)\n y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes])\n\n loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1))\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n\n return train_op, update_op, balanced_accuracy, loss\n\n\ndef window_generator(data_root, win_size_10s, subject_ids):\n x_segments = []; y_segments = []\n for subject_id in subject_ids:\n for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True):\n x_window = []; y_window = []\n for x,y in zip(x_seq, y_seq):\n x_window.append(x)\n y_window.append(y)\n\n if len(y_window) == win_size_10s:\n yield np.stack(x_window, axis=0), np.stack(y_window, axis=0)\n x_window = []; y_window = []\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Argument parser for training CNN model.')\n optional_arguments = parser._action_groups.pop()\n required_arguments = parser.add_argument_group('required arguments')\n required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True)\n\n optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS'])\n optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False)\n optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False)\n optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False)\n \n optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False)\n optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False)\n optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the 
predictions to be smoothed (default: 7)', default=7, type=int, required=False)\n \n optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False)\n optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False)\n optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False)\n optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False)\n optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False)\n \n optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False)\n optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False)\n optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False)\n optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true')\n parser._action_groups.append(optional_arguments)\n args = parser.parse_args()\n\n if os.path.exists(args.model_checkpoint_path):\n raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path))\n\n if args.transfer_learning_model:\n if args.transfer_learning_model == 'CHAP_ALL_ADULTS':\n args.amp_factor = 2\n args.cnn_window_size = 10\n args.bi_lstm_win_size = 7\n else:\n raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model))\n \n assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation,test split fractions should add up to 100%'\n \n subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)]\n random.shuffle(subject_ids)\n\n n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.))\n train_subjects = subject_ids[:n_train_subjects]\n subject_ids = subject_ids[n_train_subjects:]\n\n test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100\n n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.))\n test_subjects = subject_ids[:n_test_subjects]\n valid_subjects = subject_ids[n_test_subjects:] \n\n output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size)))\n bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size\n train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32),\n output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10)\n valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32),\n 
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)\n test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, test_subjects),output_types=(tf.float32, tf.int32),\n output_shapes=output_shapes).batch(args.batch_size).prefetch(10)\n \n iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n\n train_init_op = iterator.make_initializer(train_dataset)\n valid_init_op = iterator.make_initializer(valid_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n x, y = iterator.get_next()\n \n x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1])\n x = tf.identity(x, name='input')\n y = tf.reshape(y, [-1, bi_lstm_win_size])\n\n learning_rate = tf.placeholder(tf.float32)\n logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes)\n output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output')\n prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction')\n\n class_weights = eval(args.class_weights) \n train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n if args.transfer_learning_model:\n ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model')\n # Weights for the final classification layer (dense) are ignored\n variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')]\n restorer = tf.train.Saver(variables)\n restorer.restore(sess, ckpt_path)\n \n if not args.silent:\n print('Training subjects: {}'.format(train_subjects))\n print('Validation subjects: {}'.format(valid_subjects))\n print('Testing subjects: {}'.format(test_subjects))\n\n for epoch in range(args.num_epochs):\n for label, init_op, subjects in zip([\"Train\", \"Validation\", \"Test\"],\n [train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]):\n sess.run(tf.local_variables_initializer())\n sess.run(init_op)\n losses = []\n while True:\n try:\n if label == \"Train\":\n _, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate})\n elif label == \"Validation\":\n _, l = sess.run([update_op, loss])\n elif label == \"Test\":\n _, l = sess.run([update_op, loss])\n losses.append(l)\n except tf.errors.OutOfRangeError:\n if not args.silent:\n ba = sess.run(balanced_accuracy)\n print(\"Epoch: %d, %s Loss: %f, Balanced Accuracy: %f\" %(epoch, label, sum(losses), ba))\n break\n\n if not os.path.exists(args.model_checkpoint_path):\n os.makedirs(args.model_checkpoint_path)\n\n tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={\"input\": x}, outputs={\"output\": output})\n\n if not args.silent:\n print('Model saved in path: {}'.format(args.model_checkpoint_path)) \n"
] | [
[
"tensorflow.train.Saver",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.constant",
"tensorflow.local_variables_initializer",
"numpy.random.seed",
"tensorflow.get_collection",
"tensorflow.reshape",
"tensorflow.identity",
"tensorflow.placeholder",
"numpy.stack",
"tensorflow.global_variables_initializer",
"tensorflow.data.Iterator.from_structure",
"tensorflow.train.AdamOptimizer",
"tensorflow.one_hot",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.random.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fchouteau/imgaug | [
"b282b97c13a27a32f91c2e2666db1e128e00cfde",
"b282b97c13a27a32f91c2e2666db1e128e00cfde"
] | [
"imgaug/augmenters/size.py",
"imgaug/imgaug.py"
] | [
"\"\"\"\nAugmenters that somehow change the size of the images.\n\nList of augmenters:\n\n * :class:`Resize`\n * :class:`CropAndPad`\n * :class:`Crop`\n * :class:`Pad`\n * :class:`PadToFixedSize`\n * :class:`CenterPadToFixedSize`\n * :class:`CropToFixedSize`\n * :class:`CenterCropToFixedSize`\n * :class:`CropToMultiplesOf`\n * :class:`CenterCropToMultiplesOf`\n * :class:`PadToMultiplesOf`\n * :class:`CenterPadToMultiplesOf`\n * :class:`CropToPowersOf`\n * :class:`CenterCropToPowersOf`\n * :class:`PadToPowersOf`\n * :class:`CenterPadToPowersOf`\n * :class:`CropToAspectRatio`\n * :class:`CenterCropToAspectRatio`\n * :class:`PadToAspectRatio`\n * :class:`CenterPadToAspectRatio`\n * :class:`CropToSquare`\n * :class:`CenterCropToSquare`\n * :class:`PadToSquare`\n * :class:`CenterPadToSquare`\n * :class:`KeepSizeByResize`\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport re\nimport functools\n\nimport numpy as np\nimport cv2\n\nimport imgaug as ia\nfrom imgaug.imgaug import _normalize_cv2_input_arr_\nfrom . import meta\nfrom .. import parameters as iap\n\n\ndef _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):\n if prevent_zero_size:\n top, right, bottom, left = _crop_prevent_zero_size(\n shape[0], shape[1], top, right, bottom, left)\n\n height, width = shape[0:2]\n x1 = left\n x2 = width - right\n y1 = top\n y2 = height - bottom\n\n # these steps prevent negative sizes\n # if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis\n # note that if height/width of arr is zero, then y2==y1 or x2==x1, which\n # is still valid, even if height/width is zero and results in a zero-sized\n # axis\n x2 = max(x2, x1)\n y2 = max(y2, y1)\n\n return x1, y1, x2, y2\n\n\ndef _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):\n x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,\n prevent_zero_size=prevent_zero_size)\n return arr[y1:y2, x1:x2, ...]\n\n\ndef _crop_and_pad_arr(arr, croppings, paddings, pad_mode=\"constant\",\n pad_cval=0, keep_size=False):\n height, width = arr.shape[0:2]\n\n image_cr = _crop_arr_(arr, *croppings)\n\n image_cr_pa = pad(\n image_cr,\n top=paddings[0], right=paddings[1],\n bottom=paddings[2], left=paddings[3],\n mode=pad_mode, cval=pad_cval)\n\n if keep_size:\n image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))\n\n return image_cr_pa\n\n\ndef _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,\n pad_mode=\"constant\", pad_cval=0.0, keep_size=False):\n return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,\n paddings_img, pad_mode, pad_cval,\n keep_size)\n\n\ndef _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,\n pad_mode=\"constant\", pad_cval=0, keep_size=False):\n return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,\n paddings_img, pad_mode, pad_cval,\n keep_size)\n\n\ndef _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,\n paddings_img, pad_mode=\"constant\",\n pad_cval=None, keep_size=False):\n if isinstance(augmentable, ia.HeatmapsOnImage):\n arr_attr_name = \"arr_0to1\"\n pad_cval = pad_cval if pad_cval is not None else 0.0\n else:\n assert isinstance(augmentable, ia.SegmentationMapsOnImage), (\n \"Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s.\" % (\n type(augmentable)))\n arr_attr_name = \"arr\"\n pad_cval = pad_cval if pad_cval is not None else 0\n\n arr = getattr(augmentable, arr_attr_name)\n arr_shape_orig = arr.shape\n augm_shape = augmentable.shape\n\n croppings_proj = 
_project_size_changes(croppings_img, augm_shape, arr.shape)\n paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)\n\n croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],\n *croppings_proj)\n\n arr_cr = _crop_arr_(arr,\n croppings_proj[0], croppings_proj[1],\n croppings_proj[2], croppings_proj[3])\n arr_cr_pa = pad(\n arr_cr,\n top=paddings_proj[0], right=paddings_proj[1],\n bottom=paddings_proj[2], left=paddings_proj[3],\n mode=pad_mode,\n cval=pad_cval)\n\n setattr(augmentable, arr_attr_name, arr_cr_pa)\n\n if keep_size:\n augmentable = augmentable.resize(arr_shape_orig[0:2])\n else:\n augmentable.shape = _compute_shape_after_crop_and_pad(\n augmentable.shape, croppings_img, paddings_img)\n return augmentable\n\n\ndef _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):\n # using the trbl function instead of croppings_img has the advantage\n # of incorporating prevent_zero_size, dealing with zero-sized input image\n # axis and dealing the negative crop amounts\n x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)\n crop_left = x1\n crop_top = y1\n\n shape_orig = kpsoi.shape\n shifted = kpsoi.shift_(\n x=-crop_left+paddings_img[3],\n y=-crop_top+paddings_img[0])\n shifted.shape = _compute_shape_after_crop_and_pad(\n shape_orig, croppings_img, paddings_img)\n if keep_size:\n shifted = shifted.on_(shape_orig)\n return shifted\n\n\ndef _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):\n x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)\n new_shape = list(old_shape)\n new_shape[0] = y2 - y1 + paddings[0] + paddings[2]\n new_shape[1] = x2 - x1 + paddings[1] + paddings[3]\n return tuple(new_shape)\n\n\ndef _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,\n crop_left):\n remaining_height = height - (crop_top + crop_bottom)\n remaining_width = width - (crop_left + crop_right)\n if remaining_height < 1:\n regain = abs(remaining_height) + 1\n regain_top = regain // 2\n regain_bottom = regain // 2\n if regain_top + regain_bottom < regain:\n regain_top += 1\n\n if regain_top > crop_top:\n diff = regain_top - crop_top\n regain_top = crop_top\n regain_bottom += diff\n elif regain_bottom > crop_bottom:\n diff = regain_bottom - crop_bottom\n regain_bottom = crop_bottom\n regain_top += diff\n\n crop_top = crop_top - regain_top\n crop_bottom = crop_bottom - regain_bottom\n\n if remaining_width < 1:\n regain = abs(remaining_width) + 1\n regain_right = regain // 2\n regain_left = regain // 2\n if regain_right + regain_left < regain:\n regain_right += 1\n\n if regain_right > crop_right:\n diff = regain_right - crop_right\n regain_right = crop_right\n regain_left += diff\n elif regain_left > crop_left:\n diff = regain_left - crop_left\n regain_left = crop_left\n regain_right += diff\n\n crop_right = crop_right - regain_right\n crop_left = crop_left - regain_left\n\n return (\n max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),\n max(crop_left, 0))\n\n\ndef _project_size_changes(trbl, from_shape, to_shape):\n if from_shape[0:2] == to_shape[0:2]:\n return trbl\n\n height_to = to_shape[0]\n width_to = to_shape[1]\n height_from = from_shape[0]\n width_from = from_shape[1]\n\n top = trbl[0]\n right = trbl[1]\n bottom = trbl[2]\n left = trbl[3]\n\n # Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap\n # is exactly half the size of an image and the size change on an axis is\n # an odd value. 
Then the projected value would end up being <something>.5\n # and the rounding would always round up to the next integer. If both\n # sides then have the same change, they are both rounded up, resulting\n # in more change than expected.\n # E.g. image height is 8, map height is 4, change is 3 at the top and 3 at\n # the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded\n # up to 2.0. Hence, the maps are changed by 4 (100% of the map height,\n # vs. 6 for images, which is 75% of the image height).\n top = _int_r(height_to * (top/height_from) - 1e-4)\n right = _int_r(width_to * (right/width_from) + 1e-4)\n bottom = _int_r(height_to * (bottom/height_from) + 1e-4)\n left = _int_r(width_to * (left/width_from) - 1e-4)\n\n return top, right, bottom, left\n\n\ndef _int_r(value):\n return int(np.round(value))\n\n\n# TODO somehow integrate this with pad()\ndef _handle_pad_mode_param(pad_mode):\n pad_modes_available = {\n \"constant\", \"edge\", \"linear_ramp\", \"maximum\", \"mean\", \"median\",\n \"minimum\", \"reflect\", \"symmetric\", \"wrap\"}\n if pad_mode == ia.ALL:\n return iap.Choice(list(pad_modes_available))\n if ia.is_string(pad_mode):\n assert pad_mode in pad_modes_available, (\n \"Value '%s' is not a valid pad mode. Valid pad modes are: %s.\" % (\n pad_mode, \", \".join(pad_modes_available)))\n return iap.Deterministic(pad_mode)\n if isinstance(pad_mode, list):\n assert all([v in pad_modes_available for v in pad_mode]), (\n \"At least one in list %s is not a valid pad mode. Valid pad \"\n \"modes are: %s.\" % (str(pad_mode), \", \".join(pad_modes_available)))\n return iap.Choice(pad_mode)\n if isinstance(pad_mode, iap.StochasticParameter):\n return pad_mode\n raise Exception(\n \"Expected pad_mode to be ia.ALL or string or list of strings or \"\n \"StochasticParameter, got %s.\" % (type(pad_mode),))\n\n\ndef _handle_position_parameter(position):\n if position == \"uniform\":\n return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)\n if position == \"normal\":\n return (\n iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),\n minval=0.0, maxval=1.0),\n iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),\n minval=0.0, maxval=1.0)\n )\n if position == \"center\":\n return iap.Deterministic(0.5), iap.Deterministic(0.5)\n if (ia.is_string(position)\n and re.match(r\"^(left|center|right)-(top|center|bottom)$\",\n position)):\n mapping = {\"top\": 0.0, \"center\": 0.5, \"bottom\": 1.0, \"left\": 0.0,\n \"right\": 1.0}\n return (\n iap.Deterministic(mapping[position.split(\"-\")[0]]),\n iap.Deterministic(mapping[position.split(\"-\")[1]])\n )\n if isinstance(position, iap.StochasticParameter):\n return position\n if isinstance(position, tuple):\n assert len(position) == 2, (\n \"Expected tuple with two entries as position parameter. \"\n \"Got %d entries with types %s..\" % (\n len(position), str([type(item) for item in position])))\n for item in position:\n if ia.is_single_number(item) and (item < 0 or item > 1.0):\n raise Exception(\n \"Both position values must be within the value range \"\n \"[0.0, 1.0]. Got type %s with value %.8f.\" % (\n type(item), item,))\n position = [iap.Deterministic(item)\n if ia.is_single_number(item)\n else item for item in position]\n\n only_sparams = all([isinstance(item, iap.StochasticParameter)\n for item in position])\n assert only_sparams, (\n \"Expected tuple with two entries that are both either \"\n \"StochasticParameter or float/int. 
Got types %s.\" % (\n str([type(item) for item in position])\n ))\n return tuple(position)\n raise Exception(\n \"Expected one of the following as position parameter: string \"\n \"'uniform', string 'normal', string 'center', a string matching \"\n \"regex ^(left|center|right)-(top|center|bottom)$, a single \"\n \"StochasticParameter or a tuple of two entries, both being either \"\n \"StochasticParameter or floats or int. Got instead type %s with \"\n \"content '%s'.\" % (\n type(position),\n (str(position)\n if len(str(position)) < 20\n else str(position)[0:20] + \"...\")\n )\n )\n\n\n# TODO this is the same as in imgaug.py, make DRY\ndef _assert_two_or_three_dims(shape):\n if hasattr(shape, \"shape\"):\n shape = shape.shape\n assert len(shape) in [2, 3], (\n \"Expected image with two or three dimensions, but got %d dimensions \"\n \"and shape %s.\" % (len(shape), shape))\n\n\ndef pad(arr, top=0, right=0, bottom=0, left=0, mode=\"constant\", cval=0):\n \"\"\"Pad an image-like array on its top/right/bottom/left side.\n\n This function is a wrapper around :func:`numpy.pad`.\n\n Supported dtypes\n ----------------\n\n * ``uint8``: yes; fully tested (1)\n * ``uint16``: yes; fully tested (1)\n * ``uint32``: yes; fully tested (2) (3)\n * ``uint64``: yes; fully tested (2) (3)\n * ``int8``: yes; fully tested (1)\n * ``int16``: yes; fully tested (1)\n * ``int32``: yes; fully tested (1)\n * ``int64``: yes; fully tested (2) (3)\n * ``float16``: yes; fully tested (2) (3)\n * ``float32``: yes; fully tested (1)\n * ``float64``: yes; fully tested (1)\n * ``float128``: yes; fully tested (2) (3)\n * ``bool``: yes; tested (2) (3)\n\n - (1) Uses ``cv2`` if `mode` is one of: ``\"constant\"``, ``\"edge\"``,\n ``\"reflect\"``, ``\"symmetric\"``. Otherwise uses ``numpy``.\n - (2) Uses ``numpy``.\n - (3) Rejected by ``cv2``.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n top : int, optional\n Amount of pixels to add to the top side of the image.\n Must be ``0`` or greater.\n\n right : int, optional\n Amount of pixels to add to the right side of the image.\n Must be ``0`` or greater.\n\n bottom : int, optional\n Amount of pixels to add to the bottom side of the image.\n Must be ``0`` or greater.\n\n left : int, optional\n Amount of pixels to add to the left side of the image.\n Must be ``0`` or greater.\n\n mode : str, optional\n Padding mode to use. See :func:`numpy.pad` for details.\n In case of mode ``constant``, the parameter `cval` will be used as\n the ``constant_values`` parameter to :func:`numpy.pad`.\n In case of mode ``linear_ramp``, the parameter `cval` will be used as\n the ``end_values`` parameter to :func:`numpy.pad`.\n\n cval : number or iterable of number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details. The cval is expected to match the\n input array's dtype and value range. If an iterable is used, it is\n expected to contain one value per channel. 
The number of values\n and number of channels are expected to match.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded array with height ``H'=H+top+bottom`` and width\n ``W'=W+left+right``.\n\n \"\"\"\n import imgaug.dtypes as iadt\n\n _assert_two_or_three_dims(arr)\n assert all([v >= 0 for v in [top, right, bottom, left]]), (\n \"Expected padding amounts that are >=0, but got %d, %d, %d, %d \"\n \"(top, right, bottom, left)\" % (top, right, bottom, left))\n\n is_multi_cval = ia.is_iterable(cval)\n\n if top > 0 or right > 0 or bottom > 0 or left > 0:\n min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype)\n\n # without the if here there are crashes for float128, e.g. if\n # cval is an int (just using float(cval) seems to not be accurate\n # enough)\n if arr.dtype.name == \"float128\":\n cval = np.float128(cval) # pylint: disable=no-member\n\n if is_multi_cval:\n cval = np.clip(cval, min_value, max_value)\n else:\n cval = max(min(cval, max_value), min_value)\n\n # Note that copyMakeBorder() hangs/runs endlessly if arr has an\n # axis of size 0 and mode is \"reflect\".\n # Numpy also complains in these cases if mode is not \"constant\".\n has_zero_sized_axis = any([axis == 0 for axis in arr.shape])\n if has_zero_sized_axis:\n mode = \"constant\"\n\n mapping_mode_np_to_cv2 = {\n \"constant\": cv2.BORDER_CONSTANT,\n \"edge\": cv2.BORDER_REPLICATE,\n \"linear_ramp\": None,\n \"maximum\": None,\n \"mean\": None,\n \"median\": None,\n \"minimum\": None,\n \"reflect\": cv2.BORDER_REFLECT_101,\n \"symmetric\": cv2.BORDER_REFLECT,\n \"wrap\": None,\n cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,\n cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,\n cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,\n cv2.BORDER_REFLECT: cv2.BORDER_REFLECT\n }\n bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None\n\n # these datatypes all simply generate a \"TypeError: src data type = X\n # is not supported\" error\n bad_datatype_cv2 = (\n arr.dtype.name\n in [\"uint32\", \"uint64\", \"int64\", \"float16\", \"float128\", \"bool\"]\n )\n\n # OpenCV turns the channel axis for arrays with 0 channels to 512\n # TODO add direct test for this. 
indirectly tested via Pad\n bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0)\n\n if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2:\n # convert cval to expected type, as otherwise we get TypeError\n # for np inputs\n kind = arr.dtype.kind\n if is_multi_cval:\n cval = [float(cval_c) if kind == \"f\" else int(cval_c)\n for cval_c in cval]\n else:\n cval = float(cval) if kind == \"f\" else int(cval)\n\n if arr.ndim == 2 or arr.shape[2] <= 4:\n # without this, only the first channel is padded with the cval,\n # all following channels with 0\n if arr.ndim == 3 and not is_multi_cval:\n cval = tuple([cval] * arr.shape[2])\n\n arr_pad = cv2.copyMakeBorder(\n _normalize_cv2_input_arr_(arr),\n top=top, bottom=bottom, left=left, right=right,\n borderType=mapping_mode_np_to_cv2[mode], value=cval)\n if arr.ndim == 3 and arr_pad.ndim == 2:\n arr_pad = arr_pad[..., np.newaxis]\n else:\n result = []\n channel_start_idx = 0\n cval = cval if is_multi_cval else tuple([cval] * arr.shape[2])\n while channel_start_idx < arr.shape[2]:\n arr_c = arr[..., channel_start_idx:channel_start_idx+4]\n cval_c = cval[channel_start_idx:channel_start_idx+4]\n arr_pad_c = cv2.copyMakeBorder(\n _normalize_cv2_input_arr_(arr_c),\n top=top, bottom=bottom, left=left, right=right,\n borderType=mapping_mode_np_to_cv2[mode], value=cval_c)\n arr_pad_c = np.atleast_3d(arr_pad_c)\n result.append(arr_pad_c)\n channel_start_idx += 4\n arr_pad = np.concatenate(result, axis=2)\n else:\n # paddings for 2d case\n paddings_np = [(top, bottom), (left, right)]\n\n # add paddings for 3d case\n if arr.ndim == 3:\n paddings_np.append((0, 0))\n\n if mode == \"constant\":\n if arr.ndim > 2 and is_multi_cval:\n arr_pad_chans = [\n np.pad(arr[..., c], paddings_np[0:2], mode=mode,\n constant_values=cval[c])\n for c in np.arange(arr.shape[2])]\n arr_pad = np.stack(arr_pad_chans, axis=-1)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode,\n constant_values=cval)\n elif mode == \"linear_ramp\":\n if arr.ndim > 2 and is_multi_cval:\n arr_pad_chans = [\n np.pad(arr[..., c], paddings_np[0:2], mode=mode,\n end_values=cval[c])\n for c in np.arange(arr.shape[2])]\n arr_pad = np.stack(arr_pad_chans, axis=-1)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode,\n end_values=cval)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode)\n\n return arr_pad\n return np.copy(arr)\n\n\ndef pad_to_aspect_ratio(arr, aspect_ratio, mode=\"constant\", cval=0,\n return_pad_amounts=False):\n \"\"\"Pad an image array on its sides so that it matches a target aspect ratio.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the\n image having twice as much width as height.\n\n mode : str, optional\n Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If ``False``, then only the padded image will be returned. If\n ``True``, a ``tuple`` with two entries will be returned, where the\n first entry is the padded image and the second entry are the amounts\n by which each image side was padded. 
These amounts are again a\n ``tuple`` of the form ``(top, right, bottom, left)``, with each value\n being an ``int``.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray, fulfilling the\n given `aspect_ratio`.\n\n tuple of int\n Amounts by which the image was padded on each side, given as a\n ``tuple`` ``(top, right, bottom, left)``.\n This ``tuple`` is only returned if `return_pad_amounts` was set to\n ``True``.\n\n \"\"\"\n pad_top, pad_right, pad_bottom, pad_left = \\\n compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio)\n arr_padded = pad(\n arr,\n top=pad_top,\n right=pad_right,\n bottom=pad_bottom,\n left=pad_left,\n mode=mode,\n cval=cval\n )\n\n if return_pad_amounts:\n return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)\n return arr_padded\n\n\ndef pad_to_multiples_of(arr, height_multiple, width_multiple, mode=\"constant\",\n cval=0, return_pad_amounts=False):\n \"\"\"Pad an image array until its side lengths are multiples of given values.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n height_multiple : None or int\n The desired multiple of the height. The computed padding amount will\n reflect a padding that increases the y axis size until it is a multiple\n of this value.\n\n width_multiple : None or int\n The desired multiple of the width. The computed padding amount will\n reflect a padding that increases the x axis size until it is a multiple\n of this value.\n\n mode : str, optional\n Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If ``False``, then only the padded image will be returned. If\n ``True``, a ``tuple`` with two entries will be returned, where the\n first entry is the padded image and the second entry are the amounts\n by which each image side was padded. 
These amounts are again a\n ``tuple`` of the form ``(top, right, bottom, left)``, with each value\n being an integer.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray.\n\n tuple of int\n Amounts by which the image was padded on each side, given as a\n ``tuple`` ``(top, right, bottom, left)``.\n This ``tuple`` is only returned if `return_pad_amounts` was set to\n ``True``.\n\n \"\"\"\n pad_top, pad_right, pad_bottom, pad_left = \\\n compute_paddings_to_reach_multiples_of(\n arr, height_multiple, width_multiple)\n arr_padded = pad(\n arr,\n top=pad_top,\n right=pad_right,\n bottom=pad_bottom,\n left=pad_left,\n mode=mode,\n cval=cval\n )\n\n if return_pad_amounts:\n return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)\n return arr_padded\n\n\ndef compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio):\n \"\"\"Compute pad amounts required to fulfill an aspect ratio.\n\n \"Pad amounts\" here denotes the number of pixels that have to be added to\n each side to fulfill the desired constraint.\n\n The aspect ratio is given as ``ratio = width / height``.\n Depending on which dimension is smaller (height or width), only the\n corresponding sides (top/bottom or left/right) will be padded.\n\n The axis-wise padding amounts are always distributed equally over the\n sides of the respective axis (i.e. left and right, top and bottom). For\n odd pixel amounts, one pixel will be left over after the equal\n distribution and could be added to either side of the axis. This function\n will always add such a left over pixel to the bottom (y-axis) or\n right (x-axis) side.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 
``2.0`` denotes the\n image having twice as much width as height.\n\n Returns\n -------\n tuple of int\n Required padding amounts to reach the target aspect ratio, given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n _assert_two_or_three_dims(arr)\n assert aspect_ratio > 0, (\n \"Expected to get an aspect ratio >0, got %.4f.\" % (aspect_ratio,))\n\n pad_top = 0\n pad_right = 0\n pad_bottom = 0\n pad_left = 0\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n if height == 0:\n height = 1\n pad_bottom += 1\n if width == 0:\n width = 1\n pad_right += 1\n\n aspect_ratio_current = width / height\n\n if aspect_ratio_current < aspect_ratio:\n # image is more vertical than desired, width needs to be increased\n diff = (aspect_ratio * height) - width\n pad_right += int(np.ceil(diff / 2))\n pad_left += int(np.floor(diff / 2))\n elif aspect_ratio_current > aspect_ratio:\n # image is more horizontal than desired, height needs to be increased\n diff = ((1/aspect_ratio) * width) - height\n pad_top += int(np.floor(diff / 2))\n pad_bottom += int(np.ceil(diff / 2))\n\n return pad_top, pad_right, pad_bottom, pad_left\n\n\ndef compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio):\n \"\"\"Compute crop amounts required to fulfill an aspect ratio.\n\n \"Crop amounts\" here denotes the number of pixels that have to be removed\n from each side to fulfill the desired constraint.\n\n The aspect ratio is given as ``ratio = width / height``.\n Depending on which dimension is smaller (height or width), only the\n corresponding sides (top/bottom or left/right) will be cropped.\n\n The axis-wise padding amounts are always distributed equally over the\n sides of the respective axis (i.e. left and right, top and bottom). For\n odd pixel amounts, one pixel will be left over after the equal\n distribution and could be added to either side of the axis. This function\n will always add such a left over pixel to the bottom (y-axis) or\n right (x-axis) side.\n\n If an aspect ratio cannot be reached exactly, this function will return\n rather one pixel too few than one pixel too many.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 
``2.0`` denotes the\n image having twice as much width as height.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to reach the target aspect ratio, given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n _assert_two_or_three_dims(arr)\n assert aspect_ratio > 0, (\n \"Expected to get an aspect ratio >0, got %.4f.\" % (aspect_ratio,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n assert shape[0] > 0, (\n \"Expected to get an array with height >0, got shape %s.\" % (shape,))\n\n height, width = shape[0:2]\n aspect_ratio_current = width / height\n\n top = 0\n right = 0\n bottom = 0\n left = 0\n\n if aspect_ratio_current < aspect_ratio:\n # image is more vertical than desired, height needs to be reduced\n # c = H - W/r\n crop_amount = height - (width / aspect_ratio)\n crop_amount = min(crop_amount, height - 1)\n top = int(np.floor(crop_amount / 2))\n bottom = int(np.ceil(crop_amount / 2))\n elif aspect_ratio_current > aspect_ratio:\n # image is more horizontal than desired, width needs to be reduced\n # c = W - Hr\n crop_amount = width - height * aspect_ratio\n crop_amount = min(crop_amount, width - 1)\n left = int(np.floor(crop_amount / 2))\n right = int(np.ceil(crop_amount / 2))\n\n return top, right, bottom, left\n\n\ndef compute_paddings_to_reach_multiples_of(arr, height_multiple,\n width_multiple):\n \"\"\"Compute pad amounts until img height/width are multiples of given values.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n height_multiple : None or int\n The desired multiple of the height. The computed padding amount will\n reflect a padding that increases the y axis size until it is a multiple\n of this value.\n\n width_multiple : None or int\n The desired multiple of the width. 
The computed padding amount will\n reflect a padding that increases the x axis size until it is a multiple\n of this value.\n\n Returns\n -------\n tuple of int\n Required padding amounts to reach multiples of the provided values,\n given as a ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, multiple):\n if multiple is None:\n return 0, 0\n if axis_size == 0:\n to_pad = multiple\n elif axis_size % multiple == 0:\n to_pad = 0\n else:\n to_pad = multiple - (axis_size % multiple)\n return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_multiple is not None:\n assert height_multiple > 0, (\n \"Can only pad to multiples of 1 or larger, got %d.\" % (\n height_multiple,))\n if width_multiple is not None:\n assert width_multiple > 0, (\n \"Can only pad to multiples of 1 or larger, got %d.\" % (\n width_multiple,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_multiple)\n left, right = _compute_axis_value(width, width_multiple)\n\n return top, right, bottom, left\n\n\ndef compute_croppings_to_reach_multiples_of(arr, height_multiple,\n width_multiple):\n \"\"\"Compute croppings to reach multiples of given heights/widths.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required cropping amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n height_multiple : None or int\n The desired multiple of the height. The computed croppings will\n reflect a crop operation that decreases the y axis size until it is\n a multiple of this value.\n\n width_multiple : None or int\n The desired multiple of the width. The computed croppings amount will\n reflect a crop operation that decreases the x axis size until it is\n a multiple of this value.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to reach multiples of the provided values,\n given as a ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, multiple):\n if multiple is None:\n return 0, 0\n if axis_size == 0:\n to_crop = 0\n elif axis_size % multiple == 0:\n to_crop = 0\n else:\n to_crop = axis_size % multiple\n return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_multiple is not None:\n assert height_multiple > 0, (\n \"Can only crop to multiples of 1 or larger, got %d.\" % (\n height_multiple,))\n if width_multiple is not None:\n assert width_multiple > 0, (\n \"Can only crop to multiples of 1 or larger, got %d.\" % (\n width_multiple,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_multiple)\n left, right = _compute_axis_value(width, width_multiple)\n\n return top, right, bottom, left\n\n\ndef compute_paddings_to_reach_powers_of(arr, height_base, width_base,\n allow_zero_exponent=False):\n \"\"\"Compute paddings to reach powers of given base values.\n\n For given axis size ``S``, padded size ``S'`` (``S' >= S``) and base ``B``\n this function computes paddings that fulfill ``S' = B^E``, where ``E``\n is any exponent from the discrete interval ``[0 .. 
inf)``.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n height_base : None or int\n The desired base of the height.\n\n width_base : None or int\n The desired base of the width.\n\n allow_zero_exponent : bool, optional\n Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes\n with size ``0`` or ``1`` will be padded up to size ``B^0=1`` and\n axes with size ``1 < S <= B`` will be padded up to ``B^1=B``.\n If ``False``, the minimum output axis size is always at least ``B``.\n\n Returns\n -------\n tuple of int\n Required padding amounts to fulfill ``S' = B^E`` given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, base):\n if base is None:\n return 0, 0\n if axis_size == 0:\n to_pad = 1 if allow_zero_exponent else base\n elif axis_size <= base:\n to_pad = base - axis_size\n else:\n # log_{base}(axis_size) in numpy\n exponent = np.log(axis_size) / np.log(base)\n\n to_pad = (base ** int(np.ceil(exponent))) - axis_size\n\n return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_base is not None:\n assert height_base > 1, (\n \"Can only pad to base larger than 1, got %d.\" % (height_base,))\n if width_base is not None:\n assert width_base > 1, (\n \"Can only pad to base larger than 1, got %d.\" % (width_base,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_base)\n left, right = _compute_axis_value(width, width_base)\n\n return top, right, bottom, left\n\n\ndef compute_croppings_to_reach_powers_of(arr, height_base, width_base,\n allow_zero_exponent=False):\n \"\"\"Compute croppings to reach powers of given base values.\n\n For given axis size ``S``, cropped size ``S'`` (``S' <= S``) and base ``B``\n this function computes croppings that fulfill ``S' = B^E``, where ``E``\n is any exponent from the discrete interval ``[0 .. inf)``.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required cropping amounts are distributed per\n image axis.\n\n .. note::\n\n For axes where ``S == 0``, this function alwayws returns zeros as\n croppings.\n\n For axes where ``1 <= S < B`` see parameter `allow_zero_exponent`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n height_base : None or int\n The desired base of the height.\n\n width_base : None or int\n The desired base of the width.\n\n allow_zero_exponent : bool\n Whether ``E=0`` in ``S'=B^E`` is a valid value. 
If ``True``, axes\n with size ``1 <= S < B`` will be cropped to size ``B^0=1``.\n If ``False``, axes with sizes ``S < B`` will not be changed.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to fulfill ``S' = B^E`` given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, base):\n if base is None:\n return 0, 0\n if axis_size == 0:\n to_crop = 0\n elif axis_size < base:\n # crop down to B^0 = 1\n to_crop = axis_size - 1 if allow_zero_exponent else 0\n else:\n # log_{base}(axis_size) in numpy\n exponent = np.log(axis_size) / np.log(base)\n\n to_crop = axis_size - (base ** int(exponent))\n\n return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_base is not None:\n assert height_base > 1, (\n \"Can only crop to base larger than 1, got %d.\" % (height_base,))\n if width_base is not None:\n assert width_base > 1, (\n \"Can only crop to base larger than 1, got %d.\" % (width_base,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_base)\n left, right = _compute_axis_value(width, width_base)\n\n return top, right, bottom, left\n\n\[email protected](alt_func=\"Resize\",\n comment=\"Resize has the exactly same interface as Scale.\")\ndef Scale(*args, **kwargs):\n \"\"\"Augmenter that resizes images to specified heights and widths.\"\"\"\n # pylint: disable=invalid-name\n return Resize(*args, **kwargs)\n\n\nclass Resize(meta.Augmenter):\n \"\"\"Augmenter that resizes images to specified heights and widths.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or imgaug.parameters.StochasticParameter or dict\n The new size of the images.\n\n * If this has the string value ``keep``, the original height and\n width values will be kept (image is not resized).\n * If this is an ``int``, this value will always be used as the new\n height and width of the images.\n * If this is a ``float`` ``v``, then per image the image's height\n ``H`` and width ``W`` will be changed to ``H*v`` and ``W*v``.\n * If this is a ``tuple``, it is expected to have two entries\n ``(a, b)``. If at least one of these are ``float`` s, a value\n will be sampled from range ``[a, b]`` and used as the ``float``\n value to resize the image (see above). If both are ``int`` s, a\n value will be sampled from the discrete range ``[a..b]`` and\n used as the integer value to resize the image (see above).\n * If this is a ``list``, a random value from the ``list`` will be\n picked to resize the image. All values in the ``list`` must be\n ``int`` s or ``float`` s (no mixture is possible).\n * If this is a ``StochasticParameter``, then this parameter will\n first be queried once per image. The resulting value will be used\n for both height and width.\n * If this is a ``dict``, it may contain the keys ``height`` and\n ``width`` or the keys ``shorter-side`` and ``longer-side``. Each\n key may have the same datatypes as above and describes the\n scaling on x and y-axis or the shorter and longer axis,\n respectively. Both axis are sampled independently. Additionally,\n one of the keys may have the value ``keep-aspect-ratio``, which\n means that the respective side of the image will be resized so\n that the original aspect ratio is kept. 
This is useful when only\n resizing one image size by a pixel value (e.g. resize images to\n a height of ``64`` pixels and resize the width so that the\n overall aspect ratio is maintained).\n\n interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter, optional\n Interpolation to use.\n\n * If ``imgaug.ALL``, then a random interpolation from ``nearest``,\n ``linear``, ``area`` or ``cubic`` will be picked (per image).\n * If ``int``, then this interpolation will always be used.\n Expected to be any of the following:\n ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``,\n ``cv2.INTER_CUBIC``\n * If string, then this interpolation will always be used.\n Expected to be any of the following:\n ``nearest``, ``linear``, ``area``, ``cubic``\n * If ``list`` of ``int`` / ``str``, then a random one of the values\n will be picked per image as the interpolation.\n * If a ``StochasticParameter``, then this parameter will be\n queried per image and is expected to return an ``int`` or\n ``str``.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Resize(32)\n\n Resize all images to ``32x32`` pixels.\n\n >>> aug = iaa.Resize(0.5)\n\n Resize all images to ``50`` percent of their original size.\n\n >>> aug = iaa.Resize((16, 22))\n\n Resize all images to a random height and width within the discrete\n interval ``[16..22]`` (uniformly sampled per image).\n\n >>> aug = iaa.Resize((0.5, 0.75))\n\n Resize all any input image so that its height (``H``) and width (``W``)\n become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the\n interval ``[0.5, 0.75]``.\n\n >>> aug = iaa.Resize([16, 32, 64])\n\n Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels.\n\n >>> aug = iaa.Resize({\"height\": 32})\n\n Resize all images to a height of ``32`` pixels and keeps the original\n width.\n\n >>> aug = iaa.Resize({\"height\": 32, \"width\": 48})\n\n Resize all images to a height of ``32`` pixels and a width of ``48``.\n\n >>> aug = iaa.Resize({\"height\": 32, \"width\": \"keep-aspect-ratio\"})\n\n Resize all images to a height of ``32`` pixels and resizes the\n x-axis (width) so that the aspect ratio is maintained.\n\n >>> aug = iaa.Resize(\n >>> {\"shorter-side\": 224, \"longer-side\": \"keep-aspect-ratio\"})\n\n Resize all images to a height/width of ``224`` pixels, depending on which\n axis is shorter and resize the other axis so that the aspect ratio is\n maintained.\n\n >>> aug = iaa.Resize({\"height\": (0.5, 0.75), \"width\": [16, 32, 64]})\n\n Resize all images to a height of ``H*v``, where ``H`` is the original\n height and ``v`` is a random value sampled from the interval\n ``[0.5, 0.75]``. The width/x-axis of each image is resized to either\n ``16`` or ``32`` or ``64`` pixels.\n\n >>> aug = iaa.Resize(32, interpolation=[\"linear\", \"cubic\"])\n\n Resize all images to ``32x32`` pixels. 
Randomly use either ``linear``\n or ``cubic`` interpolation.\n\n \"\"\"\n\n def __init__(self, size, interpolation=\"cubic\",\n seed=None, name=None, **old_kwargs):\n super(Resize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n\n self.size, self.size_order = self._handle_size_arg(size, False)\n self.interpolation = self._handle_interpolation_arg(interpolation)\n\n @classmethod\n def _handle_size_arg(cls, size, subcall):\n def _dict_to_size_tuple(val1, val2):\n kaa = \"keep-aspect-ratio\"\n not_both_kaa = (val1 != kaa or val2 != kaa)\n assert not_both_kaa, (\n \"Expected at least one value to not be \\\"keep-aspect-ratio\\\", \"\n \"but got it two times.\")\n\n size_tuple = []\n for k in [val1, val2]:\n if k in [\"keep-aspect-ratio\", \"keep\"]:\n entry = iap.Deterministic(k)\n else:\n entry = cls._handle_size_arg(k, True)\n size_tuple.append(entry)\n return tuple(size_tuple)\n\n def _contains_any_key(dict_, keys):\n return any([key in dict_ for key in keys])\n\n # HW = height, width\n # SL = shorter, longer\n size_order = \"HW\"\n\n if size == \"keep\":\n result = iap.Deterministic(\"keep\")\n elif ia.is_single_number(size):\n assert size > 0, \"Expected only values > 0, got %s\" % (size,)\n result = iap.Deterministic(size)\n elif not subcall and isinstance(size, dict):\n if len(size.keys()) == 0:\n result = iap.Deterministic(\"keep\")\n elif _contains_any_key(size, [\"height\", \"width\"]):\n height = size.get(\"height\", \"keep\")\n width = size.get(\"width\", \"keep\")\n result = _dict_to_size_tuple(height, width)\n elif _contains_any_key(size, [\"shorter-side\", \"longer-side\"]):\n shorter = size.get(\"shorter-side\", \"keep\")\n longer = size.get(\"longer-side\", \"keep\")\n result = _dict_to_size_tuple(shorter, longer)\n size_order = \"SL\"\n else:\n raise ValueError(\n \"Expected dictionary containing no keys, \"\n \"the keys \\\"height\\\" and/or \\\"width\\\", \"\n \"or the keys \\\"shorter-side\\\" and/or \\\"longer-side\\\". 
\"\n \"Got keys: %s.\" % (str(size.keys()),))\n elif isinstance(size, tuple):\n assert len(size) == 2, (\n \"Expected size tuple to contain exactly 2 values, \"\n \"got %d.\" % (len(size),))\n assert size[0] > 0 and size[1] > 0, (\n \"Expected size tuple to only contain values >0, \"\n \"got %d and %d.\" % (size[0], size[1]))\n if ia.is_single_float(size[0]) or ia.is_single_float(size[1]):\n result = iap.Uniform(size[0], size[1])\n else:\n result = iap.DiscreteUniform(size[0], size[1])\n elif isinstance(size, list):\n if len(size) == 0:\n result = iap.Deterministic(\"keep\")\n else:\n all_int = all([ia.is_single_integer(v) for v in size])\n all_float = all([ia.is_single_float(v) for v in size])\n assert all_int or all_float, (\n \"Expected to get only integers or floats.\")\n assert all([v > 0 for v in size]), (\n \"Expected all values to be >0.\")\n result = iap.Choice(size)\n elif isinstance(size, iap.StochasticParameter):\n result = size\n else:\n raise ValueError(\n \"Expected number, tuple of two numbers, list of numbers, \"\n \"dictionary of form \"\n \"{'height': number/tuple/list/'keep-aspect-ratio'/'keep', \"\n \"'width': <analogous>}, dictionary of form \"\n \"{'shorter-side': number/tuple/list/'keep-aspect-ratio'/\"\n \"'keep', 'longer-side': <analogous>} \"\n \"or StochasticParameter, got %s.\" % (type(size),)\n )\n\n if subcall:\n return result\n return result, size_order\n\n @classmethod\n def _handle_interpolation_arg(cls, interpolation):\n if interpolation == ia.ALL:\n interpolation = iap.Choice(\n [\"nearest\", \"linear\", \"area\", \"cubic\"])\n elif ia.is_single_integer(interpolation):\n interpolation = iap.Deterministic(interpolation)\n elif ia.is_string(interpolation):\n interpolation = iap.Deterministic(interpolation)\n elif ia.is_iterable(interpolation):\n interpolation = iap.Choice(interpolation)\n elif isinstance(interpolation, iap.StochasticParameter):\n pass\n else:\n raise Exception(\n \"Expected int or string or iterable or StochasticParameter, \"\n \"got %s.\" % (type(interpolation),))\n return interpolation\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n nb_rows = batch.nb_rows\n samples = self._draw_samples(nb_rows, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n # TODO this uses the same interpolation as for images for heatmaps\n # while other augmenters resort to cubic\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, \"arr_0to1\", samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, \"arr\",\n (samples[0], samples[1], [None] * nb_rows))\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n input_was_array = False\n input_dtype = None\n if ia.is_np_array(images):\n input_was_array = True\n input_dtype = images.dtype\n\n samples_a, samples_b, samples_ip = samples\n result = []\n for i, image in enumerate(images):\n h, w = self._compute_height_width(image.shape, samples_a[i],\n samples_b[i], self.size_order)\n image_rs = ia.imresize_single_image(image, (h, w),\n 
interpolation=samples_ip[i])\n result.append(image_rs)\n\n if input_was_array:\n all_same_size = (len({image.shape for image in result}) == 1)\n if all_same_size:\n result = np.array(result, dtype=input_dtype)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples):\n result = []\n samples_h, samples_w, samples_ip = samples\n\n for i, augmentable in enumerate(augmentables):\n arr = getattr(augmentable, arr_attr_name)\n arr_shape = arr.shape\n img_shape = augmentable.shape\n h_img, w_img = self._compute_height_width(\n img_shape, samples_h[i], samples_w[i], self.size_order)\n h = int(np.round(h_img * (arr_shape[0] / img_shape[0])))\n w = int(np.round(w_img * (arr_shape[1] / img_shape[1])))\n h = max(h, 1)\n w = max(w, 1)\n if samples_ip[0] is not None:\n # TODO change this for heatmaps to always have cubic or\n # automatic interpolation?\n augmentable_resize = augmentable.resize(\n (h, w), interpolation=samples_ip[i])\n else:\n augmentable_resize = augmentable.resize((h, w))\n augmentable_resize.shape = (h_img, w_img) + img_shape[2:]\n result.append(augmentable_resize)\n\n return result\n\n def _augment_keypoints_by_samples(self, kpsois, samples):\n result = []\n samples_a, samples_b, _samples_ip = samples\n for i, kpsoi in enumerate(kpsois):\n h, w = self._compute_height_width(\n kpsoi.shape, samples_a[i], samples_b[i], self.size_order)\n new_shape = (h, w) + kpsoi.shape[2:]\n keypoints_on_image_rs = kpsoi.on_(new_shape)\n\n result.append(keypoints_on_image_rs)\n\n return result\n\n def _draw_samples(self, nb_images, random_state):\n rngs = random_state.duplicate(3)\n if isinstance(self.size, tuple):\n samples_h = self.size[0].draw_samples(nb_images,\n random_state=rngs[0])\n samples_w = self.size[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n samples_h = self.size.draw_samples(nb_images, random_state=rngs[0])\n samples_w = samples_h\n\n samples_ip = self.interpolation.draw_samples(nb_images,\n random_state=rngs[2])\n return samples_h, samples_w, samples_ip\n\n @classmethod\n def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order):\n imh, imw = image_shape[0:2]\n\n if size_order == 'SL':\n # size order: short, long\n if imh < imw:\n h, w = sample_a, sample_b\n else:\n w, h = sample_a, sample_b\n else:\n # size order: height, width\n h, w = sample_a, sample_b\n\n if ia.is_single_float(h):\n assert h > 0, \"Expected 'h' to be >0, got %.4f\" % (h,)\n h = int(np.round(imh * h))\n h = h if h > 0 else 1\n elif h == \"keep\":\n h = imh\n if ia.is_single_float(w):\n assert w > 0, \"Expected 'w' to be >0, got %.4f\" % (w,)\n w = int(np.round(imw * w))\n w = w if w > 0 else 1\n elif w == \"keep\":\n w = imw\n\n # at least the checks for keep-aspect-ratio must come after\n # the float checks, as they are dependent on the results\n # this is also why these are not written as elifs\n if h == \"keep-aspect-ratio\":\n h_per_w_orig = imh / imw\n h = int(np.round(w * h_per_w_orig))\n if w == \"keep-aspect-ratio\":\n w_per_h_orig = imw / imh\n w = int(np.round(h * w_per_h_orig))\n\n return h, w\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size, self.interpolation, self.size_order]\n\n\nclass _CropAndPadSamplingResult(object):\n def __init__(self, crop_top, crop_right, crop_bottom, crop_left,\n pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval):\n self.crop_top = crop_top\n self.crop_right = crop_right\n self.crop_bottom = crop_bottom\n self.crop_left 
= crop_left\n self.pad_top = pad_top\n self.pad_right = pad_right\n self.pad_bottom = pad_bottom\n self.pad_left = pad_left\n self.pad_mode = pad_mode\n self.pad_cval = pad_cval\n\n @property\n def croppings(self):\n \"\"\"Get absolute pixel amounts of croppings as a TRBL tuple.\"\"\"\n return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left\n\n @property\n def paddings(self):\n \"\"\"Get absolute pixel amounts of paddings as a TRBL tuple.\"\"\"\n return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left\n\n\nclass CropAndPad(meta.Augmenter):\n \"\"\"Crop/pad images by pixel amounts or fractions of image sizes.\n\n Cropping removes pixels at the sides (i.e. extracts a subimage from\n a given full image). Padding adds pixels to the sides (e.g. black pixels).\n\n This augmenter will never crop images below a height or width of ``1``.\n\n .. note::\n\n This augmenter automatically resizes images back to their original size\n after it has augmented them. To deactivate this, add the\n parameter ``keep_size=False``.\n\n Supported dtypes\n ----------------\n\n if (keep_size=False):\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested\n * ``uint64``: yes; tested\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested\n * ``int64``: yes; tested\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested\n * ``bool``: yes; tested\n\n if (keep_size=True):\n\n minimum of (\n ``imgaug.augmenters.size.CropAndPad(keep_size=False)``,\n :func:`~imgaug.imgaug.imresize_many_images`\n )\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop (negative values) or pad (positive values)\n on each side of the image. Either this or the parameter `percent` may\n be set, not both at the same time.\n\n * If ``None``, then pixel-based cropping/padding will not be used.\n * If ``int``, then that exact number of pixels will always be\n cropped/padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be cropped/padded by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``int`` (always\n crop/pad by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (crop/pad by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (crop/pad by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to crop/pad from that parameter).\n\n percent : None or number or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop (negative values) or pad (positive values)\n on each side of the image given as a *fraction* of the image\n height/width. E.g. 
if this is set to ``-0.1``, the augmenter will\n always crop away ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``(-1.0, inf)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based cropping/padding will not be\n used.\n * If ``number``, then that fraction will always be cropped/padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be cropped/padded by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always crop/pad by exactly that percent value), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (crop/pad by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (crop/pad by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to crop/pad from that parameter).\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n Padding mode to use. The available modes match the numpy padding modes,\n i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,\n ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes\n ``constant`` and ``linear_ramp`` use extra values, which are provided\n by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for\n more details.\n\n * If ``imgaug.ALL``, then a random mode from all available modes\n will be sampled per image.\n * If a ``str``, it will be used as the pad mode for all images.\n * If a ``list`` of ``str``, a random one of these will be sampled\n per image and used as the mode.\n * If ``StochasticParameter``, a random mode will be sampled from\n this parameter per image.\n\n pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional\n The constant value to use if the pad mode is ``constant`` or the end\n value to use if the mode is ``linear_ramp``.\n See :func:`~imgaug.imgaug.pad` for more details.\n\n * If ``number``, then that value will be used.\n * If a ``tuple`` of two ``number`` s and at least one of them is\n a ``float``, then a random number will be uniformly sampled per\n image from the continuous interval ``[a, b]`` and used as the\n value. If both ``number`` s are ``int`` s, the interval is\n discrete.\n * If a ``list`` of ``number``, then a random value will be chosen\n from the elements of the ``list`` and used as the value.\n * If ``StochasticParameter``, a random value will be sampled from\n that parameter per image.\n\n keep_size : bool, optional\n After cropping and padding, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the cropped/padded image will be\n resized to the input image's size, i.e. 
the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the crop/pad amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropAndPad(px=(-10, 0))\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[-10..0]``.\n\n >>> aug = iaa.CropAndPad(px=(0, 10))\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding happens by\n zero-padding, i.e. it adds black pixels (default setting).\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=\"edge\")\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding uses the\n ``edge`` mode from numpy's pad function, i.e. the pixel colors around\n the image sides are repeated.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=[\"constant\", \"edge\"])\n\n Similar to the previous example, but uses zero-padding (``constant``) for\n half of the images and ``edge`` padding for the other half.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))\n\n Similar to the previous example, but uses any available padding mode.\n In case the padding mode ends up being ``constant`` or ``linear_ramp``,\n and random intensity is uniformly sampled (once per image) from the\n discrete interval ``[0..255]`` and used as the intensity of the new\n pixels.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False)\n\n Pad each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False)\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the padded image back to the input image's size. This will increase\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Pad the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Pad the left and right analogously by\n a random value sampled from ``[0..5]``. Each value is always sampled\n independently.\n\n >>> aug = iaa.CropAndPad(percent=(0, 0.1))\n\n Pad each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. 
a sampled fraction of ``0.1`` for the top side would pad by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.CropAndPad(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Pads each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n >>> aug = iaa.CropAndPad(px=(-10, 10))\n\n Sample uniformly per image and side a value ``v`` from the discrete range\n ``[-10..10]``. Then either crop (negative sample) or pad (positive sample)\n the side by ``v`` pixels.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, pad_mode=\"constant\", pad_cval=0,\n keep_size=True, sample_independently=True,\n seed=None, name=None, **old_kwargs):\n # pylint: disable=invalid-name\n super(CropAndPad, self).__init__(\n seed=seed, name=name, **old_kwargs)\n\n self.mode, self.all_sides, self.top, self.right, self.bottom, \\\n self.left = self._handle_px_and_percent_args(px, percent)\n\n self.pad_mode = _handle_pad_mode_param(pad_mode)\n # TODO enable ALL here, like in e.g. Affine\n self.pad_cval = iap.handle_discrete_param(\n pad_cval, \"pad_cval\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True, allow_floats=True)\n\n self.keep_size = keep_size\n self.sample_independently = sample_independently\n\n # set these to None to use the same values as sampled for the\n # images (not tested)\n self._pad_mode_heatmaps = \"constant\"\n self._pad_mode_segmentation_maps = \"constant\"\n self._pad_cval_heatmaps = 0.0\n self._pad_cval_segmentation_maps = 0\n\n @classmethod\n def _handle_px_and_percent_args(cls, px, percent):\n # pylint: disable=invalid-name\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if px is None and percent is None:\n mode = \"noop\"\n elif px is not None and percent is not None:\n raise Exception(\"Can only pad by pixels or percent, not both.\")\n elif px is not None:\n mode = \"px\"\n all_sides, top, right, bottom, left = cls._handle_px_arg(px)\n else: # = elif percent is not None:\n mode = \"percent\"\n all_sides, top, right, bottom, left = cls._handle_percent_arg(\n percent)\n return mode, all_sides, top, right, bottom, left\n\n @classmethod\n def _handle_px_arg(cls, px):\n # pylint: disable=invalid-name\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if ia.is_single_integer(px):\n all_sides = iap.Deterministic(px)\n elif isinstance(px, tuple):\n assert len(px) in [2, 4], (\n \"Expected 'px' given as a tuple to contain 2 or 4 \"\n \"entries, got %d.\" % (len(px),))\n\n def handle_param(p):\n if ia.is_single_integer(p):\n return iap.Deterministic(p)\n if isinstance(p, tuple):\n assert len(p) == 2, (\n \"Expected tuple of 2 values, got %d.\" % (len(p)))\n only_ints = (\n ia.is_single_integer(p[0])\n and ia.is_single_integer(p[1]))\n assert only_ints, (\n \"Expected tuple of integers, got %s and %s.\" % (\n type(p[0]), type(p[1])))\n return iap.DiscreteUniform(p[0], p[1])\n if isinstance(p, list):\n assert len(p) > 0, (\n \"Expected non-empty list, but got empty one.\")\n assert all([ia.is_single_integer(val) for val in p]), (\n \"Expected list of ints, got types %s.\" % (\n \", \".join([str(type(v)) for v in p])))\n return iap.Choice(p)\n if isinstance(p, iap.StochasticParameter):\n return p\n raise Exception(\n \"Expected int, tuple of two ints, list of ints or \"\n \"StochasticParameter, got type %s.\" % (type(p),))\n\n if len(px) == 2:\n all_sides = handle_param(px)\n else: # len == 4\n top = handle_param(px[0])\n right = handle_param(px[1])\n bottom 
= handle_param(px[2])\n left = handle_param(px[3])\n elif isinstance(px, iap.StochasticParameter):\n top = right = bottom = left = px\n else:\n raise Exception(\n \"Expected int, tuple of 4 \"\n \"ints/tuples/lists/StochasticParameters or \"\n \"StochasticParameter, got type %s.\" % (type(px),))\n return all_sides, top, right, bottom, left\n\n @classmethod\n def _handle_percent_arg(cls, percent):\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if ia.is_single_number(percent):\n assert percent > -1.0, (\n \"Expected 'percent' to be >-1.0, got %.4f.\" % (percent,))\n all_sides = iap.Deterministic(percent)\n elif isinstance(percent, tuple):\n assert len(percent) in [2, 4], (\n \"Expected 'percent' given as a tuple to contain 2 or 4 \"\n \"entries, got %d.\" % (len(percent),))\n\n def handle_param(p):\n if ia.is_single_number(p):\n return iap.Deterministic(p)\n if isinstance(p, tuple):\n assert len(p) == 2, (\n \"Expected tuple of 2 values, got %d.\" % (len(p),))\n only_numbers = (\n ia.is_single_number(p[0])\n and ia.is_single_number(p[1]))\n assert only_numbers, (\n \"Expected tuple of numbers, got %s and %s.\" % (\n type(p[0]), type(p[1])))\n assert p[0] > -1.0 and p[1] > -1.0, (\n \"Expected tuple of values >-1.0, got %.4f and \"\n \"%.4f.\" % (p[0], p[1]))\n return iap.Uniform(p[0], p[1])\n if isinstance(p, list):\n assert len(p) > 0, (\n \"Expected non-empty list, but got empty one.\")\n assert all([ia.is_single_number(val) for val in p]), (\n \"Expected list of numbers, got types %s.\" % (\n \", \".join([str(type(v)) for v in p])))\n assert all([val > -1.0 for val in p]), (\n \"Expected list of values >-1.0, got values %s.\" % (\n \", \".join([\"%.4f\" % (v,) for v in p])))\n return iap.Choice(p)\n if isinstance(p, iap.StochasticParameter):\n return p\n raise Exception(\n \"Expected int, tuple of two ints, list of ints or \"\n \"StochasticParameter, got type %s.\" % (type(p),))\n\n if len(percent) == 2:\n all_sides = handle_param(percent)\n else: # len == 4\n top = handle_param(percent[0])\n right = handle_param(percent[1])\n bottom = handle_param(percent[2])\n left = handle_param(percent[3])\n elif isinstance(percent, iap.StochasticParameter):\n top = right = bottom = left = percent\n else:\n raise Exception(\n \"Expected number, tuple of 4 \"\n \"numbers/tuples/lists/StochasticParameters or \"\n \"StochasticParameter, got type %s.\" % (type(percent),))\n return all_sides, top, right, bottom, left\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n shapes = batch.get_rowwise_shapes()\n samples = self._draw_samples(random_state, shapes)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps,\n self._pad_mode_heatmaps, self._pad_cval_heatmaps,\n samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps,\n self._pad_mode_segmentation_maps,\n self._pad_cval_segmentation_maps, samples)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n 
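# Each image is processed individually: samples[i] carries the per-image\n        # crop/pad amounts (TRBL order) plus pad mode and cval; with\n        # keep_size=True the helper also resizes the result back to the\n        # original image shape.\n        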
for i, image in enumerate(images):\n samples_i = samples[i]\n\n image_cr_pa = _crop_and_pad_arr(\n image, samples_i.croppings, samples_i.paddings,\n samples_i.pad_mode, samples_i.pad_cval, self.keep_size)\n\n result.append(image_cr_pa)\n\n if ia.is_np_array(images):\n if self.keep_size:\n result = np.array(result, dtype=images.dtype)\n else:\n nb_shapes = len({image.shape for image in result})\n if nb_shapes == 1:\n result = np.array(result, dtype=images.dtype)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval,\n samples):\n result = []\n for i, augmentable in enumerate(augmentables):\n samples_img = samples[i]\n\n augmentable = _crop_and_pad_hms_or_segmaps_(\n augmentable,\n croppings_img=samples_img.croppings,\n paddings_img=samples_img.paddings,\n pad_mode=(pad_mode\n if pad_mode is not None\n else samples_img.pad_mode),\n pad_cval=(pad_cval\n if pad_cval is not None\n else samples_img.pad_cval),\n keep_size=self.keep_size\n )\n\n result.append(augmentable)\n\n return result\n\n def _augment_keypoints_by_samples(self, keypoints_on_images, samples):\n result = []\n for i, keypoints_on_image in enumerate(keypoints_on_images):\n samples_i = samples[i]\n\n kpsoi_aug = _crop_and_pad_kpsoi_(\n keypoints_on_image, croppings_img=samples_i.croppings,\n paddings_img=samples_i.paddings, keep_size=self.keep_size)\n result.append(kpsoi_aug)\n\n return result\n\n def _draw_samples(self, random_state, shapes):\n nb_rows = len(shapes)\n\n if self.mode == \"noop\":\n top = right = bottom = left = np.full((nb_rows,), 0,\n dtype=np.int32)\n else:\n if self.all_sides is not None:\n if self.sample_independently:\n samples = self.all_sides.draw_samples(\n (nb_rows, 4), random_state=random_state)\n top = samples[:, 0]\n right = samples[:, 1]\n bottom = samples[:, 2]\n left = samples[:, 3]\n else:\n sample = self.all_sides.draw_samples(\n (nb_rows,), random_state=random_state)\n top = right = bottom = left = sample\n else:\n top = self.top.draw_samples(\n (nb_rows,), random_state=random_state)\n right = self.right.draw_samples(\n (nb_rows,), random_state=random_state)\n bottom = self.bottom.draw_samples(\n (nb_rows,), random_state=random_state)\n left = self.left.draw_samples(\n (nb_rows,), random_state=random_state)\n\n if self.mode == \"px\":\n # no change necessary for pixel values\n pass\n elif self.mode == \"percent\":\n # percentage values have to be transformed to pixel values\n shapes_arr = np.array([shape[0:2] for shape in shapes],\n dtype=np.float32)\n heights = shapes_arr[:, 0]\n widths = shapes_arr[:, 1]\n top = np.round(heights * top).astype(np.int32)\n right = np.round(widths * right).astype(np.int32)\n bottom = np.round(heights * bottom).astype(np.int32)\n left = np.round(widths * left).astype(np.int32)\n else:\n raise Exception(\"Invalid mode\")\n\n def _only_above_zero(arr):\n arr = np.copy(arr)\n mask = (arr < 0)\n arr[mask] = 0\n return arr\n\n crop_top = _only_above_zero((-1) * top)\n crop_right = _only_above_zero((-1) * right)\n crop_bottom = _only_above_zero((-1) * bottom)\n crop_left = _only_above_zero((-1) * left)\n\n pad_top = _only_above_zero(top)\n pad_right = _only_above_zero(right)\n pad_bottom = _only_above_zero(bottom)\n pad_left = _only_above_zero(left)\n\n pad_mode = self.pad_mode.draw_samples((nb_rows,),\n random_state=random_state)\n pad_cval = self.pad_cval.draw_samples((nb_rows,),\n random_state=random_state)\n\n # TODO vectorize this part -- especially return only one instance\n result = []\n for i, shape in enumerate(shapes):\n 
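# Clamp each image's crop amounts so that at least 1px per axis remains\n            # (for non-empty inputs) and warn when the sampled values had to\n            # be reduced for that.\n            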
height, width = shape[0:2]\n crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \\\n _crop_prevent_zero_size(\n height, width,\n crop_top[i], crop_right[i], crop_bottom[i], crop_left[i])\n\n # add here any_crop_y to not warn in case of zero height/width\n # images\n any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0)\n if any_crop_y and crop_top_i + crop_bottom_i >= height:\n ia.warn(\n \"Expected generated crop amounts in CropAndPad for top and \"\n \"bottom image side to be less than the image's height, but \"\n \"got %d (top) and %d (bottom) vs. image height %d. This \"\n \"will result in an image with output height=1 (if input \"\n \"height was >=1) or output height=0 (if input height \"\n \"was 0).\" % (crop_top_i, crop_bottom_i, height))\n\n # add here any_crop_x to not warn in case of zero height/width\n # images\n any_crop_x = (crop_left_i > 0 or crop_right_i > 0)\n if any_crop_x and crop_left_i + crop_right_i >= width:\n ia.warn(\n \"Expected generated crop amounts in CropAndPad for left \"\n \"and right image side to be less than the image's width, \"\n \"but got %d (left) and %d (right) vs. image width %d. \"\n \"This will result in an image with output width=1 (if \"\n \"input width was >=1) or output width=0 (if input width \"\n \"was 0).\" % (crop_left_i, crop_right_i, width))\n\n result.append(\n _CropAndPadSamplingResult(\n crop_top=crop_top_i,\n crop_right=crop_right_i,\n crop_bottom=crop_bottom_i,\n crop_left=crop_left_i,\n pad_top=pad_top[i],\n pad_right=pad_right[i],\n pad_bottom=pad_bottom[i],\n pad_left=pad_left[i],\n pad_mode=pad_mode[i],\n pad_cval=pad_cval[i]))\n return result\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.all_sides, self.top, self.right, self.bottom, self.left,\n self.pad_mode, self.pad_cval]\n\n\nclass Pad(CropAndPad):\n \"\"\"Pad images, i.e. adds columns/rows of pixels to them.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropAndPad`.\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to pad on each side of the image.\n Expected value range is ``[0, inf)``.\n Either this or the parameter `percent` may be set, not both at the same\n time.\n\n * If ``None``, then pixel-based padding will not be used.\n * If ``int``, then that exact number of pixels will always be\n padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be padded by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. 
Each entry may be a single ``int`` (always\n pad by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (pad by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (pad by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to pad from that parameter).\n\n percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to pad\n on each side of the image given as a *fraction* of the image\n height/width. E.g. if this is set to ``0.1``, the augmenter will\n always pad ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``[0.0, inf)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based padding will not be\n used.\n * If ``number``, then that fraction will always be padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be padded by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always pad by exactly that fraction), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (pad by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (pad by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to pad from that parameter).\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n Padding mode to use. The available modes match the numpy padding modes,\n i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,\n ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes\n ``constant`` and ``linear_ramp`` use extra values, which are provided\n by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for\n more details.\n\n * If ``imgaug.ALL``, then a random mode from all available modes\n will be sampled per image.\n * If a ``str``, it will be used as the pad mode for all images.\n * If a ``list`` of ``str``, a random one of these will be sampled\n per image and used as the mode.\n * If ``StochasticParameter``, a random mode will be sampled from\n this parameter per image.\n\n pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional\n The constant value to use if the pad mode is ``constant`` or the end\n value to use if the mode is ``linear_ramp``.\n See :func:`~imgaug.imgaug.pad` for more details.\n\n * If ``number``, then that value will be used.\n * If a ``tuple`` of two ``number`` s and at least one of them is\n a ``float``, then a random number will be uniformly sampled per\n image from the continuous interval ``[a, b]`` and used as the\n value. 
If both ``number`` s are ``int`` s, the interval is\n discrete.\n * If a ``list`` of ``number``, then a random value will be chosen\n from the elements of the ``list`` and used as the value.\n * If ``StochasticParameter``, a random value will be sampled from\n that parameter per image.\n\n keep_size : bool, optional\n After padding, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the padded image will be\n resized to the input image's size, i.e. the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the pad amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Pad(px=(0, 10))\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding happens by\n zero-padding, i.e. it adds black pixels (default setting).\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=\"edge\")\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding uses the\n ``edge`` mode from numpy's pad function, i.e. the pixel colors around\n the image sides are repeated.\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=[\"constant\", \"edge\"])\n\n Similar to the previous example, but uses zero-padding (``constant``) for\n half of the images and ``edge`` padding for the other half.\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))\n\n Similar to the previous example, but uses any available padding mode.\n In case the padding mode ends up being ``constant`` or ``linear_ramp``,\n and random intensity is uniformly sampled (once per image) from the\n discrete interval ``[0..255]`` and used as the intensity of the new\n pixels.\n\n >>> aug = iaa.Pad(px=(0, 10), sample_independently=False)\n\n Pad each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.Pad(px=(0, 10), keep_size=False)\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the padded image back to the input image's size. This will increase\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Pad the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Pad the left and right analogously by\n a random value sampled from ``[0..5]``. 
Each value is always sampled\n independently.\n\n >>> aug = iaa.Pad(percent=(0, 0.1))\n\n Pad each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. a sampled fraction of ``0.1`` for the top side would pad by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.Pad(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Pads each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, pad_mode=\"constant\", pad_cval=0,\n keep_size=True, sample_independently=True,\n seed=None, name=None, **old_kwargs):\n def recursive_validate(value):\n if value is None:\n return value\n if ia.is_single_number(value):\n assert value >= 0, \"Expected value >0, got %.4f\" % (value,)\n return value\n if isinstance(value, iap.StochasticParameter):\n return value\n if isinstance(value, tuple):\n return tuple([recursive_validate(v_) for v_ in value])\n if isinstance(value, list):\n return [recursive_validate(v_) for v_ in value]\n raise Exception(\n \"Expected None or int or float or StochasticParameter or \"\n \"list or tuple, got %s.\" % (type(value),))\n\n px = recursive_validate(px)\n percent = recursive_validate(percent)\n\n super(Pad, self).__init__(\n px=px,\n percent=percent,\n pad_mode=pad_mode,\n pad_cval=pad_cval,\n keep_size=keep_size,\n sample_independently=sample_independently,\n seed=seed, name=name, **old_kwargs)\n\n\nclass Crop(CropAndPad):\n \"\"\"Crop images, i.e. remove columns/rows of pixels at the sides of images.\n\n This augmenter allows to extract smaller-sized subimages from given\n full-sized input images. The number of pixels to cut off may be defined\n in absolute values or as fractions of the image sizes.\n\n This augmenter will never crop images below a height or width of ``1``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropAndPad`.\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop on each side of the image.\n Expected value range is ``[0, inf)``.\n Either this or the parameter `percent` may be set, not both at the same\n time.\n\n * If ``None``, then pixel-based cropping will not be used.\n * If ``int``, then that exact number of pixels will always be\n cropped.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be cropped by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. 
Each entry may be a single ``int`` (always\n crop by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (crop by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (crop by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to crop from that parameter).\n\n percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop\n on each side of the image given as a *fraction* of the image\n height/width. E.g. if this is set to ``0.1``, the augmenter will\n always crop ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``[0.0, 1.0)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based cropping will not be\n used.\n * If ``number``, then that fraction will always be cropped.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be cropped by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always crop by exactly that fraction), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (crop by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (crop by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to crop from that parameter).\n\n keep_size : bool, optional\n After cropping, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the cropped image will be\n resized to the input image's size, i.e. the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the crop amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Crop(px=(0, 10))\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``.\n\n >>> aug = iaa.Crop(px=(0, 10), sample_independently=False)\n\n Crop each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. 
Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.Crop(px=(0, 10), keep_size=False)\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the cropped image back to the input image's size. This will decrease\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Crop the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Crop the left and right analogously by\n a random value sampled from ``[0..5]``. Each value is always sampled\n independently.\n\n >>> aug = iaa.Crop(percent=(0, 0.1))\n\n Crop each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. a sampled fraction of ``0.1`` for the top side would crop by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.Crop(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Crops each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, keep_size=True,\n sample_independently=True,\n seed=None, name=None, **old_kwargs):\n def recursive_negate(value):\n if value is None:\n return value\n if ia.is_single_number(value):\n assert value >= 0, \"Expected value >0, got %.4f.\" % (value,)\n return -value\n if isinstance(value, iap.StochasticParameter):\n return iap.Multiply(value, -1)\n if isinstance(value, tuple):\n return tuple([recursive_negate(v_) for v_ in value])\n if isinstance(value, list):\n return [recursive_negate(v_) for v_ in value]\n raise Exception(\n \"Expected None or int or float or StochasticParameter or \"\n \"list or tuple, got %s.\" % (type(value),))\n\n px = recursive_negate(px)\n percent = recursive_negate(percent)\n\n super(Crop, self).__init__(\n px=px,\n percent=percent,\n keep_size=keep_size,\n sample_independently=sample_independently,\n seed=seed, name=name, **old_kwargs)\n\n\n# TODO maybe rename this to PadToMinimumSize?\n# TODO this is very similar to CropAndPad, maybe add a way to generate crop\n# values imagewise via a callback in in CropAndPad?\n# TODO why is padding mode and cval here called pad_mode, pad_cval but in other\n# cases mode/cval?\nclass PadToFixedSize(meta.Augmenter):\n \"\"\"Pad images to a predefined minimum width and/or height.\n\n If images are already at the minimum width/height or are larger, they will\n not be padded. Note that this also means that images will not be cropped if\n they exceed the required width/height.\n\n The augmenter randomly decides per image how to distribute the required\n padding amounts over the image axis. E.g. if 2px have to be padded on the\n left or right to reach the required width, the augmenter will sometimes\n add 2px to the left and 0px to the right, sometimes add 2px to the right\n and 0px to the left and sometimes add 1px to both sides. 
Set `position`\n to ``center`` to prevent that.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n width : int or None\n Pad images up to this minimum width.\n If ``None``, image widths will not be altered.\n\n height : int or None\n Pad images up to this minimum height.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n Sets the center point of the padding, which determines how the\n required padding amounts are distributed to each side. For a ``tuple``\n ``(a, b)``, both ``a`` and ``b`` are expected to be in range\n ``[0.0, 1.0]`` and describe the fraction of padding applied to the\n left/right (low/high values for ``a``) and the fraction of padding\n applied to the top/bottom (low/high values for ``b``). A padding\n position at ``(0.5, 0.5)`` would be the center of the image and\n distribute the padding equally to all sides. A padding position at\n ``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the\n required padding to the bottom and left sides of the image so that\n the bottom left corner becomes more and more the new image\n center (depending on how much is padded).\n\n * If string ``uniform`` then the share of padding is randomly and\n uniformly distributed over each side.\n Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.\n * If string ``normal`` then the share of padding is distributed\n based on a normal distribution, leading to a focus on the\n center of the images.\n Equivalent to\n ``(Clip(Normal(0.5, 0.45/2), 0, 1),\n Clip(Normal(0.5, 0.45/2), 0, 1))``.\n * If string ``center`` then center point of the padding is\n identical to the image center.\n Equivalent to ``(0.5, 0.5)``.\n * If a string matching regex\n ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top``\n or ``center-bottom`` then sets the center point of the padding\n to the X-Y position matching that description.\n * If a tuple of float, then expected to have exactly two entries\n between ``0.0`` and ``1.0``, which will always be used as the\n combination the position matching (x, y) form.\n * If a ``StochasticParameter``, then that parameter will be queried\n once per call to ``augment_*()`` to get ``Nx2`` center positions\n in ``(x, y)`` form (with ``N`` the number of images).\n * If a ``tuple`` of ``StochasticParameter``, then expected to have\n exactly two entries that will both be queried per call to\n ``augment_*()``, each for ``(N,)`` values, to get the center\n positions. First parameter is used for ``x`` coordinates,\n second for ``y`` coordinates.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToFixedSize(width=100, height=100)\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do\n nothing for the other edges. The padding is randomly (uniformly)\n distributed over the sides, so that e.g. sometimes most of the required\n padding is applied to the left, sometimes to the right (analogous\n top/bottom).\n\n >>> aug = iaa.PadToFixedSize(width=100, height=100, position=\"center\")\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do\n nothing for the other image sides. The padding is always equally\n distributed over the left/right and top/bottom sides.\n\n >>> aug = iaa.PadToFixedSize(width=100, height=100, pad_mode=ia.ALL)\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels and\n use any possible padding mode for that. Do nothing for the other image\n sides. The padding is always equally distributed over the left/right and\n top/bottom sides.\n\n >>> aug = iaa.Sequential([\n >>> iaa.PadToFixedSize(width=100, height=100),\n >>> iaa.CropToFixedSize(width=100, height=100)\n >>> ])\n\n Pad images smaller than ``100x100`` until they reach ``100x100``.\n Analogously, crop images larger than ``100x100`` until they reach\n ``100x100``. The output images therefore have a fixed size of ``100x100``.\n\n \"\"\"\n\n def __init__(self, width, height, pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToFixedSize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.size = (width, height)\n\n # Position of where to pad. The further to the top left this is, the\n # larger the share of pixels that will be added to the top and left\n # sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) to only\n # add at the top and left, (Deterministic(1.0), Deterministic(1.0))\n # to only add at the bottom right. Analogously (0.5, 0.5) pads equally\n # on both axis, (0.0, 1.0) pads left and bottom, (1.0, 0.0) pads right\n # and top.\n self.position = _handle_position_parameter(position)\n\n self.pad_mode = _handle_pad_mode_param(pad_mode)\n # TODO enable ALL here like in eg Affine\n self.pad_cval = iap.handle_discrete_param(\n pad_cval, \"pad_cval\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True, allow_floats=True)\n\n # set these to None to use the same values as sampled for the\n # images (not tested)\n self._pad_mode_heatmaps = \"constant\"\n self._pad_mode_segmentation_maps = \"constant\"\n self._pad_cval_heatmaps = 0.0\n self._pad_cval_segmentation_maps = 0\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n # Providing the whole batch to _draw_samples() would not be necessary\n # for this augmenter. The number of rows would be sufficient. 
This\n # formulation however enables derived augmenters to use rowwise shapes\n # without having to compute them here for this augmenter.\n samples = self._draw_samples(batch, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, samples, self._pad_mode_heatmaps,\n self._pad_cval_heatmaps)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, samples, self._pad_mode_segmentation_maps,\n self._pad_cval_segmentation_maps)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples\n for i, (image, size) in enumerate(zip(images, sizes)):\n width_min, height_min = size\n height_image, width_image = image.shape[:2]\n paddings = self._calculate_paddings(height_image, width_image,\n height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n image = _crop_and_pad_arr(\n image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i],\n keep_size=False)\n\n result.append(image)\n\n # TODO result is always a list. Should this be converted to an array\n # if possible (not guaranteed that all images have same size,\n # some might have been larger than desired height/width)\n return result\n\n def _augment_keypoints_by_samples(self, keypoints_on_images, samples):\n result = []\n sizes, pad_xs, pad_ys, _, _ = samples\n for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)):\n width_min, height_min = size\n height_image, width_image = kpsoi.shape[:2]\n paddings_img = self._calculate_paddings(height_image, width_image,\n height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n keypoints_padded = _crop_and_pad_kpsoi_(\n kpsoi, (0, 0, 0, 0), paddings_img,\n keep_size=False)\n\n result.append(keypoints_padded)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, samples, pad_mode,\n pad_cval):\n sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples\n\n for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):\n width_min, height_min = size\n height_img, width_img = augmentable.shape[:2]\n paddings_img = self._calculate_paddings(\n height_img, width_img, height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n # TODO for the previous method (and likely the new/current one\n # too):\n # for 30x30 padded to 32x32 with 15x15 heatmaps this results\n # in paddings of 1 on each side (assuming\n # position=(0.5, 0.5)) giving 17x17 heatmaps when they should\n # be 16x16. Error is due to each side getting projected 0.5\n # padding which is rounded to 1. 
This doesn't seem right.\n augmentables[i] = _crop_and_pad_hms_or_segmaps_(\n augmentables[i],\n (0, 0, 0, 0),\n paddings_img,\n pad_mode=pad_mode if pad_mode is not None else pad_modes[i],\n pad_cval=pad_cval if pad_cval is not None else pad_cvals[i],\n keep_size=False)\n\n return augmentables\n\n def _draw_samples(self, batch, random_state):\n nb_images = batch.nb_rows\n rngs = random_state.duplicate(4)\n\n if isinstance(self.position, tuple):\n pad_xs = self.position[0].draw_samples(nb_images,\n random_state=rngs[0])\n pad_ys = self.position[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n pads = self.position.draw_samples((nb_images, 2),\n random_state=rngs[0])\n pad_xs = pads[:, 0]\n pad_ys = pads[:, 1]\n\n pad_modes = self.pad_mode.draw_samples(nb_images,\n random_state=rngs[2])\n pad_cvals = self.pad_cval.draw_samples(nb_images,\n random_state=rngs[3])\n\n # We return here the sizes even though they are static as it allows\n # derived augmenters to define image-specific heights/widths.\n return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals\n\n @classmethod\n def _calculate_paddings(cls, height_image, width_image,\n height_min, width_min, pad_xs_i, pad_ys_i):\n pad_top = 0\n pad_right = 0\n pad_bottom = 0\n pad_left = 0\n\n if width_min is not None and width_image < width_min:\n pad_total_x = width_min - width_image\n pad_left = int((1-pad_xs_i) * pad_total_x)\n pad_right = pad_total_x - pad_left\n\n if height_min is not None and height_image < height_min:\n pad_total_y = height_min - height_image\n pad_top = int((1-pad_ys_i) * pad_total_y)\n pad_bottom = pad_total_y - pad_top\n\n return pad_top, pad_right, pad_bottom, pad_left\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size[0], self.size[1], self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToFixedSize(PadToFixedSize):\n \"\"\"Pad images equally on all sides up to given minimum heights/widths.\n\n This is an alias for :class:`~imgaug.augmenters.size.PadToFixedSize`\n with ``position=\"center\"``. It spreads the pad amounts equally over\n all image sides, while :class:`~imgaug.augmenters.size.PadToFixedSize`\n by defaults spreads them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width : int or None\n See :func:`PadToFixedSize.__init__`.\n\n height : int or None\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToFixedSize(height=20, width=30)\n\n Create an augmenter that pads images up to ``20x30``, with the padded\n rows added *equally* on the top and bottom (analogous for the padded\n columns).\n\n \"\"\"\n\n def __init__(self, width, height, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToFixedSize, self).__init__(\n width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\n# TODO maybe rename this to CropToMaximumSize ?\n# TODO this is very similar to CropAndPad, maybe add a way to generate crop\n# values imagewise via a callback in in CropAndPad?\n# TODO add crop() function in imgaug, similar to pad\nclass CropToFixedSize(meta.Augmenter):\n \"\"\"Crop images down to a predefined maximum width and/or height.\n\n If images are already at the maximum width/height or are smaller, they\n will not be cropped. Note that this also means that images will not be\n padded if they are below the required width/height.\n\n The augmenter randomly decides per image how to distribute the required\n cropping amounts over the image axis. E.g. if 2px have to be cropped on\n the left or right to reach the required width, the augmenter will\n sometimes remove 2px from the left and 0px from the right, sometimes\n remove 2px from the right and 0px from the left and sometimes remove 1px\n from both sides. Set `position` to ``center`` to prevent that.\n\n Supported dtypes\n ----------------\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested\n * ``uint64``: yes; tested\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested\n * ``int64``: yes; tested\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested\n * ``bool``: yes; tested\n\n Parameters\n ----------\n width : int or None\n Crop images down to this maximum width.\n If ``None``, image widths will not be altered.\n\n height : int or None\n Crop images down to this maximum height.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n Sets the center point of the cropping, which determines how the\n required cropping amounts are distributed to each side. For a\n ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in\n range ``[0.0, 1.0]`` and describe the fraction of cropping applied\n to the left/right (low/high values for ``a``) and the fraction\n of cropping applied to the top/bottom (low/high values for ``b``).\n A cropping position at ``(0.5, 0.5)`` would be the center of the\n image and distribute the cropping equally over all sides. 
A cropping\n position at ``(1.0, 0.0)`` would be the right-top and would apply\n 100% of the required cropping to the right and top sides of the image.\n\n * If string ``uniform`` then the share of cropping is randomly\n and uniformly distributed over each side.\n Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.\n * If string ``normal`` then the share of cropping is distributed\n based on a normal distribution, leading to a focus on the center\n of the images.\n Equivalent to\n ``(Clip(Normal(0.5, 0.45/2), 0, 1),\n Clip(Normal(0.5, 0.45/2), 0, 1))``.\n * If string ``center`` then center point of the cropping is\n identical to the image center.\n Equivalent to ``(0.5, 0.5)``.\n * If a string matching regex\n ``^(left|center|right)-(top|center|bottom)$``, e.g.\n ``left-top`` or ``center-bottom`` then sets the center point of\n the cropping to the X-Y position matching that description.\n * If a tuple of float, then expected to have exactly two entries\n between ``0.0`` and ``1.0``, which will always be used as the\n combination the position matching (x, y) form.\n * If a ``StochasticParameter``, then that parameter will be queried\n once per call to ``augment_*()`` to get ``Nx2`` center positions\n in ``(x, y)`` form (with ``N`` the number of images).\n * If a ``tuple`` of ``StochasticParameter``, then expected to have\n exactly two entries that will both be queried per call to\n ``augment_*()``, each for ``(N,)`` values, to get the center\n positions. First parameter is used for ``x`` coordinates,\n second for ``y`` coordinates.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToFixedSize(width=100, height=100)\n\n For image sides larger than ``100`` pixels, crop to ``100`` pixels. Do\n nothing for the other sides. The cropping amounts are randomly (and\n uniformly) distributed over the sides of the image.\n\n >>> aug = iaa.CropToFixedSize(width=100, height=100, position=\"center\")\n\n For sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing\n for the other sides. The cropping amounts are always equally distributed\n over the left/right sides of the image (and analogously for top/bottom).\n\n >>> aug = iaa.Sequential([\n >>> iaa.PadToFixedSize(width=100, height=100),\n >>> iaa.CropToFixedSize(width=100, height=100)\n >>> ])\n\n Pad images smaller than ``100x100`` until they reach ``100x100``.\n Analogously, crop images larger than ``100x100`` until they reach\n ``100x100``. The output images therefore have a fixed size of ``100x100``.\n\n \"\"\"\n\n def __init__(self, width, height, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToFixedSize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.size = (width, height)\n\n # Position of where to crop. The further to the top left this is,\n # the larger the share of pixels that will be cropped from the top\n # and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0))\n # to only crop at the top and left,\n # (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom\n # right. 
Analogously (0.5, 0.5) crops equally on both axis,\n # (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top.\n self.position = _handle_position_parameter(position)\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n # Providing the whole batch to _draw_samples() would not be necessary\n # for this augmenter. The number of rows would be sufficient. This\n # formulation however enables derived augmenters to use rowwise shapes\n # without having to compute them here for this augmenter.\n samples = self._draw_samples(batch, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, samples)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n sizes, offset_xs, offset_ys = samples\n for i, (image, size) in enumerate(zip(images, sizes)):\n w, h = size\n height_image, width_image = image.shape[0:2]\n\n croppings = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0),\n keep_size=False)\n\n result.append(image_cropped)\n\n return result\n\n def _augment_keypoints_by_samples(self, kpsois, samples):\n result = []\n sizes, offset_xs, offset_ys = samples\n for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)):\n w, h = size\n height_image, width_image = kpsoi.shape[0:2]\n\n croppings_img = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n kpsoi_cropped = _crop_and_pad_kpsoi_(\n kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False)\n\n result.append(kpsoi_cropped)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, samples):\n sizes, offset_xs, offset_ys = samples\n for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):\n w, h = size\n height_image, width_image = augmentable.shape[0:2]\n\n croppings_img = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n augmentables[i] = _crop_and_pad_hms_or_segmaps_(\n augmentable, croppings_img, (0, 0, 0, 0), keep_size=False)\n\n return augmentables\n\n @classmethod\n def _calculate_crop_amounts(cls, height_image, width_image,\n height_max, width_max,\n offset_y, offset_x):\n crop_top = 0\n crop_right = 0\n crop_bottom = 0\n crop_left = 0\n\n if height_max is not None and height_image > height_max:\n crop_top = int(offset_y * (height_image - height_max))\n crop_bottom = height_image - height_max - crop_top\n\n if width_max is not None and width_image > width_max:\n crop_left = int(offset_x * (width_image - width_max))\n crop_right = width_image - width_max - crop_left\n\n return crop_top, crop_right, crop_bottom, crop_left\n\n def _draw_samples(self, batch, random_state):\n nb_images = batch.nb_rows\n rngs = random_state.duplicate(2)\n\n if isinstance(self.position, tuple):\n offset_xs = self.position[0].draw_samples(nb_images,\n 
random_state=rngs[0])\n offset_ys = self.position[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n offsets = self.position.draw_samples((nb_images, 2),\n random_state=rngs[0])\n offset_xs = offsets[:, 0]\n offset_ys = offsets[:, 1]\n\n offset_xs = 1.0 - offset_xs\n offset_ys = 1.0 - offset_ys\n\n # We return here the sizes even though they are static as it allows\n # derived augmenters to define image-specific heights/widths.\n return [self.size] * nb_images, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size[0], self.size[1], self.position]\n\n\nclass CenterCropToFixedSize(CropToFixedSize):\n \"\"\"Take a crop from the center of each image.\n\n This is an alias for :class:`~imgaug.augmenters.size.CropToFixedSize` with\n ``position=\"center\"``.\n\n .. note::\n\n If images already have a width and/or height below the provided\n width and/or height then this augmenter will do nothing for the\n respective axis. Hence, resulting images can be smaller than the\n provided axis sizes.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width : int or None\n See :func:`CropToFixedSize.__init__`.\n\n height : int or None\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> crop = iaa.CenterCropToFixedSize(height=20, width=10)\n\n Create an augmenter that takes ``20x10`` sized crops from the center of\n images.\n\n \"\"\"\n\n def __init__(self, width, height,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToFixedSize, self).__init__(\n width=width, height=height, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToMultiplesOf(CropToFixedSize):\n \"\"\"Crop images down until their height/width is a multiple of a value.\n\n .. note::\n\n For a given axis size ``A`` and multiple ``M``, if ``A`` is in the\n interval ``[0 .. M]``, the axis will not be changed.\n As a result, this augmenter can still produce axis sizes that are\n not multiples of the given values.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n Multiple for the width. Images will be cropped down until their\n width is a multiple of this value.\n If ``None``, image widths will not be altered.\n\n height_multiple : int or None\n Multiple for the height. 
Images will be cropped down until their\n height is a multiple of this value.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that crops images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 6, 12, 18, ...).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToMultiplesOf, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_multiple = width_multiple\n self.height_multiple = height_multiple\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToMultiplesOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n croppings = compute_croppings_to_reach_multiples_of(\n shape,\n height_multiple=self.height_multiple,\n width_multiple=self.width_multiple)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_multiple, self.height_multiple, self.position]\n\n\nclass CenterCropToMultiplesOf(CropToMultiplesOf):\n \"\"\"Crop images equally on all sides until H/W are multiples of given values.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToMultiplesOf`,\n but uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToMultiplesOf` by default spreads\n them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n See :func:`CropToMultiplesOf.__init__`.\n\n height_multiple : int or None\n See :func:`CropToMultiplesOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that crops images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 6, 12, 18, ...).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToMultiplesOf, self).__init__(\n width_multiple=width_multiple,\n height_multiple=height_multiple,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToMultiplesOf(PadToFixedSize):\n \"\"\"Pad images until their height/width is a multiple of a value.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n Multiple for the width. Images will be padded until their\n width is a multiple of this value.\n If ``None``, image widths will not be altered.\n\n height_multiple : int or None\n Multiple for the height. Images will be padded until their\n height is a multiple of this value.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that pads images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 
6, 12, 18, ...).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToMultiplesOf, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_multiple = width_multiple\n self.height_multiple = height_multiple\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToMultiplesOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n paddings = compute_paddings_to_reach_multiples_of(\n shape,\n height_multiple=self.height_multiple,\n width_multiple=self.width_multiple)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_multiple, self.height_multiple,\n self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToMultiplesOf(PadToMultiplesOf):\n \"\"\"Pad images equally on all sides until H/W are multiples of given values.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but\n uses ``position=\"center\"`` by default, which spreads the pad amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n See :func:`PadToMultiplesOf.__init__`.\n\n height_multiple : int or None\n See :func:`PadToMultiplesOf.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that pads images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 
6, 12, 18, ...).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToMultiplesOf, self).__init__(\n width_multiple=width_multiple,\n height_multiple=height_multiple,\n pad_mode=pad_mode,\n pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToPowersOf(CropToFixedSize):\n \"\"\"Crop images until their height/width is a power of a base.\n\n This augmenter removes pixels from an axis with size ``S`` leading to the\n new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a\n provided base (e.g. ``2``) and ``E`` is an exponent from the discrete\n interval ``[1 .. inf)``.\n\n .. note::\n\n This augmenter does nothing for axes with size less than ``B^1 = B``.\n If you have images with ``S < B^1``, it is recommended\n to combine this augmenter with a padding augmenter that pads each\n axis up to ``B``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n Base for the width. Images will be cropped down until their\n width fulfills ``width' = width_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image widths will not be altered.\n\n height_base : int or None\n Base for the height. Images will be cropped down until their\n height fulfills ``height' = height_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that crops each image down to powers of ``3`` along\n the y-axis (i.e. 3, 9, 27, ...) 
and powers of ``2`` along the x-axis (i.e.\n 2, 4, 8, 16, ...).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToPowersOf, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_base = width_base\n self.height_base = height_base\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToPowersOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n croppings = compute_croppings_to_reach_powers_of(\n shape,\n height_base=self.height_base,\n width_base=self.width_base)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_base, self.height_base, self.position]\n\n\nclass CenterCropToPowersOf(CropToPowersOf):\n \"\"\"Crop images equally on all sides until H/W is a power of a base.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but\n uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n See :func:`CropToPowersOf.__init__`.\n\n height_base : int or None\n See :func:`CropToPowersOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that crops each image down to powers of ``3`` along\n the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.\n 2, 4, 8, 16, ...).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToPowersOf, self).__init__(\n width_base=width_base, height_base=height_base, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToPowersOf(PadToFixedSize):\n \"\"\"Pad images until their height/width is a power of a base.\n\n This augmenter adds pixels to an axis with size ``S`` leading to the\n new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a\n provided base (e.g. ``2``) and ``E`` is an exponent from the discrete\n interval ``[1 .. inf)``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n Base for the width. 
Images will be padded down until their\n width fulfills ``width' = width_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image widths will not be altered.\n\n height_base : int or None\n Base for the height. Images will be padded until their\n height fulfills ``height' = height_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that pads each image to powers of ``3`` along the\n y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,\n 4, 8, 16, ...).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToPowersOf, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_base = width_base\n self.height_base = height_base\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToPowersOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n paddings = compute_paddings_to_reach_powers_of(\n shape,\n height_base=self.height_base,\n width_base=self.width_base)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_base, self.height_base,\n self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToPowersOf(PadToPowersOf):\n \"\"\"Pad images equally on all sides until H/W is a power of a base.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToPowersOf`, but uses\n ``position=\"center\"`` by default, which spreads the pad amounts equally\n over all image sides, while :class:`~imgaug.augmenters.size.PadToPowersOf`\n by default spreads them randomly.\n\n Supported dtypes\n ----------------\n\n See 
:class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n See :func:`PadToPowersOf.__init__`.\n\n height_base : int or None\n See :func:`PadToPowersOf.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that pads each image to powers of ``3`` along the\n y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,\n 4, 8, 16, ...).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToPowersOf, self).__init__(\n width_base=width_base, height_base=height_base,\n pad_mode=pad_mode, pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToAspectRatio(CropToFixedSize):\n \"\"\"Crop images until their width/height matches an aspect ratio.\n\n This augmenter removes either rows or columns until the image reaches\n the desired aspect ratio given in ``width / height``. The cropping\n operation is stopped once the desired aspect ratio is reached or the image\n side to crop reaches a size of ``1``. If any side of the image starts\n with a size of ``0``, the image will not be changed.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n The desired aspect ratio, given as ``width/height``. E.g. a ratio\n of ``2.0`` denotes an image that is twice as wide as it is high.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToAspectRatio(2.0)\n\n Create an augmenter that crops each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. 
two times as many pixels along the\n x-axis than the y-axis).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToAspectRatio, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.aspect_ratio = aspect_ratio\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToAspectRatio, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n\n if height == 0 or width == 0:\n croppings = (0, 0, 0, 0)\n else:\n croppings = compute_croppings_to_reach_aspect_ratio(\n shape,\n aspect_ratio=self.aspect_ratio)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.aspect_ratio, self.position]\n\n\nclass CenterCropToAspectRatio(CropToAspectRatio):\n \"\"\"Crop images equally on all sides until they reach an aspect ratio.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToAspectRatio`, but\n uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads\n them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n See :func:`CropToAspectRatio.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToAspectRatio(2.0)\n\n Create an augmenter that crops each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToAspectRatio, self).__init__(\n aspect_ratio=aspect_ratio, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToAspectRatio(PadToFixedSize):\n \"\"\"Pad images until their width/height matches an aspect ratio.\n\n This augmenter adds either rows or columns until the image reaches\n the desired aspect ratio given in ``width / height``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n The desired aspect ratio, given as ``width/height``. E.g. 
a ratio\n of ``2.0`` denotes an image that is twice as wide as it is high.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToAspectRatio(2.0)\n\n Create an augmenter that pads each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToAspectRatio, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.aspect_ratio = aspect_ratio\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToAspectRatio, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n\n paddings = compute_paddings_to_reach_aspect_ratio(\n shape,\n aspect_ratio=self.aspect_ratio)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.aspect_ratio, self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToAspectRatio(PadToAspectRatio):\n \"\"\"Pad images equally on all sides until H/W matches an aspect ratio.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToAspectRatio`, but\n uses ``position=\"center\"`` by default, which spreads the pad amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.PadToAspectRatio` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n See :func:`PadToAspectRatio.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n pad_cval : number or tuple of number 
or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToAspectRatio(2.0)\n\n Create an augmenter that pads each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToAspectRatio, self).__init__(\n aspect_ratio=aspect_ratio, position=\"center\",\n pad_mode=pad_mode, pad_cval=pad_cval,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToSquare(CropToAspectRatio):\n \"\"\"Crop images until their width and height are identical.\n\n This is identical to :class:`~imgaug.augmenters.size.CropToAspectRatio`\n with ``aspect_ratio=1.0``.\n\n Images with axis sizes of ``0`` will not be altered.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToSquare()\n\n Create an augmenter that crops each image until it is square, i.e. 
height\n and width match.\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToSquare, self).__init__(\n aspect_ratio=1.0, position=position,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CenterCropToSquare(CropToSquare):\n \"\"\"Crop images equally on all sides until their height/width are identical.\n\n In contrast to :class:`~imgaug.augmenters.size.CropToSquare`, this\n augmenter always tries to spread the columns/rows to remove equally over\n both sides of the respective axis to be cropped.\n :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads the\n croppings randomly.\n\n This augmenter is identical to :class:`~imgaug.augmenters.size.CropToSquare`\n with ``position=\"center\"``, and thereby the same as\n :class:`~imgaug.augmenters.size.CropToAspectRatio` with\n ``aspect_ratio=1.0, position=\"center\"``.\n\n Images with axis sizes of ``0`` will not be altered.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToSquare()\n\n Create an augmenter that crops each image until its square, i.e. height\n and width match.\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, seed=None, name=None, **old_kwargs):\n super(CenterCropToSquare, self).__init__(\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToSquare(PadToAspectRatio):\n \"\"\"Pad images until their height and width are identical.\n\n This augmenter is identical to\n :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToSquare()\n\n Create an augmenter that pads each image until its square, i.e. height\n and width match.\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, pad_mode=\"constant\", pad_cval=0, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToSquare, self).__init__(\n aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CenterPadToSquare(PadToSquare):\n \"\"\"Pad images equally on all sides until their height & width are identical.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToSquare`, but uses\n ``position=\"center\"`` by default, which spreads the pad amounts equally\n over all image sides, while :class:`~imgaug.augmenters.size.PadToSquare`\n by default spreads them randomly. This augmenter is thus also identical to\n :class:`~imgaug.augmenters.size.PadToAspectRatio` with\n ``aspect_ratio=1.0, position=\"center\"``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n deterministic : bool, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToSquare()\n\n Create an augmenter that pads each image until its square, i.e. height\n and width match.\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToSquare, self).__init__(\n pad_mode=pad_mode, pad_cval=pad_cval, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass KeepSizeByResize(meta.Augmenter):\n \"\"\"Resize images back to their input sizes after applying child augmenters.\n\n Combining this with e.g. a cropping augmenter as the child will lead to\n images being resized back to the input size after the crop operation was\n applied. Some augmenters have a ``keep_size`` argument that achieves the\n same goal (if set to ``True``), though this augmenter offers control over\n the interpolation mode and which augmentables to resize (images, heatmaps,\n segmentation maps).\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n children : Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional\n One or more augmenters to apply to images. 
These augmenters may change\n the image size.\n\n interpolation : KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing images.\n Can take any value that :func:`~imgaug.imgaug.imresize_single_image`\n accepts, e.g. ``cubic``.\n\n * If this is ``KeepSizeByResize.NO_RESIZE`` then images will not\n be resized.\n * If this is a single ``str``, it is expected to have one of the\n following values: ``nearest``, ``linear``, ``area``, ``cubic``.\n * If this is a single integer, it is expected to have a value\n identical to one of: ``cv2.INTER_NEAREST``,\n ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC``.\n * If this is a ``list`` of ``str`` or ``int``, it is expected that\n each ``str``/``int`` is one of the above mentioned valid ones.\n A random one of these values will be sampled per image.\n * If this is a ``StochasticParameter``, it will be queried once per\n call to ``_augment_images()`` and must return ``N`` ``str`` s or\n ``int`` s (matching the above mentioned ones) for ``N`` images.\n\n interpolation_heatmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing heatmaps.\n Meaning and valid values are similar to `interpolation`. This\n parameter may also take the value ``KeepSizeByResize.SAME_AS_IMAGES``,\n which will lead to copying the interpolation modes used for the\n corresponding images. The value may also be returned on a per-image\n basis if `interpolation_heatmaps` is provided as a\n ``StochasticParameter`` or may be one possible value if it is\n provided as a ``list`` of ``str``.\n\n interpolation_segmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing segmentation maps.\n Similar to `interpolation_heatmaps`.\n **Note**: For segmentation maps, only ``NO_RESIZE`` or nearest\n neighbour interpolation (i.e. ``nearest``) make sense in the vast\n majority of all cases.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False)\n >>> )\n\n Apply random cropping to input images, then resize them back to their\n original input sizes. 
The resizing is done using this augmenter instead\n of the corresponding internal resizing operation in ``Crop``.\n\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False),\n >>> interpolation=\"nearest\"\n >>> )\n\n Same as in the previous example, but images are now always resized using\n nearest neighbour interpolation.\n\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False),\n >>> interpolation=[\"nearest\", \"cubic\"],\n >>> interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES,\n >>> interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE\n >>> )\n\n Similar to the previous example, but images are now sometimes resized\n using linear interpolation and sometimes using nearest neighbour\n interpolation. Heatmaps are resized using the same interpolation as was\n used for the corresponding image. Segmentation maps are not resized and\n will therefore remain at their size after cropping.\n\n \"\"\"\n\n NO_RESIZE = \"NO_RESIZE\"\n SAME_AS_IMAGES = \"SAME_AS_IMAGES\"\n\n def __init__(self, children,\n interpolation=\"cubic\",\n interpolation_heatmaps=SAME_AS_IMAGES,\n interpolation_segmaps=\"nearest\",\n seed=None, name=None, **old_kwargs):\n super(KeepSizeByResize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.children = children\n\n def _validate_param(val, allow_same_as_images):\n valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \\\n + [KeepSizeByResize.NO_RESIZE]\n if allow_same_as_images and val == self.SAME_AS_IMAGES:\n return self.SAME_AS_IMAGES\n if val in valid_ips_and_resize:\n return iap.Deterministic(val)\n if isinstance(val, list):\n assert len(val) > 0, (\n \"Expected a list of at least one interpolation method. \"\n \"Got an empty list.\")\n valid_ips_here = valid_ips_and_resize\n if allow_same_as_images:\n valid_ips_here = valid_ips_here \\\n + [KeepSizeByResize.SAME_AS_IMAGES]\n only_valid_ips = all([ip in valid_ips_here for ip in val])\n assert only_valid_ips, (\n \"Expected each interpolations to be one of '%s', got \"\n \"'%s'.\" % (str(valid_ips_here), str(val)))\n return iap.Choice(val)\n if isinstance(val, iap.StochasticParameter):\n return val\n raise Exception(\n \"Expected interpolation to be one of '%s' or a list of \"\n \"these values or a StochasticParameter. 
Got type %s.\" % (\n str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val)))\n\n self.children = meta.handle_children_list(children, self.name, \"then\")\n self.interpolation = _validate_param(interpolation, False)\n self.interpolation_heatmaps = _validate_param(interpolation_heatmaps,\n True)\n self.interpolation_segmaps = _validate_param(interpolation_segmaps,\n True)\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n with batch.propagation_hooks_ctx(self, hooks, parents):\n images_were_array = None\n if batch.images is not None:\n images_were_array = ia.is_np_array(batch.images)\n shapes_orig = self._get_shapes(batch)\n\n samples = self._draw_samples(batch.nb_rows, random_state)\n\n batch = self.children.augment_batch_(\n batch, parents=parents + [self], hooks=hooks)\n\n if batch.images is not None:\n batch.images = self._keep_size_images(\n batch.images, shapes_orig[\"images\"], images_were_array,\n samples)\n\n if batch.heatmaps is not None:\n # dont use shapes_orig[\"images\"] because they might be None\n batch.heatmaps = self._keep_size_maps(\n batch.heatmaps, shapes_orig[\"heatmaps\"],\n shapes_orig[\"heatmaps_arr\"], samples[1])\n\n if batch.segmentation_maps is not None:\n # dont use shapes_orig[\"images\"] because they might be None\n batch.segmentation_maps = self._keep_size_maps(\n batch.segmentation_maps, shapes_orig[\"segmentation_maps\"],\n shapes_orig[\"segmentation_maps_arr\"], samples[2])\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._keep_size_keypoints,\n shapes_orig=shapes_orig[augm_name],\n interpolations=samples[0])\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value,\n func)\n setattr(batch, augm_name, cbaois)\n return batch\n\n @classmethod\n def _keep_size_images(cls, images, shapes_orig, images_were_array,\n samples):\n interpolations, _, _ = samples\n\n gen = zip(images, interpolations, shapes_orig)\n result = []\n for image, interpolation, input_shape in gen:\n if interpolation == KeepSizeByResize.NO_RESIZE:\n result.append(image)\n else:\n result.append(\n ia.imresize_single_image(image, input_shape[0:2],\n interpolation))\n\n if images_were_array:\n # note here that NO_RESIZE can have led to different shapes\n nb_shapes = len({image.shape for image in result})\n if nb_shapes == 1:\n result = np.array(result, dtype=images.dtype)\n\n return result\n\n @classmethod\n def _keep_size_maps(cls, augmentables, shapes_orig_images,\n shapes_orig_arrs, interpolations):\n result = []\n gen = zip(augmentables, interpolations,\n shapes_orig_arrs, shapes_orig_images)\n for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen:\n if interpolation == \"NO_RESIZE\":\n result.append(augmentable)\n else:\n augmentable = augmentable.resize(\n arr_shape_orig[0:2], interpolation=interpolation)\n augmentable.shape = img_shape_orig\n result.append(augmentable)\n\n return result\n\n @classmethod\n def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations):\n result = []\n gen = zip(kpsois_aug, interpolations, shapes_orig)\n for kpsoi_aug, interpolation, input_shape in gen:\n if interpolation == KeepSizeByResize.NO_RESIZE:\n result.append(kpsoi_aug)\n else:\n result.append(kpsoi_aug.on_(input_shape))\n\n return result\n\n @classmethod\n def _get_shapes(cls, batch):\n result = dict()\n for column in batch.columns:\n result[column.name] = [cell.shape for cell in column.value]\n\n if 
batch.heatmaps is not None:\n result[\"heatmaps_arr\"] = [\n cell.arr_0to1.shape for cell in batch.heatmaps]\n\n if batch.segmentation_maps is not None:\n result[\"segmentation_maps_arr\"] = [\n cell.arr.shape for cell in batch.segmentation_maps]\n\n return result\n\n def _draw_samples(self, nb_images, random_state):\n rngs = random_state.duplicate(3)\n interpolations = self.interpolation.draw_samples((nb_images,),\n random_state=rngs[0])\n\n if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES:\n interpolations_heatmaps = np.copy(interpolations)\n else:\n interpolations_heatmaps = self.interpolation_heatmaps.draw_samples(\n (nb_images,), random_state=rngs[1]\n )\n\n # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`\n # works here only if the datatype of the array is such that it\n # may contain strings. It does not work properly for e.g.\n # integer arrays and will produce a single bool output, even\n # for arrays with more than one entry.\n same_as_imgs_idx = [ip == self.SAME_AS_IMAGES\n for ip in interpolations_heatmaps]\n\n interpolations_heatmaps[same_as_imgs_idx] = \\\n interpolations[same_as_imgs_idx]\n\n if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES:\n interpolations_segmaps = np.copy(interpolations)\n else:\n # TODO This used previously the same seed as the heatmaps part\n # leading to the same sampled values. Was that intentional?\n # Doesn't look like it should be that way.\n interpolations_segmaps = self.interpolation_segmaps.draw_samples(\n (nb_images,), random_state=rngs[2]\n )\n\n # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`\n # works here only if the datatype of the array is such that it\n # may contain strings. It does not work properly for e.g.\n # integer arrays and will produce a single bool output, even\n # for arrays with more than one entry.\n same_as_imgs_idx = [ip == self.SAME_AS_IMAGES\n for ip in interpolations_segmaps]\n\n interpolations_segmaps[same_as_imgs_idx] = \\\n interpolations[same_as_imgs_idx]\n\n return interpolations, interpolations_heatmaps, interpolations_segmaps\n\n def _to_deterministic(self):\n aug = self.copy()\n aug.children = aug.children.to_deterministic()\n aug.deterministic = True\n aug.random_state = self.random_state.derive_rng_()\n return aug\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.interpolation, self.interpolation_heatmaps]\n\n def get_children_lists(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_children_lists`.\"\"\"\n return [self.children]\n\n def __str__(self):\n pattern = (\n \"%s(\"\n \"interpolation=%s, \"\n \"interpolation_heatmaps=%s, \"\n \"name=%s, \"\n \"children=%s, \"\n \"deterministic=%s\"\n \")\")\n return pattern % (\n self.__class__.__name__, self.interpolation,\n self.interpolation_heatmaps, self.name, self.children,\n self.deterministic)\n",
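# --- Illustrative usage sketch (not part of the library source above). ---
# A minimal demonstration of the aspect-ratio padding/cropping augmenters
# defined in the preceding chunk (PadToAspectRatio, CenterPadToSquare,
# CropToSquare). It only assumes that numpy and an imgaug version providing
# these classes (e.g. 0.4.0) are importable; the dummy image and the printed
# shapes are my own example, derived from the documented semantics
# (aspect_ratio = width / height).
import numpy as np
import imgaug.augmenters as iaa

# Dummy image that is twice as wide as it is high (H=100, W=200),
# i.e. an aspect ratio of 2.0.
image = np.zeros((100, 200, 3), dtype=np.uint8)

# Pad the shorter axis until the aspect ratio is 1.0; with position="center"
# the padded rows are spread equally over both sides of that axis.
pad_square = iaa.PadToAspectRatio(1.0, position="center")
print(pad_square(image=image).shape)            # expected: (200, 200, 3)

# CenterPadToSquare is the shortcut for aspect_ratio=1.0, position="center".
print(iaa.CenterPadToSquare()(image=image).shape)  # expected: (200, 200, 3)

# CropToSquare removes columns instead of adding rows; by default the crop
# position is sampled uniformly, so repeated calls keep different regions.
print(iaa.CropToSquare()(image=image).shape)       # expected: (100, 100, 3)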
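# --- Illustrative usage sketch for KeepSizeByResize (defined above). ---
# Not part of the library source; a hedged sketch assuming numpy and imgaug
# (e.g. 0.4.0) are installed. It mirrors the docstring example of wrapping a
# Crop with keep_size=False and shows that the output is resized back to the
# input size, with heatmaps reusing the per-image interpolation and
# segmentation maps left unresized.
import numpy as np
import imgaug.augmenters as iaa

image = np.zeros((128, 128, 3), dtype=np.uint8)

aug = iaa.KeepSizeByResize(
    iaa.Crop((20, 40), keep_size=False),      # crop changes the image size
    interpolation="cubic",                    # used to resize images back
    interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES,
    interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE,
)

image_aug = aug(image=image)
print(image_aug.shape)  # expected: (128, 128, 3), i.e. the input size is kept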
"\"\"\"Collection of basic functions used throughout imgaug.\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport math\nimport numbers\nimport sys\nimport os\nimport json\nimport types\nimport functools\n# collections.abc exists since 3.3 and is expected to be used for 3.8+\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\n\nimport numpy as np\nimport cv2\nimport imageio\nimport six\nimport six.moves as sm\nimport skimage.draw\nimport skimage.measure\n\n\nALL = \"ALL\"\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# filepath to the quokka image, its annotations and depth map\nQUOKKA_FP = os.path.join(FILE_DIR, \"quokka.jpg\")\nQUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, \"quokka_annotations.json\")\nQUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(\n FILE_DIR, \"quokka_depth_map_halfres.png\")\n\nDEFAULT_FONT_FP = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"DejaVuSans.ttf\"\n)\n\n\n# to check if a dtype instance is among these dtypes, use e.g.\n# `dtype.type in NP_FLOAT_TYPES` do not just use `dtype in NP_FLOAT_TYPES` as\n# that would fail\nNP_FLOAT_TYPES = set(np.sctypes[\"float\"])\nNP_INT_TYPES = set(np.sctypes[\"int\"])\nNP_UINT_TYPES = set(np.sctypes[\"uint\"])\n\nIMSHOW_BACKEND_DEFAULT = \"matplotlib\"\n\nIMRESIZE_VALID_INTERPOLATIONS = [\n \"nearest\", \"linear\", \"area\", \"cubic\",\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]\n\n\n###############################################################################\n# Helpers for deprecation\n###############################################################################\n\nclass DeprecationWarning(Warning): # pylint: disable=redefined-builtin\n \"\"\"Warning for deprecated calls.\n\n Since python 2.7 DeprecatedWarning is silent by default. 
So we define\n our own DeprecatedWarning here so that it is not silent by default.\n\n \"\"\"\n\n\ndef warn(msg, category=UserWarning, stacklevel=2):\n \"\"\"Generate a a warning with stacktrace.\n\n Parameters\n ----------\n msg : str\n The message of the warning.\n\n category : class\n The class of the warning to produce.\n\n stacklevel : int, optional\n How many steps above this function to \"jump\" in the stacktrace when\n displaying file and line number of the error message.\n Usually ``2``.\n\n \"\"\"\n import warnings\n warnings.warn(msg, category=category, stacklevel=stacklevel)\n\n\ndef warn_deprecated(msg, stacklevel=2):\n \"\"\"Generate a non-silent deprecation warning with stacktrace.\n\n The used warning is ``imgaug.imgaug.DeprecationWarning``.\n\n Parameters\n ----------\n msg : str\n The message of the warning.\n\n stacklevel : int, optional\n How many steps above this function to \"jump\" in the stacktrace when\n displaying file and line number of the error message.\n Usually ``2``\n\n \"\"\"\n warn(msg, category=DeprecationWarning, stacklevel=stacklevel)\n\n\nclass deprecated(object): # pylint: disable=invalid-name\n \"\"\"Decorator to mark deprecated functions with warning.\n\n Adapted from\n <https://github.com/scikit-image/scikit-image/blob/master/skimage/_shared/utils.py>.\n\n Parameters\n ----------\n alt_func : None or str, optional\n If given, tell user what function to use instead.\n\n behavior : {'warn', 'raise'}, optional\n Behavior during call to deprecated function: ``warn`` means that the\n user is warned that the function is deprecated; ``raise`` means that\n an error is raised.\n\n removed_version : None or str, optional\n The package version in which the deprecated function will be removed.\n\n comment : None or str, optional\n An optional comment that will be appended to the warning message.\n\n \"\"\"\n\n def __init__(self, alt_func=None, behavior=\"warn\", removed_version=None,\n comment=None):\n self.alt_func = alt_func\n self.behavior = behavior\n self.removed_version = removed_version\n self.comment = comment\n\n def __call__(self, func):\n alt_msg = None\n if self.alt_func is not None:\n alt_msg = \"Use ``%s`` instead.\" % (self.alt_func,)\n\n rmv_msg = None\n if self.removed_version is not None:\n rmv_msg = \"It will be removed in version %s.\" % (\n self.removed_version,)\n\n comment_msg = None\n if self.comment is not None and len(self.comment) > 0:\n comment_msg = \"%s.\" % (self.comment.rstrip(\". 
\"),)\n\n addendum = \" \".join([submsg\n for submsg\n in [alt_msg, rmv_msg, comment_msg]\n if submsg is not None])\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n # getargpec() is deprecated\n # pylint: disable=deprecated-method\n\n # TODO add class name if class method\n import inspect\n # arg_names = func.__code__.co_varnames\n\n # getargspec() was deprecated in py3, but doesn't exist in py2\n if hasattr(inspect, \"getfullargspec\"):\n arg_names = inspect.getfullargspec(func)[0]\n else:\n arg_names = inspect.getargspec(func)[0]\n\n if \"self\" in arg_names or \"cls\" in arg_names:\n main_msg = \"Method ``%s.%s()`` is deprecated.\" % (\n args[0].__class__.__name__, func.__name__)\n else:\n main_msg = \"Function ``%s()`` is deprecated.\" % (\n func.__name__,)\n\n msg = (main_msg + \" \" + addendum).rstrip(\" \").replace(\"``\", \"`\")\n\n if self.behavior == \"warn\":\n warn_deprecated(msg, stacklevel=3)\n elif self.behavior == \"raise\":\n raise DeprecationWarning(msg)\n return func(*args, **kwargs)\n\n # modify doc string to display deprecation warning\n doc = \"**Deprecated**. \" + addendum\n if wrapped.__doc__ is None:\n wrapped.__doc__ = doc\n else:\n wrapped.__doc__ = doc + \"\\n\\n \" + wrapped.__doc__\n\n return wrapped\n\n###############################################################################\n\n\ndef is_np_array(val):\n \"\"\"Check whether a variable is a numpy array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy array. Otherwise ``False``.\n\n \"\"\"\n # using np.generic here via isinstance(val, (np.ndarray, np.generic))\n # seems to also fire for scalar numpy values even though those are not\n # arrays\n return isinstance(val, np.ndarray)\n\n\ndef is_np_scalar(val):\n \"\"\"Check whether a variable is a numpy scalar.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy scalar. Otherwise ``False``.\n\n \"\"\"\n # Note that isscalar() alone also fires for thinks like python strings\n # or booleans.\n # The isscalar() was added to make this function not fire for non-scalar\n # numpy types. Not sure if it is necessary.\n return isinstance(val, np.generic) and np.isscalar(val)\n\n\ndef is_single_integer(val):\n \"\"\"Check whether a variable is an ``int``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is an ``int``. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, numbers.Integral) and not isinstance(val, bool)\n\n\ndef is_single_float(val):\n \"\"\"Check whether a variable is a ``float``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``float``. Otherwise ``False``.\n\n \"\"\"\n return (\n isinstance(val, numbers.Real)\n and not is_single_integer(val)\n and not isinstance(val, bool)\n )\n\n\ndef is_single_number(val):\n \"\"\"Check whether a variable is a ``number``, i.e. an ``int`` or ``float``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``number``. Otherwise ``False``.\n\n \"\"\"\n return is_single_integer(val) or is_single_float(val)\n\n\ndef is_iterable(val):\n \"\"\"\n Checks whether a variable is iterable.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is an iterable. 
Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, Iterable)\n\n\n# TODO convert to is_single_string() or rename is_single_integer/float/number()\ndef is_string(val):\n \"\"\"Check whether a variable is a string.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a string. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, six.string_types)\n\n\ndef is_single_bool(val):\n \"\"\"Check whether a variable is a ``bool``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``bool``. Otherwise ``False``.\n\n \"\"\"\n # pylint: disable=unidiomatic-typecheck\n return type(val) == type(True)\n\n\ndef is_integer_array(val):\n \"\"\"Check whether a variable is a numpy integer array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy integer array. Otherwise ``False``.\n\n \"\"\"\n return is_np_array(val) and issubclass(val.dtype.type, np.integer)\n\n\ndef is_float_array(val):\n \"\"\"Check whether a variable is a numpy float array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy float array. Otherwise ``False``.\n\n \"\"\"\n return is_np_array(val) and issubclass(val.dtype.type, np.floating)\n\n\ndef is_callable(val):\n \"\"\"Check whether a variable is a callable, e.g. a function.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a callable. Otherwise ``False``.\n\n \"\"\"\n # python 3.x with x <= 2 does not support callable(), apparently\n if sys.version_info[0] == 3 and sys.version_info[1] <= 2:\n return hasattr(val, '__call__')\n return callable(val)\n\n\ndef is_generator(val):\n \"\"\"Check whether a variable is a generator.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` is the variable is a generator. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, types.GeneratorType)\n\n\ndef flatten(nested_iterable):\n \"\"\"Flatten arbitrarily nested lists/tuples.\n\n Code partially taken from https://stackoverflow.com/a/10824420.\n\n Parameters\n ----------\n nested_iterable\n A ``list`` or ``tuple`` of arbitrarily nested values.\n\n Yields\n ------\n any\n All values in `nested_iterable`, flattened.\n\n \"\"\"\n # don't just check if something is iterable here, because then strings\n # and arrays will be split into their characters and components\n if not isinstance(nested_iterable, (list, tuple)):\n yield nested_iterable\n else:\n for i in nested_iterable:\n if isinstance(i, (list, tuple)):\n for j in flatten(i):\n yield j\n else:\n yield i\n\n\n# TODO no longer used anywhere. deprecate?\ndef caller_name():\n \"\"\"Return the name of the caller, e.g. a function.\n\n Returns\n -------\n str\n The name of the caller as a string\n\n \"\"\"\n # pylint: disable=protected-access\n return sys._getframe(1).f_code.co_name\n\n\ndef seed(entropy=None, seedval=None):\n \"\"\"Set the seed of imgaug's global RNG.\n\n The global RNG controls most of the \"randomness\" in imgaug.\n\n The global RNG is the default one used by all augmenters. Under special\n circumstances (e.g. when an augmenter is switched to deterministic mode),\n the global RNG is replaced with a local one. The state of that replacement\n may be dependent on the global RNG's state at the time of creating the\n child RNG.\n\n .. 
note::\n\n This function is not yet marked as deprecated, but might be in the\n future. The preferred way to seed `imgaug` is via\n :func:`~imgaug.random.seed`.\n\n Parameters\n ----------\n entropy : int\n The seed value to use.\n\n seedval : None or int, optional\n Deprecated.\n\n \"\"\"\n assert entropy is not None or seedval is not None, (\n \"Expected argument 'entropy' or 'seedval' to be not-None, but both\"\n \"were None.\")\n\n if seedval is not None:\n assert entropy is None, (\n \"Argument 'seedval' is the outdated name for 'entropy'. Hence, \"\n \"if it is provided, 'entropy' must be None. Got 'entropy' value \"\n \"of type %s.\" % (type(entropy),))\n\n warn_deprecated(\"Parameter 'seedval' is deprecated. Use \"\n \"'entropy' instead.\")\n entropy = seedval\n\n import imgaug.random\n imgaug.random.seed(entropy)\n\n\n@deprecated(\"imgaug.random.normalize_generator\")\ndef normalize_random_state(random_state):\n \"\"\"Normalize various inputs to a numpy random generator.\n\n Parameters\n ----------\n random_state : None or int or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.bit_generator.SeedSequence or numpy.random.RandomState\n See :func:`~imgaug.random.normalize_generator`.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator`` (even if\n the input was a ``RandomState``).\n\n \"\"\"\n import imgaug.random\n return imgaug.random.normalize_generator_(random_state)\n\n\n@deprecated(\"imgaug.random.get_global_rng\")\ndef current_random_state():\n \"\"\"Get or create the current global RNG of imgaug.\n\n Note that the first call to this function will create a global RNG.\n\n Returns\n -------\n imgaug.random.RNG\n The global RNG to use.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.get_global_rng()\n\n\n@deprecated(\"imgaug.random.convert_seed_to_rng\")\ndef new_random_state(seed=None, fully_random=False):\n \"\"\"Create a new numpy random number generator.\n\n Parameters\n ----------\n seed : None or int, optional\n The seed value to use. If ``None`` and `fully_random` is ``False``,\n the seed will be derived from the global RNG. If `fully_random` is\n ``True``, the seed will be provided by the OS.\n\n fully_random : bool, optional\n Whether the seed will be provided by the OS.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.\n Both are initialized with the provided seed.\n\n \"\"\"\n # pylint: disable=redefined-outer-name\n import imgaug.random\n if seed is None:\n if fully_random:\n return imgaug.random.RNG.create_fully_random()\n return imgaug.random.RNG.create_pseudo_random_()\n return imgaug.random.RNG(seed)\n\n\n# TODO seems to not be used anywhere anymore\n@deprecated(\"imgaug.random.convert_seed_to_rng\")\ndef dummy_random_state():\n \"\"\"Create a dummy random state using a seed of ``1``.\n\n Returns\n -------\n imgaug.random.RNG\n The new random state.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.RNG(1)\n\n\n@deprecated(\"imgaug.random.copy_generator_unless_global_rng\")\ndef copy_random_state(random_state, force_copy=False):\n \"\"\"Copy an existing numpy (random number) generator.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator to copy.\n\n force_copy : bool, optional\n If ``True``, this function will always create a copy of every random\n state. 
If ``False``, it will not copy numpy's default random state,\n but all other random states.\n\n Returns\n -------\n rs_copy : numpy.random.RandomState\n The copied random state.\n\n \"\"\"\n import imgaug.random\n if force_copy:\n return imgaug.random.copy_generator(random_state)\n return imgaug.random.copy_generator_unless_global_generator(random_state)\n\n\n@deprecated(\"imgaug.random.derive_generator_\")\ndef derive_random_state(random_state):\n \"\"\"Derive a child numpy random generator from another one.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator from which to derive a new child generator.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.\n In both cases a derived child generator.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.derive_generator_(random_state)\n\n\n@deprecated(\"imgaug.random.derive_generators_\")\ndef derive_random_states(random_state, n=1):\n \"\"\"Derive child numpy random generators from another one.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator from which to derive new child generators.\n\n n : int, optional\n Number of child generators to derive.\n\n Returns\n -------\n list of numpy.random.Generator or list of numpy.random.RandomState\n In numpy <=1.16 a ``list`` of ``RandomState`` s,\n in 1.17+ a ``list`` of ``Generator`` s.\n In both cases lists of derived child generators.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.derive_generators_(random_state, n=n)\n\n\n@deprecated(\"imgaug.random.advance_generator_\")\ndef forward_random_state(random_state):\n \"\"\"Advance a numpy random generator's internal state.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n Generator of which to advance the internal state.\n\n \"\"\"\n import imgaug.random\n imgaug.random.advance_generator_(random_state)\n\n\ndef _quokka_normalize_extract(extract):\n \"\"\"Generate a normalized rectangle for the standard quokka image.\n\n Parameters\n ----------\n extract : 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Unnormalized representation of the image subarea to be extracted.\n\n * If ``str`` ``square``, then a squared area\n ``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from\n the image.\n * If a ``tuple``, then expected to contain four ``number`` s\n denoting ``(x1, y1, x2, y2)``.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that\n bounding box's area will be extracted from the image.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,\n then expected to contain exactly one bounding box and a shape\n matching the full image dimensions (i.e. 
``(643, 960, *)``).\n Then the one bounding box will be used similar to\n ``BoundingBox`` above.\n\n Returns\n -------\n imgaug.augmentables.bbs.BoundingBox\n Normalized representation of the area to extract from the standard\n quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n if extract == \"square\":\n bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)\n elif isinstance(extract, tuple) and len(extract) == 4:\n bb = BoundingBox(x1=extract[0], y1=extract[1],\n x2=extract[2], y2=extract[3])\n elif isinstance(extract, BoundingBox):\n bb = extract\n elif isinstance(extract, BoundingBoxesOnImage):\n assert len(extract.bounding_boxes) == 1, (\n \"Provided BoundingBoxesOnImage instance may currently only \"\n \"contain a single bounding box.\")\n assert extract.shape[0:2] == (643, 960), (\n \"Expected BoundingBoxesOnImage instance on an image of shape \"\n \"(643, 960, ?). Got shape %s.\" % (extract.shape,))\n bb = extract.bounding_boxes[0]\n else:\n raise Exception(\n \"Expected 'square' or tuple of four entries or BoundingBox or \"\n \"BoundingBoxesOnImage for parameter 'extract', \"\n \"got %s.\" % (type(extract),)\n )\n return bb\n\n\n# TODO is this the same as the project functions in augmentables?\ndef _compute_resized_shape(from_shape, to_shape):\n \"\"\"Compute the intended new shape of an image-like array after resizing.\n\n Parameters\n ----------\n from_shape : tuple or ndarray\n Old shape of the array. Usually expected to be a ``tuple`` of form\n ``(H, W)`` or ``(H, W, C)`` or alternatively an array with two or\n three dimensions.\n\n to_shape : None or tuple of ints or tuple of floats or int or float or ndarray\n New shape of the array.\n\n * If ``None``, then `from_shape` will be used as the new shape.\n * If an ``int`` ``V``, then the new shape will be ``(V, V, [C])``,\n where ``C`` will be added if it is part of `from_shape`.\n * If a ``float`` ``V``, then the new shape will be\n ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old\n height/width.\n * If a ``tuple`` ``(H', W', [C'])`` of ints, then ``H'`` and ``W'``\n will be used as the new height and width.\n * If a ``tuple`` ``(H', W', [C'])`` of floats (except ``C``), then\n ``H'`` and ``W'`` will be used as the new height and width.\n * If a numpy array, then the array's shape will be used.\n\n Returns\n -------\n tuple of int\n New shape.\n\n \"\"\"\n if is_np_array(from_shape):\n from_shape = from_shape.shape\n if is_np_array(to_shape):\n to_shape = to_shape.shape\n\n to_shape_computed = list(from_shape)\n\n if to_shape is None:\n pass\n elif isinstance(to_shape, tuple):\n assert len(from_shape) in [2, 3]\n assert len(to_shape) in [2, 3]\n\n if len(from_shape) == 3 and len(to_shape) == 3:\n assert from_shape[2] == to_shape[2]\n elif len(to_shape) == 3:\n to_shape_computed.append(to_shape[2])\n\n is_to_s_valid_values = all(\n [v is None or is_single_number(v) for v in to_shape[0:2]])\n assert is_to_s_valid_values, (\n \"Expected the first two entries in to_shape to be None or \"\n \"numbers, got types %s.\" % (\n str([type(v) for v in to_shape[0:2]]),))\n\n for i, from_shape_i in enumerate(from_shape[0:2]):\n if to_shape[i] is None:\n to_shape_computed[i] = from_shape_i\n elif is_single_integer(to_shape[i]):\n to_shape_computed[i] = to_shape[i]\n else: # float\n to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))\n elif is_single_integer(to_shape) or is_single_float(to_shape):\n to_shape_computed = _compute_resized_shape(\n 
from_shape, (to_shape, to_shape))\n else:\n raise Exception(\n \"Expected to_shape to be None or ndarray or tuple of floats or \"\n \"tuple of ints or single int or single float, \"\n \"got %s.\" % (type(to_shape),))\n\n return tuple(to_shape_computed)\n\n\ndef quokka(size=None, extract=None):\n \"\"\"Return an image of a quokka as a numpy array.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n Size of the output image. Input into\n :func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a\n ``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is\n the width. If ``None``, then the image will not be resized.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea of the quokka image to extract:\n\n * If ``None``, then the whole image will be used.\n * If ``str`` ``square``, then a squared area\n ``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from\n the image.\n * If a ``tuple``, then expected to contain four ``number`` s\n denoting ``(x1, y1, x2, y2)``.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that\n bounding box's area will be extracted from the image.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,\n then expected to contain exactly one bounding box and a shape\n matching the full image dimensions (i.e. ``(643, 960, *)``).\n Then the one bounding box will be used similar to\n ``BoundingBox`` above.\n\n Returns\n -------\n (H,W,3) ndarray\n The image array of dtype ``uint8``.\n\n \"\"\"\n img = imageio.imread(QUOKKA_FP, pilmode=\"RGB\")\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img = bb.extract_from_image(img)\n if size is not None:\n shape_resized = _compute_resized_shape(img.shape, size)\n img = imresize_single_image(img, shape_resized[0:2])\n return img\n\n\ndef quokka_square(size=None):\n \"\"\"Return an (square) image of a quokka as a numpy array.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n Size of the output image. Input into\n :func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a\n ``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is\n the width. If ``None``, then the image will not be resized.\n\n Returns\n -------\n (H,W,3) ndarray\n The image array of dtype ``uint8``.\n\n \"\"\"\n return quokka(size=size, extract=\"square\")\n\n\ndef quokka_heatmap(size=None, extract=None):\n \"\"\"Return a heatmap (here: depth map) for the standard example quokka image.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n See :func:`~imgaug.imgaug.quokka`.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.heatmaps.HeatmapsOnImage\n Depth map as an heatmap object. Values close to ``0.0`` denote objects\n that are close to the camera. 
Values close to ``1.0`` denote objects\n that are furthest away (among all shown objects).\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.heatmaps import HeatmapsOnImage\n\n img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode=\"RGB\")\n img = imresize_single_image(img, (643, 960), interpolation=\"cubic\")\n\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img = bb.extract_from_image(img)\n if size is None:\n size = img.shape[0:2]\n\n shape_resized = _compute_resized_shape(img.shape, size)\n img = imresize_single_image(img, shape_resized[0:2])\n img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB\n img_0to1 = img_0to1.astype(np.float32) / 255.0\n img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away\n\n return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))\n\n\ndef quokka_segmentation_map(size=None, extract=None):\n \"\"\"Return a segmentation map for the standard example quokka image.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n See :func:`~imgaug.imgaug.quokka`.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.segmaps.SegmentationMapsOnImage\n Segmentation map object.\n\n \"\"\"\n # pylint: disable=invalid-name\n # TODO get rid of this deferred import\n from imgaug.augmentables.segmaps import SegmentationMapsOnImage\n\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n\n xx = []\n yy = []\n for kp_dict in json_dict[\"polygons\"][0][\"keypoints\"]:\n x = kp_dict[\"x\"]\n y = kp_dict[\"y\"]\n xx.append(x)\n yy.append(y)\n\n img_seg = np.zeros((643, 960, 1), dtype=np.int32)\n rr, cc = skimage.draw.polygon(\n np.array(yy), np.array(xx), shape=img_seg.shape)\n img_seg[rr, cc, 0] = 1\n\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img_seg = bb.extract_from_image(img_seg)\n\n segmap = SegmentationMapsOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))\n\n if size is not None:\n shape_resized = _compute_resized_shape(img_seg.shape, size)\n segmap = segmap.resize(shape_resized[0:2])\n segmap.shape = tuple(shape_resized[0:2]) + (3,)\n\n return segmap\n\n\ndef quokka_keypoints(size=None, extract=None):\n \"\"\"Return example keypoints on the standard example quokke image.\n\n The keypoints cover the eyes, ears, nose and paws.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the keypoints are placed. If\n ``None``, then the keypoints are not projected to any new size\n (positions on the original image are used). ``float`` s lead to\n relative size changes, ``int`` s to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. 
See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.kps.KeypointsOnImage\n Example keypoints on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n keypoints = []\n for kp_dict in json_dict[\"keypoints\"]:\n keypoints.append(Keypoint(x=kp_dict[\"x\"] - left, y=kp_dict[\"y\"] - top))\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n kpsoi = KeypointsOnImage(keypoints, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n kpsoi = kpsoi.on(shape_resized)\n return kpsoi\n\n\ndef quokka_bounding_boxes(size=None, extract=None):\n \"\"\"Return example bounding boxes on the standard example quokke image.\n\n Currently only a single bounding box is returned that covers the quokka.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the BBs are placed. If ``None``, then\n the BBs are not projected to any new size (positions on the original\n image are used). ``float`` s lead to relative size changes, ``int`` s\n to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.bbs.BoundingBoxesOnImage\n Example BBs on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n bbs = []\n for bb_dict in json_dict[\"bounding_boxes\"]:\n bbs.append(\n BoundingBox(\n x1=bb_dict[\"x1\"] - left,\n y1=bb_dict[\"y1\"] - top,\n x2=bb_dict[\"x2\"] - left,\n y2=bb_dict[\"y2\"] - top\n )\n )\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n bbsoi = BoundingBoxesOnImage(bbs, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n bbsoi = bbsoi.on(shape_resized)\n return bbsoi\n\n\ndef quokka_polygons(size=None, extract=None):\n \"\"\"\n Returns example polygons on the standard example quokke image.\n\n The result contains one polygon, covering the quokka's outline.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the polygons are placed. If ``None``,\n then the polygons are not projected to any new size (positions on the\n original image are used). ``float`` s lead to relative size changes,\n ``int`` s to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. 
See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.polys.PolygonsOnImage\n Example polygons on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.polys import Polygon, PolygonsOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n polygons = []\n for poly_json in json_dict[\"polygons\"]:\n polygons.append(\n Polygon([(point[\"x\"] - left, point[\"y\"] - top)\n for point in poly_json[\"keypoints\"]])\n )\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n psoi = PolygonsOnImage(polygons, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n psoi = psoi.on(shape_resized)\n return psoi\n\n\n# TODO change this to some atan2 stuff?\ndef angle_between_vectors(v1, v2):\n \"\"\"Calculcate the angle in radians between vectors `v1` and `v2`.\n\n From\n http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python\n\n Parameters\n ----------\n v1 : (N,) ndarray\n First vector.\n\n v2 : (N,) ndarray\n Second vector.\n\n Returns\n -------\n float\n Angle in radians.\n\n Examples\n --------\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))\n 1.570796...\n\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))\n 0.0\n\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))\n 3.141592...\n\n \"\"\"\n # pylint: disable=invalid-name\n length1 = np.linalg.norm(v1)\n length2 = np.linalg.norm(v2)\n v1_unit = (v1 / length1) if length1 > 0 else np.float32(v1) * 0\n v2_unit = (v2 / length2) if length2 > 0 else np.float32(v2) * 0\n return np.arccos(np.clip(np.dot(v1_unit, v2_unit), -1.0, 1.0))\n\n\n# TODO is this used anywhere?\n# TODO this might also be covered by augmentables.utils or\n# augmentables.polys/lines\ndef compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):\n \"\"\"Compute the intersection point of two lines.\n\n Taken from https://stackoverflow.com/a/20679579 .\n\n Parameters\n ----------\n x1 : number\n x coordinate of the first point on line 1.\n (The lines extends beyond this point.)\n\n y1 : number\n y coordinate of the first point on line 1.\n (The lines extends beyond this point.)\n\n x2 : number\n x coordinate of the second point on line 1.\n (The lines extends beyond this point.)\n\n y2 : number\n y coordinate of the second point on line 1.\n (The lines extends beyond this point.)\n\n x3 : number\n x coordinate of the first point on line 2.\n (The lines extends beyond this point.)\n\n y3 : number\n y coordinate of the first point on line 2.\n (The lines extends beyond this point.)\n\n x4 : number\n x coordinate of the second point on line 2.\n (The lines extends beyond this point.)\n\n y4 : number\n y coordinate of the second point on line 2.\n (The lines extends beyond this point.)\n\n Returns\n -------\n tuple of number or bool\n The coordinate of the intersection point as a ``tuple`` ``(x, y)``.\n If the lines are parallel (no intersection point or an infinite number\n of them), the result is ``False``.\n\n \"\"\"\n # pylint: disable=invalid-name\n def _make_line(point1, point2):\n line_y = (point1[1] - point2[1])\n line_x = (point2[0] - point1[0])\n slope = (point1[0] * point2[1] - point2[0] * point1[1])\n return line_y, line_x, -slope\n\n line1 = 
_make_line((x1, y1), (x2, y2))\n line2 = _make_line((x3, y3), (x4, y4))\n\n D = line1[0] * line2[1] - line1[1] * line2[0]\n Dx = line1[2] * line2[1] - line1[1] * line2[2]\n Dy = line1[0] * line2[2] - line1[2] * line2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n return False\n\n\n# TODO replace by cv2.putText()?\ndef draw_text(img, y, x, text, color=(0, 255, 0), size=25):\n \"\"\"Draw text on an image.\n\n This uses by default DejaVuSans as its font, which is included in this\n library.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: yes; not tested\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n TODO check if other dtypes could be enabled\n\n Parameters\n ----------\n img : (H,W,3) ndarray\n The image array to draw text on.\n Expected to be of dtype ``uint8`` or ``float32`` (expected value\n range is ``[0.0, 255.0]``).\n\n y : int\n x-coordinate of the top left corner of the text.\n\n x : int\n y- coordinate of the top left corner of the text.\n\n text : str\n The text to draw.\n\n color : iterable of int, optional\n Color of the text to draw. For RGB-images this is expected to be an\n RGB color.\n\n size : int, optional\n Font size of the text to draw.\n\n Returns\n -------\n (H,W,3) ndarray\n Input image with text drawn on it.\n\n \"\"\"\n from PIL import (\n Image as PIL_Image,\n ImageDraw as PIL_ImageDraw,\n ImageFont as PIL_ImageFont\n )\n\n assert img.dtype.name in [\"uint8\", \"float32\"], (\n \"Can currently draw text only on images of dtype 'uint8' or \"\n \"'float32'. Got dtype %s.\" % (img.dtype.name,))\n\n input_dtype = img.dtype\n if img.dtype == np.float32:\n img = img.astype(np.uint8)\n\n img = PIL_Image.fromarray(img)\n font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)\n context = PIL_ImageDraw.Draw(img)\n context.text((x, y), text, fill=tuple(color), font=font)\n img_np = np.asarray(img)\n\n # PIL/asarray returns read only array\n if not img_np.flags[\"WRITEABLE\"]:\n try:\n # this seems to no longer work with np 1.16 (or was pillow\n # updated?)\n img_np.setflags(write=True)\n except ValueError as ex:\n if \"cannot set WRITEABLE flag to True of this array\" in str(ex):\n img_np = np.copy(img_np)\n\n if img_np.dtype != input_dtype:\n img_np = img_np.astype(input_dtype)\n\n return img_np\n\n\n# TODO rename sizes to size?\ndef imresize_many_images(images, sizes=None, interpolation=None):\n \"\"\"Resize each image in a list or array to a specified size.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: no (1)\n * ``uint64``: no (2)\n * ``int8``: yes; tested (3)\n * ``int16``: yes; tested\n * ``int32``: limited; tested (4)\n * ``int64``: no (2)\n * ``float16``: yes; tested (5)\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: no (1)\n * ``bool``: yes; tested (6)\n\n - (1) rejected by ``cv2.imresize``\n - (2) results too inaccurate\n - (3) mapped internally to ``int16`` when interpolation!=\"nearest\"\n - (4) only supported for interpolation=\"nearest\", other interpolations\n lead to cv2 error\n - (5) mapped internally to ``float32``\n - (6) mapped internally to ``uint8``\n\n Parameters\n ----------\n images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray\n Array of the images to resize.\n Usually recommended to be of dtype ``uint8``.\n\n sizes : float or iterable of int or iterable of float\n The new 
size of the images, given either as a fraction (a single\n float) or as a ``(height, width)`` ``tuple`` of two integers or as a\n ``(height fraction, width fraction)`` ``tuple`` of two floats.\n\n interpolation : None or str or int, optional\n The interpolation to use during resize.\n If ``int``, then expected to be one of:\n\n * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)\n * ``cv2.INTER_LINEAR`` (linear interpolation)\n * ``cv2.INTER_AREA`` (area interpolation)\n * ``cv2.INTER_CUBIC`` (cubic interpolation)\n\n If ``str``, then expected to be one of:\n\n * ``nearest`` (identical to ``cv2.INTER_NEAREST``)\n * ``linear`` (identical to ``cv2.INTER_LINEAR``)\n * ``area`` (identical to ``cv2.INTER_AREA``)\n * ``cubic`` (identical to ``cv2.INTER_CUBIC``)\n\n If ``None``, the interpolation will be chosen automatically. For size\n increases, ``area`` interpolation will be picked and for size\n decreases, ``linear`` interpolation will be picked.\n\n Returns\n -------\n (N,H',W',[C]) ndarray\n Array of the resized images.\n\n Examples\n --------\n >>> import imgaug as ia\n >>> images = np.zeros((2, 8, 16, 3), dtype=np.uint8)\n >>> images_resized = ia.imresize_many_images(images, 2.0)\n >>> images_resized.shape\n (2, 16, 32, 3)\n\n Convert two RGB images of height ``8`` and width ``16`` to images of\n height ``2*8=16`` and width ``2*16=32``.\n\n >>> images_resized = ia.imresize_many_images(images, (2.0, 4.0))\n >>> images_resized.shape\n (2, 16, 64, 3)\n\n Convert two RGB images of height ``8`` and width ``16`` to images of\n height ``2*8=16`` and width ``4*16=64``.\n\n >>> images_resized = ia.imresize_many_images(images, (16, 32))\n >>> images_resized.shape\n (2, 16, 32, 3)\n\n Converts two RGB images of height ``8`` and width ``16`` to images of\n height ``16`` and width ``32``.\n\n \"\"\"\n # pylint: disable=too-many-statements\n\n # we just do nothing if the input contains zero images\n # one could also argue that an exception would be appropriate here\n if len(images) == 0:\n return images\n\n # verify that sizes contains only values >0\n if is_single_number(sizes) and sizes <= 0:\n raise ValueError(\n \"If 'sizes' is given as a single number, it is expected to \"\n \"be >= 0, got %.8f.\" % (sizes,))\n\n # change after the validation to make the above error messages match the\n # original input\n if is_single_number(sizes):\n sizes = (sizes, sizes)\n else:\n assert len(sizes) == 2, (\n \"If 'sizes' is given as a tuple, it is expected be a tuple of two \"\n \"entries, got %d entries.\" % (len(sizes),))\n assert all([is_single_number(val) and val >= 0 for val in sizes]), (\n \"If 'sizes' is given as a tuple, it is expected be a tuple of two \"\n \"ints or two floats, each >= 0, got types %s with values %s.\" % (\n str([type(val) for val in sizes]), str(sizes)))\n\n # if input is a list, call this function N times for N images\n # but check beforehand if all images have the same shape, then just\n # convert to a single array and de-convert afterwards\n if isinstance(images, list):\n nb_shapes = len({image.shape for image in images})\n if nb_shapes == 1:\n return list(imresize_many_images(\n np.array(images), sizes=sizes, interpolation=interpolation))\n\n return [\n imresize_many_images(\n image[np.newaxis, ...],\n sizes=sizes,\n interpolation=interpolation)[0, ...]\n for image in images]\n\n shape = images.shape\n assert images.ndim in [3, 4], \"Expected array of shape (N, H, W, [C]), \" \\\n \"got shape %s\" % (str(shape),)\n nb_images = shape[0]\n height_image, width_image = 
shape[1], shape[2]\n nb_channels = shape[3] if images.ndim > 3 else None\n\n height_target, width_target = sizes[0], sizes[1]\n height_target = (int(np.round(height_image * height_target))\n if is_single_float(height_target)\n else height_target)\n width_target = (int(np.round(width_image * width_target))\n if is_single_float(width_target)\n else width_target)\n\n if height_target == height_image and width_target == width_image:\n return np.copy(images)\n\n # return empty array if input array contains zero-sized axes\n # note that None==0 is not True (for case nb_channels=None)\n if 0 in [height_target, width_target, nb_channels]:\n shape_out = tuple([shape[0], height_target, width_target]\n + list(shape[3:]))\n return np.zeros(shape_out, dtype=images.dtype)\n\n # place this after the (h==h' and w==w') check so that images with\n # zero-sized don't result in errors if the aren't actually resized\n # verify that all input images have height/width > 0\n has_zero_size_axes = any([axis == 0 for axis in images.shape[1:]])\n assert not has_zero_size_axes, (\n \"Cannot resize images, because at least one image has a height and/or \"\n \"width and/or number of channels of zero. \"\n \"Observed shapes were: %s.\" % (\n str([image.shape for image in images]),))\n\n inter = interpolation\n assert inter is None or inter in IMRESIZE_VALID_INTERPOLATIONS, (\n \"Expected 'interpolation' to be None or one of %s. Got %s.\" % (\n \", \".join(\n [str(valid_ip) for valid_ip in IMRESIZE_VALID_INTERPOLATIONS]\n ),\n str(inter)\n )\n )\n if inter is None:\n if height_target > height_image or width_target > width_image:\n inter = cv2.INTER_AREA\n else:\n inter = cv2.INTER_LINEAR\n elif inter in [\"nearest\", cv2.INTER_NEAREST]:\n inter = cv2.INTER_NEAREST\n elif inter in [\"linear\", cv2.INTER_LINEAR]:\n inter = cv2.INTER_LINEAR\n elif inter in [\"area\", cv2.INTER_AREA]:\n inter = cv2.INTER_AREA\n else: # if ip in [\"cubic\", cv2.INTER_CUBIC]:\n inter = cv2.INTER_CUBIC\n\n # TODO find more beautiful way to avoid circular imports\n from . 
import dtypes as iadt\n if inter == cv2.INTER_NEAREST:\n iadt.gate_dtypes(\n images,\n allowed=[\"bool\",\n \"uint8\", \"uint16\",\n \"int8\", \"int16\", \"int32\",\n \"float16\", \"float32\", \"float64\"],\n disallowed=[\"uint32\", \"uint64\", \"uint128\", \"uint256\",\n \"int64\", \"int128\", \"int256\",\n \"float96\", \"float128\", \"float256\"],\n augmenter=None)\n else:\n iadt.gate_dtypes(\n images,\n allowed=[\"bool\",\n \"uint8\", \"uint16\",\n \"int8\", \"int16\",\n \"float16\", \"float32\", \"float64\"],\n disallowed=[\"uint32\", \"uint64\", \"uint128\", \"uint256\",\n \"int32\", \"int64\", \"int128\", \"int256\",\n \"float96\", \"float128\", \"float256\"],\n augmenter=None)\n\n result_shape = (nb_images, height_target, width_target)\n if nb_channels is not None:\n result_shape = result_shape + (nb_channels,)\n result = np.zeros(result_shape, dtype=images.dtype)\n for i, image in enumerate(images):\n input_dtype = image.dtype\n input_dtype_name = input_dtype.name\n\n if input_dtype_name == \"bool\":\n image = image.astype(np.uint8) * 255\n elif input_dtype_name == \"int8\" and inter != cv2.INTER_NEAREST:\n image = image.astype(np.int16)\n elif input_dtype_name == \"float16\":\n image = image.astype(np.float32)\n\n if nb_channels is not None and nb_channels > 512:\n channels = [\n cv2.resize(image[..., c], (width_target, height_target),\n interpolation=inter) for c in sm.xrange(nb_channels)]\n result_img = np.stack(channels, axis=-1)\n else:\n result_img = cv2.resize(\n image, (width_target, height_target), interpolation=inter)\n\n assert result_img.dtype.name == image.dtype.name, (\n \"Expected cv2.resize() to keep the input dtype '%s', but got \"\n \"'%s'. This is an internal error. Please report.\" % (\n image.dtype.name, result_img.dtype.name\n )\n )\n\n # cv2 removes the channel axis if input was (H, W, 1)\n # we re-add it (but only if input was not (H, W))\n if (len(result_img.shape) == 2 and nb_channels is not None\n and nb_channels == 1):\n result_img = result_img[:, :, np.newaxis]\n\n if input_dtype_name == \"bool\":\n result_img = result_img > 127\n elif input_dtype_name == \"int8\" and inter != cv2.INTER_NEAREST:\n # TODO somehow better avoid circular imports here\n from . import dtypes as iadt\n result_img = iadt.restore_dtypes_(result_img, np.int8)\n elif input_dtype_name == \"float16\":\n # TODO see above\n from . 
import dtypes as iadt\n result_img = iadt.restore_dtypes_(result_img, np.float16)\n result[i] = result_img\n return result\n\n\ndef _assert_two_or_three_dims(shape):\n if hasattr(shape, \"shape\"):\n shape = shape.shape\n assert len(shape) in [2, 3], (\n \"Expected image with two or three dimensions, but got %d dimensions \"\n \"and shape %s.\" % (len(shape), shape))\n\n\ndef imresize_single_image(image, sizes, interpolation=None):\n \"\"\"Resize a single image.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n image : (H,W,C) ndarray or (H,W) ndarray\n Array of the image to resize.\n Usually recommended to be of dtype ``uint8``.\n\n sizes : float or iterable of int or iterable of float\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n interpolation : None or str or int, optional\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Returns\n -------\n (H',W',C) ndarray or (H',W') ndarray\n The resized image.\n\n \"\"\"\n _assert_two_or_three_dims(image)\n\n grayscale = False\n if image.ndim == 2:\n grayscale = True\n image = image[:, :, np.newaxis]\n\n rs = imresize_many_images(\n image[np.newaxis, :, :, :], sizes, interpolation=interpolation)\n if grayscale:\n return rs[0, :, :, 0]\n return rs[0, ...]\n\n\ndef pool(arr, block_size, func, pad_mode=\"constant\", pad_cval=0,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array by pooling values within blocks.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested (2)\n * ``uint64``: no (1)\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested (2)\n * ``int64``: no (1)\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested (2)\n * ``bool``: yes; tested\n\n - (1) results too inaccurate (at least when using np.average as func)\n - (2) Note that scikit-image documentation says that the wrapped\n pooling function converts inputs to ``float64``. Actual tests\n showed no indication of that happening (at least when using\n preserve_dtype=True).\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool. Ideally of datatype ``float64``.\n\n block_size : int or tuple of int\n Spatial size of each group of values to pool, aka kernel size.\n\n * If a single ``int``, then a symmetric block of that size along\n height and width will be used.\n * If a ``tuple`` of two values, it is assumed to be the block size\n along height and width of the image-like, with pooling happening\n per channel.\n * If a ``tuple`` of three values, it is assumed to be the block size\n along height, width and channels.\n\n func : callable\n Function to apply to a given block in order to convert it to a single\n number, e.g. :func:`numpy.average`, :func:`numpy.min`,\n :func:`numpy.max`.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder. See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n preserve_dtype : bool, optional\n Whether to convert the array back to the input datatype if it is\n changed away from that in the pooling process.\n\n cval : None or number, optional\n Deprecated. 
Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after pooling.\n\n \"\"\"\n # TODO find better way to avoid circular import\n from . import dtypes as iadt\n from .augmenters import size as iasize\n\n if arr.size == 0:\n return np.copy(arr)\n\n iadt.gate_dtypes(arr,\n allowed=[\"bool\",\n \"uint8\", \"uint16\", \"uint32\",\n \"int8\", \"int16\", \"int32\",\n \"float16\", \"float32\", \"float64\", \"float128\"],\n disallowed=[\"uint64\", \"uint128\", \"uint256\",\n \"int64\", \"int128\", \"int256\",\n \"float256\"],\n augmenter=None)\n\n if cval is not None:\n warn_deprecated(\"`cval` is a deprecated argument in pool(). \"\n \"Use `pad_cval` instead.\")\n pad_cval = cval\n\n _assert_two_or_three_dims(arr)\n\n is_valid_int = is_single_integer(block_size) and block_size >= 1\n is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \\\n and [is_single_integer(val) and val >= 1 for val in block_size]\n assert is_valid_int or is_valid_tuple, (\n \"Expected argument 'block_size' to be a single integer >0 or \"\n \"a tuple of 2 or 3 values with each one being >0. Got %s.\" % (\n str(block_size)))\n\n if is_single_integer(block_size):\n block_size = [block_size, block_size]\n if len(block_size) < arr.ndim:\n block_size = list(block_size) + [1]\n\n # We use custom padding here instead of the one from block_reduce(),\n # because (1) it is expected to be faster and (2) it allows us more\n # flexibility wrt to padding modes.\n arr = iasize.pad_to_multiples_of(\n arr,\n height_multiple=block_size[0],\n width_multiple=block_size[1],\n mode=pad_mode,\n cval=pad_cval\n )\n\n input_dtype = arr.dtype\n\n arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func,\n cval=cval)\n if preserve_dtype and arr_reduced.dtype.name != input_dtype.name:\n arr_reduced = arr_reduced.astype(input_dtype)\n return arr_reduced\n\n\n# TODO does OpenCV have a faster avg pooling method?\ndef avg_pool(arr, block_size, pad_mode=\"reflect\", pad_cval=128,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array using average pooling.\n\n Defaults to ``pad_mode=\"reflect\"`` to ensure that padded values do not\n affect the average.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n cval : None or number, optional\n Deprecated. 
Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after average pooling.\n\n \"\"\"\n return pool(arr, block_size, np.average, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype, cval=cval)\n\n\ndef max_pool(arr, block_size, pad_mode=\"edge\", pad_cval=0,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array using max-pooling.\n\n Defaults to ``pad_mode=\"edge\"`` to ensure that padded values do not affect\n the maximum, even if the dtype was something else than ``uint8``.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n cval : None or number, optional\n Deprecated. Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after max-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.max, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype, cval=cval)\n\n\ndef min_pool(arr, block_size, pad_mode=\"edge\", pad_cval=255,\n preserve_dtype=True):\n \"\"\"Resize an array using min-pooling.\n\n Defaults to ``pad_mode=\"edge\"`` to ensure that padded values do not affect\n the minimum, even if the dtype was something else than ``uint8``.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after min-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.min, pad_mode=pad_mode, pad_cval=pad_cval,\n preserve_dtype=preserve_dtype)\n\n\ndef median_pool(arr, block_size, pad_mode=\"reflect\", pad_cval=128,\n preserve_dtype=True):\n \"\"\"Resize an array using median-pooling.\n\n Defaults to ``pad_mode=\"reflect\"`` to ensure that padded values do not\n affect the average.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n 
pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after min-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.median, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype)\n\n\ndef draw_grid(images, rows=None, cols=None):\n \"\"\"Combine multiple images into a single grid-like image.\n\n Calling this function with four images of the same shape and ``rows=2``,\n ``cols=2`` will combine the four images to a single image array of shape\n ``(2*H, 2*W, C)``, where ``H`` is the height of any of the images\n (analogous ``W``) and ``C`` is the number of channels of any image.\n\n Calling this function with four images of the same shape and ``rows=4``,\n ``cols=1`` is analogous to calling :func:`numpy.vstack` on the images.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; fully tested\n * ``uint32``: yes; fully tested\n * ``uint64``: yes; fully tested\n * ``int8``: yes; fully tested\n * ``int16``: yes; fully tested\n * ``int32``: yes; fully tested\n * ``int64``: yes; fully tested\n * ``float16``: yes; fully tested\n * ``float32``: yes; fully tested\n * ``float64``: yes; fully tested\n * ``float128``: yes; fully tested\n * ``bool``: yes; fully tested\n\n Parameters\n ----------\n images : (N,H,W,3) ndarray or iterable of (H,W,3) array\n The input images to convert to a grid.\n\n rows : None or int, optional\n The number of rows to show in the grid.\n If ``None``, it will be automatically derived.\n\n cols : None or int, optional\n The number of cols to show in the grid.\n If ``None``, it will be automatically derived.\n\n Returns\n -------\n (H',W',3) ndarray\n Image of the generated grid.\n\n \"\"\"\n nb_images = len(images)\n assert nb_images > 0, \"Expected to get at least one image, got none.\"\n\n if is_np_array(images):\n assert images.ndim == 4, (\n \"Expected to get an array of four dimensions denoting \"\n \"(N, H, W, C), got %d dimensions and shape %s.\" % (\n images.ndim, images.shape))\n else:\n assert is_iterable(images), (\n \"Expected to get an iterable of ndarrays, \"\n \"got %s.\" % (type(images),))\n assert all([is_np_array(image) for image in images]), (\n \"Expected to get an iterable of ndarrays, \"\n \"got types %s.\" % (\n \", \".join([str(type(image)) for image in images],)))\n assert all([image.ndim == 3 for image in images]), (\n \"Expected to get images with three dimensions. 
Got shapes %s.\" % (\n \", \".join([str(image.shape) for image in images])))\n assert len({image.dtype.name for image in images}) == 1, (\n \"Expected to get images with the same dtypes, got dtypes %s.\" % (\n \", \".join([image.dtype.name for image in images])))\n assert len({image.shape[-1] for image in images}) == 1, (\n \"Expected to get images with the same number of channels, \"\n \"got shapes %s.\" % (\n \", \".join([str(image.shape) for image in images])))\n\n cell_height = max([image.shape[0] for image in images])\n cell_width = max([image.shape[1] for image in images])\n nb_channels = images[0].shape[2]\n\n if rows is None and cols is None:\n rows = cols = int(math.ceil(math.sqrt(nb_images)))\n elif rows is not None:\n cols = int(math.ceil(nb_images / rows))\n elif cols is not None:\n rows = int(math.ceil(nb_images / cols))\n assert rows * cols >= nb_images, (\n \"Expected rows*cols to lead to at least as many cells as there were \"\n \"images provided, but got %d rows, %d cols (=%d cells) for %d \"\n \"images. \" % (rows, cols, rows*cols, nb_images))\n\n width = cell_width * cols\n height = cell_height * rows\n dtype = images.dtype if is_np_array(images) else images[0].dtype\n grid = np.zeros((height, width, nb_channels), dtype=dtype)\n cell_idx = 0\n for row_idx in sm.xrange(rows):\n for col_idx in sm.xrange(cols):\n if cell_idx < nb_images:\n image = images[cell_idx]\n cell_y1 = cell_height * row_idx\n cell_y2 = cell_y1 + image.shape[0]\n cell_x1 = cell_width * col_idx\n cell_x2 = cell_x1 + image.shape[1]\n grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image\n cell_idx += 1\n\n return grid\n\n\ndef show_grid(images, rows=None, cols=None):\n \"\"\"Combine multiple images into a single image and plot the result.\n\n This will show a window of the results of :func:`~imgaug.imgaug.draw_grid`.\n\n dtype support::\n\n minimum of (\n :func:`~imgaug.imgaug.draw_grid`,\n :func:`~imgaug.imgaug.imshow`\n )\n\n Parameters\n ----------\n images : (N,H,W,3) ndarray or iterable of (H,W,3) array\n See :func:`~imgaug.imgaug.draw_grid`.\n\n rows : None or int, optional\n See :func:`~imgaug.imgaug.draw_grid`.\n\n cols : None or int, optional\n See :func:`~imgaug.imgaug.draw_grid`.\n\n \"\"\"\n grid = draw_grid(images, rows=rows, cols=cols)\n imshow(grid)\n\n\ndef imshow(image, backend=IMSHOW_BACKEND_DEFAULT):\n \"\"\"Show an image in a window.\n\n dtype support::\n\n * ``uint8``: yes; not tested\n * ``uint16``: ?\n * ``uint32``: ?\n * ``uint64``: ?\n * ``int8``: ?\n * ``int16``: ?\n * ``int32``: ?\n * ``int64``: ?\n * ``float16``: ?\n * ``float32``: ?\n * ``float64``: ?\n * ``float128``: ?\n * ``bool``: ?\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n Image to show.\n\n backend : {'matplotlib', 'cv2'}, optional\n Library to use to show the image. May be either matplotlib or\n OpenCV ('cv2'). 
OpenCV tends to be faster, but apparently causes more\n technical issues.\n\n \"\"\"\n assert backend in [\"matplotlib\", \"cv2\"], (\n \"Expected backend 'matplotlib' or 'cv2', got %s.\" % (backend,))\n\n if backend == \"cv2\":\n image_bgr = image\n if image.ndim == 3 and image.shape[2] in [3, 4]:\n image_bgr = image[..., 0:3][..., ::-1]\n\n win_name = \"imgaug-default-window\"\n cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)\n cv2.imshow(win_name, image_bgr)\n cv2.waitKey(0)\n cv2.destroyWindow(win_name)\n else:\n # import only when necessary (faster startup; optional dependency;\n # less fragile -- see issue #225)\n import matplotlib.pyplot as plt\n\n dpi = 96\n h, w = image.shape[0] / dpi, image.shape[1] / dpi\n # if the figure is too narrow, the footer may appear and make the fig\n # suddenly wider (ugly)\n w = max(w, 6)\n\n fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)\n fig.canvas.set_window_title(\"imgaug.imshow(%s)\" % (image.shape,))\n # cmap=gray is automatically only activate for grayscale images\n ax.imshow(image, cmap=\"gray\")\n plt.show()\n\n\ndef do_assert(condition, message=\"Assertion failed.\"):\n \"\"\"Assert that a ``condition`` holds or raise an ``Exception`` otherwise.\n\n This was added because `assert` statements are removed in optimized code.\n It replaced `assert` statements throughout the library, but that was\n reverted again for readability and performance reasons.\n\n Parameters\n ----------\n condition : bool\n If ``False``, an exception is raised.\n\n message : str, optional\n Error message.\n\n \"\"\"\n if not condition:\n raise AssertionError(str(message))\n\n\ndef _normalize_cv2_input_arr_(arr):\n flags = arr.flags\n if not flags[\"OWNDATA\"]:\n arr = np.copy(arr)\n flags = arr.flags\n if not flags[\"C_CONTIGUOUS\"]:\n arr = np.ascontiguousarray(arr)\n return arr\n\n\ndef apply_lut(image, table):\n \"\"\"Map an input image to a new one using a lookup table.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n Parameters\n ----------\n image : ndarray\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n table : ndarray or list of ndarray\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n Returns\n -------\n ndarray\n Image after mapping via lookup table.\n\n \"\"\"\n return apply_lut_(np.copy(image), table)\n\n\n# TODO make this function compatible with short max sized images, probably\n# isn't right now\ndef apply_lut_(image, table):\n \"\"\"Map an input image in-place to a new one using a lookup table.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: no\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n Parameters\n ----------\n image : ndarray\n Image of dtype ``uint8`` and shape ``(H,W)`` or ``(H,W,C)``.\n\n table : ndarray or list of ndarray\n Table of dtype ``uint8`` containing the mapping from old to new\n values. 
Either a ``list`` of ``C`` ``(256,)`` arrays or a single\n array of shape ``(256,)`` or ``(256, C)`` or ``(1, 256, C)``.\n In case of ``(256,)`` the same table is used for all channels,\n otherwise a channelwise table is used and ``C`` is expected to match\n the number of channels.\n\n Returns\n -------\n ndarray\n Image after mapping via lookup table.\n This *might* be the same array instance as provided via `image`.\n\n \"\"\"\n\n image_shape_orig = image.shape\n nb_channels = 1 if len(image_shape_orig) == 2 else image_shape_orig[-1]\n\n if 0 in image_shape_orig:\n return image\n\n image = _normalize_cv2_input_arr_(image)\n\n # [(256,), (256,), ...] => (256, C)\n if isinstance(table, list):\n assert len(table) == nb_channels, (\n \"Expected to get %d tables (one per channel), got %d instead.\" % (\n nb_channels, len(table)))\n table = np.stack(table, axis=-1)\n\n # (256, C) => (1, 256, C)\n if table.shape == (256, nb_channels):\n table = table[np.newaxis, :, :]\n\n assert table.shape == (256,) or table.shape == (1, 256, nb_channels), (\n \"Expected 'table' to be any of the following: \"\n \"A list of C (256,) arrays, an array of shape (256,), an array of \"\n \"shape (256, C), an array of shape (1, 256, C). Transformed 'table' \"\n \"up to shape %s for image with shape %s (C=%d).\" % (\n table.shape, image_shape_orig, nb_channels))\n\n if nb_channels > 512:\n if table.shape == (256,):\n table = np.tile(table[np.newaxis, :, np.newaxis],\n (1, 1, nb_channels))\n\n subluts = []\n for group_idx in np.arange(int(np.ceil(nb_channels / 512))):\n c_start = group_idx * 512\n c_end = c_start + 512\n subluts.append(apply_lut_(image[:, :, c_start:c_end],\n table[:, :, c_start:c_end]))\n\n return np.concatenate(subluts, axis=2)\n\n assert image.dtype.name == \"uint8\", (\n \"Expected uint8 image, got dtype %s.\" % (image.dtype.name,))\n assert table.dtype.name == \"uint8\", (\n \"Expected uint8 table, got dtype %s.\" % (table.dtype.name,))\n\n image = cv2.LUT(image, table, dst=image)\n return image\n\n\nclass HooksImages(object):\n \"\"\"Class to intervene with image augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n Parameters\n ----------\n activator : None or callable, optional\n A function that gives permission to execute an augmenter.\n The expected interface is::\n\n ``f(images, augmenter, parents, default)``\n\n where ``images`` are the input images to augment, ``augmenter`` is the\n instance of the augmenter to execute, ``parents`` are previously\n executed augmenters and ``default`` is an expected default value to be\n returned if the activator function does not plan to make a decision\n for the given inputs.\n\n propagator : None or callable, optional\n A function that gives permission to propagate the augmentation further\n to the children of an augmenter. This happens after the activator.\n In theory, an augmenter may augment images itself (if allowed by the\n activator) and then execute child augmenters afterwards (if allowed by\n the propagator). 
If the activator returned ``False``, the propagation\n step will never be executed.\n The expected interface is::\n\n ``f(images, augmenter, parents, default)``\n\n with all arguments having identical meaning to the activator.\n\n preprocessor : None or callable, optional\n A function to call before an augmenter performed any augmentations.\n The interface is:\n\n ``f(images, augmenter, parents)``\n\n with all arguments having identical meaning to the activator.\n It is expected to return the input images, optionally modified.\n\n postprocessor : None or callable, optional\n A function to call after an augmenter performed augmentations.\n The interface is the same as for the `preprocessor`.\n\n Examples\n --------\n >>> import numpy as np\n >>> import imgaug as ia\n >>> import imgaug.augmenters as iaa\n >>> seq = iaa.Sequential([\n >>> iaa.GaussianBlur(3.0, name=\"blur\"),\n >>> iaa.Dropout(0.05, name=\"dropout\"),\n >>> iaa.Affine(translate_px=-5, name=\"affine\")\n >>> ])\n >>> images = [np.zeros((10, 10), dtype=np.uint8)]\n >>>\n >>> def activator(images, augmenter, parents, default):\n >>> return False if augmenter.name in [\"blur\", \"dropout\"] else default\n >>>\n >>> seq_det = seq.to_deterministic()\n >>> images_aug = seq_det.augment_images(images)\n >>> heatmaps = [np.random.rand(*(3, 10, 10))]\n >>> heatmaps_aug = seq_det.augment_images(\n >>> heatmaps,\n >>> hooks=ia.HooksImages(activator=activator)\n >>> )\n\n This augments images and their respective heatmaps in the same way.\n The heatmaps however are only modified by ``Affine``, not by\n ``GaussianBlur`` or ``Dropout``.\n\n \"\"\"\n\n def __init__(self, activator=None, propagator=None, preprocessor=None,\n postprocessor=None):\n self.activator = activator\n self.propagator = propagator\n self.preprocessor = preprocessor\n self.postprocessor = postprocessor\n\n def is_activated(self, images, augmenter, parents, default):\n \"\"\"Estimate whether an augmenter may be executed.\n\n This also affects propagation of data to child augmenters.\n\n Returns\n -------\n bool\n If ``True``, the augmenter may be executed.\n Otherwise ``False``.\n\n \"\"\"\n if self.activator is None:\n return default\n return self.activator(images, augmenter, parents, default)\n\n def is_propagating(self, images, augmenter, parents, default):\n \"\"\"Estimate whether an augmenter may call its children.\n\n This function decides whether an augmenter with children is allowed\n to call these in order to further augment the inputs.\n Note that if the augmenter itself performs augmentations (before/after\n calling its children), these may still be executed, even if this\n method returns ``False``.\n\n Returns\n -------\n bool\n If ``True``, the augmenter may propagate data to its children.\n Otherwise ``False``.\n\n \"\"\"\n if self.propagator is None:\n return default\n return self.propagator(images, augmenter, parents, default)\n\n def preprocess(self, images, augmenter, parents):\n \"\"\"Preprocess input data per augmenter before augmentation.\n\n Returns\n -------\n (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray\n The input images, optionally modified.\n\n \"\"\"\n if self.preprocessor is None:\n return images\n return self.preprocessor(images, augmenter, parents)\n\n def postprocess(self, images, augmenter, parents):\n \"\"\"Postprocess input data per augmenter after augmentation.\n\n Returns\n -------\n (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray\n The input images, 
optionally modified.\n\n \"\"\"\n if self.postprocessor is None:\n return images\n return self.postprocessor(images, augmenter, parents)\n\n\nclass HooksHeatmaps(HooksImages):\n \"\"\"Class to intervene with heatmap augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n This class is currently the same as the one for images. This may or may\n not change in the future.\n\n \"\"\"\n\n\nclass HooksKeypoints(HooksImages):\n \"\"\"Class to intervene with keypoint augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n This class is currently the same as the one for images. This may or may\n not change in the future.\n\n \"\"\"\n\n\n#####################################################################\n# Create classes/functions that were moved to other files and create\n# DeprecatedWarnings when they are called.\n#####################################################################\n\ndef _mark_moved_class_or_function(class_name_old, module_name_new,\n class_name_new):\n # pylint: disable=redefined-outer-name\n class_name_new = (class_name_new\n if class_name_new is not None\n else class_name_old)\n\n def _func(*args, **kwargs):\n import importlib\n warn_deprecated(\n \"Using imgaug.imgaug.%s is deprecated. Use %s.%s instead.\" % (\n class_name_old, module_name_new, class_name_new\n ))\n module = importlib.import_module(module_name_new)\n return getattr(module, class_name_new)(*args, **kwargs)\n\n return _func\n\n\nMOVED = [\n (\"Keypoint\", \"imgaug.augmentables.kps\", None),\n (\"KeypointsOnImage\", \"imgaug.augmentables.kps\", None),\n (\"BoundingBox\", \"imgaug.augmentables.bbs\", None),\n (\"BoundingBoxesOnImage\", \"imgaug.augmentables.bbs\", None),\n (\"Polygon\", \"imgaug.augmentables.polys\", None),\n (\"PolygonsOnImage\", \"imgaug.augmentables.polys\", None),\n (\"MultiPolygon\", \"imgaug.augmentables.polys\", None),\n (\"_ConcavePolygonRecoverer\", \"imgaug.augmentables.polys\", None),\n (\"HeatmapsOnImage\", \"imgaug.augmentables.heatmaps\", None),\n (\"SegmentationMapsOnImage\", \"imgaug.augmentables.segmaps\", None),\n (\"Batch\", \"imgaug.augmentables.batches\", None),\n (\"BatchLoader\", \"imgaug.multicore\", None),\n (\"BackgroundAugmenter\", \"imgaug.multicore\", None),\n (\"compute_geometric_median\", \"imgaug.augmentables.kps\", None),\n (\"_convert_points_to_shapely_line_string\", \"imgaug.augmentables.polys\",\n None),\n (\"_interpolate_point_pair\", \"imgaug.augmentables.polys\", None),\n (\"_interpolate_points\", \"imgaug.augmentables.polys\", None),\n (\"_interpolate_points_by_max_distance\", \"imgaug.augmentables.polys\", None),\n (\"pad\", \"imgaug.augmenters.size\", None),\n (\"pad_to_aspect_ratio\", \"imgaug.augmenters.size\", None),\n (\"pad_to_multiples_of\", \"imgaug.augmenters.size\", None),\n (\"compute_paddings_for_aspect_ratio\", \"imgaug.augmenters.size\",\n \"compute_paddings_to_reach_aspect_ratio\"),\n (\"compute_paddings_to_reach_multiples_of\", \"imgaug.augmenters.size\", None),\n (\"compute_paddings_to_reach_exponents_of\", \"imgaug.augmenters.size\", None)\n]\n\nfor class_name_old, module_name_new, class_name_new in MOVED:\n locals()[class_name_old] = _mark_moved_class_or_function(\n class_name_old, module_name_new, class_name_new)\n"
] | [
[
"numpy.log",
"numpy.pad",
"numpy.clip",
"numpy.arange",
"numpy.stack",
"numpy.float128",
"numpy.round",
"numpy.ceil",
"numpy.copy",
"numpy.full",
"numpy.concatenate",
"numpy.atleast_3d",
"numpy.floor",
"numpy.array"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.ascontiguousarray",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.concatenate",
"numpy.round",
"numpy.copy",
"numpy.tile",
"numpy.ceil",
"numpy.isscalar",
"numpy.float32",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cuicaihao/Data_Science_Python | [
"ca4cb64bf9afc1011c192586362d0dd036e9441e"
] | [
"10.Algorithms_Data_Structure/Searching_n_Sorting/QuickSort.py"
] | [
"import numpy as np\n\n\ndef partition(arr, low, high):\n i = (low-1) # index of smaller element\n pivot = arr[high] # pivot\n\n for j in range(low, high):\n\n # If current element is smaller than the pivot\n if arr[j] < pivot:\n\n # increment index of smaller element\n i = i+1\n arr[i], arr[j] = arr[j], arr[i]\n\n arr[i+1], arr[high] = arr[high], arr[i+1]\n return (i + 1)\n\n\ndef quickSort(arr, low, high):\n if low < high:\n\n # pi is partitioning index, arr[p] is now\n # at right place\n pi = partition(arr, low, high)\n\n # Separately sort elements before\n # partition and after partition\n quickSort(arr, low, pi-1)\n quickSort(arr, pi + 1, high)\n\n # Driver code to test above\n# arr = [10, 7, 8, 9, 1, 5]\narr = np.random.randint(0, 1000000, 200000)\nn = len(arr)\nquickSort(arr, 0, n-1)\n# print(f\"Sorted array is: {arr}\")\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mvdoc/himalaya | [
"7e3866287b835e2cc0a5c9848331e19c14896309"
] | [
"himalaya/kernel_ridge/tests/test_random_search_kernel.py"
] | [
"import pytest\n\nimport numpy as np\nimport sklearn.linear_model\nimport sklearn.model_selection\nimport scipy.linalg\n\nfrom himalaya.backend import set_backend\nfrom himalaya.backend import ALL_BACKENDS\nfrom himalaya.utils import assert_array_almost_equal\nfrom himalaya.scoring import r2_score\n\nfrom himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search\n\n\ndef _create_dataset(backend, n_targets=4):\n n_featuress = (100, 200)\n n_samples = 80\n n_gammas = 3\n\n Xs = [\n backend.asarray(backend.randn(n_samples, n_features), backend.float64)\n for n_features in n_featuress\n ]\n Ks = backend.stack([X @ X.T for X in Xs])\n\n ws = [\n backend.asarray(backend.randn(n_features, n_targets), backend.float64)\n for n_features in n_featuress\n ]\n Ys = backend.stack([X @ w for X, w in zip(Xs, ws)])\n Y = Ys.sum(0)\n\n gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]),\n backend.float64)\n gammas /= gammas.sum(1)[:, None]\n\n return Ks, Y, gammas, Xs\n\n\[email protected]('local_alpha', [True, False])\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_local_alphah(\n backend, local_alpha):\n _test_solve_multiple_kernel_ridge_random_search(backend=backend,\n local_alpha=local_alpha)\n\n\[email protected]('n_targets_batch', [None, 3])\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_n_targets_batch(\n backend, n_targets_batch):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, n_targets_batch=n_targets_batch)\n\n\[email protected]('n_alphas_batch', [None, 2])\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_n_alphas_batch(\n backend, n_alphas_batch):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, n_alphas_batch=n_alphas_batch)\n\n\[email protected]('return_weights', ['primal', 'dual'])\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_return_weights(\n backend, return_weights):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, return_weights=return_weights)\n\n\[email protected]('diagonalize_method', ['eigh', 'svd'])\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_diagonalize_method(\n backend, diagonalize_method):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, diagonalize_method=diagonalize_method)\n\n\ndef _test_solve_multiple_kernel_ridge_random_search(\n backend, n_targets_batch=None, n_alphas_batch=None,\n return_weights=\"dual\", diagonalize_method=\"eigh\", local_alpha=True):\n backend = set_backend(backend)\n\n Ks, Y, gammas, Xs = _create_dataset(backend)\n alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)\n n_targets = Y.shape[1]\n cv = sklearn.model_selection.check_cv(10)\n\n ############\n # run solver\n results = solve_multiple_kernel_ridge_random_search(\n Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv,\n n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False,\n return_weights=return_weights, n_alphas_batch=n_alphas_batch,\n diagonalize_method=diagonalize_method, local_alpha=local_alpha)\n best_deltas, refit_weights, cv_scores = results\n\n #########################################\n # compare with sklearn.linear_model.Ridge\n if local_alpha: # only compare when each target optimizes alpha\n test_scores = []\n for gamma in backend.sqrt(gammas):\n X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1)\n for 
train, test in cv.split(X):\n for alpha in alphas:\n model = sklearn.linear_model.Ridge(\n alpha=backend.to_numpy(alpha), fit_intercept=False)\n model = model.fit(backend.to_numpy(X[train]),\n backend.to_numpy(Y[train]))\n predictions = backend.asarray_like(\n model.predict(backend.to_numpy(X[test])), Y)\n test_scores.append(r2_score(Y[test], predictions))\n\n test_scores = backend.stack(test_scores)\n test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(),\n len(alphas), n_targets)\n test_scores_mean = backend.max(test_scores.mean(1), 1)\n assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5)\n\n ######################\n # test refited_weights\n for tt in range(n_targets):\n gamma = backend.exp(best_deltas[:, tt])\n alpha = 1.0\n\n if return_weights == 'primal':\n # compare primal weights with sklearn.linear_model.Ridge\n X = backend.concatenate(\n [X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1)\n model = sklearn.linear_model.Ridge(fit_intercept=False,\n alpha=backend.to_numpy(alpha))\n w1 = model.fit(backend.to_numpy(X),\n backend.to_numpy(Y[:, tt])).coef_\n w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0)\n w1 = [backend.asarray(w) for w in w1]\n w1_scaled = backend.concatenate(\n [w * backend.sqrt(g) for w, g, in zip(w1, gamma)])\n assert_array_almost_equal(w1_scaled, refit_weights[:, tt],\n decimal=5)\n\n elif return_weights == 'dual':\n # compare dual weights with scipy.linalg.solve\n Ks_64 = backend.asarray(Ks, dtype=backend.float64)\n gamma_64 = backend.asarray(gamma, dtype=backend.float64)\n K = backend.matmul(Ks_64.T, gamma_64).T\n reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha\n Y_64 = backend.asarray(Y, dtype=backend.float64)\n c1 = scipy.linalg.solve(backend.to_numpy(K + reg),\n backend.to_numpy(Y_64[:, tt]))\n c1 = backend.asarray_like(c1, K)\n assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5)\n\n\[email protected]('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend):\n backend = set_backend(backend)\n # just a smoke test, so make it minimal\n Ks, Y, gammas, Xs = _create_dataset(backend)\n alphas = 1.0\n # make Y a numpy array\n Y = backend.to_numpy(Y)\n results = solve_multiple_kernel_ridge_random_search(\n Ks, Y, n_iter=gammas, alphas=alphas\n )\n\n\[email protected]('backend', ALL_BACKENDS)\[email protected]('n_kernels', [1, 2])\ndef test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels):\n backend = set_backend(backend)\n # add more targets to make sure we get some variability\n Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20)\n alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)\n cv = sklearn.model_selection.check_cv(5)\n\n deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search(\n Ks[:n_kernels],\n Y,\n n_iter=50,\n progress_bar=False,\n alphas=alphas,\n cv=cv,\n local_alpha=False,\n return_alphas=True\n )\n # test that we return a single combination of deltas\n deltas = backend.to_numpy(deltas)\n if deltas.ndim == 1:\n assert np.allclose(deltas[0], deltas)\n else:\n for dd in deltas:\n assert np.allclose(dd[0], dd)\n\n # test that we return a single alpha\n best_alphas = backend.to_numpy(best_alphas)\n assert np.allclose(best_alphas[0], best_alphas)"
] | [
[
"numpy.cumsum",
"numpy.eye",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andriyor/moviepy | [
"8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a"
] | [
"moviepy/video/tools/drawing.py"
] | [
"\"\"\"Deals with making images (np arrays). It provides drawing\nmethods that are difficult to do with the existing Python libraries.\n\"\"\"\n\nimport numpy as np\n\n\ndef blit(im1, im2, pos=None, mask=None):\n \"\"\"Blit an image over another.\n\n Blits ``im1`` on ``im2`` as position ``pos=(x,y)``, using the\n ``mask`` if provided.\n \"\"\"\n if pos is None:\n pos = (0, 0) # pragma: no cover\n else:\n # Cast to tuple in case pos is not subscriptable.\n pos = tuple(pos)\n im2.paste(im1, pos, mask)\n return im2\n\n\ndef color_gradient(\n size,\n p1,\n p2=None,\n vector=None,\n radius=None,\n color_1=0.0,\n color_2=1.0,\n shape=\"linear\",\n offset=0,\n):\n \"\"\"Draw a linear, bilinear, or radial gradient.\n\n The result is a picture of size ``size``, whose color varies\n gradually from color `color_1` in position ``p1`` to color ``color_2``\n in position ``p2``.\n\n If it is a RGB picture the result must be transformed into\n a 'uint8' array to be displayed normally:\n\n Parameters\n ----------\n\n size : tuple or list\n Size (width, height) in pixels of the final image array.\n\n p1 : tuple or list\n Position for the first coordinate of the gradient in pixels (x, y).\n The color 'before' ``p1`` is ``color_1`` and it gradually changes in\n the direction of ``p2`` until it is ``color_2`` when it reaches ``p2``.\n\n p2 : tuple or list, optional\n Position for the second coordinate of the gradient in pixels (x, y).\n Coordinates (x, y) of the limit point for ``color_1``\n and ``color_2``.\n\n vector : tuple or list, optional\n A vector (x, y) in pixels that can be provided instead of ``p2``.\n ``p2`` is then defined as (p1 + vector).\n\n color_1 : tuple or list, optional\n Starting color for the gradient. As default, black. Either floats\n between 0 and 1 (for gradients used in masks) or [R, G, B] arrays\n (for colored gradients).\n\n color_2 : tuple or list, optional\n Color for the second point in the gradient. As default, white. Either\n floats between 0 and 1 (for gradients used in masks) or [R, G, B]\n arrays (for colored gradients).\n\n shape : str, optional\n Shape of the gradient. Can be either ``\"linear\"``, ``\"bilinear\"`` or\n ``\"circular\"``. In a linear gradient the color varies in one direction,\n from point ``p1`` to point ``p2``. In a bilinear gradient it also\n varies symmetrically from ``p1`` in the other direction. In a circular\n gradient it goes from ``color_1`` to ``color_2`` in all directions.\n\n radius : float, optional\n If ``shape=\"radial\"``, the radius of the gradient is defined with the\n parameter ``radius``, in pixels.\n\n offset : float, optional\n Real number between 0 and 1 indicating the fraction of the vector\n at which the gradient actually starts. For instance if ``offset``\n is 0.9 in a gradient going from p1 to p2, then the gradient will\n only occur near p2 (before that everything is of color ``color_1``)\n If the offset is 0.9 in a radial gradient, the gradient will\n occur in the region located between 90% and 100% of the radius,\n this creates a blurry disc of radius ``d(p1, p2)``.\n\n Returns\n -------\n\n image\n An Numpy array of dimensions (width, height, n_colors) of type float\n representing the image of the gradient.\n\n Examples\n --------\n\n >>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black\n [[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]\n >>>\n >>> color_gradient( # from red to green\n ... (10, 1), # size\n ... (0, 0), # p1\n ... p2=(10, 0),\n ... color_1=(255, 0, 0), # red\n ... color_2=(0, 255, 0), # green\n ... 
)\n [[[ 0. 255. 0. ]\n [ 25.5 229.5 0. ]\n [ 51. 204. 0. ]\n [ 76.5 178.5 0. ]\n [102. 153. 0. ]\n [127.5 127.5 0. ]\n [153. 102. 0. ]\n [178.5 76.5 0. ]\n [204. 51. 0. ]\n [229.5 25.5 0. ]]]\n \"\"\"\n # np-arrayize and change x,y coordinates to y,x\n w, h = size\n\n color_1 = np.array(color_1).astype(float)\n color_2 = np.array(color_2).astype(float)\n\n if shape == \"bilinear\":\n if vector is None:\n if p2 is None:\n raise ValueError(\"You must provide either 'p2' or 'vector'\")\n vector = np.array(p2) - np.array(p1)\n\n m1, m2 = [\n color_gradient(\n size,\n p1,\n vector=v,\n color_1=1.0,\n color_2=0.0,\n shape=\"linear\",\n offset=offset,\n )\n for v in [vector, [-v for v in vector]]\n ]\n\n arr = np.maximum(m1, m2)\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return arr * color_1 + (1 - arr) * color_2\n\n p1 = np.array(p1[::-1]).astype(float)\n\n M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)\n\n if shape == \"linear\":\n if vector is None:\n if p2 is not None:\n vector = np.array(p2[::-1]) - p1\n else:\n raise ValueError(\"You must provide either 'p2' or 'vector'\")\n else:\n vector = np.array(vector[::-1])\n\n norm = np.linalg.norm(vector)\n n_vec = vector / norm ** 2 # norm 1/norm(vector)\n\n p1 = p1 + offset * vector\n arr = (M - p1).dot(n_vec) / (1 - offset)\n arr = np.minimum(1, np.maximum(0, arr))\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return arr * color_1 + (1 - arr) * color_2\n\n elif shape == \"radial\":\n if (radius or 0) == 0:\n arr = np.ones((h, w))\n else:\n arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius\n arr = arr / ((1 - offset) * radius)\n arr = np.minimum(1.0, np.maximum(0, arr))\n\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return (1 - arr) * color_1 + arr * color_2\n raise ValueError(\"Invalid shape, should be either 'radial', 'linear' or 'bilinear'\")\n\n\ndef color_split(\n size,\n x=None,\n y=None,\n p1=None,\n p2=None,\n vector=None,\n color_1=0,\n color_2=1.0,\n gradient_width=0,\n):\n \"\"\"Make an image split in 2 colored regions.\n\n Returns an array of size ``size`` divided in two regions called 1 and\n 2 in what follows, and which will have colors color_1 and color_2\n respectively.\n\n Parameters\n ----------\n\n x : int, optional\n If provided, the image is split horizontally in x, the left\n region being region 1.\n\n y : int, optional\n If provided, the image is split vertically in y, the top region\n being region 1.\n\n p1, p2: tuple or list, optional\n Positions (x1, y1), (x2, y2) in pixels, where the numbers can be\n floats. Region 1 is defined as the whole region on the left when\n going from ``p1`` to ``p2``.\n\n p1, vector: tuple or list, optional\n ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be\n floats. Region 1 is then the region on the left when starting\n in position ``p1`` and going in the direction given by ``vector``.\n\n gradient_width : float, optional\n If not zero, the split is not sharp, but gradual over a region of\n width ``gradient_width`` (in pixels). 
This is preferable in many\n situations (for instance for antialiasing).\n\n Examples\n --------\n\n >>> size = [200, 200]\n >>>\n >>> # an image with all pixels with x<50 =0, the others =1\n >>> color_split(size, x=50, color_1=0, color_2=1)\n >>>\n >>> # an image with all pixels with y<50 red, the others green\n >>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])\n >>>\n >>> # An image split along an arbitrary line (see below)\n >>> color_split(size, p1=[20, 50], p2=[25, 70] color_1=0, color_2=1)\n \"\"\"\n if gradient_width or ((x is None) and (y is None)):\n if p2 is not None:\n vector = np.array(p2) - np.array(p1)\n elif x is not None:\n vector = np.array([0, -1.0])\n p1 = np.array([x, 0])\n elif y is not None:\n vector = np.array([1.0, 0.0])\n p1 = np.array([0, y])\n\n x, y = vector\n vector = np.array([y, -x]).astype(\"float\")\n norm = np.linalg.norm(vector)\n vector = max(0.1, gradient_width) * vector / norm\n return color_gradient(\n size, p1, vector=vector, color_1=color_1, color_2=color_2, shape=\"linear\"\n )\n else:\n w, h = size\n shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))\n arr = np.zeros(shape)\n if x:\n arr[:, :x] = color_1\n arr[:, x:] = color_2\n elif y:\n arr[:y] = color_1\n arr[y:] = color_2\n return arr\n\n\ndef circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):\n \"\"\"Draw an image with a circle.\n\n Draws a circle of color ``color``, on a background of color ``bg_color``,\n on a screen of size ``screensize`` at the position ``center=(x, y)``,\n with a radius ``radius`` but slightly blurred on the border by ``blur``\n pixels.\n\n Parameters\n ----------\n\n screensize : tuple or list\n Size of the canvas.\n\n center : tuple or list\n Center of the circle.\n\n radius : float\n Radius of the circle, in pixels.\n\n bg_color : tuple or float, optional\n Color for the background of the canvas. As default, black.\n\n blur : float, optional\n Blur for the border of the circle.\n\n Examples\n --------\n\n >>> from moviepy.video.tools.drawing import circle\n >>>\n >>> circle(\n ... (5, 5), # size\n ... (2, 2), # center\n ... 2, # radius\n ... )\n array([[0. , 0. , 0. , 0. , 0. ],\n [0. , 0.58578644, 1. , 0.58578644, 0. ],\n [0. , 1. , 1. , 1. , 0. ],\n [0. , 0.58578644, 1. , 0.58578644, 0. ],\n [0. , 0. , 0. , 0. , 0. ]])\n \"\"\"\n offset = 1.0 * (radius - blur) / radius if radius else 0\n return color_gradient(\n screensize,\n p1=center,\n radius=radius,\n color_1=color,\n color_2=bg_color,\n shape=\"radial\",\n offset=offset,\n )\n"
] | [
[
"numpy.maximum",
"numpy.linalg.norm",
"numpy.dstack",
"numpy.ones",
"numpy.isscalar",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hsfzxjy/svdnet-pytorch | [
"8f485d0b162c23b20449f7ee80c955e0b20950ae"
] | [
"train_svdnet_xent.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nimport numpy as np\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.losses import CrossEntropyLoss, DeepSupervision\nfrom torchreid.utils.iotools import check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger, RankLogger\nfrom torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \\\n load_pretrained_weights, save_checkpoint, resume_from_checkpoint\nfrom torchreid.utils.reidtools import visualize_ranked_results\nfrom torchreid.utils.generaltools import set_random_seed\nfrom torchreid.eval_metrics import evaluate\nfrom torchreid.optimizers import init_optimizer\nfrom torchreid.lr_schedulers import init_lr_scheduler\n\n\nos.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))\n\ntestloader_dict = trainloader = criterion = None\nuse_gpu = False\n\n# global variables\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef corr_metric(W: 'K x N'):\n\n G = W.permute(1, 0) @ W\n return torch.trace(G) / abs(G).sum()\n\n\ndef replace_weight(layer):\n\n with torch.no_grad():\n # NECESSARY! The weight of Linear layer has been transposed!\n A = layer.weight.t()\n M, N = A.size()\n M: 2048\n N: 1024\n U, S, V = torch.svd(A, some=False)\n W = A @ V\n W: '2048 x 1024 = M x N'\n\n NW = torch.zeros_like(A)\n\n for i in range(N):\n\n curr_N = W.size(1)\n\n W_norm = torch.norm(W, p=2, dim=0)\n W_norm: 'curr_N'\n\n index = i\n vec_i = A[:, i]\n vec_i_norm = torch.norm(vec_i)\n\n co = (A[:, i].view(M, 1).t() @ W).view(curr_N)\n co: 'curr_N'\n co = co / vec_i_norm\n absco = abs(co / W_norm)\n maxco_index = torch.max(absco, 0)[1].item()\n\n NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])\n\n # Remove selected column vector from W\n W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]\n\n layer.weight.copy_(NW.t())\n print(layer.weight)\n\n return layer\n\n\ndef main():\n global args, criterion, testloader_dict, trainloader, use_gpu\n\n set_random_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'test.log' if args.evaluate else 'train.log'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n else:\n warnings.warn('Currently using CPU, however, GPU is highly recommended')\n\n print('Initializing image data manager')\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, testloader_dict = dm.return_dataloaders()\n\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n\n if args.load_weights and check_isfile(args.load_weights):\n load_pretrained_weights(model, args.load_weights)\n\n model = nn.DataParallel(model).cuda() if use_gpu else 
model\n\n criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)\n\n if args.resume and check_isfile(args.resume):\n args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)\n resumed = True\n else:\n resumed = False\n\n if args.evaluate:\n print('Evaluate only')\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)\n\n if args.visualize_ranks:\n visualize_ranked_results(\n distmat, dm.return_testdataset_by_name(name),\n save_dir=osp.join(args.save_dir, 'ranked_results', name),\n topk=20\n )\n return\n\n time_start = time.time()\n # ranklogger = RankLogger(args.source_names, args.target_names)\n print('=> Start training')\n\n if not resumed:\n train_base(model)\n train_RRI(model, 7)\n\n elapsed = round(time.time() - time_start)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print('Elapsed {}'.format(elapsed))\n # ranklogger.show_summary()\n\n\ndef train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):\n losses = AverageMeter()\n accs = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n model.train()\n\n # if fixbase or args.always_fixbase:\n # open_specified_layers(model, args.open_layers)\n # else:\n # open_all_layers(model)\n\n end = time.time()\n for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):\n data_time.update(time.time() - end)\n\n if use_gpu:\n imgs, pids = imgs.cuda(), pids.cuda()\n\n outputs = model(imgs)\n loss = sum(criterion(x, pids) for x in outputs) / len(outputs)\n # if isinstance(outputs, (tuple, list)):\n # loss = DeepSupervision(criterion, outputs, pids)\n # else:\n # loss = criterion(outputs, pids)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n\n losses.update(loss.item(), pids.size(0))\n accs.update(accuracy(outputs, pids)[0])\n\n if (batch_idx + 1) % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {acc.val:.2f} ({acc.avg:.2f})\\t'.format(\n epoch + 1, batch_idx + 1, len(trainloader),\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n acc=accs\n ))\n\n end = time.time()\n\n\ndef test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):\n batch_time = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):\n if use_gpu:\n imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))\n\n gf, g_pids, g_camids = [], [], []\n end = time.time()\n for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):\n if use_gpu:\n imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n 
g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))\n\n print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))\n\n m, n = qf.size(0), gf.size(0)\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n distmat = distmat.numpy()\n\n print('Computing CMC and mAP')\n cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)\n\n print('Results ----------')\n print('mAP: {:.1%}'.format(mAP))\n print('CMC curve')\n for r in ranks:\n print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))\n print('------------------')\n\n if return_distmat:\n return distmat\n return cmc[0]\n\n\ndef get_base_optimizer(model):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': 0.0003,\n 'betas': (0.9, 0.999),\n }\n param_groups = model.parameters()\n\n optimizer = torch.optim.Adam(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef get_base_sgd_optimizer(model):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': 0.001,\n 'momentum': 0.9,\n }\n\n param_groups = model.parameters()\n\n optimizer = torch.optim.SGD(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef get_RRI_optimizer(\n model,\n lr\n):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': lr,\n 'momentum': 0.9,\n }\n param_groups = model.parameters()\n\n optimizer = torch.optim.SGD(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef train_R(model, lr, T, fix_eigen_layer: bool=False):\n\n eigen_layers = model.module.get_fcs()\n\n if fix_eigen_layer:\n for eigen_layer in eigen_layers:\n eigen_layer.eval()\n for p in eigen_layer.parameters():\n p.requires_grad = False\n\n stage_name = 'restraint'\n else:\n model.train()\n for p in model.parameters():\n p.requires_grad = True\n\n stage_name = 'relaxation'\n\n prefix = '{}_{}_'.format(T, stage_name)\n\n optimizer, scheduler = get_RRI_optimizer(model, lr)\n\n for epoch in range(20):\n train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)\n\n scheduler.step()\n\n print('=> Test')\n\n if (epoch + 1) % args.eval_freq == 0:\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': (),\n }, args.save_dir, prefix=prefix)\n\n\ndef train_base(model):\n\n use_sgd = os.environ.get('sgd') is not None\n\n optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer\n\n optimizer, scheduler = get_base_optimizer(model)\n\n model.train()\n print('=== train base ===')\n\n if True:\n open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']\n\n print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))\n\n for epoch in range(10):\n\n open_specified_layers(model, open_layers)\n train(epoch, model, criterion, optimizer, 
trainloader, use_gpu, fixbase=True)\n\n print('Done. All layers are open to train for {} epochs'.format(60))\n open_all_layers(model)\n\n optimizer, scheduler = optimizer_getter(model)\n\n for epoch in range(60):\n train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)\n scheduler.step()\n\n print('=> Test')\n\n if (epoch + 1) % args.eval_freq == 0:\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': optimizer.state_dict(),\n }, args.save_dir, prefix='base_')\n\n\ndef train_RRI(model, Ts: int=7):\n\n base_lrs = [0.001] * 3 + [0.0001] * 10\n\n for T in range(Ts):\n print('=== T = {} ==='.format(T))\n print('Replacing eigen layer weight...')\n for eigen_layer in model.module.get_fcs():\n replace_weight(eigen_layer)\n print('Replaced.')\n print('--- Restraint ({}) ---'.format(T))\n train_R(model, base_lrs[T], T, fix_eigen_layer=True)\n print('--- Relaxation ({}) ---'.format(T))\n train_R(model, base_lrs[T], T, fix_eigen_layer=False)\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': (),\n }, args.save_dir, prefix='final_')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.optim.Adam",
"torch.svd",
"torch.norm",
"torch.max",
"torch.cat",
"numpy.asarray",
"torch.sign",
"torch.zeros_like",
"torch.pow",
"torch.no_grad",
"torch.cuda.is_available",
"torch.optim.SGD",
"torch.nn.DataParallel",
"torch.trace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
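The `test()` routine in the code record above builds its query-gallery distance matrix with the identity ||q - g||^2 = ||q||^2 + ||g||^2 - 2 q·g (the expand/addmm_ lines). A minimal plain-numpy sketch of the same trick, with illustrative shapes and names that are not taken from the repo:

import numpy as np

def pairwise_sq_dists(qf, gf):
    # qf: (m, d) query features, gf: (n, d) gallery features
    q_sq = np.sum(qf ** 2, axis=1, keepdims=True)    # (m, 1)
    g_sq = np.sum(gf ** 2, axis=1, keepdims=True).T  # (1, n)
    return q_sq + g_sq - 2.0 * qf @ gf.T             # (m, n) squared distances

qf = np.random.rand(4, 8)
gf = np.random.rand(6, 8)
d = pairwise_sq_dists(qf, gf)
assert np.allclose(d[0, 0], np.sum((qf[0] - gf[0]) ** 2))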
kerel-fs/poliastro | [
"1ad2074aebb7cf18f507ac44931d1e18fec53dad"
] | [
"src/poliastro/core/perturbations.py"
] | [
"import numpy as np\nfrom numpy.linalg import norm\n\nfrom ._jit import jit\n\n\n@jit\ndef J2_perturbation(t0, state, k, J2, R):\n r\"\"\"Calculates J2_perturbation acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = \\frac{3}{2}\\frac{J_{2}\\mu R^{2}}{r^{4}}\\left [\\frac{x}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-1 \\right )\\vec{i} + \\frac{y}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-1 \\right )\\vec{j} + \\frac{z}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-3 \\right )\\vec{k}\\right]\n\n .. versionadded:: 0.9.0\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n J2: float\n oblateness factor\n R: float\n attractor radius\n\n Note\n ----\n The J2 accounts for the oblateness of the attractor. The formula is given in\n Howard Curtis, (12.30)\n\n \"\"\"\n r_vec = state[:3]\n r = norm(r_vec)\n\n factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)\n\n a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1\n a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1\n a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3\n return np.array([a_x, a_y, a_z]) * r_vec * factor\n\n\n@jit\ndef J3_perturbation(t0, state, k, J3, R):\n r\"\"\"Calculates J3_perturbation acceleration (km/s2)\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n J3: float\n oblateness factor\n R: float\n attractor radius\n\n Note\n ----\n The J3 accounts for the oblateness of the attractor. The formula is given in\n Howard Curtis, problem 12.8\n This perturbation has not been fully validated, see https://github.com/poliastro/poliastro/pull/398\n\n \"\"\"\n r_vec = state[:3]\n r = norm(r_vec)\n\n factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5)\n cos_phi = r_vec[2] / r\n\n a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)\n a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)\n a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1)\n return np.array([a_x, a_y, a_z]) * factor\n\n\n@jit\ndef atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0):\n r\"\"\"Calculates atmospheric drag acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = -\\frac{1}{2}\\rho v_{rel}\\left ( \\frac{C_{d}A}{m} \\right )\\vec{v_{rel}}\n\n\n .. versionadded:: 0.9.0\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n R : float\n radius of the attractor (km)\n C_D: float\n dimensionless drag coefficient ()\n A: float\n frontal area of the spacecraft (km^2)\n m: float\n mass of the spacecraft (kg)\n H0 : float\n atmospheric scale height, (km)\n rho0: float\n the exponent density pre-factor, (kg / m^3)\n\n Note\n ----\n This function provides the acceleration due to atmospheric drag. 
We follow\n Howard Curtis, section 12.4\n the atmospheric density model is rho(H) = rho0 x exp(-H / H0)\n\n \"\"\"\n H = norm(state[:3])\n\n v_vec = state[3:]\n v = norm(v_vec)\n B = C_D * A / m\n rho = rho0 * np.exp(-(H - R) / H0)\n\n return -(1.0 / 2.0) * rho * B * v * v_vec\n\n\n@jit\ndef shadow_function(r_sat, r_sun, R):\n r\"\"\"Determines whether the satellite is in attractor's shadow, uses algorithm 12.3 from Howard Curtis\n\n Parameters\n ----------\n r_sat : numpy.ndarray\n position of the satellite in the frame of attractor (km)\n r_sun : numpy.ndarray\n position of star in the frame of attractor (km)\n R : float\n radius of body (attractor) that creates shadow (km)\n\n \"\"\"\n\n r_sat_norm = np.sqrt(np.sum(r_sat ** 2))\n r_sun_norm = np.sqrt(np.sum(r_sun ** 2))\n\n theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm)\n theta_1 = np.arccos(R / r_sat_norm)\n theta_2 = np.arccos(R / r_sun_norm)\n\n return theta < theta_1 + theta_2\n\n\ndef third_body(t0, state, k, k_third, third_body):\n r\"\"\"Calculates 3rd body acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = \\mu_{m}\\left ( \\frac{\\vec{r_{m/s}}}{r_{m/s}^3} - \\frac{\\vec{r_{m}}}{r_{m}^3} \\right )\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n third_body: a callable object returning the position of 3rd body\n third body that causes the perturbation\n\n Note\n ----\n This formula is taken from Howard Curtis, section 12.10. As an example, a third body could be\n the gravity from the Moon acting on a small satellite.\n\n \"\"\"\n\n body_r = third_body(t0)\n delta_r = body_r - state[:3]\n return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3\n\n\ndef radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star):\n r\"\"\"Calculates radiation pressure acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = -\\nu \\frac{S}{c} \\left ( \\frac{C_{r}A}{m} \\right )\\frac{\\vec{r}}{r}\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n R : float\n radius of the attractor\n C_R: float\n dimensionless radiation pressure coefficient, 1 < C_R < 2 ()\n A: float\n effective spacecraft area (km^2)\n m: float\n mass of the spacecraft (kg)\n Wdivc_s : float\n total star emitted power divided by the speed of light (W * s / km)\n star: a callable object returning the position of star in attractor frame\n star position\n\n Note\n ----\n This function provides the acceleration due to star light pressure. We follow\n Howard Curtis, section 12.9\n\n \"\"\"\n\n r_star = star(t0)\n r_sat = state[:3]\n P_s = Wdivc_s / (norm(r_star) ** 2)\n\n nu = float(shadow_function(r_sat, r_star, R))\n return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)\n"
] | [
[
"numpy.dot",
"numpy.arccos",
"numpy.linalg.norm",
"numpy.exp",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
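`J2_perturbation` in the record above evaluates Curtis eq. (12.30) component by component. A standalone numpy sketch of that same formula; the Earth-like constants below are rough illustrative values for a quick sanity check, not data from the file:

import numpy as np

def j2_accel(r_vec, k, J2, R):
    # a = (3/2) k J2 R^2 / r^5 * [x(5z^2/r^2 - 1), y(5z^2/r^2 - 1), z(5z^2/r^2 - 3)]
    r = np.linalg.norm(r_vec)
    factor = 1.5 * k * J2 * R ** 2 / r ** 5
    x, y, z = r_vec
    return factor * np.array([
        x * (5.0 * z ** 2 / r ** 2 - 1.0),
        y * (5.0 * z ** 2 / r ** 2 - 1.0),
        z * (5.0 * z ** 2 / r ** 2 - 3.0),
    ])

# Rough Earth-like constants: k in km^3/s^2, R in km.
print(j2_accel(np.array([7000.0, 0.0, 100.0]), k=398600.0, J2=1.08263e-3, R=6378.0))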
tgquintela/pySpatialTools | [
"e028008f9750521bf7d311f7cd3323c88d621ea4",
"e028008f9750521bf7d311f7cd3323c88d621ea4",
"e028008f9750521bf7d311f7cd3323c88d621ea4",
"e028008f9750521bf7d311f7cd3323c88d621ea4",
"e028008f9750521bf7d311f7cd3323c88d621ea4",
"e028008f9750521bf7d311f7cd3323c88d621ea4"
] | [
"pySpatialTools/utils/artificial_data/artificial_measure.py",
"pySpatialTools/tests/test_perturbation.py",
"pySpatialTools/FeatureManagement/aux_descriptormodels/completers.py",
"pySpatialTools/Retrieve/aux_retriever_parsing.py",
"pySpatialTools/tests/test_preprocess.py",
"pySpatialTools/Testers/check_features.py"
] | [
"\n\"\"\"\nartificial measure\n------------------\nCreation of artificial measure\n\"\"\"\n\nimport numpy as np\n\n\n############################### Create measure ################################\n###############################################################################\ndef create_artificial_measure_array(n_k, n_vals_i, n_feats):\n \"\"\"Create artificial random measure in the array form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: np.ndarray\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n measure = np.random.random((n_vals_i, n_feats, n_k))\n return measure\n\n\ndef create_artificial_measure_append(n_k, n_vals_i, n_feats):\n \"\"\"Create artificial random measure in the list form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n rounds = np.random.randint(1, 40)\n measure = create_empty_append(n_k, n_vals_i, n_feats)\n for i in range(rounds):\n n_iss = np.random.randint(1, 10)\n vals_i = create_vals_i(n_iss, n_vals_i, n_k)\n x_i = create_features_i_dict(n_feats, n_iss, n_k)\n for k in range(len(vals_i)):\n for i in range(len(vals_i[k])):\n measure[k][vals_i[k][i]].append(x_i[k][i])\n return measure\n\n\ndef create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,\n unique_=False):\n \"\"\"Create artificial random measure in the replacelist form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n unique_: boolean (default=False)\n if there are no collapse.\n\n Returns\n -------\n measure: list\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n last = 0\n rounds = np.random.randint(1, 40)\n measure = create_empty_replacelist(n_k, n_vals_i, n_feats)\n for i in range(rounds):\n n_iss = np.random.randint(1, 10)\n if unique_:\n vals_i = np.array([last+np.arange(n_iss)]*n_k)\n last += n_iss\n else:\n vals_i = create_vals_i(n_iss, n_vals_i, n_k)\n x_i = create_features_i_dict(n_feats, n_iss, n_k)\n for k in range(len(vals_i)):\n measure[k][0].append(x_i[k])\n measure[k][1].append(vals_i[k])\n return measure\n\n\n############################### Empty measure #################################\n###############################################################################\ndef create_empty_array(n_k, n_vals_i, n_feats):\n \"\"\"Create null measure in the array form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: np.ndarray\n the null measure to be fill by the computation of the spatial\n descriptor model.\n\n \"\"\"\n return np.zeros((n_vals_i, n_feats, n_k))\n\n\ndef create_empty_append(n_k, n_iss, n_feats):\n \"\"\"Create null measure in the list form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the null measure to be fill by the computation of the spatial\n 
descriptor model.\n\n \"\"\"\n return [[[]]*n_iss]*n_k\n\n\ndef create_empty_replacelist(n_k, n_iss, n_feats):\n \"\"\"Create null measure in the replacelist form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the null measure to be fill by the computation of the spatial\n descriptor model.\n\n \"\"\"\n return [[[], []]]*n_k\n\n\n############################### Vals_i creation ###############################\n###############################################################################\ndef create_vals_i(n_iss, nvals, n_k):\n \"\"\"\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n vals_i: np.ndarray\n the associated stored indices for the element indices.\n\n \"\"\"\n return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))\n\n\n############################### Empty features ################################\n###############################################################################\ndef create_empty_features_array(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an array-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: np.ndarray\n the null features we want to compute.\n\n \"\"\"\n return np.zeros((n_k, n_iss, n_feats))\n\n\ndef create_empty_features_dict(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an listdict-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: list\n the null features we want to compute.\n\n \"\"\"\n return [[{}]*n_iss]*n_k\n\n\n################################ X_i features #################################\n###############################################################################\ndef create_features_i_array(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an array-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: np.ndarray\n the null features we want to compute.\n\n \"\"\"\n x_i = np.random.random((n_k, n_iss, n_feats))\n return x_i\n\n\ndef create_features_i_dict(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an listdict-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: list\n the null features we want to compute.\n\n \"\"\"\n x_i = []\n for k in range(n_k):\n x_i_k = []\n for i in range(n_iss):\n keys = np.unique(np.random.randint(1, n_feats, n_feats))\n keys = [str(e) for e in keys]\n values = np.random.random(len(keys))\n x_i_k.append(dict(zip(keys, values)))\n x_i.append(x_i_k)\n return x_i\n",
"\n\"\"\"\ntest Perturbations\n------------------\ntest for perturbations.\n\n\"\"\"\n\nimport numpy as np\n\n## Retrievers\nfrom pySpatialTools.Retrieve import KRetriever, CircRetriever\n\n## Features\nfrom pySpatialTools.FeatureManagement.features_retriever import\\\n FeaturesManager\nfrom pySpatialTools.FeatureManagement.features_objects import BaseFeatures,\\\n ImplicitFeatures, ExplicitFeatures\nfrom pySpatialTools.utils.perturbations import PermutationPerturbation,\\\n NonePerturbation, JitterLocations, PermutationIndPerturbation,\\\n ContiniousIndPerturbation, DiscreteIndPerturbation, MixedFeaturePertubation\nfrom pySpatialTools.utils.perturbations import BasePerturbation\nfrom pySpatialTools.utils.perturbations import sp_general_filter_perturbations,\\\n feat_filter_perturbations, ret_filter_perturbations\n\nfrom pySpatialTools.FeatureManagement.Descriptors import AvgDescriptor\n\n\ndef test():\n n = 1000\n locs = np.random.random((n, 2))*100\n k_perturb1, k_perturb2, k_perturb3 = 5, 10, 3\n k_perturb4 = k_perturb1+k_perturb2+k_perturb3\n\n ## Perturbations features\n feat_arr0 = np.random.randint(0, 20, (n, 1))\n feat_arr1 = np.random.random((n, 10))\n feat_arr = np.hstack([feat_arr0, feat_arr1])\n\n ###########################################################################\n #### GeneralPermutations\n ## Create perturbations\n class DummyPerturbation(BasePerturbation):\n _categorytype = 'feature'\n _perturbtype = 'dummy'\n\n def __init__(self, ):\n self._initialization()\n self.features_p = np.random.random((10, 10, 10))\n self.locations_p = np.random.random((100, 2, 5))\n self.relations_p = np.random.random((100, 2, 5))\n dummypert = DummyPerturbation()\n\n # Testing main functions\n dummypert.apply2indice(0, 0)\n dummypert.apply2locs(locs)\n dummypert.apply2locs_ind(locs, 0, 0)\n dummypert.apply2features(feat_arr)\n dummypert.apply2features_ind(feat_arr, 0, 0)\n dummypert.apply2relations(None)\n dummypert.apply2relations_ind(None, 0, 0)\n dummypert.apply2discretizations(None)\n dummypert.selfcompute_features(feat_arr)\n dummypert.selfcompute_locations(locs)\n dummypert.selfcompute_relations(None)\n dummypert.selfcompute_discretizations(None)\n\n ###########################################################################\n #### Permutations\n ## Create perturbations\n perturbation1 = PermutationPerturbation((n, k_perturb1))\n reind = np.vstack([np.random.permutation(n) for i in range(k_perturb1)])\n perturbation1 = PermutationPerturbation(reind.T)\n\n # Testing main functions individually\n perturbation1.apply2indice(0, 0)\n perturbation1.apply2locs(locs)\n# perturbation1.apply2locs_ind(locs, 0, 0)\n perturbation1.selfcompute_locations(locs)\n perturbation1.apply2features(feat_arr)\n perturbation1.apply2features_ind(feat_arr, 0, 0)\n perturbation1.selfcompute_features(feat_arr)\n\n # Perturbations in Retriever\n ret1 = KRetriever(locs)\n ret2 = CircRetriever(locs)\n ret1.add_perturbations(perturbation1)\n ret2.add_perturbations(perturbation1)\n assert(ret1.k_perturb == perturbation1.k_perturb)\n assert(ret2.k_perturb == perturbation1.k_perturb)\n\n # Perturbations in Descriptors\n# features = ImplicitFeatures(feat_arr)\n# features.add_perturbations(perturbation1)\n# avgdesc = AvgDescriptor()\n# features = FeaturesManager(features, avgdesc)\n# assert(features.k_perturb == perturbation1.k_perturb)\n\n ###########################################################################\n #### NonePerturbation\n ## Create perturbations\n perturbation2 = NonePerturbation(k_perturb2)\n\n # 
Testing main functions individually\n perturbation2.apply2indice(0, 0)\n perturbation2.apply2locs(locs)\n# perturbation2.apply2locs_ind(locs, 0, 0)\n perturbation2.selfcompute_locations(locs)\n perturbation2.apply2features(feat_arr)\n# perturbation2.apply2features_ind(feat_arr, 0, 0)\n perturbation2.selfcompute_features(feat_arr)\n\n # Perturbations in Retriever\n ret1 = KRetriever(locs)\n ret2 = CircRetriever(locs)\n ret1.add_perturbations(perturbation2)\n ret2.add_perturbations(perturbation2)\n assert(ret1.k_perturb == perturbation2.k_perturb)\n assert(ret2.k_perturb == perturbation2.k_perturb)\n\n # Perturbations in Descriptors\n# features = ImplicitFeatures(feat_arr)\n# features.add_perturbations(perturbation2)\n# avgdesc = AvgDescriptor()\n# features = FeaturesManager(features, avgdesc)\n# assert(features.k_perturb == perturbation2.k_perturb)\n\n ###########################################################################\n #### JitterPerturbations\n ## Create perturbations\n perturbation3 = JitterLocations(0.2, k_perturb3)\n\n # Testing main functions individually\n perturbation3.apply2indice(0, 0)\n perturbation3.apply2locs(locs)\n# perturbation3.apply2locs_ind(locs, 0, 0)\n perturbation3.selfcompute_locations(locs)\n perturbation3.apply2features(feat_arr)\n# perturbation3.apply2features_ind(feat_arr, 0, 0)\n perturbation3.selfcompute_features(feat_arr)\n\n # Perturbations in Retriever\n ret1 = KRetriever(locs)\n ret2 = CircRetriever(locs)\n ret1.add_perturbations(perturbation3)\n ret2.add_perturbations(perturbation3)\n assert(ret1.k_perturb == perturbation3.k_perturb)\n assert(ret2.k_perturb == perturbation3.k_perturb)\n\n # Perturbations in Descriptors\n# features = ImplicitFeatures(feat_arr)\n# features.add_perturbations(perturbation3)\n# avgdesc = AvgDescriptor()\n# features = FeaturesManager(features, avgdesc)\n# assert(features.k_perturb == perturbation3.k_perturb)\n\n ###########################################################################\n #### CollectionPerturbations\n ## Create perturbations\n perturbation4 = [perturbation1, perturbation2, perturbation3]\n\n # Perturbations in Retriever\n ret1 = KRetriever(locs)\n ret2 = CircRetriever(locs)\n ret1.add_perturbations(perturbation4)\n ret2.add_perturbations(perturbation4)\n assert(ret1.k_perturb == k_perturb4)\n assert(ret2.k_perturb == k_perturb4)\n\n # Perturbations in Descriptors\n# features = ImplicitFeatures(feat_arr)\n# features.add_perturbations(perturbation4)\n# avgdesc = AvgDescriptor()\n# features = FeaturesManager(features, avgdesc)\n# assert(features.k_perturb == k_perturb4)\n\n ###########################################################################\n #### IndividualPerturbations\n feat_perm = np.random.random((100, 1))\n feat_disc = np.random.randint(0, 10, 100)\n feat_cont = np.random.random((100, 1))\n\n ### Reindices individually\n # Individual perturbations\n reind_ind = np.random.permutation(100).reshape((100, 1))\n\n try:\n boolean = False\n perm_ind = PermutationIndPerturbation(list(reind_ind))\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n perm_ind = PermutationIndPerturbation(reind_ind)\n perm_ind.reindices\n # Testing main functions individually\n perm_ind.apply2indice(0, 0)\n perm_ind.apply2locs(locs)\n# perm_ind.apply2locs_ind(locs, 0, 0)\n perm_ind.selfcompute_locations(locs)\n perm_ind.apply2features(feat_perm)\n perm_ind.apply2features(feat_perm, 0)\n perm_ind.apply2features_ind(feat_perm, 0, 0)\n 
perm_ind.selfcompute_features(feat_perm)\n\n ### Continious individually\n cont_ind = ContiniousIndPerturbation(0.5)\n # Testing main functions individually\n cont_ind.apply2indice(0, 0)\n cont_ind.apply2locs(locs)\n# cont_ind.apply2locs_ind(locs, 0, 0)\n cont_ind.selfcompute_locations(locs)\n cont_ind.apply2features(feat_cont)\n cont_ind.apply2features(feat_cont, 0)\n# cont_ind.apply2features_ind(feat_cont, 0, 0)\n cont_ind.selfcompute_features(feat_cont)\n\n ### Discrete individually\n try:\n boolean = False\n disc_ind = DiscreteIndPerturbation(np.random.random((10, 10)))\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n try:\n boolean = False\n probs = np.random.random((10, 10))\n probs = (probs.T/probs.sum(1)).T\n disc_ind = DiscreteIndPerturbation(probs[:8, :])\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n probs = np.random.random((10, 10))\n probs = (probs.T/probs.sum(1)).T\n disc_ind = DiscreteIndPerturbation(probs)\n # Testing main functions individually\n disc_ind.apply2indice(0, 0)\n disc_ind.apply2locs(locs)\n# disc_ind.apply2locs_ind(locs, 0, 0)\n disc_ind.selfcompute_locations(locs)\n disc_ind.apply2features(feat_disc)\n disc_ind.apply2features(feat_disc, 0)\n# disc_ind.apply2features_ind(feat_disc, 0, 0)\n disc_ind.selfcompute_features(feat_disc)\n try:\n boolean = False\n disc_ind.apply2features(np.random.randint(0, 40, 1000))\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n\n ### Mix individually\n mix_coll = MixedFeaturePertubation([perm_ind, cont_ind, disc_ind])\n # Testing main functions individually\n feat_mix = np.hstack([feat_perm, feat_cont, feat_disc.reshape((100, 1))])\n mix_coll.apply2indice(0, 0)\n mix_coll.apply2locs(locs)\n# mix_coll.apply2locs_ind(locs, 0, 0)\n mix_coll.selfcompute_locations(locs)\n mix_coll.apply2features(feat_mix)\n# mix_coll.apply2features_ind(feat_mix, 0, 0)\n mix_coll.selfcompute_features(feat_mix)\n\n try:\n boolean = False\n MixedFeaturePertubation(None)\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n try:\n boolean = False\n MixedFeaturePertubation([None])\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n try:\n boolean = False\n mix_coll.apply2features(None)\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n try:\n boolean = False\n mix_coll.apply2features([None])\n boolean = True\n raise Exception(\"It has to halt here.\")\n except:\n if boolean:\n raise Exception(\"It has to halt here.\")\n\n ###########################################################################\n ##################### Auxiliar perturbation functions #####################\n ###########################################################################\n sp_general_filter_perturbations(perturbation1)\n feat_filter_perturbations(perturbation1)\n ret_filter_perturbations(perturbation1)\n sp_general_filter_perturbations(perturbation2)\n feat_filter_perturbations(perturbation2)\n ret_filter_perturbations(perturbation2)\n sp_general_filter_perturbations(perturbation3)\n feat_filter_perturbations(perturbation3)\n ret_filter_perturbations(perturbation3)\n 
sp_general_filter_perturbations([perturbation1])\n feat_filter_perturbations([perturbation1])\n ret_filter_perturbations([perturbation1])\n sp_general_filter_perturbations([perturbation2])\n feat_filter_perturbations([perturbation2])\n ret_filter_perturbations([perturbation2])\n sp_general_filter_perturbations([perturbation3])\n feat_filter_perturbations([perturbation3])\n ret_filter_perturbations([perturbation3])\n\n perts = [PermutationPerturbation((n, 5)), NonePerturbation(5),\n JitterLocations(0.2, 5)]\n\n sp_general_filter_perturbations(perts)\n feat_filter_perturbations(perts)\n ret_filter_perturbations(perts)\n",
"\n\"\"\"\nCompleter functions\n-------------------\nThis module contain possible functions to complete the final measure.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom out_formatters import count_out_formatter_dict2array\n\n\ndef sparse_dict_completer(measure, global_info=None):\n \"\"\"Sparse completer transform the dictionaries into a sparse matrices.\n\n Parameters\n ----------\n measure: list [ks][vals_i]{feats}\n the measure computed by the descriptormodel and joined by the resulter.\n global_info: optional (default=None)\n the global information we want to use in order to transform the\n measure.\n\n Returns\n -------\n measure: list of scipy.sparse\n the transformed result measure.\n\n See also:\n ---------\n append_addresult_function\n\n \"\"\"\n pos_feats = []\n n_iss = len(measure[0])\n for k in range(len(measure)):\n for vals_i in range(len(measure[k])):\n ## Collapse into one dictionary\n # Just collapsed\n if type(measure[k][vals_i]) == dict:\n continue\n # Add dictionaries and possible features\n d = {}\n for i in range(len(measure[k][vals_i])):\n aux_dict = measure[k][vals_i][i]\n aux_keys = aux_dict.keys()\n pos_feats += aux_keys\n for e in aux_keys:\n if e in d:\n d[e] += aux_dict[e]\n else:\n d[e] = aux_dict[e]\n measure[k][vals_i] = d\n ## Collapsing\n feats_names = list(np.unique(pos_feats))\n for k in range(len(measure)):\n data, iss, jss = [], [], []\n for i in range(len(measure[k])):\n aux_jss = measure[k][i].keys()\n jss += [feats_names.index(e) for e in aux_jss]\n data += measure[k][i].values()\n iss += len(measure[k][i])*[i]\n\n ## Building the matrix and storing it in measure\n shape = (n_iss, len(feats_names))\n data, iss, jss = np.array(data), np.array(iss), np.array(jss)\n measure[k] = coo_matrix((data, (iss, jss)), shape=shape)\n\n# measure = np.array(measure)\n return measure\n\n\ndef sparse_dict_completer_unknown(measure, global_info=None):\n \"\"\"Sparse completer transform the dictionaries into a sparse matrices.\n\n Parameters\n ----------\n measure: list [ks][vals_i]{feats}\n the measure computed by the descriptormodel and joined by the resulter.\n global_info: optional (default=None)\n the global information we want to use in order to transform the\n measure.\n\n Returns\n -------\n measure: list of scipy.sparse\n the transformed result measure.\n\n See also:\n ---------\n replacelist_addresult_function\n\n \"\"\"\n ## Completing measure\n for k in range(len(measure)):\n data, iss, jss = [], [], []\n val_res_con = np.concatenate(measure[k][1])\n if len(np.unique(val_res_con)) == len(val_res_con):\n for i in range(len(measure[k][1])):\n jss += measure[k][0][i].keys()\n data += measure[k][0][i].values()\n iss += len(measure[k][0][i])*[measure[k][1][i]]\n else:\n for i in range(len(measure[k][1])):\n dicti = {}\n for v in range(len(measure[k][1][i])):\n keys = measure[k][0][i][v].keys()\n values = measure[k][0][i][v].values()\n for j in xrange(len(keys)):\n if keys[j] in dicti:\n dicti[keys[j]] += values[j]\n else:\n dicti[keys[j]] = values[j]\n# try:\n# dicti[keys[j]] += values[j]\n# except:\n# dicti[keys[j]] = values[j]\n ### WARNING: TODO: Featurenames transformation into numbers\n jss += [int(e) for e in dicti.keys()]\n iss += [measure[k][1][i][v]]*len(dicti.keys())\n data += dicti.values()\n ## Building the matrix and storing it in measure\n shape = (int(np.max(iss))+1, int(np.max(jss))+1)\n data, iss, jss = np.array(data), np.array(iss), np.array(jss)\n measure[k] = coo_matrix((data, (iss, jss)), shape=shape)\n\n return 
measure\n\n\ndef null_completer(measure, global_info=None):\n \"\"\"Do not change the measure.\n\n Parameters\n ----------\n measure: optional\n the measure computed by the descriptormodel and joined by the resulter.\n global_info: optional (default=None)\n the global information we want to use in order to transform the\n measure.\n\n Returns\n -------\n measure: optional\n the equal measure input as a result of the spatial descriptormodel.\n\n \"\"\"\n return measure\n\n\ndef null_completer_concatenator(measure, global_info=None):\n \"\"\"Don not change anything, only concatenate all the partial descriptors\n for each vals_i.\n\n Parameters\n ----------\n measure: list of np.ndarray\n the measure computed by the descriptormodel and joined by the resulter.\n global_info: optional (default=None)\n the global information we want to use in order to transform the\n measure.\n\n Returns\n -------\n measure: np.ndarray\n a concatenate np.ndarray of the input measure.\n\n \"\"\"\n return np.concatenate(measure)\n\n\ndef weighted_completer(measure, global_info):\n \"\"\"Weight the different results using the global info.\n It is REQUIRED that the global_info is an array of the same length as the\n measure.\n\n Parameters\n ----------\n measure: list of np.ndarray\n the measure computed by the descriptormodel and joined by the resulter.\n global_info: optional (default=None)\n the global information we want to use in order to transform the\n measure.\n\n Returns\n -------\n measure: np.ndarray\n a weighted measure computation.\n\n \"\"\"\n if global_info is None:\n return measure\n if len(global_info.shape) != 3:\n global_info = global_info.ravel()\n assert len(measure) == len(global_info)\n global_info = global_info.reshape((len(global_info), 1, 1))\n measure = np.multiply(measure, global_info)\n return measure\n",
"\n\"\"\"\nAuxiliar retrievers parsing\n---------------------------\nTools and utilities to parse heterogenous ways to give retriever information\nin order to obtain retriever objects.\n\n\"\"\"\n\n\nimport numpy as np\nfrom retrievers import BaseRetriever\nfrom collectionretrievers import RetrieverManager\nfrom pySpatialTools.Discretization import _discretization_parsing_creation\n\n\n###############################################################################\n######################## Main paser creation functions ########################\n###############################################################################\n################################## Retrievers #################################\ndef _retrieverobject_parsing_creation(retriever_info):\n \"\"\"Function which uniforms the retriever info to be useful in other\n parts of the code.\n\n Parameters\n ----------\n retriever_info: pst.BaseRetriever or tuple\n Variable which contains the retriever information. The standarts input\n of that variable are:\n * Retriever object\n * (Retriever class, main_info)\n * (Retriever class, main_info, pars_ret)\n * (Retriever class, main_info, pars_ret, autolocs)\n\n Returns\n -------\n retriever_info: pst.BaseRetriever\n the retriever instance.\n\n \"\"\"\n if isinstance(retriever_info, BaseRetriever):\n pass\n else:\n assert(type(retriever_info) == tuple)\n assert(isinstance(retriever_info[0], object))\n pars_ret = {}\n if len(retriever_info) >= 3:\n pars_ret = retriever_info[2]\n if len(retriever_info) == 4:\n pars_ret['autolocs'] = retriever_info[3]\n retriever_info = retriever_info[0](retriever_info[1], **pars_ret)\n assert(isinstance(retriever_info, BaseRetriever))\n return retriever_info\n\n\ndef _retrievermanager_parsing_creation(retriever_info):\n \"\"\"Function which uniforms the retriever info to be useful in other\n parts of the code.\n\n Parameters\n ----------\n retriever_info: pst.BaseRetriever or tuple\n Variable which contains the retriever information. The standarts input\n of that variable are:\n * Retriever object\n * Retriever objects\n * RetrieverManager objects\n\n Returns\n -------\n retriever_info: pst.BaseRetriever\n the retriever instance.\n\n \"\"\"\n if isinstance(retriever_info, BaseRetriever):\n retriever_info = RetrieverManager(retriever_info)\n elif type(retriever_info) == list:\n assert(all([isinstance(e, BaseRetriever) for e in retriever_info]))\n retriever_info = RetrieverManager(retriever_info)\n else:\n assert(isinstance(retriever_info, RetrieverManager))\n assert(isinstance(retriever_info, RetrieverManager))\n return retriever_info\n\n\ndef _retriever_parsing_creation(retriever_info):\n \"\"\"Function which uniforms the retriever info to be useful in other\n parts of the code.\n\n Parameters\n ----------\n retriever_info: pst.BaseRetriever or tuple\n Variable which contains the retriever information. 
The standarts input\n of that variable are:\n * Retriever object\n * (Retriever class, main_info)\n * (Retriever class, main_info, pars_ret)\n * (Retriever class, main_info, pars_ret, autolocs)\n\n Returns\n -------\n retriever_info: pst.BaseRetriever\n the retriever instance.\n\n \"\"\"\n if isinstance(retriever_info, RetrieverManager):\n pass\n elif isinstance(retriever_info, BaseRetriever):\n retriever_info = _retrievermanager_parsing_creation(retriever_info)\n elif type(retriever_info) == list:\n r = [_retrieverobject_parsing_creation(ret) for ret in retriever_info]\n retriever_info = _retrievermanager_parsing_creation(r)\n else:\n retriever_info = _retrieverobject_parsing_creation(retriever_info)\n retriever_info = _retrievermanager_parsing_creation(retriever_info)\n assert(isinstance(retriever_info, RetrieverManager))\n return retriever_info\n\n\n###############################################################################\n################## Creation of automatic discretization maps ##################\n###############################################################################\ndef create_m_in_inverse_discretization(discretization_info):\n \"\"\"Create in_map for inverse discretization.\n\n Parameters\n ----------\n discretization_info: tuple or pst.BaseSpatialDiscretizor\n It is defined by a discretization object or a tuple of locations,\n regions and discretization object. The standard inputs of that\n function parameter are:\n * (discretizator, locs)\n * (locs, regions)\n * disc\n * locs, regs, disc\n\n Returns\n -------\n m_in_inverse_discretazation: function\n the function which formats the input of the retriever.\n\n \"\"\"\n ## 0. Parsing discretization information input\n locs, regions, disc = _discretization_parsing_creation(discretization_info)\n\n ## 1. Building map\n def m_in_inverse_discretazation(self, idxs):\n \"\"\"Inverse application of the discretization information.\"\"\"\n new_idxs = []\n for i in idxs:\n new_idxs.append(np.where(regions == i)[0])\n return new_idxs\n\n return m_in_inverse_discretazation\n\n\ndef create_m_in_direct_discretization(discretization_info):\n \"\"\"Create in_map for direct discretization.\n\n Parameters\n ----------\n discretization_info: tuple or pst.BaseSpatialDiscretizor\n It is defined by a discretization object or a tuple of locations,\n regions and discretization object. The standard inputs of that\n function parameter are:\n * (discretizator, locs)\n * (locs, regions)\n * disc\n * locs, regs, disc\n\n Returns\n -------\n m_in_direct_discretazation: function\n the function which formats the input of the retriever.\n\n \"\"\"\n ## 0. Parsing discretization information input\n locs, regions, disc = _discretization_parsing_creation(discretization_info)\n ## 1. Building map\n if disc is not None:\n def m_in_direct_discretazation(self, idxs):\n \"\"\"Direct application of the discretization information.\"\"\"\n return [disc.discretize(locs[i]) for i in idxs]\n else:\n def m_in_direct_discretazation(self, idxs):\n \"\"\"Direct application of the discretization information.\"\"\"\n return [np.array(regions[e]) for e in idxs]\n\n return m_in_direct_discretazation\n\n\ndef create_m_out_inverse_discretization(discretization_info):\n \"\"\"Create out_map for inverse discretization.\n\n Parameters\n ----------\n discretization_info: tuple or pst.BaseSpatialDiscretizor\n It is defined by a discretization object or a tuple of locations,\n regions and discretization object. 
The standard inputs of that\n function parameter are:\n * (discretizator, locs)\n * (locs, regions)\n * disc\n * locs, regs, disc\n\n Returns\n -------\n m_out_inverse_discretization: function\n the function which formats the output of the retriever.\n\n \"\"\"\n ## 0. Parsing discretization information input\n locs, regions, disc = _discretization_parsing_creation(discretization_info)\n\n ## 1. Building map\n if type(regions) == np.ndarray:\n def m_out_inverse_discretization(self, idxs, neighs_info):\n \"\"\"This out_map for retrievers change the size of neighbourhood by\n substituting the regions_id or groups_id in the neighs_info for the\n elements which belong to this groups.\n \"\"\"\n neighs, dists = neighs_info\n neighs_o, dists_o = [], []\n for iss_i in range(len(neighs)):\n neighs_p, dists_p = [], []\n for i in range(len(neighs[iss_i])):\n neighs_ip = np.where(regions == neighs[iss_i][i])[0]\n neighs_p.append(neighs_ip)\n if dists[iss_i] is not None:\n sh = len(neighs_ip), 1\n dists_p.append(np.ones(sh) * dists[iss_i][i])\n if neighs_p:\n neighs_p = np.concatenate(neighs_p)\n if dists_p:\n dists_p = np.concatenate(dists_p)\n else:\n dists_p = np.ones((0, 1))\n neighs_o.append(neighs_p)\n dists_o.append(dists_p)\n return neighs_o, dists_o\n\n return m_out_inverse_discretization\n\n\ndef create_m_out_direct_discretization(discretization_info):\n \"\"\"Create out_map for inverse discretization.\n\n Parameters\n ----------\n discretization_info: tuple or pst.BaseSpatialDiscretizor\n It is defined by a discretization object or a tuple of locations,\n regions and discretization object. The standard inputs of that\n function parameter are:\n * (discretizator, locs)\n * (locs, regions)\n * disc\n * locs, regs, disc\n\n Returns\n -------\n m_out_direct_discretization: function\n the function which formats the output of the retriever.\n\n \"\"\"\n ## 0. Parsing discretization information input\n locs, regions, disc = _discretization_parsing_creation(discretization_info)\n\n ## 1. Building map\n if disc is None:\n def m_out_direct_discretization(self, idxs, neighs_info):\n \"\"\"This out_map for retrievers don't change the size of\n neighbourhood, only substitutes the element id for the group or\n regions id. It is useful for PhantomFeatures and direct distance\n features. Distance don't change.\n\n Parameters\n ----------\n neighs_info: tuple (neighs, dists)\n the neighbourhood information.\n\n Returns\n -------\n neighs_info: tuple (neighs, dists)\n the neighbourhood information.\n\n \"\"\"\n neighs, dists = neighs_info\n neighs_o = []\n for iss_i in range(len(neighs)):\n neighs_p = []\n for i in range(len(neighs[iss_i])):\n neighs_ip = np.array([regions[neighs[iss_i][i]]]).ravel()\n neighs_p.append(neighs_ip)\n if neighs_p:\n neighs_p = np.concatenate(neighs_p)\n neighs_o.append(neighs_p)\n return neighs_o, dists\n else:\n def m_out_direct_discretization(self, idxs, neighs_info):\n \"\"\"This out_map for retrievers don't change the size of\n neighbourhood, only substitutes the element id for the group or\n regions id. It is useful for PhantomFeatures and direct distance\n features. 
Distance don't change.\n\n Parameters\n ----------\n neighs_info: tuple (neighs, dists)\n the neighbourhood information.\n\n Returns\n -------\n neighs_info: tuple (neighs, dists)\n the neighbourhood information.\n\n \"\"\"\n neighs, dists = neighs_info\n neighs_o = []\n for iss_i in range(len(neighs)):\n neighs_p = []\n for i in range(len(neighs[iss_i])):\n neighs_ip = disc.discretize(locs[neighs[iss_i][i]])\n neighs_p.append(np.array([neighs_ip]).ravel())\n if neighs_p:\n neighs_p = np.concatenate(neighs_p)\n neighs_o.append(neighs_p)\n return neighs_o, dists\n\n return m_out_direct_discretization\n",
"\n\"\"\"\nTesting preprocess module\n-------------------------\nfunctions to test preprocess module.\n\"\"\"\n\nimport numpy as np\nfrom itertools import product\nfrom pySpatialTools.Preprocess import remove_unknown_locations,\\\n jitter_group_imputation, combinatorial_combination_features\nfrom pySpatialTools.Preprocess.Transformations.Transformation_2d import\\\n check_in_square_area, ellipsoidal_projection, radians2degrees,\\\n degrees2radians, spheroidal_projection, general_projection\n\n\ndef test():\n logi = np.random.randint(0, 2, 100)\n locations = np.random.random((100, 2))\n groups = np.random.randint(0, 20, 100)\n remove_unknown_locations(locations, logi)\n\n jitter_group_imputation(locations, logi, groups)\n\n sh = 10, 3\n cat_feats = np.random.randint(0, 4, np.prod(sh)).reshape(sh)\n combinatorial_combination_features(cat_feats)\n\n ###########################################################################\n ############################# TRANSFORMATION ##############################\n ###########################################################################\n coord = np.random.random((100, 2))*2\n lim_points = np.array([[0., 1.], [0., 1.]])\n check_in_square_area(coord, lim_points)\n radians2degrees(coord)\n degrees2radians(coord)\n ## Assert inverse proper definition\n coord_i = spheroidal_projection(spheroidal_projection(coord), True)\n np.testing.assert_array_almost_equal(coord, coord_i)\n ## TODO: revise that\n# coord_i = ellipsoidal_projection(ellipsoidal_projection(coord), True)\n# np.testing.assert_array_almost_equal(coord, coord_i)\n\n pos = [['spheroidal', 'ellipsoidal'], [True, False], [True, False]]\n for p in product(*pos):\n general_projection(coord, method=p[0], inverse=p[1], radians=p[2])\n",
"\n\"\"\"\nCheck descriptors\n-----------------\nModule which task is group the functions for testing and measuring how good\nare the descriptor for representing our data.\n\n\"\"\"\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import StratifiedKFold, cross_val_score\nfrom sklearn.metrics import confusion_matrix\n\n\ndef fit_model(model, X, y):\n \"Function to fit the model we want.\"\n n_folds = 3\n skf = StratifiedKFold(y, n_folds=n_folds)\n models, measures = [], []\n for train_index, test_index in skf:\n ## Extract Kfold\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n ## Fit models\n model_i = model()\n models.append(model_i.fit(X_train, y_train))\n ## Compute measure\n proba_m = model_i.predict_proba(X_test)\n measures.append(compute_measure(y_test, proba_m))\n\n i = np.argmax(measures)\n model, measure = models[i], measures[i]\n return model, measure\n\n\ndef descriptors_quality(model, X, y):\n \"Compute the quality measure of the descriptors.\"\n proba_m = model.predict_proba(X)\n measure = compute_measure(y, proba_m)\n return measure\n\n\ndef compute_measure(real, pred):\n \"Compute measure of performance from the predictions.\"\n conf_mat = compute_confusion_matrix(real, pred)\n score = from_confmat2score(conf_mat)\n return score\n\n\ndef from_confmat2score(conf_mat, method='accuracy', comp=None):\n \"Compute the score from confusion matrix.\"\n if type(method) == str:\n if method == 'accuracy':\n score = conf_mat.diagonal().sum()/conf_mat.sum()\n elif type(method).__name__ == 'function':\n score = method(conf_mat)\n return score\n\n\ndef compute_confusion_matrix(real, pred, normalization=None):\n \"Compute confusion matrix.\"\n if real.shape == pred.shape:\n conf_mat = confusion_matrix(real, pred)\n else:\n conf_mat = confusion_matrix_probs(pred, real)\n return conf_mat\n\n\ndef confusion_matrix_probs(predicted_probs, feat_arr):\n \"Confusion matrix from a matrix of probabilities.\"\n vals = np.unique(feat_arr)\n n_vals = vals.shape[0]\n feat_arr = feat_arr.ravel()\n\n conf_mat = np.zeros((n_vals, n_vals))\n for i in xrange(predicted_probs.shape[0]):\n conf_mat[feat_arr[i], :] += predicted_probs[i, :]\n # Normalization\n for i in range(n_vals):\n conf_mat[i, :] = conf_mat[i, :]/(feat_arr == vals[i]).sum()\n return conf_mat\n"
] | [
[
"numpy.arange",
"numpy.random.random",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.hstack",
"numpy.random.permutation",
"numpy.random.random",
"numpy.random.randint"
],
[
"scipy.sparse.coo_matrix",
"numpy.multiply",
"numpy.unique",
"numpy.concatenate",
"numpy.max",
"numpy.array"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.where",
"numpy.ones"
],
[
"numpy.random.random",
"numpy.random.randint",
"numpy.prod",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"sklearn.metrics.confusion_matrix",
"sklearn.cross_validation.StratifiedKFold"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
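`sparse_dict_completer` in the record above collapses per-element feature dictionaries into one `scipy.sparse.coo_matrix` per perturbation. A minimal sketch of the same collapse on toy data (the feature names and values are made up for illustration):

from scipy.sparse import coo_matrix

feats = [{'a': 2.0, 'b': 1.0}, {'b': 3.0}, {'a': 1.0, 'c': 4.0}]  # one dict per element
names = sorted({key for d in feats for key in d})                 # fixed column order

data, rows, cols = [], [], []
for i, d in enumerate(feats):
    for key, val in d.items():
        rows.append(i)
        cols.append(names.index(key))
        data.append(val)

m = coo_matrix((data, (rows, cols)), shape=(len(feats), len(names)))
print(names)
print(m.toarray())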
reddigari/Eelbrain | [
"6c02b99955d4b5dc7e3054042c182e1a4629b13c"
] | [
"eelbrain/_stats/tests/test_spm.py"
] | [
"# Author: Christian Brodbeck <[email protected]>\nimport pickle\nfrom nose.tools import eq_\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom eelbrain import datasets\nfrom eelbrain._stats.spm import LM, LMGroup\n\n\ndef test_lm():\n ds = datasets.get_uts()\n model = ds.eval(\"A*B*Y\")\n coeffs = ds['uts'].ols(model)\n\n lm = LM('uts', 'A*B*Y', ds, 'effect')\n eq_(repr(lm), \"<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>\")\n for i, effect in enumerate(model.effects):\n assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i])\n\n\ndef test_random_lm():\n # dummy coding\n ds = datasets.get_uts()\n lms = []\n for i in range(5):\n ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)\n lms.append(LM('uts', 'A*B*Y', ds))\n rlm = LMGroup(lms)\n eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')\n\n # coefficients\n ds = rlm.coefficients_dataset(('A', 'A x B'))\n eq_(ds['term'].cells, ('A', 'A x B'))\n\n # tests\n res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)\n eq_(res.clusters.n_cases, 1)\n\n # effect coding\n ds = datasets.get_uts()\n lms = []\n for i in range(5):\n ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)\n lms.append(LM('uts', 'A*B*Y', ds, 'effect'))\n rlm = LMGroup(lms)\n res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)\n eq_(res.clusters.n_cases, 6)\n\n # persistence\n rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))\n eq_(rlm_p.dims, rlm.dims)\n"
] | [
[
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
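`test_lm` in the record above checks that each per-effect `LM` coefficient matches `ds['uts'].ols(model)`. As a rough analogy only (plain-numpy ordinary least squares, not Eelbrain's API), the coefficients under comparison are OLS estimates from a design matrix:

import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.normal(size=(50, 2))])  # intercept + two predictors
beta_true = np.array([1.0, 0.5, -2.0])
y = X @ beta_true + rng.normal(scale=0.1, size=50)

beta_hat, *_ = np.linalg.lstsq(X, y, rcond=None)  # recovers beta_true approximately
print(beta_hat)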
manas96/RelationPrediction | [
"06be62a55554971d1b523dc555f4c8616c21c664"
] | [
"code-tf2/encoders/message_gcns/gcn_basis.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias\n\nfrom encoders.message_gcns.message_gcn import MessageGcn\n\n\nclass BasisGcn(MessageGcn):\n\n def parse_settings(self):\n self.dropout_keep_probability = float(self.settings['DropoutKeepProbability'])\n\n self.n_coefficients = int(self.settings['NumberOfBasisFunctions'])\n\n def local_initialize_train(self):\n vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0]\n type_matrix_shape = (self.relation_count, self.n_coefficients)\n vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1])\n self_matrix_shape = (vertex_feature_dimension, self.shape[1])\n\n glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]])\n self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)\n self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)\n self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape)\n\n type_init_var = 1\n self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape)\n self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape)\n\n self.b = make_tf_bias(self.shape[1])\n\n\n def local_get_weights(self):\n return [self.W_forward, self.W_backward,\n self.C_forward, self.C_backward,\n self.W_self,\n self.b]\n\n def compute_messages(self, sender_features, receiver_features):\n backward_type_scaling, forward_type_scaling = self.compute_coefficients()\n receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features)\n\n forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1)\n backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1)\n\n return forward_messages, backward_messages\n\n def compute_coefficients(self):\n message_types = self.get_graph().get_type_indices()\n forward_type_scaling = tf.nn.embedding_lookup(params=self.C_forward, ids=message_types)\n backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types)\n return backward_type_scaling, forward_type_scaling\n\n def compute_basis_functions(self, receiver_features, sender_features):\n sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward)\n receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward)\n\n return receiver_terms, sender_terms\n\n def dot_or_tensor_mul(self, features, tensor):\n tensor_shape = tf.shape(input=tensor)\n flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]]\n\n flattened_tensor = tf.reshape(tensor, flat_shape)\n result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input)\n result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]])\n\n return result_tensor\n\n def compute_self_loop_messages(self, vertex_features):\n return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input)\n\n\n def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):\n mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))\n mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))\n\n collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages)\n collected_messages_b = 
tf.sparse.sparse_dense_matmul(mtr_b, backward_messages)\n\n updated_vertex_embeddings = collected_messages_f + collected_messages_b\n\n if self.use_nonlinearity:\n activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages)\n else:\n activated = updated_vertex_embeddings + self_loop_messages\n\n return activated\n\n def local_get_regularization(self):\n regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward))\n regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward))\n regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self))\n\n return 0.0 * regularization"
] | [
[
"tensorflow.nn.relu",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.square",
"tensorflow.nn.embedding_lookup",
"tensorflow.sparse.sparse_dense_matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Leo-xxx/Deep-Flow-Guided-Video-Inpainting | [
"6310007009d2bfe150f1e4b29c7588f720c4bba2"
] | [
"utils/flow.py"
] | [
"import numpy as np\nimport cv2\n\n\ndef make_colorwheel():\n '''\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. \"A Database and Evaluation Methodology for Optical Flow\" (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n '''\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = np.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)\n colorwheel[col:col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col:col + GC, 1] = 255\n colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)\n colorwheel[col:col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col:col + BM, 2] = 255\n colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)\n colorwheel[col:col + MR, 0] = 255\n return colorwheel\n\n\ndef flow_compute_color(u, v, convert_to_bgr=False):\n '''\n Applies the flow color wheel to (possibly clipped) flow components u and v.\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n :param u: np.ndarray, input horizontal flow\n :param v: np.ndarray, input vertical flow\n :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB\n :return:\n '''\n\n flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)\n\n colorwheel = make_colorwheel() # shape [55x3]\n ncols = colorwheel.shape[0]\n\n rad = np.sqrt(np.square(u) + np.square(v))\n a = np.arctan2(-v, -u) / np.pi\n\n fk = (a + 1) / 2 * (ncols - 1) + 1\n k0 = np.floor(fk).astype(np.int32)\n k0[k0 > 53] = 53\n k1 = k0 + 1\n k1[k1 == ncols] = 1\n f = fk - k0\n\n for i in range(colorwheel.shape[1]):\n\n tmp = colorwheel[:, i]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n\n idx = (rad <= 1)\n col[idx] = 1 - rad[idx] * (1 - col[idx])\n col[~idx] = col[~idx] * 0.75 # out of range?\n\n # Note the 2-i => BGR instead of RGB\n ch_idx = 2 - i if convert_to_bgr else i\n flow_image[:, :, ch_idx] = np.floor(255 * col)\n\n return flow_image\n\n\ndef flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):\n '''\n Expects a two dimensional flow image of shape [H,W,2]\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n :param flow_uv: np.ndarray of shape [H,W,2]\n :param clip_flow: float, maximum clipping value for flow\n :return:\n '''\n\n assert flow_uv.ndim == 3, 'input flow must have three dimensions'\n assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'\n\n if clip_flow is not None:\n flow_uv = np.clip(flow_uv, 0, clip_flow)\n\n u = flow_uv[:, :, 0]\n v = flow_uv[:, :, 1]\n\n rad = np.sqrt(np.square(u) + np.square(v))\n rad_max = np.max(rad)\n\n epsilon = 1e-5\n u = u / (rad_max + epsilon)\n v = v / (rad_max + epsilon)\n\n return flow_compute_color(u, v, convert_to_bgr)\n\n\ndef readFlow(name):\n f = open(name, 'rb')\n\n header = f.read(4)\n if header.decode(\"utf-8\") != 'PIEH':\n raise Exception('Flow file header does not 
contain PIEH')\n\n width = np.fromfile(f, np.int32, 1).squeeze()\n height = np.fromfile(f, np.int32, 1).squeeze()\n\n flow = np.fromfile(f, np.float32, width * height * 2).reshape((height,\n width, 2))\n f.close()\n return flow.astype(np.float32)\n\n\ndef get_warp_label(flow1, flow2, label1, th=50, value=0):\n label2 = np.ones_like(label1, dtype=label1.dtype) * value\n height = flow1.shape[0]\n width = flow1.shape[1]\n flow_t = np.zeros_like(flow1, dtype=flow1.dtype)\n\n grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)\n dx = grid[:, :, 0] + flow2[:, :, 1]\n dy = grid[:, :, 1] + flow2[:, :, 0]\n sx = np.floor(dx).astype(int)\n sy = np.floor(dy).astype(int)\n valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)\n\n sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1)\n sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1)\n sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) *\n (1 - np.abs(sy_mat - dy[:, :, np.newaxis])))\n\n for i in range(4):\n flow_t = flow_t + sxsy_mat[:, :, i][:, :, np.\n newaxis] * flow1[sx_mat[:, :, i],\n sy_mat[:, :, i], :]\n\n valid = valid & (np.linalg.norm(\n flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th)\n\n flow_t = (flow2 - flow_t) / 2.0\n dx = grid[:, :, 0] + flow_t[:, :, 1]\n dy = grid[:, :, 1] + flow_t[:, :, 0]\n\n valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1)\n label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round()\n .astype(int), :]\n return label2\n\n\ndef flow_tf(flow, size):\n flow_shape = flow.shape\n flow_resized = cv2.resize(flow, (size[1], size[0]))\n flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1]))\n flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0]))\n\n return flow_resized"
] | [
[
"numpy.square",
"numpy.fromfile",
"numpy.ones_like",
"numpy.abs",
"numpy.clip",
"numpy.arange",
"numpy.indices",
"numpy.dstack",
"numpy.arctan2",
"numpy.max",
"numpy.zeros_like",
"numpy.floor",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
project-pantheon/pantheon_glob_planner | [
"c0d50a53b36c4678192ec75ad7a4cd68c570daef"
] | [
"env/lib/python3.5/site-packages/cartopy/tests/crs/test_utm.py"
] | [
"# (C) British Crown Copyright 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nTests for the UTM coordinate system.\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport pytest\n\nimport cartopy.crs as ccrs\n\n\ndef check_proj4_params(crs, other_args):\n expected = other_args | {'proj=utm', 'no_defs', 'units=m'}\n pro4_params = set(crs.proj4_init.lstrip('+').split(' +'))\n assert expected == pro4_params\n\n\[email protected]('south', [False, True])\ndef test_default(south):\n zone = 1 # Limits are fixed, so don't bother checking other zones.\n utm = ccrs.UTM(zone, southern_hemisphere=south)\n other_args = {'ellps=WGS84', 'zone={}'.format(zone)}\n if south:\n other_args |= {'south'}\n check_proj4_params(utm, other_args)\n\n assert_almost_equal(np.array(utm.x_limits),\n [-250000, 1250000])\n assert_almost_equal(np.array(utm.y_limits),\n [-10000000, 25000000])\n\n\ndef test_ellipsoid_transform():\n # USGS Professional Paper 1395, pp 269 - 271\n globe = ccrs.Globe(ellipse='clrk66')\n utm = ccrs.UTM(zone=18, globe=globe)\n geodetic = utm.as_geodetic()\n\n other_args = {'ellps=clrk66', 'zone=18'}\n check_proj4_params(utm, other_args)\n\n assert_almost_equal(np.array(utm.x_limits),\n [-250000, 1250000])\n assert_almost_equal(np.array(utm.y_limits),\n [-10000000, 25000000])\n\n result = utm.transform_point(-73.5, 40.5, geodetic)\n assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]),\n decimal=1)\n\n inverse_result = geodetic.transform_point(result[0], result[1], utm)\n assert_almost_equal(inverse_result, [-73.5, 40.5])\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IcyCC/vnpy | [
"04f6ec013daddde2df36590625e0533e260b4bc1"
] | [
"vnpy/app/cta_backtester/ui/widget.py"
] | [
"import numpy as np\nimport pyqtgraph as pg\nfrom datetime import datetime, timedelta\n\nfrom vnpy.trader.constant import Interval, Direction, Offset\nfrom vnpy.trader.engine import MainEngine\nfrom vnpy.trader.ui import QtCore, QtWidgets, QtGui\nfrom vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell\nfrom vnpy.trader.ui.editor import CodeEditor\nfrom vnpy.event import Event, EventEngine\nfrom vnpy.chart import ChartWidget, CandleItem, VolumeItem\nfrom vnpy.trader.utility import load_json, save_json\n\nfrom ..engine import (\n APP_NAME,\n EVENT_BACKTESTER_LOG,\n EVENT_BACKTESTER_BACKTESTING_FINISHED,\n EVENT_BACKTESTER_OPTIMIZATION_FINISHED,\n OptimizationSetting\n)\n\n\nclass BacktesterManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n setting_filename = \"cta_backtester_setting.json\"\n\n signal_log = QtCore.pyqtSignal(Event)\n signal_backtesting_finished = QtCore.pyqtSignal(Event)\n signal_optimization_finished = QtCore.pyqtSignal(Event)\n\n def __init__(self, main_engine: MainEngine, event_engine: EventEngine):\n \"\"\"\"\"\"\n super().__init__()\n\n self.main_engine = main_engine\n self.event_engine = event_engine\n\n self.backtester_engine = main_engine.get_engine(APP_NAME)\n self.class_names = []\n self.settings = {}\n\n self.target_display = \"\"\n\n self.init_ui()\n self.register_event()\n self.backtester_engine.init_engine()\n self.init_strategy_settings()\n\n def init_strategy_settings(self):\n \"\"\"\"\"\"\n self.class_names = self.backtester_engine.get_strategy_class_names()\n\n for class_name in self.class_names:\n setting = self.backtester_engine.get_default_setting(class_name)\n self.settings[class_name] = setting\n\n self.class_combo.addItems(self.class_names)\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"CTA回测\")\n\n # Setting Part\n self.class_combo = QtWidgets.QComboBox()\n\n self.symbol_line = QtWidgets.QLineEdit(\"IF88.CFFEX\")\n\n self.interval_combo = QtWidgets.QComboBox()\n for inteval in Interval:\n self.interval_combo.addItem(inteval.value)\n\n end_dt = datetime.now()\n start_dt = end_dt - timedelta(days=3 * 365)\n\n self.start_date_edit = QtWidgets.QDateEdit(\n QtCore.QDate(\n start_dt.year,\n start_dt.month,\n start_dt.day\n )\n )\n self.end_date_edit = QtWidgets.QDateEdit(\n QtCore.QDate.currentDate()\n )\n\n self.rate_line = QtWidgets.QLineEdit(\"0.000025\")\n self.slippage_line = QtWidgets.QLineEdit(\"0.2\")\n self.size_line = QtWidgets.QLineEdit(\"300\")\n self.pricetick_line = QtWidgets.QLineEdit(\"0.2\")\n self.capital_line = QtWidgets.QLineEdit(\"1000000\")\n\n self.inverse_combo = QtWidgets.QComboBox()\n self.inverse_combo.addItems([\"正向\", \"反向\"])\n\n backtesting_button = QtWidgets.QPushButton(\"开始回测\")\n backtesting_button.clicked.connect(self.start_backtesting)\n\n optimization_button = QtWidgets.QPushButton(\"参数优化\")\n optimization_button.clicked.connect(self.start_optimization)\n\n self.result_button = QtWidgets.QPushButton(\"优化结果\")\n self.result_button.clicked.connect(self.show_optimization_result)\n self.result_button.setEnabled(False)\n\n downloading_button = QtWidgets.QPushButton(\"下载数据\")\n downloading_button.clicked.connect(self.start_downloading)\n\n self.order_button = QtWidgets.QPushButton(\"委托记录\")\n self.order_button.clicked.connect(self.show_backtesting_orders)\n self.order_button.setEnabled(False)\n\n self.trade_button = QtWidgets.QPushButton(\"成交记录\")\n self.trade_button.clicked.connect(self.show_backtesting_trades)\n self.trade_button.setEnabled(False)\n\n self.daily_button = 
QtWidgets.QPushButton(\"每日盈亏\")\n self.daily_button.clicked.connect(self.show_daily_results)\n self.daily_button.setEnabled(False)\n\n self.candle_button = QtWidgets.QPushButton(\"K线图表\")\n self.candle_button.clicked.connect(self.show_candle_chart)\n self.candle_button.setEnabled(False)\n\n edit_button = QtWidgets.QPushButton(\"代码编辑\")\n edit_button.clicked.connect(self.edit_strategy_code)\n\n reload_button = QtWidgets.QPushButton(\"策略重载\")\n reload_button.clicked.connect(self.reload_strategy_class)\n\n for button in [\n backtesting_button,\n optimization_button,\n downloading_button,\n self.result_button,\n self.order_button,\n self.trade_button,\n self.daily_button,\n self.candle_button,\n edit_button,\n reload_button\n ]:\n button.setFixedHeight(button.sizeHint().height() * 2)\n\n form = QtWidgets.QFormLayout()\n form.addRow(\"交易策略\", self.class_combo)\n form.addRow(\"本地代码\", self.symbol_line)\n form.addRow(\"K线周期\", self.interval_combo)\n form.addRow(\"开始日期\", self.start_date_edit)\n form.addRow(\"结束日期\", self.end_date_edit)\n form.addRow(\"手续费率\", self.rate_line)\n form.addRow(\"交易滑点\", self.slippage_line)\n form.addRow(\"合约乘数\", self.size_line)\n form.addRow(\"价格跳动\", self.pricetick_line)\n form.addRow(\"回测资金\", self.capital_line)\n form.addRow(\"合约模式\", self.inverse_combo)\n\n result_grid = QtWidgets.QGridLayout()\n result_grid.addWidget(self.trade_button, 0, 0)\n result_grid.addWidget(self.order_button, 0, 1)\n result_grid.addWidget(self.daily_button, 1, 0)\n result_grid.addWidget(self.candle_button, 1, 1)\n\n left_vbox = QtWidgets.QVBoxLayout()\n left_vbox.addLayout(form)\n left_vbox.addWidget(backtesting_button)\n left_vbox.addWidget(downloading_button)\n left_vbox.addStretch()\n left_vbox.addLayout(result_grid)\n left_vbox.addStretch()\n left_vbox.addWidget(optimization_button)\n left_vbox.addWidget(self.result_button)\n left_vbox.addStretch()\n left_vbox.addWidget(edit_button)\n left_vbox.addWidget(reload_button)\n\n # Result part\n self.statistics_monitor = StatisticsMonitor()\n\n self.log_monitor = QtWidgets.QTextEdit()\n self.log_monitor.setMaximumHeight(400)\n\n self.chart = BacktesterChart()\n self.chart.setMinimumWidth(1000)\n\n self.trade_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测成交记录\",\n BacktestingTradeMonitor\n )\n self.order_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测委托记录\",\n BacktestingOrderMonitor\n )\n self.daily_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测每日盈亏\",\n DailyResultMonitor\n )\n\n # Candle Chart\n self.candle_dialog = CandleChartDialog()\n\n # Layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.statistics_monitor)\n vbox.addWidget(self.log_monitor)\n\n hbox = QtWidgets.QHBoxLayout()\n hbox.addLayout(left_vbox)\n hbox.addLayout(vbox)\n hbox.addWidget(self.chart)\n self.setLayout(hbox)\n\n # Code Editor\n self.editor = CodeEditor(self.main_engine, self.event_engine)\n\n # Load setting\n setting = load_json(self.setting_filename)\n if not setting:\n return\n\n self.class_combo.setCurrentIndex(\n self.class_combo.findText(setting[\"class_name\"])\n )\n\n self.symbol_line.setText(setting[\"vt_symbol\"])\n\n self.interval_combo.setCurrentIndex(\n self.interval_combo.findText(setting[\"interval\"])\n )\n\n self.rate_line.setText(str(setting[\"rate\"]))\n self.slippage_line.setText(str(setting[\"slippage\"]))\n self.size_line.setText(str(setting[\"size\"]))\n self.pricetick_line.setText(str(setting[\"pricetick\"]))\n 
self.capital_line.setText(str(setting[\"capital\"]))\n\n if not setting[\"inverse\"]:\n self.inverse_combo.setCurrentIndex(0)\n else:\n self.inverse_combo.setCurrentIndex(1)\n\n def register_event(self):\n \"\"\"\"\"\"\n self.signal_log.connect(self.process_log_event)\n self.signal_backtesting_finished.connect(\n self.process_backtesting_finished_event)\n self.signal_optimization_finished.connect(\n self.process_optimization_finished_event)\n\n self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)\n self.event_engine.register(\n EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)\n self.event_engine.register(\n EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)\n\n def process_log_event(self, event: Event):\n \"\"\"\"\"\"\n msg = event.data\n self.write_log(msg)\n\n def write_log(self, msg):\n \"\"\"\"\"\"\n timestamp = datetime.now().strftime(\"%H:%M:%S\")\n msg = f\"{timestamp}\\t{msg}\"\n self.log_monitor.append(msg)\n\n def process_backtesting_finished_event(self, event: Event):\n \"\"\"\"\"\"\n statistics = self.backtester_engine.get_result_statistics()\n self.statistics_monitor.set_data(statistics)\n\n df = self.backtester_engine.get_result_df()\n self.chart.set_data(df)\n\n self.trade_button.setEnabled(True)\n self.order_button.setEnabled(True)\n self.daily_button.setEnabled(True)\n self.candle_button.setEnabled(True)\n\n def process_optimization_finished_event(self, event: Event):\n \"\"\"\"\"\"\n self.write_log(\"请点击[优化结果]按钮查看\")\n self.result_button.setEnabled(True)\n\n def start_backtesting(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start = self.start_date_edit.date().toPyDate()\n end = self.end_date_edit.date().toPyDate()\n rate = float(self.rate_line.text())\n slippage = float(self.slippage_line.text())\n size = float(self.size_line.text())\n pricetick = float(self.pricetick_line.text())\n capital = float(self.capital_line.text())\n\n if self.inverse_combo.currentText() == \"正向\":\n inverse = False\n else:\n inverse = True\n\n # Save backtesting parameters\n backtesting_setting = {\n \"class_name\": class_name,\n \"vt_symbol\": vt_symbol,\n \"interval\": interval,\n \"rate\": rate,\n \"slippage\": slippage,\n \"size\": size,\n \"pricetick\": pricetick,\n \"capital\": capital,\n \"inverse\": inverse,\n }\n save_json(self.setting_filename, backtesting_setting)\n\n # Get strategy setting\n old_setting = self.settings[class_name]\n dialog = BacktestingSettingEditor(class_name, old_setting)\n i = dialog.exec()\n if i != dialog.Accepted:\n return\n\n new_setting = dialog.get_setting()\n self.settings[class_name] = new_setting\n\n result = self.backtester_engine.start_backtesting(\n class_name,\n vt_symbol,\n interval,\n start,\n end,\n rate,\n slippage,\n size,\n pricetick,\n capital,\n inverse,\n new_setting\n )\n\n if result:\n self.statistics_monitor.clear_data()\n self.chart.clear_data()\n\n self.trade_button.setEnabled(False)\n self.order_button.setEnabled(False)\n self.daily_button.setEnabled(False)\n self.candle_button.setEnabled(False)\n\n self.trade_dialog.clear_data()\n self.order_dialog.clear_data()\n self.daily_dialog.clear_data()\n self.candle_dialog.clear_data()\n\n def start_optimization(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start = self.start_date_edit.date().toPyDate()\n 
end = self.end_date_edit.date().toPyDate()\n rate = float(self.rate_line.text())\n slippage = float(self.slippage_line.text())\n size = float(self.size_line.text())\n pricetick = float(self.pricetick_line.text())\n capital = float(self.capital_line.text())\n\n if self.inverse_combo.currentText() == \"正向\":\n inverse = False\n else:\n inverse = True\n\n parameters = self.settings[class_name]\n dialog = OptimizationSettingEditor(class_name, parameters)\n i = dialog.exec()\n if i != dialog.Accepted:\n return\n\n optimization_setting, use_ga = dialog.get_setting()\n self.target_display = dialog.target_display\n\n self.backtester_engine.start_optimization(\n class_name,\n vt_symbol,\n interval,\n start,\n end,\n rate,\n slippage,\n size,\n pricetick,\n capital,\n inverse,\n optimization_setting,\n use_ga\n )\n\n self.result_button.setEnabled(False)\n\n def start_downloading(self):\n \"\"\"\"\"\"\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start_date = self.start_date_edit.date()\n end_date = self.end_date_edit.date()\n\n start = datetime(start_date.year(), start_date.month(), start_date.day())\n end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)\n\n self.backtester_engine.start_downloading(\n vt_symbol,\n interval,\n start,\n end\n )\n\n def show_optimization_result(self):\n \"\"\"\"\"\"\n result_values = self.backtester_engine.get_result_values()\n\n dialog = OptimizationResultMonitor(\n result_values,\n self.target_display\n )\n dialog.exec_()\n\n def show_backtesting_trades(self):\n \"\"\"\"\"\"\n if not self.trade_dialog.is_updated():\n trades = self.backtester_engine.get_all_trades()\n self.trade_dialog.update_data(trades)\n\n self.trade_dialog.exec_()\n\n def show_backtesting_orders(self):\n \"\"\"\"\"\"\n if not self.order_dialog.is_updated():\n orders = self.backtester_engine.get_all_orders()\n self.order_dialog.update_data(orders)\n\n self.order_dialog.exec_()\n\n def show_daily_results(self):\n \"\"\"\"\"\"\n if not self.daily_dialog.is_updated():\n results = self.backtester_engine.get_all_daily_results()\n self.daily_dialog.update_data(results)\n\n self.daily_dialog.exec_()\n\n def show_candle_chart(self):\n \"\"\"\"\"\"\n if not self.candle_dialog.is_updated():\n history = self.backtester_engine.get_history_data()\n self.candle_dialog.update_history(history)\n\n trades = self.backtester_engine.get_all_trades()\n self.candle_dialog.update_trades(trades)\n\n self.candle_dialog.exec_()\n\n def edit_strategy_code(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n file_path = self.backtester_engine.get_strategy_class_file(class_name)\n\n self.editor.open_editor(file_path)\n self.editor.show()\n\n def reload_strategy_class(self):\n \"\"\"\"\"\"\n self.backtester_engine.reload_strategy_class()\n\n self.class_combo.clear()\n self.init_strategy_settings()\n\n def show(self):\n \"\"\"\"\"\"\n self.showMaximized()\n\n\nclass StatisticsMonitor(QtWidgets.QTableWidget):\n \"\"\"\"\"\"\n KEY_NAME_MAP = {\n \"start_date\": \"首个交易日\",\n \"end_date\": \"最后交易日\",\n\n \"total_days\": \"总交易日\",\n \"profit_days\": \"盈利交易日\",\n \"loss_days\": \"亏损交易日\",\n\n \"capital\": \"起始资金\",\n \"end_balance\": \"结束资金\",\n\n \"total_return\": \"总收益率\",\n \"annual_return\": \"年化收益\",\n \"max_drawdown\": \"最大回撤\",\n \"max_ddpercent\": \"百分比最大回撤\",\n\n \"total_net_pnl\": \"总盈亏\",\n \"total_commission\": \"总手续费\",\n \"total_slippage\": \"总滑点\",\n \"total_turnover\": \"总成交额\",\n \"total_trade_count\": \"总成交笔数\",\n\n \"daily_net_pnl\": 
\"日均盈亏\",\n \"daily_commission\": \"日均手续费\",\n \"daily_slippage\": \"日均滑点\",\n \"daily_turnover\": \"日均成交额\",\n \"daily_trade_count\": \"日均成交笔数\",\n\n \"daily_return\": \"日均收益率\",\n \"return_std\": \"收益标准差\",\n \"sharpe_ratio\": \"夏普比率\",\n \"return_drawdown_ratio\": \"收益回撤比\"\n }\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n self.cells = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setRowCount(len(self.KEY_NAME_MAP))\n self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))\n\n self.setColumnCount(1)\n self.horizontalHeader().setVisible(False)\n self.horizontalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.Stretch\n )\n self.setEditTriggers(self.NoEditTriggers)\n\n for row, key in enumerate(self.KEY_NAME_MAP.keys()):\n cell = QtWidgets.QTableWidgetItem()\n self.setItem(row, 0, cell)\n self.cells[key] = cell\n\n def clear_data(self):\n \"\"\"\"\"\"\n for cell in self.cells.values():\n cell.setText(\"\")\n\n def set_data(self, data: dict):\n \"\"\"\"\"\"\n data[\"capital\"] = f\"{data['capital']:,.2f}\"\n data[\"end_balance\"] = f\"{data['end_balance']:,.2f}\"\n data[\"total_return\"] = f\"{data['total_return']:,.2f}%\"\n data[\"annual_return\"] = f\"{data['annual_return']:,.2f}%\"\n data[\"max_drawdown\"] = f\"{data['max_drawdown']:,.2f}\"\n data[\"max_ddpercent\"] = f\"{data['max_ddpercent']:,.2f}%\"\n data[\"total_net_pnl\"] = f\"{data['total_net_pnl']:,.2f}\"\n data[\"total_commission\"] = f\"{data['total_commission']:,.2f}\"\n data[\"total_slippage\"] = f\"{data['total_slippage']:,.2f}\"\n data[\"total_turnover\"] = f\"{data['total_turnover']:,.2f}\"\n data[\"daily_net_pnl\"] = f\"{data['daily_net_pnl']:,.2f}\"\n data[\"daily_commission\"] = f\"{data['daily_commission']:,.2f}\"\n data[\"daily_slippage\"] = f\"{data['daily_slippage']:,.2f}\"\n data[\"daily_turnover\"] = f\"{data['daily_turnover']:,.2f}\"\n data[\"daily_return\"] = f\"{data['daily_return']:,.2f}%\"\n data[\"return_std\"] = f\"{data['return_std']:,.2f}%\"\n data[\"sharpe_ratio\"] = f\"{data['sharpe_ratio']:,.2f}\"\n data[\"return_drawdown_ratio\"] = f\"{data['return_drawdown_ratio']:,.2f}\"\n\n for key, cell in self.cells.items():\n value = data.get(key, \"\")\n cell.setText(str(value))\n\n\nclass BacktestingSettingEditor(QtWidgets.QDialog):\n \"\"\"\n For creating new strategy and editing strategy parameters.\n \"\"\"\n\n def __init__(\n self, class_name: str, parameters: dict\n ):\n \"\"\"\"\"\"\n super(BacktestingSettingEditor, self).__init__()\n\n self.class_name = class_name\n self.parameters = parameters\n self.edits = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n form = QtWidgets.QFormLayout()\n\n # Add vt_symbol and name edit if add new strategy\n self.setWindowTitle(f\"策略参数配置:{self.class_name}\")\n button_text = \"确定\"\n parameters = self.parameters\n\n for name, value in parameters.items():\n type_ = type(value)\n\n edit = QtWidgets.QLineEdit(str(value))\n if type_ is int:\n validator = QtGui.QIntValidator()\n edit.setValidator(validator)\n elif type_ is float:\n validator = QtGui.QDoubleValidator()\n edit.setValidator(validator)\n\n form.addRow(f\"{name} {type_}\", edit)\n\n self.edits[name] = (edit, type_)\n\n button = QtWidgets.QPushButton(button_text)\n button.clicked.connect(self.accept)\n form.addRow(button)\n\n self.setLayout(form)\n\n def get_setting(self):\n \"\"\"\"\"\"\n setting = {}\n\n for name, tp in self.edits.items():\n edit, type_ = tp\n value_text = edit.text()\n\n if type_ == bool:\n if value_text == \"True\":\n value = 
True\n else:\n value = False\n else:\n value = type_(value_text)\n\n setting[name] = value\n\n return setting\n\n\nclass BacktesterChart(pg.GraphicsWindow):\n \"\"\"\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__(title=\"Backtester Chart\")\n\n self.dates = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n pg.setConfigOptions(antialias=True)\n\n # Create plot widgets\n self.balance_plot = self.addPlot(\n title=\"账户净值\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.drawdown_plot = self.addPlot(\n title=\"净值回撤\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.pnl_plot = self.addPlot(\n title=\"每日盈亏\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.distribution_plot = self.addPlot(title=\"盈亏分布\")\n\n # Add curves and bars on plot widgets\n self.balance_curve = self.balance_plot.plot(\n pen=pg.mkPen(\"#ffc107\", width=3)\n )\n\n dd_color = \"#303f9f\"\n self.drawdown_curve = self.drawdown_plot.plot(\n fillLevel=-0.3, brush=dd_color, pen=dd_color\n )\n\n profit_color = 'r'\n loss_color = 'g'\n self.profit_pnl_bar = pg.BarGraphItem(\n x=[], height=[], width=0.3, brush=profit_color, pen=profit_color\n )\n self.loss_pnl_bar = pg.BarGraphItem(\n x=[], height=[], width=0.3, brush=loss_color, pen=loss_color\n )\n self.pnl_plot.addItem(self.profit_pnl_bar)\n self.pnl_plot.addItem(self.loss_pnl_bar)\n\n distribution_color = \"#6d4c41\"\n self.distribution_curve = self.distribution_plot.plot(\n fillLevel=-0.3, brush=distribution_color, pen=distribution_color\n )\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.balance_curve.setData([], [])\n self.drawdown_curve.setData([], [])\n self.profit_pnl_bar.setOpts(x=[], height=[])\n self.loss_pnl_bar.setOpts(x=[], height=[])\n self.distribution_curve.setData([], [])\n\n def set_data(self, df):\n \"\"\"\"\"\"\n if df is None:\n return\n\n count = len(df)\n\n self.dates.clear()\n for n, date in enumerate(df.index):\n self.dates[n] = date\n\n # Set data for curve of balance and drawdown\n self.balance_curve.setData(df[\"balance\"])\n self.drawdown_curve.setData(df[\"drawdown\"])\n\n # Set data for daily pnl bar\n profit_pnl_x = []\n profit_pnl_height = []\n loss_pnl_x = []\n loss_pnl_height = []\n\n for count, pnl in enumerate(df[\"net_pnl\"]):\n if pnl >= 0:\n profit_pnl_height.append(pnl)\n profit_pnl_x.append(count)\n else:\n loss_pnl_height.append(pnl)\n loss_pnl_x.append(count)\n\n self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)\n self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)\n\n # Set data for pnl distribution\n hist, x = np.histogram(df[\"net_pnl\"], bins=\"auto\")\n x = x[:-1]\n self.distribution_curve.setData(x, hist)\n\n\nclass DateAxis(pg.AxisItem):\n \"\"\"Axis for showing date data\"\"\"\n\n def __init__(self, dates: dict, *args, **kwargs):\n \"\"\"\"\"\"\n super().__init__(*args, **kwargs)\n self.dates = dates\n\n def tickStrings(self, values, scale, spacing):\n \"\"\"\"\"\"\n strings = []\n for v in values:\n dt = self.dates.get(v, \"\")\n strings.append(str(dt))\n return strings\n\n\nclass OptimizationSettingEditor(QtWidgets.QDialog):\n \"\"\"\n For setting up parameters for optimization.\n \"\"\"\n DISPLAY_NAME_MAP = {\n \"总收益率\": \"total_return\",\n \"夏普比率\": \"sharpe_ratio\",\n \"收益回撤比\": \"return_drawdown_ratio\",\n \"日均盈亏\": \"daily_net_pnl\"\n }\n\n def __init__(\n self, class_name: str, parameters: dict\n ):\n 
\"\"\"\"\"\"\n super().__init__()\n\n self.class_name = class_name\n self.parameters = parameters\n self.edits = {}\n\n self.optimization_setting = None\n self.use_ga = False\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n QLabel = QtWidgets.QLabel\n\n self.target_combo = QtWidgets.QComboBox()\n self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))\n\n grid = QtWidgets.QGridLayout()\n grid.addWidget(QLabel(\"目标\"), 0, 0)\n grid.addWidget(self.target_combo, 0, 1, 1, 3)\n grid.addWidget(QLabel(\"参数\"), 1, 0)\n grid.addWidget(QLabel(\"开始\"), 1, 1)\n grid.addWidget(QLabel(\"步进\"), 1, 2)\n grid.addWidget(QLabel(\"结束\"), 1, 3)\n\n # Add vt_symbol and name edit if add new strategy\n self.setWindowTitle(f\"优化参数配置:{self.class_name}\")\n\n validator = QtGui.QDoubleValidator()\n row = 2\n\n for name, value in self.parameters.items():\n type_ = type(value)\n if type_ not in [int, float]:\n continue\n\n start_edit = QtWidgets.QLineEdit(str(value))\n step_edit = QtWidgets.QLineEdit(str(1))\n end_edit = QtWidgets.QLineEdit(str(value))\n\n for edit in [start_edit, step_edit, end_edit]:\n edit.setValidator(validator)\n\n grid.addWidget(QLabel(name), row, 0)\n grid.addWidget(start_edit, row, 1)\n grid.addWidget(step_edit, row, 2)\n grid.addWidget(end_edit, row, 3)\n\n self.edits[name] = {\n \"type\": type_,\n \"start\": start_edit,\n \"step\": step_edit,\n \"end\": end_edit\n }\n\n row += 1\n\n parallel_button = QtWidgets.QPushButton(\"多进程优化\")\n parallel_button.clicked.connect(self.generate_parallel_setting)\n grid.addWidget(parallel_button, row, 0, 1, 4)\n\n row += 1\n ga_button = QtWidgets.QPushButton(\"遗传算法优化\")\n ga_button.clicked.connect(self.generate_ga_setting)\n grid.addWidget(ga_button, row, 0, 1, 4)\n\n self.setLayout(grid)\n\n def generate_ga_setting(self):\n \"\"\"\"\"\"\n self.use_ga = True\n self.generate_setting()\n\n def generate_parallel_setting(self):\n \"\"\"\"\"\"\n self.use_ga = False\n self.generate_setting()\n\n def generate_setting(self):\n \"\"\"\"\"\"\n self.optimization_setting = OptimizationSetting()\n\n self.target_display = self.target_combo.currentText()\n target_name = self.DISPLAY_NAME_MAP[self.target_display]\n self.optimization_setting.set_target(target_name)\n\n for name, d in self.edits.items():\n type_ = d[\"type\"]\n start_value = type_(d[\"start\"].text())\n step_value = type_(d[\"step\"].text())\n end_value = type_(d[\"end\"].text())\n\n if start_value == end_value:\n self.optimization_setting.add_parameter(name, start_value)\n else:\n self.optimization_setting.add_parameter(\n name,\n start_value,\n end_value,\n step_value\n )\n\n self.accept()\n\n def get_setting(self):\n \"\"\"\"\"\"\n return self.optimization_setting, self.use_ga\n\n\nclass OptimizationResultMonitor(QtWidgets.QDialog):\n \"\"\"\n For viewing optimization result.\n \"\"\"\n\n def __init__(\n self, result_values: list, target_display: str\n ):\n \"\"\"\"\"\"\n super().__init__()\n\n self.result_values = result_values\n self.target_display = target_display\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"参数优化结果\")\n self.resize(1100, 500)\n\n table = QtWidgets.QTableWidget()\n\n table.setColumnCount(2)\n table.setRowCount(len(self.result_values))\n table.setHorizontalHeaderLabels([\"参数\", self.target_display])\n table.setEditTriggers(table.NoEditTriggers)\n table.verticalHeader().setVisible(False)\n\n table.horizontalHeader().setSectionResizeMode(\n 0, QtWidgets.QHeaderView.ResizeToContents\n )\n table.horizontalHeader().setSectionResizeMode(\n 1, 
QtWidgets.QHeaderView.Stretch\n )\n\n for n, tp in enumerate(self.result_values):\n setting, target_value, _ = tp\n setting_cell = QtWidgets.QTableWidgetItem(str(setting))\n target_cell = QtWidgets.QTableWidgetItem(str(target_value))\n\n setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)\n target_cell.setTextAlignment(QtCore.Qt.AlignCenter)\n\n table.setItem(n, 0, setting_cell)\n table.setItem(n, 1, target_cell)\n\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(table)\n\n self.setLayout(vbox)\n\n\nclass BacktestingTradeMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting trade data.\n \"\"\"\n\n headers = {\n \"tradeid\": {\"display\": \"成交号 \", \"cell\": BaseCell, \"update\": False},\n \"orderid\": {\"display\": \"委托号\", \"cell\": BaseCell, \"update\": False},\n \"symbol\": {\"display\": \"代码\", \"cell\": BaseCell, \"update\": False},\n \"exchange\": {\"display\": \"交易所\", \"cell\": EnumCell, \"update\": False},\n \"direction\": {\"display\": \"方向\", \"cell\": DirectionCell, \"update\": False},\n \"offset\": {\"display\": \"开平\", \"cell\": EnumCell, \"update\": False},\n \"price\": {\"display\": \"价格\", \"cell\": BaseCell, \"update\": False},\n \"volume\": {\"display\": \"数量\", \"cell\": BaseCell, \"update\": False},\n \"datetime\": {\"display\": \"时间\", \"cell\": BaseCell, \"update\": False},\n \"gateway_name\": {\"display\": \"接口\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass BacktestingOrderMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting order data.\n \"\"\"\n\n headers = {\n \"orderid\": {\"display\": \"委托号\", \"cell\": BaseCell, \"update\": False},\n \"symbol\": {\"display\": \"代码\", \"cell\": BaseCell, \"update\": False},\n \"exchange\": {\"display\": \"交易所\", \"cell\": EnumCell, \"update\": False},\n \"type\": {\"display\": \"类型\", \"cell\": EnumCell, \"update\": False},\n \"direction\": {\"display\": \"方向\", \"cell\": DirectionCell, \"update\": False},\n \"offset\": {\"display\": \"开平\", \"cell\": EnumCell, \"update\": False},\n \"price\": {\"display\": \"价格\", \"cell\": BaseCell, \"update\": False},\n \"volume\": {\"display\": \"总数量\", \"cell\": BaseCell, \"update\": False},\n \"traded\": {\"display\": \"已成交\", \"cell\": BaseCell, \"update\": False},\n \"status\": {\"display\": \"状态\", \"cell\": EnumCell, \"update\": False},\n \"datetime\": {\"display\": \"时间\", \"cell\": BaseCell, \"update\": False},\n \"gateway_name\": {\"display\": \"接口\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass DailyResultMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting daily result.\n \"\"\"\n\n headers = {\n \"date\": {\"display\": \"日期\", \"cell\": BaseCell, \"update\": False},\n \"trade_count\": {\"display\": \"成交笔数\", \"cell\": BaseCell, \"update\": False},\n \"start_pos\": {\"display\": \"开盘持仓\", \"cell\": BaseCell, \"update\": False},\n \"end_pos\": {\"display\": \"收盘持仓\", \"cell\": BaseCell, \"update\": False},\n \"turnover\": {\"display\": \"成交额\", \"cell\": BaseCell, \"update\": False},\n \"commission\": {\"display\": \"手续费\", \"cell\": BaseCell, \"update\": False},\n \"slippage\": {\"display\": \"滑点\", \"cell\": BaseCell, \"update\": False},\n \"trading_pnl\": {\"display\": \"交易盈亏\", \"cell\": BaseCell, \"update\": False},\n \"holding_pnl\": {\"display\": \"持仓盈亏\", \"cell\": BaseCell, \"update\": False},\n \"total_pnl\": {\"display\": \"总盈亏\", \"cell\": BaseCell, \"update\": False},\n \"net_pnl\": {\"display\": \"净盈亏\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass BacktestingResultDialog(QtWidgets.QDialog):\n \"\"\"\n \"\"\"\n\n def 
__init__(\n self,\n main_engine: MainEngine,\n event_engine: EventEngine,\n title: str,\n table_class: QtWidgets.QTableWidget\n ):\n \"\"\"\"\"\"\n super().__init__()\n\n self.main_engine = main_engine\n self.event_engine = event_engine\n self.title = title\n self.table_class = table_class\n\n self.updated = False\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(self.title)\n self.resize(1100, 600)\n\n self.table = self.table_class(self.main_engine, self.event_engine)\n\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.table)\n\n self.setLayout(vbox)\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.updated = False\n self.table.setRowCount(0)\n\n def update_data(self, data: list):\n \"\"\"\"\"\"\n self.updated = True\n\n data.reverse()\n for obj in data:\n self.table.insert_new_row(obj)\n\n def is_updated(self):\n \"\"\"\"\"\"\n return self.updated\n\n\nclass CandleChartDialog(QtWidgets.QDialog):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n self.dt_ix_map = {}\n self.updated = False\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"回测K线图表\")\n self.resize(1400, 800)\n\n # Create chart widget\n self.chart = ChartWidget()\n self.chart.add_plot(\"candle\", hide_x_axis=True)\n self.chart.add_plot(\"volume\", maximum_height=200)\n self.chart.add_item(CandleItem, \"candle\", \"candle\")\n self.chart.add_item(VolumeItem, \"volume\", \"volume\")\n self.chart.add_cursor()\n\n # Add scatter item for showing tradings\n self.trade_scatter = pg.ScatterPlotItem()\n candle_plot = self.chart.get_plot(\"candle\")\n candle_plot.addItem(self.trade_scatter)\n\n # Set layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.chart)\n self.setLayout(vbox)\n\n def update_history(self, history: list):\n \"\"\"\"\"\"\n self.updated = True\n self.chart.update_history(history)\n\n for ix, bar in enumerate(history):\n self.dt_ix_map[bar.datetime] = ix\n\n def update_trades(self, trades: list):\n \"\"\"\"\"\"\n trade_data = []\n\n for trade in trades:\n ix = self.dt_ix_map[trade.datetime]\n\n scatter = {\n \"pos\": (ix, trade.price),\n \"data\": 1,\n \"size\": 14,\n \"pen\": pg.mkPen((255, 255, 255))\n }\n\n if trade.direction == Direction.LONG:\n scatter_symbol = \"t1\" # Up arrow\n else:\n scatter_symbol = \"t\" # Down arrow\n\n if trade.offset == Offset.OPEN:\n scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow\n else:\n scatter_brush = pg.mkBrush((0, 0, 255)) # Blue\n\n scatter[\"symbol\"] = scatter_symbol\n scatter[\"brush\"] = scatter_brush\n\n trade_data.append(scatter)\n\n self.trade_scatter.setData(trade_data)\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.updated = False\n self.chart.clear_all()\n\n self.dt_ix_map.clear()\n self.trade_scatter.clear()\n\n def is_updated(self):\n \"\"\"\"\"\"\n return self.updated\n"
] | [
[
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mvaldenegro/paper-subensembles-image-classification | [
"cc3a6567b1de82b9bfb1612ad8d0e73cdd7ae09b"
] | [
"svhn/evaluate_calibration.py"
] | [
"import numpy as np\nimport h5py\nimport pandas as pd\n\nfrom svhn_io import load_svhn\nfrom keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error\n\nEPSILON = 1e-10\n\ndef load_hdf5_data(filename):\n inp = h5py.File(filename, \"r\")\n preds = inp[\"preds\"][...]\n\n inp.close()\n\n return preds\n\nNUM_ENSEMBLES = 15\nNUM_BINS=7\n\n#IOD_FILE_PATTERN = \"cnn_svhn-num_ens-{}-preds.hdf5\"\n#OUTPUT_PATTERN = \"svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv\"\n\nIOD_FILE_PATTERN = \"deepensembles-cnn_svhn-num_ens-{}-preds.hdf5\"\nOUTPUT_PATTERN = \"svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv\"\n\nif __name__ == \"__main__\":\n for num_ens in range(1, NUM_ENSEMBLES + 1):\n (_, __), (___, y_true) = load_svhn()\n y_true = y_true.flatten()\n\n y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens))\n y_confs = np.max(y_probs, axis=1)\n y_pred = np.argmax(y_probs, axis=1)\n\n curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n\n print(\"Processing calibration curve for {} ensembles. Error: {}\".format(num_ens, error))\n\n output_df = pd.DataFrame(data={\"conf\": curve_conf, \"acc\": curve_acc})\n output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)"
] | [
[
"numpy.max",
"numpy.argmax",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tolysz/numba | [
"d7953a18dbf5ea231dc16e967ce8e9b754578ea6",
"d7953a18dbf5ea231dc16e967ce8e9b754578ea6",
"d7953a18dbf5ea231dc16e967ce8e9b754578ea6"
] | [
"numba/targets/npyimpl.py",
"numba/tests/test_cfunc.py",
"numba/tests/npyufunc/test_gufunc.py"
] | [
"\"\"\"\nImplementation of functions in the Numpy package.\n\"\"\"\n\n\nimport math\nimport sys\nimport itertools\nfrom collections import namedtuple\n\nfrom llvmlite.llvmpy import core as lc\n\nimport numpy as np\nimport operator\n\nfrom . import builtins, callconv, ufunc_db, arrayobj\nfrom .imputils import Registry, impl_ret_new_ref, force_error_model\nfrom .. import typing, types, cgutils, numpy_support, utils\nfrom ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype\nfrom ..typing import npydecl\nfrom ..extending import overload, intrinsic\n\nfrom .. import errors\n\nregistry = Registry()\nlower = registry.lower\n\n\n########################################################################\n\n# In the way we generate code, ufuncs work with scalar as well as\n# with array arguments. The following helper classes help dealing\n# with scalar and array arguments in a regular way.\n#\n# In short, the classes provide a uniform interface. The interface\n# handles the indexing of as many dimensions as the array may have.\n# For scalars, all indexing is ignored and when the value is read,\n# the scalar is returned. For arrays code for actual indexing is\n# generated and reading performs the appropriate indirection.\n\nclass _ScalarIndexingHelper(object):\n def update_indices(self, loop_indices, name):\n pass\n\n def as_values(self):\n pass\n\n\nclass _ScalarHelper(object):\n \"\"\"Helper class to handle scalar arguments (and result).\n Note that store_data is only used when generating code for\n a scalar ufunc and to write the output value.\n\n For loading, the value is directly used without having any\n kind of indexing nor memory backing it up. This is the use\n for input arguments.\n\n For storing, a variable is created in the stack where the\n value will be written.\n\n Note that it is not supported (as it is unneeded for our\n current use-cases) reading back a stored value. This class\n will always \"load\" the original value it got at its creation.\n \"\"\"\n def __init__(self, ctxt, bld, val, ty):\n self.context = ctxt\n self.builder = bld\n self.val = val\n self.base_type = ty\n intpty = ctxt.get_value_type(types.intp)\n self.shape = [lc.Constant.int(intpty, 1)]\n\n lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1)\n self._ptr = cgutils.alloca_once(bld, lty)\n\n def create_iter_indices(self):\n return _ScalarIndexingHelper()\n\n def load_data(self, indices):\n return self.val\n\n def store_data(self, indices, val):\n self.builder.store(val, self._ptr)\n\n @property\n def return_val(self):\n return self.builder.load(self._ptr)\n\n\nclass _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',\n ('array', 'indices'))):\n def update_indices(self, loop_indices, name):\n bld = self.array.builder\n intpty = self.array.context.get_value_type(types.intp)\n ONE = lc.Constant.int(lc.Type.int(intpty.width), 1)\n\n # we are only interested in as many inner dimensions as dimensions\n # the indexed array has (the outer dimensions are broadcast, so\n # ignoring the outer indices produces the desired result.\n indices = loop_indices[len(loop_indices) - len(self.indices):]\n for src, dst, dim in zip(indices, self.indices, self.array.shape):\n cond = bld.icmp(lc.ICMP_UGT, dim, ONE)\n with bld.if_then(cond):\n bld.store(src, dst)\n\n def as_values(self):\n \"\"\"\n The indexing helper is built using alloca for each value, so it\n actually contains pointers to the actual indices to load. Note\n that update_indices assumes the same. 
This method returns the\n indices as values\n \"\"\"\n bld = self.array.builder\n return [bld.load(index) for index in self.indices]\n\n\nclass _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',\n 'shape', 'strides', 'data',\n 'layout', 'base_type', 'ndim',\n 'return_val'))):\n \"\"\"Helper class to handle array arguments/result.\n It provides methods to generate code loading/storing specific\n items as well as support code for handling indices.\n \"\"\"\n def create_iter_indices(self):\n intpty = self.context.get_value_type(types.intp)\n ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0)\n\n indices = []\n for i in range(self.ndim):\n x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width))\n self.builder.store(ZERO, x)\n indices.append(x)\n return _ArrayIndexingHelper(self, indices)\n\n def _load_effective_address(self, indices):\n return cgutils.get_item_pointer2(self.context,\n self.builder,\n data=self.data,\n shape=self.shape,\n strides=self.strides,\n layout=self.layout,\n inds=indices)\n\n def load_data(self, indices):\n model = self.context.data_model_manager[self.base_type]\n ptr = self._load_effective_address(indices)\n return model.load_from_data_pointer(self.builder, ptr)\n\n def store_data(self, indices, value):\n ctx = self.context\n bld = self.builder\n store_value = ctx.get_value_as_data(bld, self.base_type, value)\n assert ctx.get_data_type(self.base_type) == store_value.type\n bld.store(store_value, self._load_effective_address(indices))\n\n\ndef _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):\n \"\"\"returns an instance of the appropriate Helper (either\n _ScalarHelper or _ArrayHelper) class to handle the argument.\n using the polymorphic interface of the Helper classes, scalar\n and array cases can be handled with the same code\"\"\"\n\n # first un-Optional Optionals\n if isinstance(tyinp, types.Optional):\n oty = tyinp\n tyinp = tyinp.type\n inp = ctxt.cast(bld, inp, oty, tyinp)\n\n # then prepare the arg for a concrete instance\n if isinstance(tyinp, types.ArrayCompatible):\n ary = ctxt.make_array(tyinp)(ctxt, bld, inp)\n shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)\n strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)\n return _ArrayHelper(ctxt, bld, shape, strides, ary.data,\n tyinp.layout, tyinp.dtype, tyinp.ndim, inp)\n elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):\n return _ScalarHelper(ctxt, bld, inp, tyinp)\n else:\n raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp)))\n\n\n_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),\n types.intp, types.CPointer(types.intp))\ndef _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):\n '''Low-level utility function used in calculating a shape for\n an implicit output array. This function assumes that the\n destination shape is an LLVM pointer to a C-style array that was\n already initialized to a size of one along all axes.\n\n Returns an integer value:\n >= 1 : Succeeded. 
Return value should equal the number of dimensions in\n the destination shape.\n 0 : Failed to broadcast because source shape is larger than the\n destination shape (this case should be weeded out at type\n checking).\n < 0 : Failed to broadcast onto destination axis, at axis number ==\n -(return_value + 1).\n '''\n if src_ndim > dest_ndim:\n # This check should have been done during type checking, but\n # let's be defensive anyway...\n return 0\n else:\n src_index = 0\n dest_index = dest_ndim - src_ndim\n while src_index < src_ndim:\n src_dim_size = src_shape[src_index]\n dest_dim_size = dest_shape[dest_index]\n # Check to see if we've already mutated the destination\n # shape along this axis.\n if dest_dim_size != 1:\n # If we have mutated the destination shape already,\n # then the source axis size must either be one,\n # or the destination axis size.\n if src_dim_size != dest_dim_size and src_dim_size != 1:\n return -(dest_index + 1)\n elif src_dim_size != 1:\n # If the destination size is still its initial\n dest_shape[dest_index] = src_dim_size\n src_index += 1\n dest_index += 1\n return dest_index\n\ndef _build_array(context, builder, array_ty, input_types, inputs):\n \"\"\"Utility function to handle allocation of an implicit output array\n given the target context, builder, output array type, and a list of\n _ArrayHelper instances.\n \"\"\"\n intp_ty = context.get_value_type(types.intp)\n def make_intp_const(val):\n return context.get_constant(types.intp, val)\n\n ZERO = make_intp_const(0)\n ONE = make_intp_const(1)\n\n src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,\n \"src_shape\")\n dest_ndim = make_intp_const(array_ty.ndim)\n dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,\n \"dest_shape\")\n dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)\n for index in range(array_ty.ndim))\n\n # Initialize the destination shape with all ones.\n for dest_shape_addr in dest_shape_addrs:\n builder.store(ONE, dest_shape_addr)\n\n # For each argument, try to broadcast onto the destination shape,\n # mutating along any axis where the argument shape is not one and\n # the destination shape is one.\n for arg_number, arg in enumerate(inputs):\n if not hasattr(arg, \"ndim\"): # Skip scalar arguments\n continue\n arg_ndim = make_intp_const(arg.ndim)\n for index in range(arg.ndim):\n builder.store(arg.shape[index],\n cgutils.gep_inbounds(builder, src_shape, index))\n arg_result = context.compile_internal(\n builder, _broadcast_onto, _broadcast_onto_sig,\n [arg_ndim, src_shape, dest_ndim, dest_shape])\n with cgutils.if_unlikely(builder,\n builder.icmp(lc.ICMP_SLT, arg_result, ONE)):\n msg = \"unable to broadcast argument %d to output array\" % (\n arg_number,)\n\n loc = errors.loc_info.get('loc', None)\n if loc is not None:\n msg += '\\nFile \"%s\", line %d, ' % (loc.filename, loc.line)\n\n context.call_conv.return_user_exc(builder, ValueError, (msg,))\n\n real_array_ty = array_ty.as_array\n\n dest_shape_tup = tuple(builder.load(dest_shape_addr)\n for dest_shape_addr in dest_shape_addrs)\n array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,\n dest_shape_tup)\n\n # Get the best argument to call __array_wrap__ on\n array_wrapper_index = select_array_wrapper(input_types)\n array_wrapper_ty = input_types[array_wrapper_index]\n try:\n # __array_wrap__(source wrapped array, out array) -> out wrapped array\n array_wrap = context.get_function('__array_wrap__',\n array_ty(array_wrapper_ty, real_array_ty))\n except NotImplementedError:\n 
# If it's the same priority as a regular array, assume we\n # should use the allocated array unchanged.\n if array_wrapper_ty.array_priority != types.Array.array_priority:\n raise\n out_val = array_val._getvalue()\n else:\n wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())\n out_val = array_wrap(builder, wrap_args)\n\n ndim = array_ty.ndim\n shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)\n strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)\n return _ArrayHelper(context, builder, shape, strides, array_val.data,\n array_ty.layout, array_ty.dtype, ndim,\n out_val)\n\n\ndef numpy_ufunc_kernel(context, builder, sig, args, kernel_class,\n explicit_output=True):\n # This is the code generator that builds all the looping needed\n # to execute a numpy functions over several dimensions (including\n # scalar cases).\n #\n # context - the code generation context\n # builder - the code emitter\n # sig - signature of the ufunc\n # args - the args to the ufunc\n # kernel_class - a code generating subclass of _Kernel that provides\n # explicit_output - if the output was explicit in the call\n # (ie: np.add(x,y,r))\n\n arguments = [_prepare_argument(context, builder, arg, tyarg)\n for arg, tyarg in zip(args, sig.args)]\n if not explicit_output:\n ret_ty = sig.return_type\n if isinstance(ret_ty, types.ArrayCompatible):\n output = _build_array(context, builder, ret_ty, sig.args, arguments)\n else:\n output = _prepare_argument(\n context, builder,\n lc.Constant.null(context.get_value_type(ret_ty)), ret_ty)\n arguments.append(output)\n elif context.enable_nrt:\n # Incref the output\n context.nrt.incref(builder, sig.return_type, args[-1])\n\n inputs = arguments[0:-1]\n output = arguments[-1]\n\n outer_sig = [a.base_type for a in arguments]\n #signature expects return type first, while we have it last:\n outer_sig = outer_sig[-1:] + outer_sig[:-1]\n outer_sig = typing.signature(*outer_sig)\n kernel = kernel_class(context, builder, outer_sig)\n intpty = context.get_value_type(types.intp)\n\n indices = [inp.create_iter_indices() for inp in inputs]\n\n loopshape = output.shape\n with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices:\n vals_in = []\n for i, (index, arg) in enumerate(zip(indices, inputs)):\n index.update_indices(loop_indices, i)\n vals_in.append(arg.load_data(index.as_values()))\n\n val_out = kernel.generate(*vals_in)\n output.store_data(loop_indices, val_out)\n out = arguments[-1].return_val\n return impl_ret_new_ref(context, builder, sig.return_type, out)\n\n\n# Kernels are the code to be executed inside the multidimensional loop.\nclass _Kernel(object):\n def __init__(self, context, builder, outer_sig):\n self.context = context\n self.builder = builder\n self.outer_sig = outer_sig\n\n def cast(self, val, fromty, toty):\n \"\"\"Numpy uses cast semantics that are different from standard Python\n (for example, it does allow casting from complex to float).\n\n This method acts as a patch to context.cast so that it allows\n complex to real/int casts.\n\n \"\"\"\n if (isinstance(fromty, types.Complex) and\n not isinstance(toty, types.Complex)):\n # attempt conversion of the real part to the specified type.\n # note that NumPy issues a warning in this kind of conversions\n newty = fromty.underlying_float\n attr = self.context.get_getattr(fromty, 'real')\n val = attr(self.context, self.builder, fromty, val, 'real')\n fromty = newty\n # let the regular cast do the rest...\n\n return self.context.cast(self.builder, val, fromty, 
toty)\n\n\ndef _ufunc_db_function(ufunc):\n \"\"\"Use the ufunc loop type information to select the code generation\n function from the table provided by the dict_of_kernels. The dict\n of kernels maps the loop identifier to a function with the\n following signature: (context, builder, signature, args).\n\n The loop type information has the form 'AB->C'. The letters to the\n left of '->' are the input types (specified as NumPy letter\n types). The letters to the right of '->' are the output\n types. There must be 'ufunc.nin' letters to the left of '->', and\n 'ufunc.nout' letters to the right.\n\n For example, a binary float loop resulting in a float, will have\n the following signature: 'ff->f'.\n\n A given ufunc implements many loops. The list of loops implemented\n for a given ufunc can be accessed using the 'types' attribute in\n the ufunc object. The NumPy machinery selects the first loop that\n fits a given calling signature (in our case, what we call the\n outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'.\n \"\"\"\n\n class _KernelImpl(_Kernel):\n def __init__(self, context, builder, outer_sig):\n super(_KernelImpl, self).__init__(context, builder, outer_sig)\n loop = ufunc_find_matching_loop(\n ufunc, outer_sig.args + (outer_sig.return_type,))\n self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig)\n self.inner_sig = typing.signature(\n *(loop.outputs + loop.inputs))\n\n if self.fn is None:\n msg = \"Don't know how to lower ufunc '{0}' for loop '{1}'\"\n raise NotImplementedError(msg.format(ufunc.__name__, loop))\n\n def generate(self, *args):\n isig = self.inner_sig\n osig = self.outer_sig\n\n cast_args = [self.cast(val, inty, outty)\n for val, inty, outty in zip(args, osig.args,\n isig.args)]\n with force_error_model(self.context, 'numpy'):\n res = self.fn(self.context, self.builder, isig, cast_args)\n dmm = self.context.data_model_manager\n res = dmm[isig.return_type].from_return(self.builder, res)\n return self.cast(res, isig.return_type, osig.return_type)\n\n return _KernelImpl\n\n\n################################################################################\n# Helper functions that register the ufuncs\n\n_kernels = {} # Temporary map from ufunc's to their kernel implementation class\n\ndef register_unary_ufunc_kernel(ufunc, kernel):\n def unary_ufunc(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel)\n\n def unary_ufunc_no_explicit_output(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n _any = types.Any\n\n # (array or scalar, out=array)\n lower(ufunc, _any, types.Array)(unary_ufunc)\n # (array or scalar)\n lower(ufunc, _any)(unary_ufunc_no_explicit_output)\n\n _kernels[ufunc] = kernel\n\n\ndef register_binary_ufunc_kernel(ufunc, kernel):\n def binary_ufunc(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel)\n\n def binary_ufunc_no_explicit_output(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n _any = types.Any\n\n # (array or scalar, array o scalar, out=array)\n lower(ufunc, _any, _any, types.Array)(binary_ufunc)\n # (scalar, scalar)\n lower(ufunc, _any, _any)(binary_ufunc_no_explicit_output)\n\n _kernels[ufunc] = kernel\n\n\ndef register_unary_operator_kernel(operator, kernel, inplace=False):\n assert not inplace # are there any inplace unary operators?\n def lower_unary_operator(context, builder, sig, args):\n 
return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n _arr_kind = types.Array\n lower(operator, _arr_kind)(lower_unary_operator)\n\n\ndef register_binary_operator_kernel(op, kernel, inplace=False):\n def lower_binary_operator(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n def lower_inplace_operator(context, builder, sig, args):\n # The visible signature is (A, B) -> A\n # The implementation's signature (with explicit output)\n # is (A, B, A) -> A\n args = tuple(args) + (args[0],)\n sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],))\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=True)\n\n _any = types.Any\n _arr_kind = types.Array\n formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)]\n for sig in formal_sigs:\n if not inplace:\n lower(op, *sig)(lower_binary_operator)\n else:\n lower(op, *sig)(lower_inplace_operator)\n\n\n\n################################################################################\n# Use the contents of ufunc_db to initialize the supported ufuncs\n\nfor ufunc in ufunc_db.get_ufuncs():\n if ufunc.nin == 1:\n register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))\n elif ufunc.nin == 2:\n register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))\n else:\n raise RuntimeError(\"Don't know how to register ufuncs from ufunc_db with arity > 2\")\n\n\n@lower(operator.pos, types.Array)\ndef array_positive_impl(context, builder, sig, args):\n '''Lowering function for +(array) expressions. Defined here\n (numba.targets.npyimpl) since the remaining array-operator\n lowering functions are also registered in this module.\n '''\n class _UnaryPositiveKernel(_Kernel):\n def generate(self, *args):\n [val] = args\n return val\n\n return numpy_ufunc_kernel(context, builder, sig, args,\n _UnaryPositiveKernel, explicit_output=False)\n\n\nfor _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map,\n npydecl.NumpyRulesArrayOperator._op_map,\n ):\n for operator, ufunc_name in _op_map.items():\n ufunc = getattr(np, ufunc_name)\n kernel = _kernels[ufunc]\n if ufunc.nin == 1:\n register_unary_operator_kernel(operator, kernel)\n elif ufunc.nin == 2:\n register_binary_operator_kernel(operator, kernel)\n else:\n raise RuntimeError(\"There shouldn't be any non-unary or binary operators\")\n\nfor _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map,\n ):\n for operator, ufunc_name in _op_map.items():\n ufunc = getattr(np, ufunc_name)\n kernel = _kernels[ufunc]\n if ufunc.nin == 1:\n register_unary_operator_kernel(operator, kernel, inplace=True)\n elif ufunc.nin == 2:\n register_binary_operator_kernel(operator, kernel, inplace=True)\n else:\n raise RuntimeError(\"There shouldn't be any non-unary or binary operators\")\n\n\n\ndel _kernels\n\n@intrinsic\ndef _make_dtype_object(typingctx, desc):\n \"\"\"Given a string or NumberClass description *desc*, returns the dtype object.\n \"\"\"\n def from_nb_type(nb_type):\n return_type = types.DType(nb_type)\n sig = return_type(desc)\n\n def codegen(context, builder, signature, args):\n # All dtype objects are dummy values in LLVM.\n # They only exist in the type level.\n return context.get_dummy_value()\n\n return sig, codegen\n\n if isinstance(desc, types.Literal):\n # Convert the str description into np.dtype then to numba type.\n nb_type = from_dtype(np.dtype(desc.literal_value))\n return from_nb_type(nb_type)\n elif isinstance(desc, 
types.functions.NumberClass):\n thestr = str(desc.dtype)\n # Convert the str description into np.dtype then to numba type.\n nb_type = from_dtype(np.dtype(thestr))\n return from_nb_type(nb_type)\n\n@overload(np.dtype)\ndef numpy_dtype(desc):\n \"\"\"Provide an implementation so that numpy.dtype function can be lowered.\n \"\"\"\n if isinstance(desc, (types.Literal, types.functions.NumberClass)):\n def imp(desc):\n return _make_dtype_object(desc)\n return imp\n else:\n raise TypeError('unknown dtype descriptor: {}'.format(desc))\n",
"\"\"\"\nTests for @cfunc and friends.\n\"\"\"\n\n\nimport ctypes\nimport os\nimport subprocess\nimport sys\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom numba import unittest_support as unittest\nfrom numba import cfunc, carray, farray, types, typing, utils, njit\nfrom numba import cffi_support, numpy_support\nfrom .support import TestCase, tag, captured_stderr\nfrom .test_dispatcher import BaseCacheTest\n\nskip_cffi_unsupported = unittest.skipUnless(\n cffi_support.SUPPORTED,\n \"CFFI not supported -- please install the cffi module\",\n)\n\n\ndef add_usecase(a, b):\n return a + b\n\ndef div_usecase(a, b):\n c = a / b\n return c\n\ndef square_usecase(a):\n return a ** 2\n\nadd_sig = \"float64(float64, float64)\"\n\ndiv_sig = \"float64(int64, int64)\"\n\nsquare_sig = \"float64(float64)\"\n\ndef objmode_usecase(a, b):\n object()\n return a + b\n\n# Test functions for carray() and farray()\n\nCARRAY_USECASE_OUT_LEN = 8\n\ndef make_cfarray_usecase(func):\n\n def cfarray_usecase(in_ptr, out_ptr, m, n):\n # Tuple shape\n in_ = func(in_ptr, (m, n))\n # Integer shape\n out = func(out_ptr, CARRAY_USECASE_OUT_LEN)\n out[0] = in_.ndim\n out[1:3] = in_.shape\n out[3:5] = in_.strides\n out[5] = in_.flags.c_contiguous\n out[6] = in_.flags.f_contiguous\n s = 0\n for i, j in np.ndindex(m, n):\n s += in_[i, j] * (i - j)\n out[7] = s\n\n return cfarray_usecase\n\ncarray_usecase = make_cfarray_usecase(carray)\nfarray_usecase = make_cfarray_usecase(farray)\n\n\ndef make_cfarray_dtype_usecase(func):\n # Same as make_cfarray_usecase(), but with explicit dtype.\n\n def cfarray_usecase(in_ptr, out_ptr, m, n):\n # Tuple shape\n in_ = func(in_ptr, (m, n), dtype=np.float32)\n # Integer shape\n out = func(out_ptr, CARRAY_USECASE_OUT_LEN, np.float32)\n out[0] = in_.ndim\n out[1:3] = in_.shape\n out[3:5] = in_.strides\n out[5] = in_.flags.c_contiguous\n out[6] = in_.flags.f_contiguous\n s = 0\n for i, j in np.ndindex(m, n):\n s += in_[i, j] * (i - j)\n out[7] = s\n\n return cfarray_usecase\n\ncarray_dtype_usecase = make_cfarray_dtype_usecase(carray)\nfarray_dtype_usecase = make_cfarray_dtype_usecase(farray)\n\ncarray_float32_usecase_sig = types.void(types.CPointer(types.float32),\n types.CPointer(types.float32),\n types.intp, types.intp)\n\ncarray_float64_usecase_sig = types.void(types.CPointer(types.float64),\n types.CPointer(types.float64),\n types.intp, types.intp)\n\ncarray_voidptr_usecase_sig = types.void(types.voidptr, types.voidptr,\n types.intp, types.intp)\n\n\nclass TestCFunc(TestCase):\n\n def test_basic(self):\n \"\"\"\n Basic usage and properties of a cfunc.\n \"\"\"\n f = cfunc(add_sig)(add_usecase)\n\n self.assertEqual(f.__name__, \"add_usecase\")\n self.assertEqual(f.__qualname__, \"add_usecase\")\n self.assertIs(f.__wrapped__, add_usecase)\n\n symbol = f.native_name\n self.assertIsInstance(symbol, str)\n self.assertIn(\"add_usecase\", symbol)\n\n addr = f.address\n self.assertIsInstance(addr, utils.INT_TYPES)\n\n ct = f.ctypes\n self.assertEqual(ctypes.cast(ct, ctypes.c_void_p).value, addr)\n\n self.assertPreciseEqual(ct(2.0, 3.5), 5.5)\n\n @skip_cffi_unsupported\n def test_cffi(self):\n from . 
import cffi_usecases\n ffi, lib = cffi_usecases.load_inline_module()\n\n f = cfunc(square_sig)(square_usecase)\n\n res = lib._numba_test_funcptr(f.cffi)\n self.assertPreciseEqual(res, 2.25) # 1.5 ** 2\n\n def test_locals(self):\n # By forcing the intermediate result into an integer, we\n # truncate the ultimate function result\n f = cfunc(div_sig, locals={'c': types.int64})(div_usecase)\n self.assertPreciseEqual(f.ctypes(8, 3), 2.0)\n\n def test_errors(self):\n f = cfunc(div_sig)(div_usecase)\n\n with captured_stderr() as err:\n self.assertPreciseEqual(f.ctypes(5, 2), 2.5)\n self.assertEqual(err.getvalue(), \"\")\n\n with captured_stderr() as err:\n res = f.ctypes(5, 0)\n # This is just a side effect of Numba zero-initializing\n # stack variables, and could change in the future.\n self.assertPreciseEqual(res, 0.0)\n err = err.getvalue()\n self.assertIn(\"ZeroDivisionError:\", err)\n self.assertIn(\"Exception ignored\", err)\n\n def test_llvm_ir(self):\n f = cfunc(add_sig)(add_usecase)\n ir = f.inspect_llvm()\n self.assertIn(f.native_name, ir)\n self.assertIn(\"fadd double\", ir)\n\n def test_object_mode(self):\n \"\"\"\n Object mode is currently unsupported.\n \"\"\"\n with self.assertRaises(NotImplementedError):\n cfunc(add_sig, forceobj=True)(add_usecase)\n with self.assertTypingError() as raises:\n cfunc(add_sig)(objmode_usecase)\n self.assertIn(\"Untyped global name 'object'\", str(raises.exception))\n\n\nclass TestCFuncCache(BaseCacheTest):\n\n here = os.path.dirname(__file__)\n usecases_file = os.path.join(here, \"cfunc_cache_usecases.py\")\n modname = \"cfunc_caching_test_fodder\"\n\n def run_in_separate_process(self):\n # Cached functions can be run from a distinct process.\n code = \"\"\"if 1:\n import sys\n\n sys.path.insert(0, %(tempdir)r)\n mod = __import__(%(modname)r)\n mod.self_test()\n\n f = mod.add_usecase\n assert f.cache_hits == 1\n f = mod.outer\n assert f.cache_hits == 1\n f = mod.div_usecase\n assert f.cache_hits == 1\n \"\"\" % dict(tempdir=self.tempdir, modname=self.modname)\n\n popen = subprocess.Popen([sys.executable, \"-c\", code],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = popen.communicate()\n if popen.returncode != 0:\n raise AssertionError(\"process failed with code %s: stderr follows\\n%s\\n\"\n % (popen.returncode, err.decode()))\n\n def check_module(self, mod):\n mod.self_test()\n\n def test_caching(self):\n self.check_pycache(0)\n mod = self.import_module()\n self.check_pycache(6) # 3 index, 3 data\n\n self.assertEqual(mod.add_usecase.cache_hits, 0)\n self.assertEqual(mod.outer.cache_hits, 0)\n self.assertEqual(mod.add_nocache_usecase.cache_hits, 0)\n self.assertEqual(mod.div_usecase.cache_hits, 0)\n self.check_module(mod)\n\n # Reload module to hit the cache\n mod = self.import_module()\n self.check_pycache(6) # 3 index, 3 data\n\n self.assertEqual(mod.add_usecase.cache_hits, 1)\n self.assertEqual(mod.outer.cache_hits, 1)\n self.assertEqual(mod.add_nocache_usecase.cache_hits, 0)\n self.assertEqual(mod.div_usecase.cache_hits, 1)\n self.check_module(mod)\n\n self.run_in_separate_process()\n\n\nclass TestCArray(TestCase):\n \"\"\"\n Tests for carray() and farray().\n \"\"\"\n\n def run_carray_usecase(self, pointer_factory, func):\n a = np.arange(10, 16).reshape((2, 3)).astype(np.float32)\n out = np.empty(CARRAY_USECASE_OUT_LEN, dtype=np.float32)\n func(pointer_factory(a), pointer_factory(out), *a.shape)\n return out\n\n def check_carray_usecase(self, pointer_factory, pyfunc, cfunc):\n expected = self.run_carray_usecase(pointer_factory, 
pyfunc)\n got = self.run_carray_usecase(pointer_factory, cfunc)\n self.assertPreciseEqual(expected, got)\n\n def make_voidptr(self, arr):\n return arr.ctypes.data_as(ctypes.c_void_p)\n\n def make_float32_pointer(self, arr):\n return arr.ctypes.data_as(ctypes.POINTER(ctypes.c_float))\n\n def make_float64_pointer(self, arr):\n return arr.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n def check_carray_farray(self, func, order):\n def eq(got, expected):\n # Same layout, dtype, shape, etc.\n self.assertPreciseEqual(got, expected)\n # Same underlying data\n self.assertEqual(got.ctypes.data, expected.ctypes.data)\n\n base = np.arange(6).reshape((2, 3)).astype(np.float32).copy(order=order)\n\n # With typed pointer and implied dtype\n a = func(self.make_float32_pointer(base), base.shape)\n eq(a, base)\n # Integer shape\n a = func(self.make_float32_pointer(base), base.size)\n eq(a, base.ravel('K'))\n\n # With typed pointer and explicit dtype\n a = func(self.make_float32_pointer(base), base.shape, base.dtype)\n eq(a, base)\n a = func(self.make_float32_pointer(base), base.shape, np.float32)\n eq(a, base)\n\n # With voidptr and explicit dtype\n a = func(self.make_voidptr(base), base.shape, base.dtype)\n eq(a, base)\n a = func(self.make_voidptr(base), base.shape, np.int32)\n eq(a, base.view(np.int32))\n\n # voidptr without dtype\n with self.assertRaises(TypeError):\n func(self.make_voidptr(base), base.shape)\n # Invalid pointer type\n with self.assertRaises(TypeError):\n func(base.ctypes.data, base.shape)\n # Mismatching dtype\n with self.assertRaises(TypeError) as raises:\n func(self.make_float32_pointer(base), base.shape, np.int32)\n self.assertIn(\"mismatching dtype 'int32' for pointer\",\n str(raises.exception))\n\n def test_carray(self):\n \"\"\"\n Test pure Python carray().\n \"\"\"\n self.check_carray_farray(carray, 'C')\n\n def test_farray(self):\n \"\"\"\n Test pure Python farray().\n \"\"\"\n self.check_carray_farray(farray, 'F')\n\n def make_carray_sigs(self, formal_sig):\n \"\"\"\n Generate a bunch of concrete signatures by varying the width\n and signedness of size arguments (see issue #1923).\n \"\"\"\n for actual_size in (types.intp, types.int32, types.intc,\n types.uintp, types.uint32, types.uintc):\n args = tuple(actual_size if a == types.intp else a\n for a in formal_sig.args)\n yield formal_sig.return_type(*args)\n\n def check_numba_carray_farray(self, usecase, dtype_usecase):\n # With typed pointers and implicit dtype\n pyfunc = usecase\n for sig in self.make_carray_sigs(carray_float32_usecase_sig):\n f = cfunc(sig)(pyfunc)\n self.check_carray_usecase(self.make_float32_pointer, pyfunc, f.ctypes)\n\n # With typed pointers and explicit (matching) dtype\n pyfunc = dtype_usecase\n for sig in self.make_carray_sigs(carray_float32_usecase_sig):\n f = cfunc(sig)(pyfunc)\n self.check_carray_usecase(self.make_float32_pointer, pyfunc, f.ctypes)\n # With typed pointers and mismatching dtype\n with self.assertTypingError() as raises:\n f = cfunc(carray_float64_usecase_sig)(pyfunc)\n self.assertIn(\"mismatching dtype 'float32' for pointer type 'float64*'\",\n str(raises.exception))\n\n # With voidptr\n pyfunc = dtype_usecase\n for sig in self.make_carray_sigs(carray_voidptr_usecase_sig):\n f = cfunc(sig)(pyfunc)\n self.check_carray_usecase(self.make_float32_pointer, pyfunc, f.ctypes)\n\n def test_numba_carray(self):\n \"\"\"\n Test Numba-compiled carray() against pure Python carray()\n \"\"\"\n self.check_numba_carray_farray(carray_usecase, carray_dtype_usecase)\n\n def 
test_numba_farray(self):\n \"\"\"\n Test Numba-compiled farray() against pure Python farray()\n \"\"\"\n self.check_numba_carray_farray(farray_usecase, farray_dtype_usecase)\n\n\n@skip_cffi_unsupported\nclass TestCffiStruct(TestCase):\n c_source = \"\"\"\ntypedef struct _big_struct {\n int i1;\n float f2;\n double d3;\n float af4[9];\n} big_struct;\n\ntypedef struct _error {\n int bits:4;\n} error;\n\ntypedef double (*myfunc)(big_struct*, size_t);\n\"\"\"\n\n def get_ffi(self, src=c_source):\n from cffi import FFI\n\n ffi = FFI()\n ffi.cdef(src)\n return ffi\n\n def test_type_parsing(self):\n ffi = self.get_ffi()\n # Check struct typedef\n big_struct = ffi.typeof('big_struct')\n nbtype = cffi_support.map_type(big_struct, use_record_dtype=True)\n self.assertIsInstance(nbtype, types.Record)\n self.assertEqual(len(nbtype), 4)\n self.assertEqual(nbtype.typeof('i1'), types.int32)\n self.assertEqual(nbtype.typeof('f2'), types.float32)\n self.assertEqual(nbtype.typeof('d3'), types.float64)\n self.assertEqual(\n nbtype.typeof('af4'),\n types.NestedArray(dtype=types.float32, shape=(9,)),\n )\n\n # Check function typedef\n myfunc = ffi.typeof('myfunc')\n sig = cffi_support.map_type(myfunc, use_record_dtype=True)\n self.assertIsInstance(sig, typing.Signature)\n self.assertEqual(sig.args[0], types.CPointer(nbtype))\n self.assertEqual(sig.args[1], types.uintp)\n self.assertEqual(sig.return_type, types.float64)\n\n def test_cfunc_callback(self):\n ffi = self.get_ffi()\n big_struct = ffi.typeof('big_struct')\n nb_big_struct = cffi_support.map_type(big_struct, use_record_dtype=True)\n sig = cffi_support.map_type(ffi.typeof('myfunc'), use_record_dtype=True)\n\n @njit\n def calc(base):\n tmp = 0\n for i in range(base.size):\n elem = base[i]\n tmp += elem.i1 * elem.f2 / elem.d3\n tmp += base[i].af4.sum()\n return tmp\n\n @cfunc(sig)\n def foo(ptr, n):\n base = carray(ptr, n)\n return calc(base)\n\n # Make data\n mydata = ffi.new('big_struct[3]')\n ptr = ffi.cast('big_struct*', mydata)\n for i in range(3):\n ptr[i].i1 = i * 123\n ptr[i].f2 = i * 213\n ptr[i].d3 = (1 + i) * 213\n for j in range(9):\n ptr[i].af4[j] = i * 10 + j\n\n # Address of my data\n addr = int(ffi.cast('size_t', ptr))\n got = foo.ctypes(addr, 3)\n\n # Make numpy array from the cffi buffer\n array = np.ndarray(\n buffer=ffi.buffer(mydata),\n dtype=numpy_support.as_dtype(nb_big_struct),\n shape=3,\n )\n expect = calc(array)\n self.assertEqual(got, expect)\n\n def test_unsupport_bitsize(self):\n ffi = self.get_ffi()\n with self.assertRaises(ValueError) as raises:\n cffi_support.map_type(\n ffi.typeof('error'),\n use_record_dtype=True,\n )\n # When bitsize is provided, bitshift defaults to 0.\n self.assertEqual(\n \"field 'bits' has bitshift, this is not supported\",\n str(raises.exception)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import numpy as np\nimport numpy.core.umath_tests as ut\n\nfrom numba import unittest_support as unittest\nfrom numba import void, float32, jit, guvectorize\nfrom numba.npyufunc import GUVectorize\nfrom ..support import tag, TestCase\n\n\ndef matmulcore(A, B, C):\n m, n = A.shape\n n, p = B.shape\n for i in range(m):\n for j in range(p):\n C[i, j] = 0\n for k in range(n):\n C[i, j] += A[i, k] * B[k, j]\n\n\ndef axpy(a, x, y, out):\n out[0] = a * x + y\n\n\nclass TestGUFunc(TestCase):\n target = 'cpu'\n\n def check_matmul_gufunc(self, gufunc):\n matrix_ct = 1001\n A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4)\n B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5)\n\n C = gufunc(A, B)\n Gold = ut.matrix_multiply(A, B)\n\n np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8)\n\n def test_gufunc(self):\n gufunc = GUVectorize(matmulcore, '(m,n),(n,p)->(m,p)',\n target=self.target)\n gufunc.add((float32[:, :], float32[:, :], float32[:, :]))\n gufunc = gufunc.build_ufunc()\n\n self.check_matmul_gufunc(gufunc)\n\n def test_guvectorize_decor(self):\n gufunc = guvectorize([void(float32[:,:], float32[:,:], float32[:,:])],\n '(m,n),(n,p)->(m,p)',\n target=self.target)(matmulcore)\n\n self.check_matmul_gufunc(gufunc)\n\n def test_ufunc_like(self):\n # Test problem that the stride of \"scalar\" gufunc argument not properly\n # handled when the actual argument is an array,\n # causing the same value (first value) being repeated.\n gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target)\n gufunc.add('(intp, intp, intp, intp[:])')\n gufunc = gufunc.build_ufunc()\n\n x = np.arange(10, dtype=np.intp)\n out = gufunc(x, x, x)\n\n np.testing.assert_equal(out, x * x + x)\n\n\nclass TestGUFuncParallel(TestGUFunc):\n _numba_parallel_test_ = False\n target = 'parallel'\n\n\nclass TestGUVectorizeScalar(TestCase):\n \"\"\"\n Nothing keeps user from out-of-bound memory access\n \"\"\"\n target = 'cpu'\n\n def test_scalar_output(self):\n \"\"\"\n Note that scalar output is a 0-dimension array that acts as\n a pointer to the output location.\n \"\"\"\n\n @guvectorize(['void(int32[:], int32[:])'], '(n)->()',\n target=self.target, nopython=True)\n def sum_row(inp, out):\n tmp = 0.\n for i in range(inp.shape[0]):\n tmp += inp[i]\n out[()] = tmp\n\n # inp is (10000, 3)\n # out is (10000)\n # The outter (leftmost) dimension must match or numpy broadcasting is performed.\n\n inp = np.arange(30000, dtype=np.int32).reshape(10000, 3)\n out = sum_row(inp)\n\n # verify result\n for i in range(inp.shape[0]):\n assert out[i] == inp[i].sum()\n\n def test_scalar_input(self):\n\n @guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)',\n target=self.target, nopython=True)\n def foo(inp, n, out):\n for i in range(inp.shape[0]):\n out[i] = inp[i] * n[0]\n\n inp = np.arange(3 * 10, dtype=np.int32).reshape(10, 3)\n # out = np.empty_like(inp)\n out = foo(inp, 2)\n\n # verify result\n self.assertPreciseEqual(inp * 2, out)\n\n def test_scalar_input_core_type(self):\n def pyfunc(inp, n, out):\n for i in range(inp.size):\n out[i] = n * (inp[i] + 1)\n\n my_gufunc = guvectorize(['int32[:], int32, int32[:]'],\n '(n),()->(n)',\n target=self.target)(pyfunc)\n\n # test single core loop execution\n arr = np.arange(10).astype(np.int32)\n got = my_gufunc(arr, 2)\n\n expected = np.zeros_like(got)\n pyfunc(arr, 2, expected)\n\n np.testing.assert_equal(got, expected)\n\n # test multiple core loop execution\n arr = np.arange(20).astype(np.int32).reshape(10, 2)\n got = my_gufunc(arr, 
2)\n\n expected = np.zeros_like(got)\n for ax in range(expected.shape[0]):\n pyfunc(arr[ax], 2, expected[ax])\n\n np.testing.assert_equal(got, expected)\n\n def test_scalar_input_core_type_error(self):\n with self.assertRaises(TypeError) as raises:\n @guvectorize(['int32[:], int32, int32[:]'], '(n),(n)->(n)',\n target=self.target)\n def pyfunc(a, b, c):\n pass\n self.assertEqual(\"scalar type int32 given for non scalar argument #2\",\n str(raises.exception))\n\n def test_ndim_mismatch(self):\n with self.assertRaises(TypeError) as raises:\n @guvectorize(['int32[:], int32[:]'], '(m,n)->(n)',\n target=self.target)\n def pyfunc(a, b):\n pass\n self.assertEqual(\"type and shape signature mismatch for arg #1\",\n str(raises.exception))\n\n\nclass TestGUVectorizeScalarParallel(TestGUVectorizeScalar):\n _numba_parallel_test_ = False\n target = 'parallel'\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.dtype"
],
[
"numpy.ndindex",
"numpy.arange",
"numpy.empty"
],
[
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.core.umath_tests.matrix_multiply",
"numpy.zeros_like",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NicoleEic/projects | [
"028a4bb4b49539fc98b442f0a2f9434e95c94561",
"028a4bb4b49539fc98b442f0a2f9434e95c94561"
] | [
"neuro_scripts/manual_rigid_body/manual_rigid_body.py",
"data_visualization/plot_in_tkinter.py"
] | [
"import numpy as np\nimport nibabel as nib\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')\nfrom my_functions.matrix_stuff import *\n\ndef manual_rigid_body(fname = 'example_brain.nii.gz',\n outmat = 'transformation.mat',\n outimg = 'example_brain_transformed.nii.gz',\n theta = np.radians([0,0,0]),\n translation_vec = [0,0,0],\n type = 'rotation',\n flip_coordinates = [True, False, False]):\n\n \"\"\"\n Function to perform a rigid body transformation based on manually determined parameters.\n\n Args:\n - fname (str): filepath to input nifti image (.nii.gz)\n - outmat (str): filepath of output 4x4 transformation matrix (.mat)\n - outimg (str): filepath of transformed output image (.nii.gz)\n - theta (np.array): vector of rotation angles in x,y,z dimension (in radians)\n - translation_vec (np.array): vector for translation in x,y,z (in image coordinates)\n - type (str): can be 'rotation' or 'translation' or 'rotation_translation'\n - flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped\n\n Returns:\n - M (np.array): output 4x4 transformation matrix\n - M is written to outmat\n - the output image (outimg) is written out\n\n Note on flip_coordinates:\n Voxel coordinates in the image are expected to increase in the following directions\n (it's similar to determining the reorient-command):\n - first dimension: left -> right\n - second dimension: posterir -> anterior\n - third dimension: inferior -> superior\n\n if they go the other way, change input variable accordingly, e.g.:\n flip_coordinates = [True, False, False]\n \"\"\"\n\n # get sform from image to determine offset of coordinate-system\n img = nib.load(fname)\n aff = img.get_affine()\n offset = aff[0:3,3]\n\n # which type of manipulation is requested\n if type == 'rotation':\n print('do rotation only')\n M = rotation(theta, offset, flip_coordinates)\n elif type == 'translation':\n print('do translation only')\n M = vector_to_translation_matrix(translation_vec)\n elif type == 'rotation_translation':\n print('do combined rotation and translation')\n M = rotation_translation(theta, translation_vec, offset, flip_coordinates)\n\n # save output matrix\n print('output matrix: ', M)\n print('save in: ', outmat)\n save_matrix4x4(M, outmat)\n\n # apply transformation to input image\n applywarp_command = \"applywarp -i \" + fname + \" -r \" + fname + \" --premat=\" + outmat + \" --interp=nn -o \" + outimg\n print('run flirt: ', applywarp_command)\n os.system(applywarp_command)\n\n return M\n",
"import tkinter as tk\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport numpy as np\n\n# --------\n# overhead\n# --------\nrootdir = 'my/path/somewhere/'\nsubs = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']\nROI_list = ['ROI1', 'ROI2', 'ROI3', 'ROI4']\nhemi_list = [\"L\", \"R\"]\n\n# --------\n# read data into dataframe\n# --------\ndf = pd.DataFrame(columns=[\"subj\", \"ROI\", \"hemi\", \"my_value\"])\nmy_row = 0\nfor sub in subs:\n OD = os.path.join('rootdir', 'derivatives', 'sub-' + sub)\n for ind_r, ROI in enumerate(ROI_list):\n for hemi in hemi_list:\n # generate random value here as example\n my_val = np.random.randint(10) + ind_r\n df.loc[my_row] = [sub, ROI, hemi, my_val]\n my_row = my_row + 1\n\n# --------\n# Set up TK-widget\n# --------\nroot = tk.Tk()\nroot.wm_title(\"Matplotlib embedded in Tkinter\")\n\n# --------\n# plotting\n# --------\n# initialize matplotlib figure\nfig, ax = plt.subplots(figsize=(4, 4))\n# generate mutliple patches from data\nmy_patches = []\nfor ind_r, ROI in enumerate(ROI_list):\n # get a rectangle reaching from lowest to highest value for each ROI\n rect = patches.Rectangle((np.min(df[(df.hemi == \"L\") & (df.ROI == ROI)].my_value.values) + 1, ind_r + 1), np.max(df[(df.hemi == \"L\") & (df.ROI == ROI)].my_value.values), 0.5)\n ax.add_patch(rect)\n my_patches.append(rect)\n\n# hard-code axis limits for better visibility\nax.set_xlim(0, 15)\nax.set_ylim(0.5, len(ROI_list) + 1)\n\n# link figure to tkinter\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas.draw()\ncanvas.get_tk_widget().pack()\n\n# define settings for text lable below figure\noutput_val = tk.StringVar()\noutput_lbl = tk.Label(textvariable=output_val).pack()\noutput_val.set(\"\")\n\n\n# define what happens when the figure is clicked\ndef mouse_click(event):\n # find the ROI associated with the selected patch\n for ROI, patch in zip(ROI_list, my_patches):\n if patch.contains(event)[0]:\n # update the text in the label\n output_val.set(ROI)\n return\n\n\n# define function when window is closed so that no error is thrown\ndef _quit():\n root.quit()\n\n# link mouse_click function to figure\ncanvas.mpl_connect('button_press_event', mouse_click)\n\n# execute widget\ntk.mainloop()\n"
] | [
[
"numpy.radians"
],
[
"numpy.min",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.max",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
]