Dataset columns:
repo_name: string (lengths 6 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
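Each record below pairs a repository and commit with one source file: the full file contents as a single escaped string, the fully qualified API calls detected in that code, and a per-library map of candidate versions. A minimal sketch of iterating such records follows, assuming the export is JSON Lines with one object per row whose keys match the columns above; the filename records.jsonl is hypothetical.

import json

def iter_records(path="records.jsonl"):
    """Yield one flattened record per row; key names follow the schema listed above."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield {
                "repo_name": row["repo_name"],                     # e.g. "sarvex/estimator"
                "hexsha": row["hexsha"][0],                        # commit SHA (stored as a one-element sequence)
                "file_path": row["file_path"][0],                  # path of the source file inside the repo
                "code": row["code"][0],                            # full file contents as one string
                "apis": row["apis"][0],                            # fully qualified API calls found in the code
                "possible_versions": row["possible_versions"][0],  # per-library version candidates
            }

if __name__ == "__main__":
    for record in iter_records():
        print(record["repo_name"], record["file_path"], len(record["apis"]), "APIs")
        break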
sarvex/estimator
[ "5f53a7d36676ab4553c75a8cd9668f885536e8f8" ]
[ "tensorflow_estimator/python/estimator/head/multi_head_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for multi_head.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow.python.framework import test_util\nfrom tensorflow_estimator.python.estimator.canned import metric_keys\nfrom tensorflow_estimator.python.estimator.canned import prediction_keys\nfrom tensorflow_estimator.python.estimator.head import head_utils as test_lib\nfrom tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib\nfrom tensorflow_estimator.python.estimator.head import multi_label_head\nfrom tensorflow_estimator.python.estimator.head import regression_head\nfrom tensorflow_estimator.python.estimator.mode_keys import ModeKeys\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MultiHeadTest(tf.test.TestCase):\n\n def test_no_heads(self):\n with self.assertRaisesRegexp(ValueError,\n r'Must specify heads\\. Given: \\[\\]'):\n multi_head_lib.MultiHead(heads=[])\n\n def test_head_name_missing(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3)\n with self.assertRaisesRegexp(ValueError,\n r'All given heads must have name specified\\.'):\n multi_head_lib.MultiHead([head1, head2])\n\n def test_head_weights_wrong_size(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n with self.assertRaisesRegexp(\n ValueError, r'heads and head_weights must have the same size\\. '\n r'Given len\\(heads\\): 2. 
Given len\\(head_weights\\): 1\\.'):\n multi_head_lib.MultiHead([head1, head2], head_weights=[1.])\n\n def test_name(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n self.assertEqual('head1_head2', multi_head.name)\n\n def test_predict_two_heads_logits_dict(self):\n \"\"\"Tests predict with logits as dict.\"\"\"\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n\n logits = {\n 'head1': np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32),\n 'head2': np.array([[2., -2., 2.], [-3., 2., -2.]], dtype=np.float32)\n }\n expected_probabilities = {\n 'head1': tf.math.sigmoid(logits['head1']),\n 'head2': tf.math.sigmoid(logits['head2']),\n }\n pred_keys = prediction_keys.PredictionKeys\n\n predictions = multi_head.predictions(logits)\n self.assertAllClose(logits['head1'],\n self.evaluate(predictions[('head1', pred_keys.LOGITS)]))\n self.assertAllClose(logits['head2'],\n self.evaluate(predictions[('head2', pred_keys.LOGITS)]))\n self.assertAllClose(\n expected_probabilities['head1'],\n self.evaluate(predictions[('head1', pred_keys.PROBABILITIES)]))\n self.assertAllClose(\n expected_probabilities['head2'],\n self.evaluate(predictions[('head2', pred_keys.PROBABILITIES)]))\n if tf.executing_eagerly():\n return\n\n spec = multi_head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=ModeKeys.PREDICT,\n logits=logits)\n self.assertItemsEqual((test_lib._DEFAULT_SERVING_KEY, 'predict', 'head1',\n 'head1/classification', 'head1/predict', 'head2',\n 'head2/classification', 'head2/predict'),\n spec.export_outputs.keys())\n # Assert predictions and export_outputs.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(logits['head1'],\n predictions[('head1', pred_keys.LOGITS)])\n self.assertAllClose(logits['head2'],\n predictions[('head2', pred_keys.LOGITS)])\n self.assertAllClose(expected_probabilities['head1'],\n predictions[('head1', pred_keys.PROBABILITIES)])\n self.assertAllClose(expected_probabilities['head2'],\n predictions[('head2', pred_keys.PROBABILITIES)])\n\n self.assertAllClose(\n expected_probabilities['head1'],\n sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].scores))\n self.assertAllClose(expected_probabilities['head1'],\n sess.run(spec.export_outputs['head1'].scores))\n self.assertAllClose(expected_probabilities['head2'],\n sess.run(spec.export_outputs['head2'].scores))\n self.assertAllClose(\n expected_probabilities['head1'],\n sess.run(\n spec.export_outputs['predict'].outputs['head1/probabilities']))\n self.assertAllClose(\n expected_probabilities['head2'],\n sess.run(\n spec.export_outputs['predict'].outputs['head2/probabilities']))\n self.assertAllClose(\n expected_probabilities['head1'],\n sess.run(\n spec.export_outputs['head1/predict'].outputs['probabilities']))\n self.assertAllClose(\n expected_probabilities['head2'],\n sess.run(\n spec.export_outputs['head2/predict'].outputs['probabilities']))\n\n def test_predict_two_heads_logits_tensor(self):\n \"\"\"Tests predict with logits as Tensor.\"\"\"\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = 
multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n\n logits = np.array([[-1., 1., 2., -2., 2.], [-1.5, 1., -3., 2., -2.]],\n dtype=np.float32)\n expected_logits1 = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)\n expected_logits2 = np.array([[2., -2., 2.], [-3., 2., -2.]],\n dtype=np.float32)\n expected_probabilities = {\n 'head1': tf.math.sigmoid(expected_logits1),\n 'head2': tf.math.sigmoid(expected_logits2),\n }\n pred_keys = prediction_keys.PredictionKeys\n\n predictions = multi_head.predictions(logits)\n self.assertAllClose(expected_logits1,\n self.evaluate(predictions[('head1', pred_keys.LOGITS)]))\n self.assertAllClose(expected_logits2,\n self.evaluate(predictions[('head2', pred_keys.LOGITS)]))\n self.assertAllClose(\n expected_probabilities['head1'],\n self.evaluate(predictions[('head1', pred_keys.PROBABILITIES)]))\n self.assertAllClose(\n expected_probabilities['head2'],\n self.evaluate(predictions[('head2', pred_keys.PROBABILITIES)]))\n if tf.executing_eagerly():\n return\n\n spec = multi_head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=ModeKeys.PREDICT,\n logits=logits)\n self.assertItemsEqual((test_lib._DEFAULT_SERVING_KEY, 'predict', 'head1',\n 'head1/classification', 'head1/predict', 'head2',\n 'head2/classification', 'head2/predict'),\n spec.export_outputs.keys())\n # Assert predictions and export_outputs.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(expected_logits1,\n predictions[('head1', pred_keys.LOGITS)])\n self.assertAllClose(expected_logits2,\n predictions[('head2', pred_keys.LOGITS)])\n self.assertAllClose(expected_probabilities['head1'],\n predictions[('head1', pred_keys.PROBABILITIES)])\n self.assertAllClose(expected_probabilities['head2'],\n predictions[('head2', pred_keys.PROBABILITIES)])\n\n self.assertAllClose(\n expected_probabilities['head1'],\n sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].scores))\n self.assertAllClose(expected_probabilities['head1'],\n sess.run(spec.export_outputs['head1'].scores))\n self.assertAllClose(expected_probabilities['head2'],\n sess.run(spec.export_outputs['head2'].scores))\n\n def test_predict_two_heads_logits_tensor_multi_dim(self):\n \"\"\"Tests predict with multi-dimensional logits of shape [2, 2, 5].\"\"\"\n head1 = regression_head.RegressionHead(label_dimension=2, name='head1')\n head2 = regression_head.RegressionHead(label_dimension=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n\n logits = np.array([[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],\n [[-1.5, 1., -3., 2., -2.], [-1.5, 1., -3., 2., -2.]]],\n dtype=np.float32)\n expected_logits1 = np.array(\n [[[-1., 1.], [-1., 1.]], [[-1.5, 1.], [-1.5, 1.]]], dtype=np.float32)\n expected_logits2 = np.array(\n [[[2., -2., 2.], [2., -2., 2.]], [[-3., 2., -2.], [-3., 2., -2.]]],\n dtype=np.float32)\n pred_keys = prediction_keys.PredictionKeys\n\n predictions = multi_head.predictions(logits)\n self.assertAllClose(\n expected_logits1,\n self.evaluate(predictions[('head1', pred_keys.PREDICTIONS)]))\n self.assertAllClose(\n expected_logits2,\n self.evaluate(predictions[('head2', pred_keys.PREDICTIONS)]))\n if tf.executing_eagerly():\n return\n\n spec = multi_head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=ModeKeys.PREDICT,\n 
logits=logits)\n self.assertItemsEqual(\n (test_lib._DEFAULT_SERVING_KEY, 'predict', 'head1', 'head1/regression',\n 'head1/predict', 'head2', 'head2/regression', 'head2/predict'),\n spec.export_outputs.keys())\n # Assert predictions and export_outputs.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(expected_logits1,\n predictions[('head1', pred_keys.PREDICTIONS)])\n self.assertAllClose(expected_logits2,\n predictions[('head2', pred_keys.PREDICTIONS)])\n\n self.assertAllClose(\n expected_logits1,\n sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].value))\n self.assertAllClose(expected_logits1,\n sess.run(spec.export_outputs['head1'].value))\n self.assertAllClose(expected_logits2,\n sess.run(spec.export_outputs['head2'].value))\n\n def test_eval_two_heads_with_weights(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n logits = {\n 'head1':\n np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),\n 'head2':\n np.array([[20., -20., 20.], [-30., 20., -20.]], dtype=np.float32),\n }\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # For large logits, sigmoid cross entropy loss is approximated as:\n # loss = labels * (logits < 0) * (-logits) +\n # (1 - labels) * (logits > 0) * logits =>\n # head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]\n # loss = ((10 + 10) / 2 + (15 + 0) / 2) / 2 = 8.75\n # head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]\n # loss = ((20 + 20 + 20) / 3 + (30 + 0 + 0) / 3) / 2 = 15\n expected_loss_head1 = 8.75\n expected_loss_head2 = 15.\n expected_loss = 1. * expected_loss_head1 + 2. 
* expected_loss_head2\n tol = 1e-3\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS + '/head1': expected_loss_head1,\n keys.LOSS + '/head2': expected_loss_head2,\n # Average loss over examples.\n keys.LOSS_MEAN + '/head1': expected_loss_head1,\n keys.LOSS_MEAN + '/head2': expected_loss_head2,\n # auc and auc_pr cannot be reliably calculated for only 4-6 samples, but\n # this assert tests that the algorithm remains consistent.\n keys.AUC + '/head1': 0.1667,\n keys.AUC + '/head2': 0.3333,\n keys.AUC_PR + '/head1': 0.60228,\n keys.AUC_PR + '/head2': 0.40152,\n }\n\n if tf.executing_eagerly():\n loss = multi_head.loss(\n labels, logits, features=features, mode=ModeKeys.EVAL)\n self.assertIsNotNone(loss)\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n\n eval_metrics = multi_head.metrics()\n updated_metrics = multi_head.update_metrics(eval_metrics, features,\n logits, labels)\n self.assertItemsEqual(expected_metrics.keys(), updated_metrics.keys())\n self.assertAllClose(\n expected_metrics,\n {k: updated_metrics[k].result() for k in updated_metrics},\n rtol=tol,\n atol=tol)\n return\n\n spec = multi_head.create_estimator_spec(\n features=features, mode=ModeKeys.EVAL, logits=logits, labels=labels)\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n test_lib._assert_no_hooks(self, spec)\n # Assert predictions, loss, and metrics.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, _ = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of value ops (in `metrics`).\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops},\n rtol=tol,\n atol=tol)\n\n def test_train_loss_one_head(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n multi_head = multi_head_lib.MultiHead([head1])\n\n logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}\n labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}\n loss = multi_head.loss(\n labels=labels,\n logits=logits,\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=ModeKeys.TRAIN)\n tol = 1e-3\n # Unreduced loss of the head is [[(10 + 10) / 2], (15 + 0) / 2]\n # (averaged over classes, averaged over examples).\n # loss = sum(unreduced_loss) / 2 = sum([10, 7.5]) / 2 = 8.75\n self.assertAllClose(8.75, self.evaluate(loss), rtol=tol, atol=tol)\n\n def test_train_loss_two_heads_with_weights(self):\n # Use different example weighting for each head weighting.\n weights1 = np.array([[1.], [2.]], dtype=np.float32)\n weights2 = np.array([[2.], [3.]])\n head1 = multi_label_head.MultiLabelHead(\n n_classes=2, name='head1', weight_column='weights1')\n head2 = multi_label_head.MultiLabelHead(\n n_classes=3, name='head2', weight_column='weights2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n logits = {\n 'head1':\n np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),\n 'head2':\n np.array([[20., -20., 20.], [-30., 20., -20.]], dtype=np.float32),\n }\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': 
np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n training_loss = multi_head.loss(\n logits=logits,\n labels=labels,\n features={\n 'x': np.array(((42,),), dtype=np.int32),\n 'weights1': weights1,\n 'weights2': weights2\n },\n mode=ModeKeys.TRAIN)\n tol = 1e-3\n # loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]\n # = [10, 7.5]\n # training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5\n # head-weighted unreduced_loss = 1 * [10, 7.5]\n # loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]\n # = [20, 10]\n # training_loss = (2 * 20 + 3 * 10) / 2 = 35\n # head-weighted unreduced_loss = 2 * [20, 10]\n # head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5\n self.assertAllClose(82.5, self.evaluate(training_loss), rtol=tol, atol=tol)\n\n def test_train_loss_logits_tensor(self):\n \"\"\"Tests loss with logits Tensor.\"\"\"\n weights1 = np.array([[1.], [2.]], dtype=np.float32)\n weights2 = np.array([[2.], [3.]])\n head1 = multi_label_head.MultiLabelHead(\n n_classes=2, name='head1', weight_column='weights1')\n head2 = multi_label_head.MultiLabelHead(\n n_classes=3, name='head2', weight_column='weights2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n logits = np.array(\n [[-10., 10., 20., -20., 20.], [-15., 10., -30., 20., -20.]],\n dtype=np.float32)\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n training_loss = multi_head.loss(\n logits=logits,\n labels=labels,\n features={\n 'x': np.array(((42,),), dtype=np.int32),\n 'weights1': weights1,\n 'weights2': weights2\n },\n mode=ModeKeys.TRAIN)\n tol = 1e-3\n # loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]\n # = [10, 7.5]\n # training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5\n # head-weighted unreduced_loss = 1 * [10, 7.5]\n # loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]\n # = [20, 10]\n # training_loss = (2 * 20 + 3 * 10) / 2 = 35\n # head-weighted unreduced_loss = 2 * [20, 10]\n # head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5\n self.assertAllClose(82.5, self.evaluate(training_loss), rtol=tol, atol=tol)\n\n def test_train_loss_logits_tensor_wrong_shape(self):\n \"\"\"Tests loss with a logits Tensor of the wrong shape.\"\"\"\n weights1 = np.array([[1.], [2.]], dtype=np.float32)\n weights2 = np.array([[2.], [3.]])\n head1 = multi_label_head.MultiLabelHead(\n n_classes=2, name='head1', weight_column='weights1')\n head2 = multi_label_head.MultiLabelHead(\n n_classes=3, name='head2', weight_column='weights2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n # logits tensor is 2x6 instead of 2x5\n logits = np.array(\n [[-10., 10., 20., -20., 20., 70.], [-15., 10., -30., 20., -20., 80.]],\n dtype=np.float32)\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n with self.assertRaisesRegexp(ValueError, r'Could not split logits'):\n multi_head.loss(\n features={\n 'x': np.array(((42,),), dtype=np.int32),\n 'weights1': weights1,\n 'weights2': weights2\n },\n mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n\n def test_train_loss_logits_tensor_multi_dim(self):\n \"\"\"Tests loss with multi-dimensional logits of shape [2, 2, 5].\"\"\"\n head1 = regression_head.RegressionHead(label_dimension=2, name='head1')\n head2 = regression_head.RegressionHead(label_dimension=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n\n logits = 
np.array([[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],\n [[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],\n dtype=np.float32)\n labels = {\n 'head1':\n np.array([[[1., 0.], [1., 0.]], [[1.5, 1.5], [1.5, 1.5]]],\n dtype=np.float32),\n 'head2':\n np.array(\n [[[0., 1., 0.], [0., 1., 0.]], [[2., 2., 0.], [2., 2., 0.]]],\n dtype=np.float32),\n }\n # Loss for the first head:\n # loss1 = ((1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +\n # (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2) / 8\n # = 3.5\n # Loss for the second head:\n # loss2 = ((0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +\n # (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2) / 12\n # = 6.167\n expected_training_loss = 3.5 + 6.167\n\n training_loss = multi_head.loss(\n logits=logits, labels=labels, features={}, mode=ModeKeys.TRAIN)\n tol = 1e-3\n self.assertAllClose(\n expected_training_loss,\n self.evaluate(training_loss),\n rtol=tol,\n atol=tol)\n\n def test_train_loss_logits_tensor_multi_dim_wrong_shape(self):\n \"\"\"Tests loss with a multi-dimensional logits tensor of the wrong shape.\"\"\"\n head1 = regression_head.RegressionHead(label_dimension=2, name='head1')\n head2 = regression_head.RegressionHead(label_dimension=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2])\n\n # logits tensor is 2x2x4 instead of 2x2x5\n logits = np.array([[[-1., 1., 2., -2.], [-1., 1., 2., -2.]],\n [[-1.5, 1.5, -2., 2.], [-1.5, 1.5, -2., 2.]]],\n dtype=np.float32)\n labels = {\n 'head1':\n np.array([[[1., 0.], [1., 0.]], [[1.5, 1.5], [1.5, 1.5]]],\n dtype=np.float32),\n 'head2':\n np.array(\n [[[0., 1., 0.], [0., 1., 0.]], [[2., 2., 0.], [2., 2., 0.]]],\n dtype=np.float32),\n }\n with self.assertRaisesRegexp(ValueError, r'Could not split logits'):\n multi_head.loss(\n features={}, mode=ModeKeys.TRAIN, logits=logits, labels=labels)\n\n def test_train_one_head(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n multi_head = multi_head_lib.MultiHead([head1])\n\n logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}\n expected_probabilities = {\n 'head1': tf.math.sigmoid(logits['head1']),\n }\n labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # For large logits, sigmoid cross entropy loss is approximated as:\n # loss = labels * (logits < 0) * (-logits) +\n # (1 - labels) * (logits > 0) * logits =>\n # expected_unweighted_loss = [[10., 10.], [15., 0.]]\n # loss = ((10 + 10) / 2 + (15 + 0) / 2) / 2 = 8.75\n expected_loss = 8.75\n tol = 1e-3\n loss = multi_head.loss(\n logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)\n self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)\n if tf.executing_eagerly():\n return\n\n expected_train_result = 'my_train_op'\n\n def _train_op_fn(loss):\n return tf.strings.join([\n tf.constant(expected_train_result),\n tf.strings.as_string(loss, precision=3)\n ])\n\n spec = multi_head.create_estimator_spec(\n features=features,\n mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n test_lib._assert_no_hooks(self, spec)\n # Assert predictions, loss, train_op, and summaries.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, 
train_result, summary_str, predictions = sess.run(\n (spec.loss, spec.train_op, spec.scaffold.summary_op,\n spec.predictions))\n self.assertAllClose(\n logits['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])\n self.assertAllClose(\n expected_probabilities['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),\n train_result)\n test_lib._assert_simple_summaries(\n self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS + '/head1': expected_loss,\n }, summary_str, tol)\n\n def test_train_one_head_with_optimizer(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n multi_head = multi_head_lib.MultiHead([head1])\n\n logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}\n labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # For large logits, sigmoid cross entropy loss is approximated as:\n # loss = labels * (logits < 0) * (-logits) +\n # (1 - labels) * (logits > 0) * logits =>\n # expected_unweighted_loss = [[10., 10.], [15., 0.]]\n # loss = ((10 + 10) / 2 + (15 + 0) / 2) / 2 = 8.75\n expected_loss = 8.75\n tol = 1e-3\n loss = multi_head.loss(\n logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)\n self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)\n if tf.executing_eagerly():\n return\n\n expected_train_result = 'my_train_op'\n\n class _Optimizer(tf.keras.optimizers.Optimizer):\n\n def get_updates(self, loss, params):\n del params\n return [\n tf.strings.join([\n tf.constant(expected_train_result),\n tf.strings.as_string(loss, precision=3)\n ])\n ]\n\n def get_config(self):\n config = super(_Optimizer, self).get_config()\n return config\n\n spec = multi_head.create_estimator_spec(\n features=features,\n mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n optimizer=_Optimizer('my_optimizer'),\n trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])\n\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),\n train_result)\n\n def test_train_two_heads_with_weights(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n logits = {\n 'head1':\n np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),\n 'head2':\n np.array([[20., -20., 20.], [-30., 20., -20.]], dtype=np.float32),\n }\n expected_probabilities = {\n 'head1': tf.math.sigmoid(logits['head1']),\n 'head2': tf.math.sigmoid(logits['head2']),\n }\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # For large logits, sigmoid cross entropy loss is approximated as:\n # loss = labels * (logits < 0) * (-logits) +\n # (1 - labels) * (logits > 0) * logits =>\n # head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]\n # loss = ((10 + 10) / 2 + (15 + 0) / 2) / 2 = 8.75\n # head2: 
expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]\n # loss = ((20 + 20 + 20) / 3 + (30 + 0 + 0) / 3) / 2 = 15\n # Average over classes, weighted sum over batch and heads.\n expected_loss_head1 = 8.75\n expected_loss_head2 = 15.0\n expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2\n tol = 1e-3\n loss = multi_head.loss(\n logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)\n self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)\n if tf.executing_eagerly():\n return\n\n expected_train_result = 'my_train_op'\n\n def _train_op_fn(loss):\n return tf.strings.join([\n tf.constant(expected_train_result),\n tf.strings.as_string(loss, precision=3)\n ])\n\n spec = multi_head.create_estimator_spec(\n features=features,\n mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n test_lib._assert_no_hooks(self, spec)\n # Assert predictions, loss, train_op, and summaries.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str, predictions = sess.run(\n (spec.loss, spec.train_op, spec.scaffold.summary_op,\n spec.predictions))\n self.assertAllClose(\n logits['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])\n self.assertAllClose(\n expected_probabilities['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])\n self.assertAllClose(\n logits['head2'],\n predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])\n self.assertAllClose(\n expected_probabilities['head2'],\n predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),\n train_result)\n test_lib._assert_simple_summaries(\n self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS + '/head1': expected_loss_head1,\n metric_keys.MetricKeys.LOSS + '/head2': expected_loss_head2,\n }, summary_str, tol)\n\n def test_train_with_regularization_losses(self):\n head1 = multi_label_head.MultiLabelHead(n_classes=2, name='head1')\n head2 = multi_label_head.MultiLabelHead(n_classes=3, name='head2')\n multi_head = multi_head_lib.MultiHead([head1, head2], head_weights=[1., 2.])\n\n logits = {\n 'head1':\n np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),\n 'head2':\n np.array([[20., -20., 20.], [-30., 20., -20.]], dtype=np.float32),\n }\n expected_probabilities = {\n 'head1': tf.math.sigmoid(logits['head1']),\n 'head2': tf.math.sigmoid(logits['head2']),\n }\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n features = {'x': np.array(((42,),), dtype=np.int32)}\n regularization_losses = [1.5, 0.5]\n\n # For large logits, sigmoid cross entropy loss is approximated as:\n # loss = labels * (logits < 0) * (-logits) +\n # (1 - labels) * (logits > 0) * logits =>\n # head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]\n # loss1 = ((10 + 10) / 2 + (15 + 0) / 2) / 2 = 8.75\n # head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]\n # loss2 = ((20 + 20 + 20) / 3 + (30 + 0 + 0) / 3) / 2 = 15\n # Average over classes, weighted sum over batch and heads.\n # weights = 
[1., 2.]\n # merged_training_loss = 1. * loss1 + 2. * loss2\n # training_loss = merged_training_loss + regularization_loss\n # = 1. * loss1 + 2. * loss2 + sum([1.5, 0.5])\n expected_loss_head1 = 8.75\n expected_loss_head2 = 15.0\n expected_regularization_loss = 2.\n # training loss.\n expected_loss = (1. * expected_loss_head1 + 2. * expected_loss_head2 +\n expected_regularization_loss)\n tol = 1e-3\n loss = multi_head.loss(\n logits=logits,\n labels=labels,\n features=features,\n mode=ModeKeys.TRAIN,\n regularization_losses=regularization_losses)\n self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)\n if tf.executing_eagerly():\n return\n\n keys = metric_keys.MetricKeys\n expected_train_result = 'my_train_op'\n\n def _train_op_fn(loss):\n return tf.strings.join([\n tf.constant(expected_train_result),\n tf.strings.as_string(loss, precision=3)\n ])\n\n spec = multi_head.create_estimator_spec(\n features=features,\n mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn,\n regularization_losses=regularization_losses)\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n test_lib._assert_no_hooks(self, spec)\n # Assert predictions, loss, train_op, and summaries.\n with self.cached_session() as sess:\n test_lib._initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str, predictions = sess.run(\n (spec.loss, spec.train_op, spec.scaffold.summary_op,\n spec.predictions))\n self.assertAllClose(\n logits['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])\n self.assertAllClose(\n expected_probabilities['head1'],\n predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])\n self.assertAllClose(\n logits['head2'],\n predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])\n self.assertAllClose(\n expected_probabilities['head2'],\n predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),\n train_result)\n test_lib._assert_simple_summaries(\n self, {\n keys.LOSS_REGULARIZATION: expected_regularization_loss,\n keys.LOSS: expected_loss,\n keys.LOSS + '/head1': expected_loss_head1,\n keys.LOSS + '/head2': expected_loss_head2,\n }, summary_str, tol)\n\n\n@test_util.deprecated_graph_mode_only\nclass MultiHeadForEstimator(tf.test.TestCase):\n \"\"\"Tests for create_estimator_spec running in Graph mode only.\"\"\"\n\n def test_loss_reduction_must_be_same(self):\n \"\"\"Tests the loss reduction must be the same for different heads.\"\"\"\n head1 = multi_label_head.MultiLabelHead(\n n_classes=2,\n name='head1',\n loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)\n head2 = multi_label_head.MultiLabelHead(\n n_classes=3, name='head2', loss_reduction=tf.losses.Reduction.AUTO)\n multi_head = multi_head_lib.MultiHead([head1, head2])\n logits = {\n 'head1':\n np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),\n 'head2':\n np.array([[20., -20., 20.], [-30., 20., -20.]], dtype=np.float32),\n }\n labels = {\n 'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),\n 'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),\n }\n with self.assertRaisesRegexp(ValueError, 'must be the same'):\n multi_head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n 
mode=ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.strings.as_string", "tensorflow.constant", "tensorflow.executing_eagerly", "tensorflow.Variable", "tensorflow.test.main", "tensorflow.math.sigmoid", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RyanNavillus/tcav
[ "ef3bb59fc08fe04d836f00ad0036dd71f5e5079b" ]
[ "tcav/model_test.py" ]
[ "\"\"\"\nCopyright 2019 Google LLC\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n https://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.platform import googletest\nfrom tcav.model import ModelWrapper\n\n\ntf.compat.v1.disable_eager_execution()\n\n\nclass ModelTest_model(ModelWrapper):\n \"\"\"A mock model of model class for ModelTest class.\"\"\"\n\n def __init__(self, model_path=None, node_dict=None):\n super(ModelTest_model, self).__init__(\n model_path=model_path, node_dict=node_dict)\n\n\nclass ModelTest(googletest.TestCase):\n\n def setUp(self):\n # Create an execution graph\n x = tf.compat.v1.placeholder(dtype=tf.float64, shape=[], name='input')\n a = tf.Variable(111, name='var1', dtype=tf.float64)\n y = tf.math.multiply(x, a, name='output')\n\n self.ckpt_dir = '/tmp/ckpts/'\n self.saved_model_dir = '/tmp/saved_model/'\n self.frozen_graph_dir = '/tmp/frozen_graph/'\n self.tmp_dirs = [self.ckpt_dir, self.saved_model_dir, self.frozen_graph_dir]\n for d in self.tmp_dirs:\n if tf.io.gfile.exists(d):\n tf.io.gfile.rmtree(d)\n tf.io.gfile.makedirs(d)\n\n with tf.compat.v1.Session() as sess:\n tf.compat.v1.initialize_all_variables().run()\n\n # Save as checkpoint\n saver = tf.compat.v1.train.Saver()\n saver.save(sess, self.ckpt_dir + 'model.ckpt', write_meta_graph=True)\n\n # Save as SavedModel\n tf.compat.v1.saved_model.simple_save(\n sess,\n self.saved_model_dir,\n inputs={'input': x},\n outputs={'output': y})\n\n graph = sess.graph\n input_graph_def = graph.as_graph_def()\n output_node_names = ['output']\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, input_graph_def, output_node_names)\n\n # Save as binary graph\n tf.io.write_graph(\n output_graph_def, self.frozen_graph_dir, 'graph.pb', as_text=False)\n\n # Save as text graph\n tf.io.write_graph(\n output_graph_def, self.frozen_graph_dir, 'graph.pbtxt', as_text=True)\n\n def tearDown(self):\n for d in self.tmp_dirs:\n tf.io.gfile.rmtree(d)\n\n def _check_output_and_gradient(self, model_path, import_prefix=False):\n model = ModelTest_model(model_path=model_path, node_dict={'v1': 'var1'})\n input_name = 'input:0'\n output_name = 'output:0'\n if import_prefix:\n input_name = 'import/' + input_name\n output_name = 'import/' + output_name\n out = model.sess.run(output_name, feed_dict={input_name: 3})\n self.assertEqual(out, 333.0)\n\n model.loss = model.sess.graph.get_tensor_by_name(output_name)\n\n # Make sure that loaded graph can be modified\n model._make_gradient_tensors()\n grad = model.sess.run(\n model.bottlenecks_gradients['v1'], feed_dict={input_name: 555})\n self.assertEqual(grad, 555.0)\n\n def test_try_loading_model_from_ckpt(self):\n self._check_output_and_gradient(self.ckpt_dir)\n\n def test_try_loading_model_from_saved_model(self):\n self._check_output_and_gradient(self.saved_model_dir)\n\n def test_try_loading_model_from_frozen_pb(self):\n model_path = 
self.frozen_graph_dir + 'graph.pb'\n self._check_output_and_gradient(model_path, import_prefix=True)\n\n def test_try_loading_model_from_frozen_txt(self):\n model_path = self.frozen_graph_dir + 'graph.pbtxt'\n self._check_output_and_gradient(model_path, import_prefix=True)\n\n\nif __name__ == '__main__':\n googletest.main()\n" ]
[ [ "tensorflow.io.gfile.rmtree", "tensorflow.Variable", "tensorflow.io.write_graph", "tensorflow.io.gfile.exists", "tensorflow.compat.v1.saved_model.simple_save", "tensorflow.io.gfile.makedirs", "tensorflow.math.multiply", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.compat.v1.initialize_all_variables", "tensorflow.python.platform.googletest.main", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.compat.v1.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nishantsule/Guitar-Effects
[ "5bf40f641e477f5655c91c3967ebccf8104e6d96" ]
[ "guitareffects.py" ]
[ "import numpy as np\nfrom bokeh.plotting import figure, show\n# from bokeh.io import output_notebook\nfrom bokeh.palettes import Colorblind\nimport pydub\nimport os\n\n# This class defines the core Guitar Effects object. \n# It contains functions to read and write audio files.\n# It also contains all the different functions implementing various guitar effects\n\nclass GEcore():\n \n def __init__(self):\n self.effectname = ''\n self.audiofilename = ''\n self.framerate = []\n self.signal = []\n self.read_audiofile()\n \n def read_audiofile(self):\n print('----------------------')\n name = input('Enter the audio filename you want to read including the extension: ')\n print('----------------------')\n filename, file_ext = os.path.splitext(name)\n filename = os.getcwd() + '/audiofiles/' + name\n self.audiofilename = filename\n audiofile = pydub.AudioSegment.from_file(filename, file_ext)\n audiofile = audiofile.fade_out(2000)\n self.framerate = audiofile.frame_rate\n songdata = [] # Empty list for holding audio data\n channels = [] # Empty list to hold data from separate channels\n songdata = np.frombuffer(audiofile._data, np.int16)\n for chn in range(audiofile.channels):\n channels.append(songdata[chn::audiofile.channels]) # separate signal from channels\n self.signal = np.sum(channels, axis=0) / len(channels) # Averaging signal over all channels\n self.signal = self.norm_signal(self.signal) # normalize signal amplitude\n self.plot_signal([self.signal], True)\n \n def norm_signal(self, input_signal):\n output_signal = input_signal / np.max(np.absolute(input_signal))\n return output_signal\n \n def plot_signal(self, audio_signal, pflag):\n if pflag:\n p = figure(plot_width=900, plot_height=500, title='Audio Signal', \n x_axis_label='Time (s)', y_axis_label='Amplitude (arb. 
units)')\n time = np.linspace(0, np.shape(audio_signal)[1] / self.framerate, np.shape(audio_signal)[1])\n m = int(np.shape(audio_signal)[1] / 2000)\n for n in range(np.shape(audio_signal)[0]):\n labels = 'signal ' + str(n + 1)\n p.line(time[0::m], audio_signal[n][0::m], line_color=Colorblind[8][n], \n alpha=0.6, legend_label=labels)\n show(p)\n else:\n pass\n \n def delay(self, input_signal, pflag):\n print('----------------------')\n delaytime = int(input('Enter the delay (> 50ms and < 5000ms): '))\n gain = float(input('Enter the delay gain (number betweeen 0 and 1): '))\n print('----------------------')\n num = int(delaytime * 1e-3 * self.framerate)\n delaysig = np.roll(input_signal, num)\n delaysig[:num] = 0\n output_signal = input_signal + gain * delaysig\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n \n def flanger(self, input_signal, pflag):\n print('----------------------')\n maxdelay = int(input('Enter the maximum flanger delay (< 15ms): '))\n fflanger = float(input('Enter the frequency of delay oscillation (~ 1Hz): '))\n gain = float(input('Enter the gain (number betweeen 0 and 1): '))\n print('----------------------')\n num = int(maxdelay * 1e-3 * self.framerate)\n output_signal = np.zeros(len(input_signal))\n delaysig = np.zeros(num)\n for n in range(len(input_signal)):\n d = int(0.5 * num * (1 + np.sin(2 * np.pi * fflanger * n / self.framerate)))\n if d < n:\n output_signal[n] = input_signal[n] + gain * input_signal[n-d]\n else:\n output_signal[n] = input_signal[n] \n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n \n def overdrive(self, input_signal, pflag):\n print('----------------------')\n th = float(input('Enter the overdrive signal threshold (< 0.5): '))\n print('----------------------')\n output_signal = np.zeros(len(input_signal))\n for n in range(len(input_signal)):\n if np.absolute(input_signal[n]) < th:\n output_signal[n] = 2 * input_signal[n]\n if np.absolute(input_signal[n]) >= th:\n if input_signal[n] > 0:\n output_signal[n] = (3 - (2 - 3 * input_signal[n])**2) / 3\n if input_signal[n] < 0:\n output_signal[n] = -(3 - (2 - 3 * np.absolute(input_signal[n]))**2) / 3\n if np.absolute(input_signal[n]) > 2 * th:\n if input_signal[n] > 0:\n output_signal[n] = 1\n if input_signal[n] < 0:\n output_signal[n] = -1\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n \n def distortion(self, input_signal, pflag):\n print('----------------------')\n alph = -1 * float(input('Enter the distortion gain (> 1): '))\n print('----------------------')\n q = np.sign(input_signal)\n output_signal = q * (1 - np.exp(alph * q * input_signal))\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n \n def tremolo(self, input_signal, pflag):\n print('----------------------')\n alph = float((input('Enter the depth of tremble (number between 0 and 1): ')))\n modfreq = float(input('Enter modulation frequency (< 20Hz): '))\n print('----------------------')\n output_signal = np.zeros(len(input_signal))\n for n in range(len(input_signal)):\n trem = 1 + alph * np.sin(2 * np.pi * modfreq * n / self.framerate)\n output_signal[n] = trem * input_signal[n]\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n 
\n def wahwah(self, input_signal, pflag):\n print('----------------------')\n damp = float(input('Enter the wahwah damping factor (< 0.5): '))\n minf = float(input('Enter minimum center cutoff frequency (~ 500Hz): '))\n maxf = float(input('Enter the maximum center cutoff frequency (~ 5000Hz): '))\n wahf = float(input('Enter the \"wah\" frequency (~ 2000Hz): '))\n print('----------------------')\n output_signal = np.zeros(len(input_signal))\n outh = np.zeros(len(input_signal))\n outl = np.zeros(len(input_signal))\n delta = wahf / self.framerate\n centerf = np.concatenate((np.arange(minf, maxf, delta), np.arange(maxf, minf, -delta)))\n while len(centerf) < len(input_signal):\n centerf = np.concatenate((centerf, centerf))\n centerf = centerf[:len(input_signal)]\n f1 = 2 * np.sin(np.pi * centerf[0] / self.framerate)\n outh[0] = input_signal[0]\n output_signal[0] = f1 * outh[0]\n outl[0] = f1 * output_signal[0]\n for n in range(1, len(input_signal)):\n outh[n] = input_signal[n] - outl[n-1] - 2 * damp * output_signal[n-1]\n output_signal[n] = f1 * outh[n] + output_signal[n-1]\n outl[n] = f1 * output_signal[n] + outl[n-1]\n f1 = 2 * np.sin(np.pi * centerf[n] / self.framerate)\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n \n def octaveup(self, input_signal, pflag):\n print('----------------------')\n gain = float(input('Enter gain of octave-up signal (number between 0 and 1): '))\n print('----------------------')\n output_signal = input_signal + gain * np.absolute(input_signal)\n output_signal = self.norm_signal(output_signal)\n self.plot_signal([input_signal, output_signal], pflag)\n return output_signal\n" ]
[ [ "numpy.absolute", "numpy.arange", "numpy.sin", "numpy.sign", "numpy.frombuffer", "numpy.concatenate", "numpy.shape", "numpy.roll", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wwhio/PaddleGAN
[ "f84ddda4273346cb693724a51e64fefd9b10877c" ]
[ "ppgan/models/generators/occlusion_aware.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# code was heavily based on https://github.com/AliaksandrSiarohin/first-order-model\n\nimport paddle\nfrom paddle import nn\nimport paddle.nn.functional as F\nfrom ...modules.first_order import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, make_coordinate_grid\nfrom ...modules.dense_motion import DenseMotionNetwork\nimport numpy as np\nimport cv2\n\n\nclass OcclusionAwareGenerator(nn.Layer):\n \"\"\"\n Generator that given source image and and keypoints try to transform image according to movement trajectories\n induced by keypoints. Generator follows Johnson architecture.\n \"\"\"\n def __init__(self,\n num_channels,\n num_kp,\n block_expansion,\n max_features,\n num_down_blocks,\n num_bottleneck_blocks,\n estimate_occlusion_map=False,\n dense_motion_params=None,\n estimate_jacobian=False,\n inference=False):\n super(OcclusionAwareGenerator, self).__init__()\n\n if dense_motion_params is not None:\n self.dense_motion_network = DenseMotionNetwork(\n num_kp=num_kp,\n num_channels=num_channels,\n estimate_occlusion_map=estimate_occlusion_map,\n **dense_motion_params)\n else:\n self.dense_motion_network = None\n\n self.first = SameBlock2d(num_channels,\n block_expansion,\n kernel_size=(7, 7),\n padding=(3, 3))\n\n down_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features, block_expansion * (2**i))\n out_features = min(max_features, block_expansion * (2**(i + 1)))\n down_blocks.append(\n DownBlock2d(in_features,\n out_features,\n kernel_size=(3, 3),\n padding=(1, 1)))\n self.down_blocks = nn.LayerList(down_blocks)\n\n up_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features,\n block_expansion * (2**(num_down_blocks - i)))\n out_features = min(max_features,\n block_expansion * (2**(num_down_blocks - i - 1)))\n up_blocks.append(\n UpBlock2d(in_features,\n out_features,\n kernel_size=(3, 3),\n padding=(1, 1)))\n self.up_blocks = nn.LayerList(up_blocks)\n\n self.bottleneck = paddle.nn.Sequential()\n in_features = min(max_features, block_expansion * (2**num_down_blocks))\n for i in range(num_bottleneck_blocks):\n self.bottleneck.add_sublayer(\n 'r' + str(i),\n ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))\n\n self.final = nn.Conv2D(block_expansion,\n num_channels,\n kernel_size=(7, 7),\n padding=(3, 3))\n self.estimate_occlusion_map = estimate_occlusion_map\n self.num_channels = num_channels\n self.inference = inference\n self.pad = 5\n\n def deform_input(self, inp, deformation):\n _, h_old, w_old, _ = deformation.shape\n _, _, h, w = inp.shape\n if h_old != h or w_old != w:\n deformation = deformation.transpose([0, 3, 1, 2])\n deformation = F.interpolate(deformation,\n size=(h, w),\n mode='bilinear',\n align_corners=False)\n deformation = deformation.transpose([0, 2, 3, 1])\n if self.inference:\n identity_grid = make_coordinate_grid((h, w),\n type=inp.dtype)\n identity_grid = identity_grid.reshape([1, h, w, 2])\n 
visualization_matrix = np.zeros((h,w)).astype(\"float32\")\n visualization_matrix[self.pad:h-self.pad, self.pad:w-self.pad] = 1.0\n gauss_kernel = paddle.to_tensor(cv2.GaussianBlur(visualization_matrix , (9, 9), 0.0, borderType=cv2.BORDER_ISOLATED))\n gauss_kernel = gauss_kernel.unsqueeze(0).unsqueeze(-1)\n deformation = gauss_kernel * deformation + (1-gauss_kernel) * identity_grid\n\n return F.grid_sample(inp,\n deformation,\n mode='bilinear',\n padding_mode='zeros',\n align_corners=True)\n\n def forward(self, source_image, kp_driving, kp_source):\n # Encoding (downsampling) part\n out = self.first(source_image)\n for i in range(len(self.down_blocks)):\n out = self.down_blocks[i](out)\n\n # Transforming feature representation according to deformation and occlusion\n output_dict = {}\n if self.dense_motion_network is not None:\n dense_motion = self.dense_motion_network(source_image=source_image,\n kp_driving=kp_driving,\n kp_source=kp_source)\n output_dict['mask'] = dense_motion['mask']\n output_dict['sparse_deformed'] = dense_motion['sparse_deformed']\n\n if 'occlusion_map' in dense_motion:\n occlusion_map = dense_motion['occlusion_map']\n output_dict['occlusion_map'] = occlusion_map\n else:\n occlusion_map = None\n deformation = dense_motion['deformation']\n out = self.deform_input(out, deformation)\n\n if occlusion_map is not None:\n if out.shape[2] != occlusion_map.shape[2] or out.shape[\n 3] != occlusion_map.shape[3]:\n occlusion_map = F.interpolate(occlusion_map,\n size=out.shape[2:],\n mode='bilinear',\n align_corners=False)\n if self.inference:\n h,w = occlusion_map.shape[2:]\n occlusion_map[:,:,0:self.pad,:] = 1.0\n occlusion_map[:,:,:,0:self.pad] = 1.0\n occlusion_map[:,:,h-self.pad:h,:] = 1.0\n occlusion_map[:,:,:,w-self.pad:w] = 1.0 \n out = out * occlusion_map\n\n output_dict[\"deformed\"] = self.deform_input(source_image,\n deformation)\n\n # Decoding part\n out = self.bottleneck(out)\n for i in range(len(self.up_blocks)):\n out = self.up_blocks[i](out)\n out = self.final(out)\n out = F.sigmoid(out)\n\n output_dict[\"prediction\"] = out\n\n return output_dict\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pedrohtg/pytorch
[ "b8f1b330d4ecad08d93624d9d1d510f3bcd197c7" ]
[ "semantic_segmentation/data.py" ]
[ "import os\nimport random\nfrom PIL import Image\nimport torch\nfrom torch.utils.data import Dataset\n\n\n# Labels: -1 license plate, 0 unlabeled, 1 ego vehicle, 2 rectification border, 3 out of roi, 4 static, 5 dynamic, 6 ground, 7 road, 8 sidewalk, 9 parking, 10 rail track, 11 building, 12 wall, 13 fence, 14 guard rail, 15 bridge, 16 tunnel, 17 pole, 18 polegroup, 19 traffic light, 20 traffic sign, 21 vegetation, 22 terrain, 23 sky, 24 person, 25 rider, 26 car, 27 truck, 28 bus, 29 caravan, 30 trailer, 31 train, 32 motorcycle, 33 bicycle\nnum_classes = 20\nfull_to_train = {-1: 19, 0: 19, 1: 19, 2: 19, 3: 19, 4: 19, 5: 19, 6: 19, 7: 0, 8: 1, 9: 19, 10: 19, 11: 2, 12: 3, 13: 4, 14: 19, 15: 19, 16: 19, 17: 5, 18: 19, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 29: 19, 30: 19, 31: 16, 32: 17, 33: 18}\ntrain_to_full = {0: 7, 1: 8, 2: 11, 3: 12, 4: 13, 5: 17, 6: 19, 7: 20, 8: 21, 9: 22, 10: 23, 11: 24, 12: 25, 13: 26, 14: 27, 15: 28, 16: 31, 17: 32, 18: 33, 19: 0}\nfull_to_colour = {0: (0, 0, 0), 7: (128, 64, 128), 8: (244, 35, 232), 11: (70, 70, 70), 12: (102, 102, 156), 13: (190, 153, 153), 17: (153, 153, 153), 19: (250, 170, 30), 20: (220, 220, 0), 21: (107, 142, 35), 22: (152, 251, 152), 23: (70, 130, 180), 24: (220, 20, 60), 25: (255, 0, 0), 26: (0, 0, 142), 27: (0, 0, 70), 28: (0, 60,100), 31: (0, 80, 100), 32: (0, 0, 230), 33: (119, 11, 32)}\n\n\nclass CityscapesDataset(Dataset):\n def __init__(self, split='train', crop=None, flip=False):\n super().__init__()\n self.crop = crop\n self.flip = flip\n self.inputs = []\n self.targets = []\n\n for root, _, filenames in os.walk(os.path.join('leftImg8bit_trainvaltest', 'leftImg8bit', split)):\n for filename in filenames:\n if os.path.splitext(filename)[1] == '.png':\n filename_base = '_'.join(filename.split('_')[:-1])\n target_root = os.path.join('gtFine_trainvaltest', 'gtFine', split, os.path.basename(root))\n self.inputs.append(os.path.join(root, filename_base + '_leftImg8bit.png'))\n self.targets.append(os.path.join(target_root, filename_base + '_gtFine_labelIds.png'))\n\n def __len__(self):\n return len(self.inputs)\n\n def __getitem__(self, i):\n # Load images and perform augmentations with PIL\n input, target = Image.open(self.inputs[i]), Image.open(self.targets[i])\n # Random uniform crop\n if self.crop is not None:\n w, h = input.size\n x1, y1 = random.randint(0, w - self.crop), random.randint(0, h - self.crop)\n input, target = input.crop((x1, y1, x1 + self.crop, y1 + self.crop)), target.crop((x1, y1, x1 + self.crop, y1 + self.crop))\n # Random horizontal flip\n if self.flip:\n if random.random() < 0.5:\n input, target = input.transpose(Image.FLIP_LEFT_RIGHT), target.transpose(Image.FLIP_LEFT_RIGHT)\n\n # Convert to tensors\n w, h = input.size\n input = torch.ByteTensor(torch.ByteStorage.from_buffer(input.tobytes())).view(h, w, 3).permute(2, 0, 1).float().div(255)\n target = torch.ByteTensor(torch.ByteStorage.from_buffer(target.tobytes())).view(h, w).long()\n # Normalise input\n input[0].add_(-0.485).div_(0.229)\n input[1].add_(-0.456).div_(0.224)\n input[2].add_(-0.406).div_(0.225)\n # Convert to training labels\n remapped_target = target.clone()\n for k, v in full_to_train.items():\n remapped_target[target == k] = v\n # Create one-hot encoding\n target = torch.zeros(num_classes, h, w)\n for c in range(num_classes):\n target[c][remapped_target == c] = 1\n return input, target, remapped_target # Return x, y (one-hot), y (index)\n" ]
[ [ "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neuroailab/RAFT
[ "03e532cdcad35d3582b053035ec10257c73cbaaa" ]
[ "core/utils/segmentation_metrics.py" ]
[ "import numpy as np\nimport scipy\nimport sklearn.metrics\nimport skimage\nfrom skimage.segmentation.boundaries import find_boundaries\nfrom sklearn.cluster import KMeans\n\nimport torch\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport pdb\n\ndef object_id_hash(objects, dtype_out=torch.int32, val=256, channels_last=False):\n '''\n objects: [...,C]\n val: a number castable to dtype_out\n returns:\n out: [...,1] where each value is given by sum([val**(C-1-c) * objects[...,c:c+1] for c in range(C)])\n '''\n if not isinstance(objects, torch.Tensor):\n objects = torch.tensor(objects)\n if not channels_last:\n objects = objects.permute(0,2,3,1)\n C = objects.shape[-1]\n val = torch.tensor(val, dtype=dtype_out)\n objects = objects.to(dtype_out)\n out = torch.zeros_like(objects[...,0:1])\n for c in range(C):\n scale = torch.pow(val, C-1-c)\n out += scale * objects[...,c:c+1]\n if not channels_last:\n out = out.permute(0,3,1,2)\n\n return out\n\nclass SegmentationMetrics(object):\n \"\"\"\n A class for computing metrics given a pair of pred and gt segment maps\n \"\"\"\n def __init__(\n self,\n gt_objects, # the true segmentation\n pred_objects=None, # the predicted segmentation\n background_value=0, # value of background segment\n min_gt_size=1, # num pixels needed to be a true segment\n size=None, # image size to do evaluation at\n max_objects=None,\n exclude_pred_ids=None,\n ):\n ## attributes for all evaluations\n self.size = size\n self.background_value = background_value\n self.min_gt_size = min_gt_size\n self.max_objects = max_objects\n\n ## set attributes of the gt and resize\n self.gt_objects = gt_objects\n self.pred_objects = pred_objects\n\n ## initialize metrics\n self.best_ious = None\n self.mean_ious = None\n self.recalls = None\n self.boundary_f1_scores = None\n self.mean_boundary_f1_scores = None\n self.exclude_pred_ids = exclude_pred_ids\n\n @property\n def gt_objects(self):\n return self._gt_objects\n @gt_objects.setter\n def gt_objects(self, value):\n self._set_gt_objects(value)\n self._set_gt_ids()\n\n @property\n def pred_objects(self):\n return self._pred_objects\n @pred_objects.setter\n def pred_objects(self, value):\n self._set_pred_objects(value)\n\n def _object_id_hash(self, objects, dtype_out=np.int32, val=256):\n C = objects.shape[-1]\n out = np.zeros(shape=objects.shape[:-1], dtype=dtype_out)\n for c in range(C):\n scale = np.power(val, C-1-c)\n out += scale * objects[...,c]\n return out\n\n def _parse_objects_tensor(self, objects):\n\n shape = list(objects.shape)\n if len(shape) == 2:\n objects = objects[...,None]\n\n dtype = objects.dtype\n if dtype == torch.uint8:\n assert (shape[-1] == 3) or (shape[-3] == 3), shape\n channels_last = True if shape[-1] == 3 else False\n else:\n assert dtype == torch.int32, dtype\n if (shape[-1] == 1) or (shape[-3] == 1):\n channels_last = True if shape[-1] == 1 else False\n else: # 3 channels\n objects = objects[...,None]\n channels_last = True\n shape = shape + [1]\n\n self._temporal = False\n if len(shape) == 3:\n objects = objects[None]\n self.B = 1\n self.T = 1\n self.BT = self.B\n elif len(shape) == 5:\n self._temporal = True\n self.B, self.T = shape[:2]\n self.BT = self.B*self.T\n objects = objects.view(self.BT,*shape[2:])\n else:\n assert len(objects.shape) == 4, \"torch objects must have shape [BT,C,H,W] or [BT,H,W,C]\"\n self.B = shape[0]\n self.T = 1\n self.BT = self.B\n\n if self.max_objects is None:\n if dtype == torch.uint8:\n hashed = object_id_hash(objects, channels_last=channels_last)\n 
else:\n hashed = objects\n ims = list(hashed)\n num_objects = [int(torch.unique(im).size(0)) for im in ims]\n self.max_objects = max(num_objects)\n\n if dtype == torch.uint8:\n objects = object_id_hash(objects, channels_last=channels_last)\n\n if not channels_last:\n objects = objects.permute(0,2,3,1)\n\n if self.size is not None:\n objects = F.interpolate(objects.permute(0,3,1,2).float(), size=self.size, mode='nearest').permute(0,2,3,1).int()\n\n assert objects.dtype == torch.int32, objects.dtype\n return objects.numpy()\n\n def _parse_objects_array(self, objects):\n if objects.shape[-1] not in [1,3]:\n objects = objects[...,None]\n if objects.shape[-1] == 3:\n assert objects.dtype == np.uint8, objects.dtype\n objects = self._object_id_hash(objects)\n else:\n assert objects.dtype == np.int32\n\n self._temporal = False\n if len(objects.shape) == 5:\n self._temporal = True\n self.B,self.T = objects.shape[:2]\n self.BT = self.B*self.T\n objects = objects.reshape([self.BT] + objects.shape[2:])\n elif len(objects.shape) == 3:\n self.B = objects.shape[0]\n self.T = 1\n self.BT = self.B\n objects = objects[...,None]\n else:\n assert len(objects.shape) == 4, objects.shape\n self.B = objects.shape[0]\n self.T = 1\n self.BT = self.B\n\n if self.size is not None:\n objects = map(lambda im: skimage.transform.resize(im.astype(float), self.size, order=0).astype(np.int32), [objects[ex] for ex in range(self.BT)])\n objects = np.stack(objects, 0)\n\n def _set_gt_objects(self, objects):\n if isinstance(objects, torch.Tensor):\n objects = self._parse_objects_tensor(objects)\n else:\n objects = self._parse_objects_array(objects)\n\n assert len(objects.shape) == 4, objects.shape\n assert objects.shape[-1] == 1, objects.shape\n assert objects.dtype == np.int32, objects.dtype\n\n self._gt_objects = objects[...,0]\n self.gt_shape = self._gt_objects.shape\n self.size = self.gt_shape[-2:]\n\n def _set_gt_ids(self):\n self.gt_ids = []\n for ex in range(self.BT):\n self.gt_ids.append(\n np.unique(self.gt_objects[ex]))\n\n\n def _set_pred_objects(self, objects):\n if objects is None:\n return\n if isinstance(objects, torch.Tensor):\n objects = self._parse_objects_tensor(objects)\n else:\n objects = self._parse_objects_array(objects)\n\n assert len(objects.shape) == 4, objects.shape\n assert objects.shape[-1] == 1, objects.shape\n assert objects.dtype == np.int32, objects.dtype\n\n ## subtract off the minimum value\n offsets = objects.min(axis=(1,2), keepdims=True)\n objects -= offsets\n\n self._pred_objects = objects[...,0]\n\n\n def _get_mask(self, objects, obj_id=0):\n return objects == obj_id\n\n def get_gt_mask(self, ex, t=0, obj_id=0):\n b = ex*self.T + t\n return self._get_mask(self.gt_objects[b], obj_id)\n\n def get_pred_mask(self, ex, t=0, obj_id=0):\n assert self.pred_objects is not None\n b = ex*self.T + t\n return self._get_mask(self.pred_objects[b], obj_id)\n\n def get_background(self, ex, t=0):\n return self.get_gt_mask(ex, t, self.background_value)\n\n @staticmethod\n def mask_IoU(pred_mask, gt_mask, min_gt_size=1):\n \"\"\"Compute intersection over union of two boolean masks\"\"\"\n assert pred_mask.shape == gt_mask.shape, (pred_mask.shape, gt_mask.shape)\n assert pred_mask.dtype == gt_mask.dtype == bool, (pred_mask.dtype, gt_mask.dtype)\n num_gt_px = gt_mask.sum()\n num_pred_px = pred_mask.sum()\n if num_gt_px < min_gt_size:\n return np.nan\n\n overlap = (pred_mask & gt_mask).sum().astype(float)\n IoU = overlap / (num_gt_px + num_pred_px - overlap)\n return IoU\n\n @staticmethod\n def 
mask_precision(pred_mask, gt_mask, min_gt_size=1):\n assert pred_mask.shape == gt_mask.shape, (pred_mask.shape, gt_mask.shape)\n assert pred_mask.dtype == gt_mask.dtype == bool, (pred_mask.dtype, gt_mask.dtype)\n num_gt_px = gt_mask.sum()\n num_pred_px = pred_mask.sum()\n if num_gt_px < min_gt_size:\n return np.nan\n\n overlap = (pred_mask & gt_mask).sum().astype(float)\n precision = overlap / np.maximum(num_pred_px, 1.0)\n return precision\n\n @staticmethod\n def mask_recall(pred_mask, gt_mask, min_gt_size=1):\n assert pred_mask.shape == gt_mask.shape, (pred_mask.shape, gt_mask.shape)\n assert pred_mask.dtype == gt_mask.dtype == bool, (pred_mask.dtype, gt_mask.dtype)\n num_gt_px = gt_mask.sum()\n num_pred_px = pred_mask.sum()\n if num_gt_px < min_gt_size:\n return np.nan\n\n overlap = (pred_mask & gt_mask).sum().astype(float)\n recall = overlap / np.maximum(num_gt_px, 1.0)\n return recall\n\n def _mask_metrics(self):\n return {'iou': self.mask_IoU, 'precision': self.mask_precision, 'recall': self.mask_recall}\n\n def compute_matched_IoUs(self, pred_objects=None, exclude_gt_ids=[], metric='iou'):\n if pred_objects is not None:\n self.pred_objects = pred_objects\n\n assert metric in ['iou', 'precision', 'recall'], \"Metric must be 'iou', 'precision', or 'recall'\"\n metric_func = self._mask_metrics()[metric]\n\n exclude_ids = list(set(exclude_gt_ids + [self.background_value]))\n best_IoUs = []\n best_pred_objs = []\n matched_preds, matched_gts = [], []\n\n for b in range(self.BT):\n\n ex, t = (b // self.T, b % self.T)\n\n # the ids in each gt mask\n ids_here = [o for o in self.gt_ids[b] if o not in exclude_ids]\n num_gt = len(ids_here)\n\n # the pred masks\n if self.exclude_pred_ids is None:\n preds = map(lambda o_id: self.get_pred_mask(ex, t, o_id),\n sorted(list(np.unique(self.pred_objects[b]))))\n else:\n preds = map(lambda o_id: self.get_pred_mask(ex, t, o_id),\n sorted([i for i in list(np.unique(self.pred_objects[b])) if i not in self.exclude_pred_ids]))\n preds = list(preds)\n\n num_preds = len(preds)\n\n # # ---- visualize ----\n # import pdb;pdb.set_trace()\n # import matplotlib.pyplot as plt\n # plt.figure(figsize=(20, 5))\n # for i in range(num_preds):\n # plt.subplot(1, num_preds, i+1)\n # plt.imshow(preds[i])\n # plt.title('Pred %d' % i)\n # plt.show()\n # plt.close()\n # plt.figure(figsize=(20, 5))\n # for i in range(num_gt):\n # plt.subplot(1, num_gt, i+1)\n # plt.imshow(self.get_gt_mask(ex, t, ids_here[i]))\n # plt.title('GT %d' % i)\n # plt.show()\n # plt.close()\n\n # compute full matrix of ious\n gts = []\n ious = np.zeros((num_gt, num_preds), dtype=np.float32)\n for m in range(num_gt):\n gt_mask = self.get_gt_mask(ex, t, ids_here[m])\n gts.append(gt_mask)\n for n in range(num_preds):\n pred_mask = preds[n]\n # pdb.set_trace()\n iou = metric_func(pred_mask, gt_mask, self.min_gt_size)\n ious[m,n] = iou if not np.isnan(iou) else 0.0\n\n # linear assignment\n gt_inds, pred_inds = scipy.optimize.linear_sum_assignment(1.0 - ious)\n\n # output values\n best = np.array([0.0] * len(ids_here))\n best[gt_inds] = ious[gt_inds, pred_inds]\n best_IoUs.append(best)\n best_objs = np.array([0] * len(ids_here))\n best_objs[gt_inds] = np.array([sorted(list(np.unique(self.pred_objects[b])))[i] for i in pred_inds])\n best_pred_objs.append(best_objs)\n\n count = 0\n matched_pred = []\n for m in range(num_gt):\n if count < len(gt_inds):\n if m == gt_inds[count]:\n matched_pred.append(preds[pred_inds[count]])\n count += 1\n continue\n\n matched_pred.append(np.zeros_like(preds[0]))\n 
matched_preds.append(matched_pred)\n # # # # ---- visualize ----\n # import pdb;pdb.set_trace()\n # import matplotlib.pyplot as plt\n # plt.figure(figsize=(20, 5))\n # for i in range(len(matched_preds[0])):\n # plt.subplot(1, len(matched_preds[0]), i+1)\n # plt.imshow(matched_preds[0][i])\n # plt.title('Pred %d' % i)\n # plt.show()\n # plt.close()\n # plt.figure(figsize=(20, 5))\n # for i in range(len(matched_preds[0])):\n # plt.subplot(1, len(matched_preds[0]), i+1)\n # plt.imshow(matched_gts[0][i])\n # plt.title('GT %d' % i)\n # plt.show()\n # plt.close()\n # print(best_IoUs, best[gt_inds])\n #\n # pdb.set_trace()\n\n self.best_ious = best_IoUs\n self.best_object_ids = best_pred_objs\n self.seg_out = (matched_preds, [gts], best_IoUs)\n\n return self.mean_ious\n\n def compute_best_IoUs(self, pred_objects=None):\n raise NotImplementedError(\"Compute the best possible IoUs, reusing pred objects if needed\")\n\n def compute_recalls(self, pred_objects=None, thresh=0.5, exclude_gt_ids=[]):\n if pred_objects is not None:\n self.pred_objects = pred_objects\n\n if self.best_ious is None:\n self.compute_best_IoUs(exclude_gt_ids=exclude_gt_ids)\n\n recalls = np.zeros((self.BT), dtype=np.float32)\n for b in range(self.BT):\n true_pos = np.array(self.best_ious[b]) >= thresh\n recall = (true_pos.sum().astype(float) / len(true_pos)) if len(true_pos) else np.nan\n recalls[b] = recall\n\n self.recalls = recalls\n self.mean_recalls = np.nanmean(self.recalls)\n return self.mean_recalls\n\n def compute_boundary_f_measures(self, pred_objects=None, stride=1, connectivity=2, mode='thick',\n exclude_gt_ids=[]):\n\n \"\"\"\n For matched pred and gt masks, compute F measure on their boundary pixels.\n F measure is defined as 2*(precision * recall) / (precision + recall)\n \"\"\"\n if pred_objects is not None:\n self.pred_objects = pred_objects\n\n if self.best_object_ids is None:\n self.compute_matched_IoUs()\n\n exclude_ids = exclude_gt_ids + [self.background_value]\n\n boundary_fs = []\n for b in range(self.BT):\n ex, t = (b // self.T, b % self.T)\n\n ## the ground truth\n gt_ids_here = [o for o in self.gt_ids[b] if o not in exclude_ids]\n num_gt = len(gt_ids_here)\n\n ## get the object ids that best matched gt\n matched_objs = self.best_object_ids[b]\n num_pred = len(matched_objs)\n\n boundary_f = []\n for i,o_id in enumerate(matched_objs):\n gt_mask = self.get_gt_mask(ex, t, gt_ids_here[i])\n pred_mask = self.get_pred_mask(ex, t, o_id)\n\n gt_boundary = find_boundaries(gt_mask, connectivity=connectivity, mode=mode)\n pred_boundary = find_boundaries(pred_mask, connectivity=connectivity, mode=mode)\n\n ## precision and recall and F1\n true_pos = (gt_boundary & pred_boundary).sum().astype(float)\n false_pos = (~gt_boundary & pred_boundary).sum().astype(float)\n false_neg = (gt_boundary & (~pred_boundary)).sum().astype(float)\n precision = true_pos / (true_pos + false_pos) if (true_pos > 0.0) else 1.0 - (false_pos > 0.0).astype(float)\n recall = true_pos / (true_pos + false_neg) if (true_pos + false_neg > 0.0) else 1.0\n F1 = (2 * precision * recall) / (precision + recall) if (precision + recall > 0.0) else 0.0\n boundary_f.append(F1)\n\n ## if there were fewer pred objects than gt\n if num_pred < num_gt:\n boundary_f.extend([0.0] * (num_gt - num_pred))\n\n boundary_fs.append(np.array(boundary_f))\n\n self.boundary_f1_scores = boundary_fs\n\n return self.mean_boundary_f1_scores\n\n @property\n def mean_ious(self):\n if self.best_ious is None:\n return None\n elif self._mean_ious is None:\n self._mean_ious = 
np.array([np.nanmean(self.best_ious[b]) for b in range(self.BT)])\n if self._temporal:\n self._mean_ious = self._mean_ious.reshape((self.B, self.T))\n return self._mean_ious\n else:\n return self._mean_ious\n @mean_ious.setter\n def mean_ious(self, value=None):\n if value is not None:\n raise ValueError(\"You can't set the mean ious, you need to compute it\")\n self._mean_ious = value\n\n @property\n def mean_boundary_f1_scores(self):\n if self.boundary_f1_scores is None:\n return None\n elif self._mean_boundary_f1_scores is None:\n self._mean_boundary_f1_scores = np.array(\n [np.nanmean(self.boundary_f1_scores[b]) for b in range(self.BT)])\n if self._temporal:\n self._mean_boundary_f1_scores = self._mean_boundary_f1_scores.reshape((self.B, self.T))\n return self._mean_boundary_f1_scores\n else:\n return self._mean_boundary_f1_scores\n\n @mean_boundary_f1_scores.setter\n def mean_boundary_f1_scores(self, value=None):\n if value is not None:\n raise ValueError(\"You need to compute boundary_f_measure\")\n self._mean_boundary_f1_scores = value\n\n\ndef measure_static_segmentation_metric(out, inputs, size, segment_key,\n eval_full_res=False, moving_only=True, exclude_zone=True,\n exclude_pred_ids=None, gt_seg=None):\n\n if gt_seg is not None:\n gt_objects = gt_seg.int()\n else:\n gt_objects = inputs['gt_segment'].int()\n assert gt_objects.max() < torch.iinfo(torch.int32).max, gt_objects\n if not eval_full_res:\n gt_objects = F.interpolate(gt_objects.float().unsqueeze(1), size=size, mode='nearest').int()\n\n exclude_values = []\n if not isinstance(segment_key, list):\n segment_key = [segment_key]\n\n segment_metric = {}\n segment_out = {}\n for key in segment_key:\n results = {'mean_ious': [], 'recalls': [], 'boundary_f1_scores': []}\n pred_objects = out[key]\n pred_objects = pred_objects.reshape(pred_objects.shape[0], 1, size[0], size[1])\n\n metric = SegmentationMetrics(gt_objects=gt_objects.cpu(),\n pred_objects=pred_objects.int().cpu(),\n size=None if eval_full_res else size,\n background_value=0,\n exclude_pred_ids=exclude_pred_ids)\n\n metric.compute_matched_IoUs(exclude_gt_ids=list(set([0] + exclude_values)))\n metric.compute_recalls()\n metric.compute_boundary_f_measures(exclude_gt_ids=list(set([0] + exclude_values)))\n\n results['mean_ious'].append(metric.mean_ious)\n results['recalls'].append(metric.recalls)\n results['boundary_f1_scores'].append(metric.mean_boundary_f1_scores)\n\n for k, v in results.items():\n segment_metric[f'metric_{key}_{k}'] = torch.tensor(np.mean(v))\n segment_out[key] = metric.seg_out\n\n return segment_metric, segment_out\n\n\n\ndef four_quadrant_segments(size=[128,128], separator=[0.5, 0.5], minval=1, maxval=32):\n H,W = size\n h1 = int(H * separator[0])\n h2 = H-h1\n w1 = int(W * separator[1])\n w2 = W-w1\n\n vals = torch.randint(size=[4], low=minval, high=maxval, dtype=torch.int32)\n q1 = torch.ones([h1,w1]).to(vals) * vals[0]\n q2 = torch.ones([h1,w2]).to(vals) * vals[1]\n q3 = torch.ones([h2,w2]).to(vals) * vals[2]\n q4 = torch.ones([h2,w1]).to(vals) * vals[3]\n top = torch.cat([q2, q1], dim=1)\n bottom = torch.cat([q3,q4], dim=1)\n out = torch.cat([top,bottom], dim=0)[None]\n return out\n\nif __name__ == '__main__':\n size = [128,128]\n # gt_objects = torch.randint(size=(4,2,3,32,32), low=0, high=255, dtype=torch.uint8)\n # gt_objects = torch.randint(size=(4,2,1,32,32), low=0, high=8, dtype=torch.int32)\n # pred_objects = torch.randint(size=(4,2,16,16), low=0, high=32, dtype=torch.int32)\n gt_objects = four_quadrant_segments(size, 
separator=[0.3,0.6])\n Metrics = SegmentationMetrics(gt_objects, pred_objects=gt_objects, size=size)\n print(\"gt\", Metrics.gt_objects.shape, Metrics.gt_objects.dtype)\n print(\"pred\", Metrics.pred_objects.shape, Metrics.pred_objects.dtype)\n print(\"B, T, size\", Metrics.B, Metrics.T, Metrics.size)\n\n Metrics.compute_matched_IoUs()\n print(\"Best ious\", Metrics.best_ious)\n print(\"Best objects\", Metrics.best_object_ids)\n\n Metrics.compute_recalls()\n print(\"recall\", Metrics.recalls)\n\n # Metrics.compute_matched_IoUs(gt_objects)\n # print(\"Best ious\", Metrics.best_ious)\n # print(\"Best objects\", Metrics.best_object_ids)\n\n # Metrics.compute_recalls()\n # print(\"recall\", Metrics.recalls)\n\n print(\"mean ious\", Metrics.mean_ious)\n Metrics.compute_boundary_f_measures()\n print(\"boundary f1\", Metrics.boundary_f1_scores)\n print(\"boundary f1\", Metrics.mean_boundary_f1_scores)\n\n # print(\"mask precision\", Metrics.mask_precision(Metrics.pred_objects[0] == Metrics.gt_ids[0][0], Metrics.gt_objects[0] == Metrics.gt_ids[0][0]))\n # print(\"mask recall\", Metrics.mask_recall(Metrics.pred_objects[0] == Metrics.gt_ids[0][0], Metrics.gt_objects[0] == Metrics.gt_ids[0][0]))\n\n Metrics.compute_matched_IoUs(metric='precision')\n print(\"Mask precision\", Metrics.best_ious)\n\n Metrics.compute_matched_IoUs(metric='recall')\n print(\"Mask recall\", Metrics.best_ious) \n \n " ]
[ [ "torch.randint", "torch.cat", "torch.iinfo", "torch.unique", "numpy.mean", "numpy.nanmean", "numpy.zeros_like", "scipy.optimize.linear_sum_assignment", "torch.pow", "torch.ones", "numpy.unique", "numpy.stack", "torch.tensor", "numpy.zeros", "numpy.power", "numpy.isnan", "torch.zeros_like", "numpy.array", "numpy.maximum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
wconti27/DS4300_FIFA_Tool
[ "ea34a8cfbd634dbac864122fc73d2a4b1efb1702" ]
[ "import_data.py" ]
[ "import pymongo\nimport os\nimport pandas as pd\nimport json\n\ndef main():\n client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n databases = client.list_database_names()\n\n if \"fifa\" not in databases:\n db = client[\"fifa\"]\n\n players_collection = db[\"players\"]\n ultimate_team_collection = db[\"ultimate_teams\"]\n\n for file in os.listdir(\"data/players\"):\n data = pd.read_csv(\"data/players/\" + file)\n data[\"year\"] = \"20\" + file.split(\".\")[0][-2:]\n if \"female\" in file:\n data[\"gender\"] = \"F\"\n else:\n data[\"gender\"] = \"M\"\n data_json = json.loads(data.to_json(orient='records'))\n \n for player in data_json:\n columns_to_format = [\"ls\", \"st\", \"rs\", \"lw\", \"lf\",\t\"cf\", \"rf\", \"rw\", \"lam\", \"cam\",\t\"ram\", \"lm\", \"lcm\",\t\"cm\", \"rcm\", \"rm\", \"lwb\", \"ldm\", \"cdm\", \"rdm\", \"rwb\", \"lb\", \"lcb\", \"cb\", \"rcb\", \"rb\", \"gk\"]\n for column in columns_to_format:\n if isinstance(player[column], str):\n if \"+\" in player[column]:\n split = player[column].split(\"+\")\n player[column] = int(split[0]) + int(split[1])\n elif \"-\" in player[column]:\n split = player[column].split(\"-\")\n player[column] = int(split[0]) - int(split[1])\n list_columns = [\"player_positions\", \"player_tags\", \"player_traits\"]\n for column in list_columns:\n if player[column] is not None:\n player[column] = [x.strip() for x in player[column].split(',')]\n players_collection.insert_many(data_json)\n print(\"Successfully loaded data for\", file)\n\n print(\"Creating Indices for Faster Searching\")\n players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING)])\n players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('short_name', pymongo.ASCENDING)])\n players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('overall', pymongo.DESCENDING)])\n\n ultimate_team_collection.create_index([('year', pymongo.ASCENDING), ('username', pymongo.ASCENDING), ('team_name', pymongo.ASCENDING)])\n\n else:\n print(\"Data has been previously loaded.\")\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
wkoa/slip_detection
[ "077b34ed69c44cbc6bed13a018e6ec223c9902f0" ]
[ "train_net.py" ]
[ "import os\n\nimport torch\nfrom torch import optim\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom tensorboardX import SummaryWriter\n\nfrom libs.models import network\nfrom libs.utils import data_loader\n\nparams = {}\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train_net(params):\n\n writer = SummaryWriter(log_dir='logs')\n # dummy_input = [torch.zeros(1,3,480,640) for i in range(8)]\n # Create network\n slip_detection_model = network.Slip_detection_network(base_network=params['cnn'], pretrained=params['pretrained'],\n rnn_input_size=params['rnn_input_size'],\n rnn_hidden_size=params['rnn_hidden_size'],\n rnn_num_layers=params['num_layers'],\n num_classes=params['num_classes'],\n use_gpu=params['use_gpu'],\n dropout=params['dropout'])\n if params['use_gpu']:\n slip_detection_model = slip_detection_model.cuda()\n # Some Warnings in there.\n # writer.add_graph(slip_detection_model, input_to_model=(dummy_input, dummy_input))\n\n if 'net_params' in params.keys():\n assert params['net_params'].endswith('.pth'), \"Wrong model path {}\".format(params['net_params'])\n net_params_state_dict = torch.load(params['net_params'])\n slip_detection_model.load_state_dict(net_params_state_dict)\n\n # Init optimizer & loss func.\n optimizer = optim.Adam(slip_detection_model.rnn_network.parameters(), lr=params['lr'])\n loss_function = nn.CrossEntropyLoss()\n\n # Dataloader\n train_dataset = data_loader.Tactile_Vision_dataset(data_path=params['train_data_dir'])\n train_data_loader = DataLoader(train_dataset, batch_size=params['batch_size'], shuffle=True,\n num_workers=params['num_workers'])\n test_dataset = data_loader.Tactile_Vision_dataset(data_path=params['test_data_dir'])\n test_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=params['num_workers'])\n # To record training procession\n train_loss = []\n train_acc = []\n\n # Start training\n for epoch in range(params['epochs']):\n # Start\n total_loss = 0.0\n total_acc = 0.0\n total = 0.0\n for i, data in enumerate(train_data_loader):\n # one iteration\n rgb_imgs, tactile_imgs, label = data\n output = slip_detection_model(rgb_imgs, tactile_imgs)\n if params['use_gpu']:\n label = label.cuda()\n loss = loss_function(output, label)\n\n # Backward & optimize\n slip_detection_model.zero_grad()\n loss.backward()\n optimizer.step()\n\n # cal training acc\n _, predicted = torch.max(output.data, 1)\n total_acc += (predicted == label).sum().item()\n total_loss += float(loss.data)\n total += len(label)\n train_loss.append(total_loss/total)\n train_acc.append(total_acc/total)\n\n writer.add_scalar('train_loss', train_loss[epoch],)\n writer.add_scalar('train_acc', train_acc[epoch],)\n if epoch%params['print_interval'] == 0:\n print('[Epoch: %3d/%3d] Training Loss: %.3f, Training Acc: %.3f'\n % (epoch, params['epochs'], train_loss[epoch], train_acc[epoch],))\n if (epoch + 1)%params['test_interval'] == 0:\n with torch.no_grad():\n correct = 0\n total = 0\n for rgb_imgs, tactile_imgs, labels in test_data_loader:\n outputs = slip_detection_model(rgb_imgs, tactile_imgs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n if params['use_gpu']:\n labels = labels.cuda()\n correct += (predicted == labels).sum().item()\n print('Test Accuracy of the model on the {} test images: {} %'.format(total, 100 * correct / total))\n # Save 5 different model\n if epoch%(int(params['epochs']/5)) == 0:\n if 'save_dir' in params.keys():\n model_path = 
os.path.join(params['save_dir'], 'slip_detection_network_{:0>5}.pth'.format(epoch))\n torch.save(slip_detection_model.state_dict(), model_path)\n\n if 'save_dir' in params.keys():\n model_path = os.path.join(params['save_dir'], 'slip_detection_network_{:0>6}.pth'.format(epoch))\n torch.save(slip_detection_model.state_dict(), model_path)\n writer.close()\n\n\nif __name__ == '__main__':\n # No modification is recommended.\n params['rnn_input_size'] = 64\n params['rnn_hidden_size'] = 64\n params['num_classes'] = 2\n params['num_layers'] = 1\n params['use_gpu'] = False\n if torch.cuda.is_available():\n params['use_gpu'] = True\n # Customer params setting.\n params['epochs'] = 10\n params['print_interval'] = 5\n params['test_interval'] = 10\n params['batch_size'] = 2\n params['num_workers'] = 1\n params['lr'] = 1e-5\n params['dropout'] = 0.8\n params['train_data_dir'] = 'data'\n params['test_data_dir'] = 'data'\n # Use Alextnet to debug.\n # You can choose vgg_16, vgg_19 or inception_v3(unreliable). Poor MBP\n params['cnn'] = 'debug'\n params['pretrained'] = True # CNN is pretrained by ImageNet or not\n # params['net_params'] = 'model/pretrained_net/'\n\n params['save_dir'] = 'model'\n # Start train\n train_net(params)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.load", "torch.no_grad", "torch.cuda.is_available", "torch.utils.data.dataloader.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mkuchnik/Efficient_Augmentation
[ "a82190c02509682c34f2df782fb58f8ffd3b11da" ]
[ "experiments.py" ]
[ "import numpy as np\nimport sklearn.model_selection\nimport logging\n\nimport collections\nimport pprint\nimport time\n\nimport selection_policy\nimport augmentations\nimport experiments\nimport experiments_util\nimport featurized_classifiers\nimport sklearn.cluster\nimport sklearn.preprocessing\nimport sample_dpp\n\n\ndef run_test(classes,\n rounds,\n n_aug_sample_points,\n n_train,\n n_jobs,\n cv,\n use_GPU,\n batch_size,\n dataset,\n aug_transformation,\n aug_kw_args,\n logistic_reg__C,\n CNN_extractor_max_iter,\n use_loss,\n experiment_configs,\n results_filename,\n baseline_test=False,\n ):\n\n run_params = {\n \"classes\": classes,\n \"rounds\": rounds,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"n_train\": n_train,\n \"n_jobs\": n_jobs,\n \"cv\": cv,\n \"use_GPU\": use_GPU,\n \"batch_size\": batch_size,\n \"dataset\": dataset.name,\n \"aug_transformation\": aug_transformation.name,\n \"aug_kw_args\": aug_kw_args,\n \"logistic_reg__C\": logistic_reg__C,\n \"CNN_extractor_max_iter\": CNN_extractor_max_iter,\n \"use_loss\": use_loss,\n \"experiment_configs\": experiment_configs,\n \"results_filename\": results_filename,\n \"baseline_test\": baseline_test,\n }\n\n pprint.pprint(run_params)\n\n assert n_aug_sample_points\n\n (x_train, y_train), (x_test, y_test) = experiments_util.prepare_dataset(\n dataset,\n classes,\n n_train,\n )\n print(\"Train class breakdown: {}\".format(\n np.unique(y_train, return_counts=True))\n )\n print(\"Test class breakdown: {}\".format(\n np.unique(y_test, return_counts=True))\n )\n\n aug_f = augmentations.get_transformation(aug_transformation)\n (orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_idxs_train) = \\\n experiments_util.poison_dataset(x_train,\n y_train,\n aug_f,\n aug_kw_args)\n (orig_and_auged_x_test,\n orig_and_auged_y_test,\n orig_and_auged_idxs_test) = \\\n experiments_util.poison_dataset(x_test,\n y_test,\n aug_f,\n aug_kw_args)\n print(\"x_train shape: {}\".format(x_train.shape))\n print(\"orig_and_auged_x_train shape: {}\".format(\n orig_and_auged_x_train.shape))\n\n clf = featurized_classifiers.build_featurized_LeNet_logistic_clf(\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n logistic_reg__C,\n cv,\n n_jobs,\n )\n\n svm__C = [0.01, 0.1, 1, 10, 100]\n svm_cv = 4\n is_SV = experiments_util.get_SV_featurized_LeNet(\n x_train,\n y_train,\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n svm__C,\n svm_cv,\n n_jobs\n )\n print(\"Number of support vectors is: {}\".format(np.sum(is_SV)))\n SV_idxs = np.where(is_SV)[0]\n orig_and_SV_idxs = np.concatenate([SV_idxs, [-1]])\n print(\"orig_and_SV_idxs: {}\".format(orig_and_SV_idxs))\n print(\"orig_and_SV_idxs shape: {}\".format(orig_and_SV_idxs.shape))\n SV_orig_and_auged_mask = np.isin(orig_and_auged_idxs_train,\n orig_and_SV_idxs)\n SV_x_train = orig_and_auged_x_train[SV_orig_and_auged_mask]\n SV_y_train = orig_and_auged_y_train[SV_orig_and_auged_mask]\n clf.fit(SV_x_train, SV_y_train)\n VSV_acc = clf.score(orig_and_auged_x_test, orig_and_auged_y_test)\n print(\"VSV acc: {}\".format(VSV_acc))\n\n (no_aug_no_poison_acc,\n poisoned_acc,\n all_aug_train_poisoned_acc,\n aug_scores,\n after_aug_scores,\n best_params,\n training_total_time) = experiments_util.train_and_score_clf(\n clf,\n x_train,\n y_train,\n x_test,\n y_test,\n orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n use_loss,\n cv,\n )\n training_end_time = time.time()\n\n if baseline_test:\n # If baseline test, we just use unmodified test set\n 
exp_x_test = x_test\n exp_y_test = y_test\n else:\n exp_x_test = orig_and_auged_x_test\n exp_y_test = orig_and_auged_y_test\n\n experiment_results = {}\n for policy_name, update_score, downweight_points in experiment_configs:\n policy_f = selection_policy.get_policy_by_name(policy_name)\n if \"deterministic\" in policy_name:\n _rounds = 1\n else:\n _rounds = rounds\n acc = experiments.aug_experiment_rounds(\n clf,\n x_train,\n y_train,\n exp_x_test,\n exp_y_test,\n policy_f,\n aug_scores,\n aug_f,\n aug_kw_args,\n n_aug_sample_points,\n _rounds,\n update_score,\n downweight_points,\n use_loss=use_loss,\n )\n config_name = [policy_name]\n if update_score:\n config_name.append(\"update\")\n if downweight_points:\n config_name.append(\"downweight\")\n config_name = \"_\".join(config_name)\n experiment_results[config_name] = acc\n\n all_results = {\n \"no_aug_no_poison_acc\": no_aug_no_poison_acc,\n \"poisoned_acc\": poisoned_acc,\n \"all_aug_train_poisoned_acc\": all_aug_train_poisoned_acc,\n \"is_SV\": is_SV,\n \"VSV_acc\": VSV_acc,\n \"best_params\": best_params,\n \"initial_aug_scores\": aug_scores,\n \"after_aug_scores\": after_aug_scores,\n \"experiment_results\": experiment_results,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"run_parameters\": run_params,\n \"n_train\": n_train,\n \"rounds\": rounds,\n }\n\n tests_total_time = time.time() - training_end_time\n\n all_results[\"tests_total_runtime\"] = tests_total_time\n\n pprint.pprint(all_results)\n np.savez(results_filename,\n **all_results,\n )\n\n print(\"*\" * 80)\n print(\"Training took {} seconds\".format(training_total_time))\n print(\"All tests took {} seconds\".format(tests_total_time))\n print(\"*\" * 80)\n\n\ndef show_aug_images(x_, x_aug_):\n import matplotlib.pyplot as plt\n f, ax = plt.subplots(len(x_aug_), 2)\n if len(x_) > 1:\n logging.warning(\"x_ has shape '{}' which is greater than\"\n \" length 1\".format(x_.shape))\n x_show = x_[0]\n if x_show.shape[2] < 3:\n x_show = x_show.reshape(x_show.shape[:2])\n ax[0, 0].imshow(x_show,\n cmap=\"gray\")\n else:\n ax[0, 0].imshow(x_show)\n for i in range(len(x_aug_)):\n ax[i, 0].axis(\"off\")\n for i, x_i in enumerate(x_aug_):\n if x_aug_[i].shape[2] < 3:\n ax[i, 1].imshow(x_aug_[i].reshape(x_aug_[i].shape[:2]),\n cmap=\"gray\")\n else:\n ax[i, 1].imshow(x_aug_[i])\n ax[i, 1].axis(\"off\")\n plt.show()\n\n\ndef aug_experiment(clf, x_train, y_train, auged_x_test, auged_y_test,\n aug_iter, train_idxs_scores, aug_f, aug_kw_args,\n n_aug_sample_points, update_LOO=False,\n weight_aug_samples=False, use_loss=False,\n show_aug_images=False,\n stratified_sampling_x_train_ks=None,\n ):\n auged_x_train = np.copy(x_train)\n auged_y_train = np.copy(y_train)\n if weight_aug_samples:\n sample_weight = np.ones(len(x_train))\n else:\n sample_weight = None\n influence_acc = []\n n_aug_sample_points = set(n_aug_sample_points)\n assert len(y_train) == len(x_train)\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(\n list(aug_iter(train_idxs_scores))\n ).flatten()\n already_auged = set()\n while len(already_auged) < len(x_train):\n assert len(train_idxs_scores) == len(x_train)\n next_idxs = [idx for idx in aug_idxs if idx not in already_auged]\n idx = next_idxs[0]\n already_auged.add(idx)\n idx = [idx]\n x_ = x_train[idx]\n y_ = y_train[idx]\n aug_idxs_, (x_aug_, y_aug_) = aug_f(x_, y_, **aug_kw_args)\n if show_aug_images:\n import 
matplotlib.pyplot as plt\n f, ax = plt.subplots(len(x_aug_), 2)\n if len(x_) > 1:\n logging.warning(\"x_ has shape '{}' which is greater than\"\n \" length 1\".format(x_.shape))\n x_show = x_[0]\n if x_show.shape[2] < 3:\n x_show = x_show.reshape(x_show.shape[:2])\n ax[0, 0].imshow(x_show,\n cmap=\"gray\")\n else:\n ax[0, 0].imshow(x_show)\n for i in range(len(x_aug_)):\n ax[i, 0].axis(\"off\")\n for i, x_i in enumerate(x_aug_):\n if x_aug_[i].shape[2] < 3:\n ax[i, 1].imshow(x_aug_[i].reshape(x_aug_[i].shape[:2]),\n cmap=\"gray\")\n else:\n ax[i, 1].imshow(x_aug_[i])\n ax[i, 1].axis(\"off\")\n plt.show()\n auged_x_train = np.concatenate([\n auged_x_train,\n x_aug_,\n ],\n axis=0)\n auged_y_train = np.concatenate([\n auged_y_train,\n y_aug_,\n ],\n axis=0)\n if weight_aug_samples:\n # We downweight all points from the original train point\n rescale_weight = 1.0 / (len(x_aug_) + 1)\n weight_aug_ = np.full(len(x_aug_), rescale_weight)\n sample_weight = np.concatenate([\n sample_weight,\n weight_aug_,\n ],\n axis=0)\n sample_weight[idx] = rescale_weight\n if len(already_auged) in n_aug_sample_points:\n fit_params = {\"logistic_reg__sample_weight\": sample_weight}\n clf.fit(auged_x_train, auged_y_train, **fit_params)\n aug_train_poisoned_acc = clf.score(\n auged_x_test,\n auged_y_test)\n influence_acc.append(aug_train_poisoned_acc)\n if update_LOO:\n if isinstance(clf, sklearn.model_selection.GridSearchCV):\n if use_loss:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n else:\n if use_loss:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n train_idxs_scores = train_idxs_scores[:len(x_train)]\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(\n list(aug_iter(train_idxs_scores))\n ).flatten()\n return influence_acc\n\n\ndef aug_experiment_rounds(clf, x_train, y_train, auged_x_test, auged_y_test,\n aug_iter, LOO_influences,\n aug_f, aug_kw_args, n_aug_sample_points, rounds,\n update_LOO=False,\n weight_aug_samples=False,\n use_loss=False,\n stratified_sampling_x_train_ks=None):\n all_accs = []\n for r in range(rounds):\n acc = aug_experiment(clf,\n x_train,\n y_train,\n auged_x_test,\n auged_y_test,\n aug_iter,\n LOO_influences,\n aug_f,\n aug_kw_args,\n n_aug_sample_points,\n update_LOO,\n weight_aug_samples,\n use_loss,\n False,\n stratified_sampling_x_train_ks,\n )\n all_accs.append(acc)\n return all_accs\n\n\ndef stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n):\n \"\"\"\n Creates an ordering of sampling by using aug_iter in a round-robin fashion\n over all populations.\n \"\"\"\n stratified_aug_idxs_arr = []\n stratified_train_rev_idxs_arr = []\n for k in sorted(np.unique(stratified_sampling_x_train_ks)):\n is_stratified_train = stratified_sampling_x_train_ks == k\n stratified_train_rev_idxs = np.where(is_stratified_train)[0]\n stratified_train_rev_idxs_arr.append(stratified_train_rev_idxs)\n stratified_train_idxs_scores = train_idxs_scores[\n is_stratified_train\n ]\n stratified_aug_idxs = np.array(\n list(aug_iter(stratified_train_idxs_scores))\n ).flatten()\n 
stratified_aug_idxs_arr.append(stratified_aug_idxs)\n aug_idxs = []\n has_elements = True\n strat_aug_idxs_i = 0\n while has_elements:\n for i, strat_idxs in enumerate(stratified_aug_idxs_arr):\n if strat_aug_idxs_i < len(strat_idxs):\n strat_idx = strat_idxs[strat_aug_idxs_i]\n strat_idx = stratified_train_rev_idxs_arr[i][strat_idx]\n aug_idxs.append(strat_idx)\n has_elements = np.any(\n list(map(lambda x: strat_aug_idxs_i < len(x),\n stratified_aug_idxs_arr,\n )\n )\n )\n strat_aug_idxs_i += 1\n assert len(train_idxs_scores) == len(aug_idxs)\n return aug_idxs\n\n\ndef precomputed_aug_experiment(\n clf,\n auged_featurized_x_train,\n auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n aug_iter,\n train_idxs_scores,\n n_aug_sample_points,\n update_scores=False,\n weight_aug_samples=False,\n use_loss=False,\n stratified_sampling_x_train_ks=None,\n):\n \"\"\"\n This is a precomputed version of the aug_experiment.\n Here, we expect training sets to be augmented and featurized up front.\n This function will index into the augmented set (with featurization)\n to get the input that would be fed into the classifier.\n\n @param clf The classifier to use (e.g., logistic regression)\n @param auged_featurized_x_train The augmented and featurized training set.\n @param auged_featurized_y_train The labels of the training set.\n @param auged_featurized_x_train_to_source_idxs A list of idxs corresponding\n to the source of augmented images from the original training set. -1 means\n that the point is an original point.\n @param auged_featurized_x_test The augmented and featurized test set.\n @param auged_featurized_y_test The labels of the test set.\n @param auged_featurized_x_test_to_source_idxs A list of idxs corresponding\n to the source of augmented images from the original test set. -1 means\n that the point is an original point.\n @param aug_iter The policy to use.\n @param train_idxs_scores The scores to use for the policies (e.g.,\n LOO influence or loss).\n @param stratified_sampling_x_train_ks The population type of each train\n sample for stratified sampling. 
Sampling is round robin in numeric order.\n\n @return An list of accuracies on the test set and a list of the points that\n were chosen for augmentation.\n \"\"\"\n influence_acc = []\n aug_iter_idxs = []\n original_mask_train = auged_featurized_x_train_to_source_idxs < 0\n original_x_train = auged_featurized_x_train[original_mask_train]\n original_y_train = auged_featurized_y_train[original_mask_train]\n auged_x_train = np.copy(original_x_train)\n auged_y_train = np.copy(original_y_train)\n n_aug_sample_points = set(n_aug_sample_points)\n if weight_aug_samples:\n sample_weight = np.ones(len(original_x_train))\n else:\n sample_weight = None\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(list(aug_iter(train_idxs_scores))).flatten()\n assert len(np.unique(aug_idxs)) == len(aug_idxs)\n already_auged = set()\n while len(already_auged) < len(original_x_train):\n assert len(train_idxs_scores) == len(original_x_train)\n next_idxs = [idx for idx in aug_idxs if idx not in already_auged]\n idx = next_idxs[0]\n already_auged.add(idx)\n aug_mask = auged_featurized_x_train_to_source_idxs == idx\n x_aug_ = auged_featurized_x_train[aug_mask]\n auged_x_train = np.concatenate(\n [\n auged_x_train,\n x_aug_,\n ],\n axis=0)\n y_aug_ = auged_featurized_y_train[aug_mask]\n auged_y_train = np.concatenate(\n [\n auged_y_train,\n y_aug_,\n ],\n axis=0)\n if weight_aug_samples:\n # We downweight all points from the original train point\n rescale_weight = 1.0 / (len(x_aug_) + 1)\n weight_aug_ = np.full(len(x_aug_), rescale_weight)\n sample_weight = np.concatenate([\n sample_weight,\n weight_aug_,\n ],\n axis=0)\n sample_weight[idx] = rescale_weight\n if len(already_auged) in n_aug_sample_points:\n fit_params = {\"logistic_reg__sample_weight\": sample_weight}\n clf.fit(auged_x_train, auged_y_train, **fit_params)\n aug_train_poisoned_acc = clf.score(\n auged_featurized_x_test,\n auged_featurized_y_test)\n influence_acc.append(aug_train_poisoned_acc)\n aug_iter_idxs.append(idx)\n if update_scores:\n if isinstance(clf, sklearn.model_selection.GridSearchCV):\n if use_loss:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n else:\n if use_loss:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n train_idxs_scores = train_idxs_scores[:len(original_x_train)]\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(\n list(aug_iter(train_idxs_scores))\n ).flatten()\n return influence_acc, aug_iter_idxs\n\n\ndef precomputed_aug_experiment_rounds(\n clf,\n auged_featurized_x_train,\n auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n aug_iter,\n train_idxs_scores,\n n_aug_sample_points,\n rounds,\n update_scores=False,\n weight_aug_samples=False,\n use_loss=False,\n stratified_sampling_x_train_ks=None,\n):\n all_accs = []\n all_idxs = []\n for r in range(rounds):\n acc, idxs = precomputed_aug_experiment(\n clf,\n auged_featurized_x_train,\n 
auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n aug_iter,\n train_idxs_scores,\n n_aug_sample_points,\n update_scores,\n weight_aug_samples,\n use_loss,\n stratified_sampling_x_train_ks,\n )\n all_accs.append(acc)\n all_idxs.append(idxs)\n return all_accs, all_idxs\n\n\ndef run_test_clustered(\n classes,\n rounds,\n n_aug_sample_points,\n n_train,\n n_jobs,\n cv,\n use_GPU,\n batch_size,\n dataset,\n aug_transformation,\n aug_kw_args,\n logistic_reg__C,\n CNN_extractor_max_iter,\n use_loss,\n experiment_configs,\n results_filename,\n n_clusters,\n):\n\n run_params = {\n \"classes\": classes,\n \"rounds\": rounds,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"n_train\": n_train,\n \"n_jobs\": n_jobs,\n \"cv\": cv,\n \"use_GPU\": use_GPU,\n \"batch_size\": batch_size,\n \"dataset\": dataset.name,\n \"aug_transformation\": aug_transformation.name,\n \"aug_kw_args\": aug_kw_args,\n \"logistic_reg__C\": logistic_reg__C,\n \"CNN_extractor_max_iter\": CNN_extractor_max_iter,\n \"use_loss\": use_loss,\n \"experiment_configs\": experiment_configs,\n \"results_filename\": results_filename,\n \"n_clusters\": n_clusters,\n }\n\n pprint.pprint(run_params)\n\n assert n_aug_sample_points\n\n (x_train, y_train), (x_test, y_test) = experiments_util.prepare_dataset(\n dataset,\n classes,\n n_train,\n )\n print(\"Train class breakdown: {}\".format(\n np.unique(y_train, return_counts=True))\n )\n print(\"Test class breakdown: {}\".format(\n np.unique(y_test, return_counts=True))\n )\n\n aug_f = augmentations.get_transformation(aug_transformation)\n (orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_idxs_train) = \\\n experiments_util.poison_dataset(x_train,\n y_train,\n aug_f,\n aug_kw_args)\n (orig_and_auged_x_test,\n orig_and_auged_y_test,\n orig_and_auged_idxs_test) = \\\n experiments_util.poison_dataset(x_test,\n y_test,\n aug_f,\n aug_kw_args)\n\n print(\"x_train shape: {}\".format(x_train.shape))\n print(\"orig_and_auged_x_train shape: {}\".format(\n orig_and_auged_x_train.shape))\n\n clf = featurized_classifiers.build_featurized_LeNet_logistic_clf(\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n logistic_reg__C,\n cv,\n n_jobs,\n )\n\n svm__C = [0.01, 0.1, 1, 10, 100]\n svm_cv = 4\n is_SV = experiments_util.get_SV_featurized_LeNet(\n x_train,\n y_train,\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n svm__C,\n svm_cv,\n n_jobs\n )\n print(\"Number of support vectors is: {}\".format(np.sum(is_SV)))\n SV_idxs = np.where(is_SV)[0]\n orig_and_SV_idxs = np.concatenate([SV_idxs, [-1]])\n print(\"orig_and_SV_idxs: {}\".format(orig_and_SV_idxs))\n print(\"orig_and_SV_idxs shape: {}\".format(orig_and_SV_idxs.shape))\n SV_orig_and_auged_mask = np.isin(orig_and_auged_idxs_train,\n orig_and_SV_idxs)\n SV_x_train = orig_and_auged_x_train[SV_orig_and_auged_mask]\n SV_y_train = orig_and_auged_y_train[SV_orig_and_auged_mask]\n clf.fit(SV_x_train, SV_y_train)\n VSV_acc = clf.score(orig_and_auged_x_test, orig_and_auged_y_test)\n print(\"VSV acc: {}\".format(VSV_acc))\n\n (no_aug_no_poison_acc,\n poisoned_acc,\n all_aug_train_poisoned_acc,\n aug_scores,\n after_aug_scores,\n best_params,\n training_total_time) = experiments_util.train_and_score_clf(\n clf,\n x_train,\n y_train,\n x_test,\n y_test,\n orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n use_loss,\n cv,\n )\n\n featurizer = sklearn.pipeline.Pipeline([\n 
(\"image_rescaler\", (clf.named_steps[\"image_rescaler\"])),\n (\"feature_map\", clf.named_steps[\"feature_map\"]),\n ])\n clustering_clf = sklearn.cluster.KMeans(n_clusters=n_clusters)\n featurized_x_train = featurizer.transform(x_train)\n print(\"featurized_x_train\", featurized_x_train.shape)\n train_cluster_IDs = clustering_clf.fit_predict(featurized_x_train)\n # test_cluster_IDs = clustering_clf.predict(featurized_x_test)\n\n training_end_time = time.time()\n\n experiment_results = {}\n for policy_name, update_score, downweight_points in experiment_configs:\n policy_f = selection_policy.get_policy_by_name(policy_name)\n if \"deterministic\" in policy_name:\n _rounds = 1\n else:\n _rounds = rounds\n acc = experiments.aug_experiment_rounds(\n clf,\n x_train,\n y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n policy_f,\n aug_scores,\n aug_f,\n aug_kw_args,\n n_aug_sample_points,\n _rounds,\n update_score,\n downweight_points,\n use_loss=use_loss,\n stratified_sampling_x_train_ks=train_cluster_IDs,\n )\n config_name = [policy_name]\n if update_score:\n config_name.append(\"update\")\n if downweight_points:\n config_name.append(\"downweight\")\n config_name = \"_\".join(config_name)\n experiment_results[config_name] = acc\n\n all_results = {\n \"no_aug_no_poison_acc\": no_aug_no_poison_acc,\n \"poisoned_acc\": poisoned_acc,\n \"all_aug_train_poisoned_acc\": all_aug_train_poisoned_acc,\n \"is_SV\": is_SV,\n \"VSV_acc\": VSV_acc,\n \"best_params\": best_params,\n \"initial_aug_scores\": aug_scores,\n \"after_aug_scores\": after_aug_scores,\n \"experiment_results\": experiment_results,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"run_parameters\": run_params,\n \"n_train\": n_train,\n \"rounds\": rounds,\n }\n\n tests_total_time = time.time() - training_end_time\n\n all_results[\"tests_total_runtime\"] = tests_total_time\n\n pprint.pprint(all_results)\n np.savez(results_filename,\n **all_results,\n )\n\n print(\"*\" * 80)\n print(\"Training took {} seconds\".format(training_total_time))\n print(\"All tests took {} seconds\".format(tests_total_time))\n print(\"*\" * 80)\n\n\ndef run_test_margin(\n classes,\n rounds,\n n_aug_sample_points,\n n_train,\n n_jobs,\n cv,\n use_GPU,\n batch_size,\n dataset,\n aug_transformation,\n aug_kw_args,\n logistic_reg__C,\n CNN_extractor_max_iter,\n use_loss,\n experiment_configs,\n results_filename,\n ):\n \"\"\"\n Uses SVM margin for score\n \"\"\"\n\n run_params = {\n \"classes\": classes,\n \"rounds\": rounds,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"n_train\": n_train,\n \"n_jobs\": n_jobs,\n \"cv\": cv,\n \"use_GPU\": use_GPU,\n \"batch_size\": batch_size,\n \"dataset\": dataset.name,\n \"aug_transformation\": aug_transformation.name,\n \"aug_kw_args\": aug_kw_args,\n \"logistic_reg__C\": logistic_reg__C,\n \"CNN_extractor_max_iter\": CNN_extractor_max_iter,\n \"use_loss\": use_loss,\n \"experiment_configs\": experiment_configs,\n \"results_filename\": results_filename,\n }\n\n pprint.pprint(run_params)\n\n assert n_aug_sample_points\n\n (x_train, y_train), (x_test, y_test) = experiments_util.prepare_dataset(\n dataset,\n classes,\n n_train,\n )\n print(\"Train class breakdown: {}\".format(\n np.unique(y_train, return_counts=True))\n )\n print(\"Test class breakdown: {}\".format(\n np.unique(y_test, return_counts=True))\n )\n\n aug_f = augmentations.get_transformation(aug_transformation)\n (orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_idxs_train) = \\\n experiments_util.poison_dataset(x_train,\n 
y_train,\n aug_f,\n aug_kw_args)\n (orig_and_auged_x_test,\n orig_and_auged_y_test,\n orig_and_auged_idxs_test) = \\\n experiments_util.poison_dataset(x_test,\n y_test,\n aug_f,\n aug_kw_args)\n print(\"x_train shape: {}\".format(x_train.shape))\n print(\"orig_and_auged_x_train shape: {}\".format(\n orig_and_auged_x_train.shape\n ))\n\n clf = featurized_classifiers.build_featurized_LeNet_logistic_clf(\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n logistic_reg__C,\n cv,\n n_jobs,\n )\n\n svm__C = [0.01, 0.1, 1, 10, 100]\n svm_cv = 4\n is_SV = experiments_util.get_SV_featurized_LeNet(\n x_train,\n y_train,\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n svm__C,\n svm_cv,\n n_jobs\n )\n SVM_margins = experiments_util.get_SVM_margins_featurized_LeNet(\n x_train,\n y_train,\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n svm__C,\n svm_cv,\n n_jobs\n )\n print(\"SVM margins: {}\".format(SVM_margins))\n print(\"Number of support vectors is: {}\".format(np.sum(is_SV)))\n SV_idxs = np.where(is_SV)[0]\n orig_and_SV_idxs = np.concatenate([SV_idxs, [-1]])\n print(\"orig_and_SV_idxs: {}\".format(orig_and_SV_idxs))\n print(\"orig_and_SV_idxs shape: {}\".format(orig_and_SV_idxs.shape))\n SV_orig_and_auged_mask = np.isin(orig_and_auged_idxs_train,\n orig_and_SV_idxs)\n SV_x_train = orig_and_auged_x_train[SV_orig_and_auged_mask]\n SV_y_train = orig_and_auged_y_train[SV_orig_and_auged_mask]\n clf.fit(SV_x_train, SV_y_train)\n VSV_acc = clf.score(orig_and_auged_x_test, orig_and_auged_y_test)\n print(\"VSV acc: {}\".format(VSV_acc))\n\n (no_aug_no_poison_acc,\n poisoned_acc,\n all_aug_train_poisoned_acc,\n aug_scores,\n after_aug_scores,\n best_params,\n training_total_time) = experiments_util.train_and_score_clf(\n clf,\n x_train,\n y_train,\n x_test,\n y_test,\n orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n use_loss,\n cv,\n )\n training_end_time = time.time()\n\n # Here we use margins\n aug_scores = np.abs(SVM_margins)\n print(\"Aug scores: {}\".format(aug_scores))\n experiment_results = {}\n for policy_name, update_score, downweight_points in experiment_configs:\n policy_f = selection_policy.get_policy_by_name(policy_name)\n if \"deterministic\" in policy_name:\n _rounds = 1\n else:\n _rounds = rounds\n acc = experiments.aug_experiment_rounds(\n clf,\n x_train,\n y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n policy_f,\n aug_scores,\n aug_f,\n aug_kw_args,\n n_aug_sample_points,\n _rounds,\n update_score,\n downweight_points,\n use_loss=use_loss,\n )\n config_name = [policy_name]\n if update_score:\n config_name.append(\"update\")\n if downweight_points:\n config_name.append(\"downweight\")\n config_name = \"_\".join(config_name)\n experiment_results[config_name] = acc\n\n all_results = {\n \"no_aug_no_poison_acc\": no_aug_no_poison_acc,\n \"poisoned_acc\": poisoned_acc,\n \"all_aug_train_poisoned_acc\": all_aug_train_poisoned_acc,\n \"is_SV\": is_SV,\n \"VSV_acc\": VSV_acc,\n \"best_params\": best_params,\n \"initial_aug_scores\": aug_scores,\n \"after_aug_scores\": after_aug_scores,\n \"experiment_results\": experiment_results,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"run_parameters\": run_params,\n \"n_train\": n_train,\n \"rounds\": rounds,\n }\n\n tests_total_time = time.time() - training_end_time\n\n all_results[\"tests_total_runtime\"] = tests_total_time\n\n pprint.pprint(all_results)\n np.savez(results_filename,\n **all_results,\n )\n\n print(\"*\" * 80)\n print(\"Training took {} 
seconds\".format(training_total_time))\n print(\"All tests took {} seconds\".format(tests_total_time))\n print(\"*\" * 80)\n\n\ndef run_test_clustered_sweep(\n classes,\n rounds,\n n_aug_sample_points,\n n_train,\n n_jobs,\n cv,\n use_GPU,\n batch_size,\n dataset,\n aug_transformation,\n aug_kw_args,\n logistic_reg__C,\n CNN_extractor_max_iter,\n use_loss,\n experiment_configs,\n results_filename,\n ):\n \"\"\"\n Gets intertia and silhouette score for clusters\n \"\"\"\n\n run_params = {\n \"classes\": classes,\n \"rounds\": rounds,\n \"n_aug_sample_points\": n_aug_sample_points,\n \"n_train\": n_train,\n \"n_jobs\": n_jobs,\n \"cv\": cv,\n \"use_GPU\": use_GPU,\n \"batch_size\": batch_size,\n \"dataset\": dataset.name,\n \"aug_transformation\": aug_transformation.name,\n \"aug_kw_args\": aug_kw_args,\n \"logistic_reg__C\": logistic_reg__C,\n \"CNN_extractor_max_iter\": CNN_extractor_max_iter,\n \"use_loss\": use_loss,\n \"experiment_configs\": experiment_configs,\n \"results_filename\": results_filename,\n }\n\n pprint.pprint(run_params)\n\n assert n_aug_sample_points\n\n (x_train, y_train), (x_test, y_test) = experiments_util.prepare_dataset(\n dataset,\n classes,\n n_train,\n )\n print(\"Train class breakdown: {}\".format(\n np.unique(y_train, return_counts=True))\n )\n print(\"Test class breakdown: {}\".format(\n np.unique(y_test, return_counts=True))\n )\n\n aug_f = augmentations.get_transformation(aug_transformation)\n (orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_idxs_train) = \\\n experiments_util.poison_dataset(x_train,\n y_train,\n aug_f,\n aug_kw_args)\n (orig_and_auged_x_test,\n orig_and_auged_y_test,\n orig_and_auged_idxs_test) = \\\n experiments_util.poison_dataset(x_test,\n y_test,\n aug_f,\n aug_kw_args)\n print(\"x_train: {}\".format(x_train.shape))\n print(\"orig_and_auged_x_train: {}\".format(orig_and_auged_x_train.shape))\n\n clf = featurized_classifiers.build_featurized_LeNet_logistic_clf(\n CNN_extractor_max_iter,\n use_GPU,\n batch_size,\n logistic_reg__C,\n cv,\n n_jobs,\n )\n\n (no_aug_no_poison_acc,\n poisoned_acc,\n all_aug_train_poisoned_acc,\n aug_scores,\n after_aug_scores,\n best_params,\n training_total_time) = experiments_util.train_and_score_clf(\n clf,\n x_train,\n y_train,\n x_test,\n y_test,\n orig_and_auged_x_train,\n orig_and_auged_y_train,\n orig_and_auged_x_test,\n orig_and_auged_y_test,\n use_loss,\n cv,\n )\n\n featurizer = sklearn.pipeline.Pipeline([\n (\"image_rescaler\", (clf.named_steps[\"image_rescaler\"])),\n (\"feature_map\", clf.named_steps[\"feature_map\"]),\n ])\n featurized_x_train = featurizer.transform(x_train)\n print(\"featurized_x_train: {}\".format(featurized_x_train.shape))\n featurized_x_test = featurizer.transform(x_test)\n\n all_results = collections.defaultdict(list)\n n_clusters_arr = np.unique(\n np.clip(\n np.around(\n np.geomspace(2,\n len(featurized_x_train) - 1,\n num=50)\n ).astype(int),\n 2,\n len(featurized_x_train) - 1\n ),\n )\n print(\"n_clusters_arr: {}\".format(n_clusters_arr))\n assert np.all(n_clusters_arr < len(featurized_x_train))\n\n for n_clusters in n_clusters_arr:\n print(\"n_clusters: {}\".format(n_clusters))\n clustering_clf = sklearn.cluster.KMeans(n_clusters=n_clusters)\n train_cluster_IDs = clustering_clf.fit_predict(featurized_x_train)\n test_cluster_IDs = clustering_clf.predict(featurized_x_test)\n train_silhouette_score = sklearn.metrics.silhouette_score(\n featurized_x_train,\n train_cluster_IDs,\n metric=\"euclidean\",\n )\n train_inertia = clustering_clf.inertia_\n 
test_silhouette_score = sklearn.metrics.silhouette_score(\n featurized_x_test,\n test_cluster_IDs,\n metric=\"euclidean\",\n )\n all_results[\"n_clusters\"].append(n_clusters)\n all_results[\"train_inertia\"].append(train_inertia)\n all_results[\"train_silhouette_score\"].append(train_silhouette_score)\n all_results[\"test_silhouette_score\"].append(test_silhouette_score)\n pprint.pprint(all_results)\n\n return all_results\n\n\ndef precomputed_aug_experiment_rounds_dpp(\n clf,\n auged_featurized_x_train,\n auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n train_idxs_scores,\n n_aug_sample_points,\n rounds,\n weight_by_scores,\n normalize_features,\n phi_scale,\n):\n all_accs = []\n all_idxs = []\n for r in range(rounds):\n acc, idxs = precomputed_aug_experiment_dpp(\n clf,\n auged_featurized_x_train,\n auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n train_idxs_scores,\n n_aug_sample_points,\n weight_by_scores,\n normalize_features,\n phi_scale,\n )\n all_accs.append(acc)\n all_idxs.append(idxs)\n return all_accs, all_idxs\n\ndef precomputed_aug_experiment_dpp(\n clf,\n auged_featurized_x_train,\n auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n train_idxs_scores,\n n_aug_sample_points,\n weight_by_scores,\n normalize_features,\n phi_scale,\n weight_f=\"dot_prod\",\n):\n \"\"\"\n DPP version\n normalize_features If true, scales features to [0,1]\n phi_scale The scale to apply to the similarity matrix's phis\n weight_f The type of weighting function to use in the DPP similarity matrix\n \"\"\"\n influence_acc = []\n aug_iter_idxs = []\n original_mask_train = auged_featurized_x_train_to_source_idxs < 0\n original_x_train = auged_featurized_x_train[original_mask_train]\n original_y_train = auged_featurized_y_train[original_mask_train]\n\n # DPP part\n if normalize_features:\n phi = sklearn.preprocessing.normalize(original_x_train,\n norm=\"l2\",\n axis=1)\n else:\n phi = original_x_train\n phi *= phi_scale\n\n if weight_by_scores:\n # Diversity and quality\n if weight_f == \"dot_prod\":\n weighted_features = phi * train_idxs_scores[:, np.newaxis]\n L = train_idxs_scores[:, np.newaxis] * phi.dot(phi.T) * train_idxs_scores[:, np.newaxis].T\n # We add some diagonal component to ensure PSD matrix\n L += np.diag(np.full(len(L), 1e-3))\n elif weight_f == \"gauss_dist\":\n L = sklearn.metrics.pairwise.euclidean_distances(\n phi,\n phi,\n )\n L = np.exp(-L)\n L += np.diag(np.abs(train_idxs_scores))\n else:\n raise ValueError(\"Unknown weight: {}\".format(weight_f))\n else:\n # Just diversity\n if weight_f == \"dot_prod\":\n L = phi.dot(phi.T)\n # We add some diagonal component to ensure PSD matrix\n L += np.diag(np.full(len(L), 1e-3))\n elif weight_f == \"gauss_dist\":\n L = sklearn.metrics.pairwise.euclidean_distances(\n phi,\n phi,\n )\n L = np.exp(-L)\n print(\"L: {}\".format(L))\n print(\"L shape: {}\".format(L.shape))\n assert len(L) == len(original_x_train)\n\n for k in n_aug_sample_points:\n dpp_idxs = sample_dpp.oct_sample_k_dpp(\n L,\n k=k,\n one_hot=False)\n\n print(\"DPP idxs: {}\".format(dpp_idxs))\n orig_and_dpp_idxs = np.concatenate([dpp_idxs, [-1]])\n print(\"orig_and_dpp_idxs: {}\".format(orig_and_dpp_idxs))\n print(\"orig_and_dpp_idxs 
shape: {}\".format(orig_and_dpp_idxs.shape))\n dpp_orig_and_auged_mask = np.isin(\n auged_featurized_x_train_to_source_idxs,\n orig_and_dpp_idxs)\n print(\"dpp_orig_and_auged_mask count: {}/{}\".format(\n np.sum(dpp_orig_and_auged_mask),\n len(dpp_orig_and_auged_mask),\n ))\n dpp_x_train = auged_featurized_x_train[dpp_orig_and_auged_mask]\n dpp_y_train = auged_featurized_y_train[dpp_orig_and_auged_mask]\n clf.fit(dpp_x_train, dpp_y_train)\n\n aug_train_poisoned_acc = clf.score(\n auged_featurized_x_test,\n auged_featurized_y_test)\n influence_acc.append(aug_train_poisoned_acc)\n aug_iter_idxs.append(dpp_idxs) # Batch append\n\n return influence_acc, aug_iter_idxs\n" ]
[ [ "numpy.savez", "numpy.abs", "numpy.unique", "numpy.concatenate", "numpy.copy", "numpy.exp", "matplotlib.pyplot.show", "numpy.where", "numpy.sum", "numpy.isin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
parksang21/ARPL
[ "d2e4af489d789a277bea3f0b4f31a5072239e623" ]
[ "models/resnet.py" ]
[ "'''ResNet in PyTorch.\nBasicBlock and Bottleneck module is from the original ResNet paper:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\nPreActBlock and PreActBottleneck module is from the later paper:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv:1603.05027\nOriginal code is from https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py\n'''\nimport os\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(in_planes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass PreActBlock(nn.Module):\n '''Pre-activation version of the BasicBlock.'''\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = conv3x3(in_planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out += shortcut\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass PreActBottleneck(nn.Module):\n '''Pre-activation version of the original Bottleneck module.'''\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = 
nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out = self.conv3(F.relu(self.bn3(out)))\n out += shortcut\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = conv3x3(3,64)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n \n def forward(self, x, return_feature=False):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n # out = F.avg_pool2d(out, 4)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1)\n y = self.linear(out)\n if return_feature:\n return out, y\n else:\n return y\n \n # function to extact the multiple features\n def feature_list(self, x):\n out_list = []\n out = F.relu(self.bn1(self.conv1(x)))\n out_list.append(out)\n out = self.layer1(out)\n out_list.append(out)\n out = self.layer2(out)\n out_list.append(out)\n out = self.layer3(out)\n out_list.append(out)\n out = self.layer4(out)\n out_list.append(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n y = self.linear(out)\n return y, out_list\n \n # function to extact a specific feature\n def intermediate_forward(self, x, layer_index):\n out = F.relu(self.bn1(self.conv1(x)))\n if layer_index == 1:\n out = self.layer1(out)\n elif layer_index == 2:\n out = self.layer1(out)\n out = self.layer2(out)\n elif layer_index == 3:\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n elif layer_index == 4:\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out) \n return out\n\n # function to extact the penultimate features\n def penultimate_forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n penultimate = self.layer4(out)\n out = F.avg_pool2d(penultimate, 4)\n out = out.view(out.size(0), -1)\n y = self.linear(out)\n return y, penultimate\n \ndef ResNet18(num_c):\n return ResNet(PreActBlock, [2,2,2,2], num_classes=num_c)\n\ndef ResNet34(num_c):\n return ResNet(BasicBlock, [3,4,6,3], num_classes=num_c)\n\ndef ResNet50():\n return ResNet(Bottleneck, [3,4,6,3])\n\ndef ResNet101():\n return ResNet(Bottleneck, 
[3,4,23,3])\n\ndef ResNet152():\n    return ResNet(Bottleneck, [3,8,36,3])\n\n\ndef test():\n    net = ResNet18(num_c=10)  # ResNet18 requires a class count\n    y = net(Variable(torch.randn(1,3,32,32)))\n    print(y.size())\n\n# test()\n" ]
[ [ "torch.nn.Sequential", "torch.randn", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sergiolevin/statsmodels
[ "13a901edfc0d4ab05e09438749df2487af04d77e" ]
[ "statsmodels/genmod/generalized_linear_model.py" ]
[ "\"\"\"\nGeneralized linear models currently supports estimation using the one-parameter\nexponential families\n\nReferences\n----------\nGill, Jeff. 2000. Generalized Linear Models: A Unified Approach.\n SAGE QASS Series.\n\nGreen, PJ. 1984. \"Iteratively reweighted least squares for maximum\n likelihood estimation, and some robust and resistant alternatives.\"\n Journal of the Royal Statistical Society, Series B, 46, 149-192.\n\nHardin, J.W. and Hilbe, J.M. 2007. \"Generalized Linear Models and\n Extensions.\" 2nd ed. Stata Press, College Station, TX.\n\nMcCullagh, P. and Nelder, J.A. 1989. \"Generalized Linear Models.\" 2nd ed.\n Chapman & Hall, Boca Rotan.\n\"\"\"\nimport numpy as np\n\nfrom . import families\n\nfrom statsmodels.tools.decorators import (cache_readonly,\n cached_data, cached_value)\nfrom statsmodels.tools.validation import float_like\nfrom statsmodels.compat.pandas import Appender\n\nimport statsmodels.base.model as base\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\nimport statsmodels.regression._tools as reg_tools\nfrom warnings import warn\n\nfrom statsmodels.graphics._regressionplots_doc import (\n _plot_added_variable_doc,\n _plot_partial_residuals_doc,\n _plot_ceres_residuals_doc)\n\n# need import in module instead of lazily to copy `__doc__`\nfrom . import _prediction as pred\nfrom statsmodels.genmod._prediction import PredictionResults\n\nfrom statsmodels.tools.sm_exceptions import (PerfectSeparationError,\n DomainWarning,\n HessianInversionWarning)\n\nfrom numpy.linalg.linalg import LinAlgError\n\n__all__ = ['GLM', 'PredictionResults']\n\n\ndef _check_convergence(criterion, iteration, atol, rtol):\n return np.allclose(criterion[iteration], criterion[iteration + 1],\n atol=atol, rtol=rtol)\n\n\nclass GLM(base.LikelihoodModel):\n __doc__ = \"\"\"\n Generalized Linear Models\n\n GLM inherits from statsmodels.base.model.LikelihoodModel\n\n Parameters\n ----------\n endog : array_like\n 1d array of endogenous response variable. This array can be 1d or 2d.\n Binomial family models accept a 2d array with two columns. If\n supplied, each observation is expected to be [success, failure].\n exog : array_like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user (models specified using a formula\n include an intercept by default). See `statsmodels.tools.add_constant`.\n family : family class instance\n The default is Gaussian. To specify the binomial distribution\n family = sm.family.Binomial()\n Each family can take a link instance as an argument. See\n statsmodels.family.family for more information.\n offset : array_like or None\n An offset to be included in the model. If provided, must be\n an array whose length is the number of rows in exog.\n exposure : array_like or None\n Log(exposure) will be added to the linear prediction in the model.\n Exposure is only valid if the log link is used. If provided, it must be\n an array with the same length as endog.\n freq_weights : array_like\n 1d array of frequency weights. The default is None. If None is selected\n or a blank value, then the algorithm will replace with an array of 1's\n with length equal to the endog.\n WARNING: Using weights is not verified yet for all possible options\n and results, see Notes.\n var_weights : array_like\n 1d array of variance (analytic) weights. The default is None. 
If None\n is selected or a blank value, then the algorithm will replace with an\n array of 1's with length equal to the endog.\n WARNING: Using weights is not verified yet for all possible options\n and results, see Notes.\n %(extra_params)s\n\n Attributes\n ----------\n df_model : float\n Model degrees of freedom is equal to p - 1, where p is the number\n of regressors. Note that the intercept is not reported as a\n degree of freedom.\n df_resid : float\n Residual degrees of freedom is equal to the number of observation n\n minus the number of regressors p.\n endog : ndarray\n See Notes. Note that `endog` is a reference to the data so that if\n data is already an array and it is changed, then `endog` changes\n as well.\n exposure : array_like\n Include ln(exposure) in model with coefficient constrained to 1. Can\n only be used if the link is the logarithm function.\n exog : ndarray\n See Notes. Note that `exog` is a reference to the data so that if\n data is already an array and it is changed, then `exog` changes\n as well.\n freq_weights : ndarray\n See Notes. Note that `freq_weights` is a reference to the data so that\n if data is already an array and it is changed, then `freq_weights`\n changes as well.\n var_weights : ndarray\n See Notes. Note that `var_weights` is a reference to the data so that\n if data is already an array and it is changed, then `var_weights`\n changes as well.\n iteration : int\n The number of iterations that fit has run. Initialized at 0.\n family : family class instance\n The distribution family of the model. Can be any family in\n statsmodels.families. Default is Gaussian.\n mu : ndarray\n The mean response of the transformed variable. `mu` is the value of\n the inverse of the link function at lin_pred, where lin_pred is the\n linear predicted value of the WLS fit of the transformed variable.\n `mu` is only available after fit is called. See\n statsmodels.families.family.fitted of the distribution family for more\n information.\n n_trials : ndarray\n See Notes. Note that `n_trials` is a reference to the data so that if\n data is already an array and it is changed, then `n_trials` changes\n as well. `n_trials` is the number of binomial trials and only available\n with that distribution. See statsmodels.families.Binomial for more\n information.\n normalized_cov_params : ndarray\n The p x p normalized covariance of the design / exogenous data.\n This is approximately equal to (X.T X)^(-1)\n offset : array_like\n Include offset in model with coefficient constrained to 1.\n scale : float\n The estimate of the scale / dispersion of the model fit. Only\n available after fit is called. See GLM.fit and GLM.estimate_scale\n for more information.\n scaletype : str\n The scaling used for fitting the model. This is only available after\n fit is called. The default is None. See GLM.fit for more information.\n weights : ndarray\n The value of the weights after the last iteration of fit. Only\n available after fit is called. See statsmodels.families.family for\n the specific distribution weighting functions.\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.scotland.load(as_pandas=False)\n >>> data.exog = sm.add_constant(data.exog)\n\n Instantiate a gamma family model with the default link function.\n\n >>> gamma_model = sm.GLM(data.endog, data.exog,\n ... 
family=sm.families.Gamma())\n\n >>> gamma_results = gamma_model.fit()\n >>> gamma_results.params\n array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,\n -0.00000015, -0.00051868, -0.00000243])\n >>> gamma_results.scale\n 0.0035842831734919055\n >>> gamma_results.deviance\n 0.087388516416999198\n >>> gamma_results.pearson_chi2\n 0.086022796163805704\n >>> gamma_results.llf\n -83.017202161073527\n\n See Also\n --------\n statsmodels.genmod.families.family.Family\n :ref:`families`\n :ref:`links`\n\n Notes\n -----\n Only the following combinations make sense for family and link:\n\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n Family ident log logit probit cloglog pow opow nbinom loglog logc\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n Gaussian x x x x x x x x x\n inv Gaussian x x x\n binomial x x x x x x x x x\n Poisson x x x\n neg binomial x x x x\n gamma x x x\n Tweedie x x x\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n\n Not all of these link functions are currently available.\n\n Endog and exog are references so that if the data they refer to are already\n arrays and these arrays are changed, endog and exog will change.\n\n statsmodels supports two separate definitions of weights: frequency weights\n and variance weights.\n\n Frequency weights produce the same results as repeating observations by the\n frequencies (if those are integers). Frequency weights will keep the number\n of observations consistent, but the degrees of freedom will change to\n reflect the new weights.\n\n Variance weights (referred to in other packages as analytic weights) are\n used when ``endog`` represents an an average or mean. This relies on the\n assumption that that the inverse variance scales proportionally to the\n weight--an observation that is deemed more credible should have less\n variance and therefore have more weight. For the ``Poisson`` family--which\n assumes that occurrences scale proportionally with time--a natural practice\n would be to use the amount of time as the variance weight and set ``endog``\n to be a rate (occurrences per period of time). Similarly, using a\n compound Poisson family, namely ``Tweedie``, makes a similar assumption\n about the rate (or frequency) of occurrences having variance proportional to\n time.\n\n Both frequency and variance weights are verified for all basic results with\n nonrobust or heteroscedasticity robust ``cov_type``. 
Other robust\n covariance types have not yet been verified, and at least the small sample\n correction is currently not based on the correct total frequency count.\n\n Currently, all residuals are not weighted by frequency, although they may\n incorporate ``n_trials`` for ``Binomial`` and ``var_weights``\n\n +---------------+----------------------------------+\n | Residual Type | Applicable weights |\n +===============+==================================+\n | Anscombe | ``var_weights`` |\n +---------------+----------------------------------+\n | Deviance | ``var_weights`` |\n +---------------+----------------------------------+\n | Pearson | ``var_weights`` and ``n_trials`` |\n +---------------+----------------------------------+\n | Reponse | ``n_trials`` |\n +---------------+----------------------------------+\n | Working | ``n_trials`` |\n +---------------+----------------------------------+\n\n WARNING: Loglikelihood and deviance are not valid in models where\n scale is equal to 1 (i.e., ``Binomial``, ``NegativeBinomial``, and\n ``Poisson``). If variance weights are specified, then results such as\n ``loglike`` and ``deviance`` are based on a quasi-likelihood\n interpretation. The loglikelihood is not correctly specified in this case,\n and statistics based on it, such AIC or likelihood ratio tests, are not\n appropriate.\n \"\"\" % {'extra_params': base._missing_param_doc}\n # Maximum number of endogenous variables when using a formula\n _formula_max_endog = 2\n\n def __init__(self, endog, exog, family=None, offset=None,\n exposure=None, freq_weights=None, var_weights=None,\n missing='none', **kwargs):\n\n if (family is not None) and not isinstance(family.link,\n tuple(family.safe_links)):\n\n warn((\"The %s link function does not respect the domain \"\n \"of the %s family.\") %\n (family.link.__class__.__name__,\n family.__class__.__name__), DomainWarning)\n\n if exposure is not None:\n exposure = np.log(exposure)\n if offset is not None: # this should probably be done upstream\n offset = np.asarray(offset)\n\n if freq_weights is not None:\n freq_weights = np.asarray(freq_weights)\n if var_weights is not None:\n var_weights = np.asarray(var_weights)\n\n self.freq_weights = freq_weights\n self.var_weights = var_weights\n\n super(GLM, self).__init__(endog, exog, missing=missing,\n offset=offset, exposure=exposure,\n freq_weights=freq_weights,\n var_weights=var_weights, **kwargs)\n self._check_inputs(family, self.offset, self.exposure, self.endog,\n self.freq_weights, self.var_weights)\n if offset is None:\n delattr(self, 'offset')\n if exposure is None:\n delattr(self, 'exposure')\n\n self.nobs = self.endog.shape[0]\n\n # things to remove_data\n self._data_attr.extend(['weights', 'mu', 'freq_weights',\n 'var_weights', 'iweights', '_offset_exposure',\n 'n_trials'])\n # register kwds for __init__, offset and exposure are added by super\n self._init_keys.append('family')\n\n self._setup_binomial()\n # internal usage for recreating a model\n if 'n_trials' in kwargs:\n self.n_trials = kwargs['n_trials']\n\n # Construct a combined offset/exposure term. 
Note that\n # exposure has already been logged if present.\n offset_exposure = 0.\n if hasattr(self, 'offset'):\n offset_exposure = self.offset\n if hasattr(self, 'exposure'):\n offset_exposure = offset_exposure + self.exposure\n self._offset_exposure = offset_exposure\n\n self.scaletype = None\n\n def initialize(self):\n \"\"\"\n Initialize a generalized linear model.\n \"\"\"\n self.df_model = np.linalg.matrix_rank(self.exog) - 1\n\n if (self.freq_weights is not None) and \\\n (self.freq_weights.shape[0] == self.endog.shape[0]):\n self.wnobs = self.freq_weights.sum()\n self.df_resid = self.wnobs - self.df_model - 1\n else:\n self.wnobs = self.exog.shape[0]\n self.df_resid = self.exog.shape[0] - self.df_model - 1\n\n def _check_inputs(self, family, offset, exposure, endog, freq_weights,\n var_weights):\n\n # Default family is Gaussian\n if family is None:\n family = families.Gaussian()\n self.family = family\n\n if exposure is not None:\n if not isinstance(self.family.link, families.links.Log):\n raise ValueError(\"exposure can only be used with the log \"\n \"link function\")\n elif exposure.shape[0] != endog.shape[0]:\n raise ValueError(\"exposure is not the same length as endog\")\n\n if offset is not None:\n if offset.shape[0] != endog.shape[0]:\n raise ValueError(\"offset is not the same length as endog\")\n\n if freq_weights is not None:\n if freq_weights.shape[0] != endog.shape[0]:\n raise ValueError(\"freq weights not the same length as endog\")\n if len(freq_weights.shape) > 1:\n raise ValueError(\"freq weights has too many dimensions\")\n\n # internal flag to store whether freq_weights were not None\n self._has_freq_weights = (self.freq_weights is not None)\n if self.freq_weights is None:\n self.freq_weights = np.ones((endog.shape[0]))\n # TODO: check do we want to keep None as sentinel for freq_weights\n\n if np.shape(self.freq_weights) == () and self.freq_weights > 1:\n self.freq_weights = (self.freq_weights *\n np.ones((endog.shape[0])))\n\n if var_weights is not None:\n if var_weights.shape[0] != endog.shape[0]:\n raise ValueError(\"var weights not the same length as endog\")\n if len(var_weights.shape) > 1:\n raise ValueError(\"var weights has too many dimensions\")\n\n # internal flag to store whether var_weights were not None\n self._has_var_weights = (var_weights is not None)\n if var_weights is None:\n self.var_weights = np.ones((endog.shape[0]))\n # TODO: check do we want to keep None as sentinel for var_weights\n self.iweights = np.asarray(self.freq_weights * self.var_weights)\n\n def _get_init_kwds(self):\n # this is a temporary fixup because exposure has been transformed\n # see #1609, copied from discrete_model.CountModel\n kwds = super(GLM, self)._get_init_kwds()\n if 'exposure' in kwds and kwds['exposure'] is not None:\n kwds['exposure'] = np.exp(kwds['exposure'])\n return kwds\n\n def loglike_mu(self, mu, scale=1.):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n scale = float_like(scale, \"scale\")\n return self.family.loglike(self.endog, mu, self.var_weights,\n self.freq_weights, scale)\n\n def loglike(self, params, scale=None):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n lin_pred = np.dot(self.exog, params) + self._offset_exposure\n expval = self.family.link.inverse(lin_pred)\n if scale is None:\n scale = self.estimate_scale(expval)\n llf = self.family.loglike(self.endog, expval, self.var_weights,\n self.freq_weights, scale)\n return 
llf\n\n def score_obs(self, params, scale=None):\n \"\"\"score first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n score_factor = self.score_factor(params, scale=scale)\n return score_factor[:, None] * self.exog\n\n def score(self, params, scale=None):\n \"\"\"score, first derivative of the loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score : ndarray_1d\n The first derivative of the loglikelihood function calculated as\n the sum of `score_obs`\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n score_factor = self.score_factor(params, scale=scale)\n return np.dot(score_factor, self.exog)\n\n def score_factor(self, params, scale=None):\n \"\"\"weights for score for each observation\n\n This can be considered as score residuals.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_factor : ndarray_1d\n A 1d weight vector used in the calculation of the score_obs.\n The score_obs are obtained by `score_factor[:, None] * exog`\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n score_factor = (self.endog - mu) / self.family.link.deriv(mu)\n score_factor /= self.family.variance(mu)\n score_factor *= self.iweights * self.n_trials\n\n if not scale == 1:\n score_factor /= scale\n\n return score_factor\n\n def hessian_factor(self, params, scale=None, observed=True):\n \"\"\"Weights for calculating Hessian\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned. 
If false then the\n expected information matrix is returned.\n\n Returns\n -------\n hessian_factor : ndarray, 1d\n A 1d weight vector used in the calculation of the Hessian.\n The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`\n \"\"\"\n\n # calculating eim_factor\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n eim_factor = 1 / (self.family.link.deriv(mu)**2 *\n self.family.variance(mu))\n eim_factor *= self.iweights * self.n_trials\n\n if not observed:\n if not scale == 1:\n eim_factor /= scale\n return eim_factor\n\n # calculating oim_factor, eim_factor is with scale=1\n\n score_factor = self.score_factor(params, scale=1.)\n if eim_factor.ndim > 1 or score_factor.ndim > 1:\n raise RuntimeError('something wrong')\n\n tmp = self.family.variance(mu) * self.family.link.deriv2(mu)\n tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)\n\n tmp = score_factor * tmp\n # correct for duplicatee iweights in oim_factor and score_factor\n tmp /= self.iweights * self.n_trials\n oim_factor = eim_factor * (1 + tmp)\n\n if tmp.ndim > 1:\n raise RuntimeError('something wrong')\n\n if not scale == 1:\n oim_factor /= scale\n\n return oim_factor\n\n def hessian(self, params, scale=None, observed=None):\n \"\"\"Hessian, second derivative of loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned (default).\n If false then the expected information matrix is returned.\n\n Returns\n -------\n hessian : ndarray\n Hessian, i.e. observed information, or expected information matrix.\n \"\"\"\n if observed is None:\n if getattr(self, '_optim_hessian', None) == 'eim':\n observed = False\n else:\n observed = True\n scale = float_like(scale, \"scale\", optional=True)\n tmp = getattr(self, '_tmp_like_exog', np.empty_like(self.exog, dtype=float))\n\n factor = self.hessian_factor(params, scale=scale, observed=observed)\n np.multiply(self.exog.T, factor, out=tmp.T)\n return -tmp.T.dot(self.exog)\n\n def information(self, params, scale=None):\n \"\"\"\n Fisher information matrix.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n return self.hessian(params, scale=scale, observed=False)\n\n def _deriv_mean_dparams(self, params):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n \"\"\"\n lin_pred = self.predict(params, linear=True)\n idl = self.family.link.inverse_deriv(lin_pred)\n dmat = self.exog * idl[:, None]\n return dmat\n\n def _deriv_score_obs_dendog(self, params, scale=None):\n \"\"\"derivative of score_obs w.r.t. endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog. 
This\n can is given by `score_factor0[:, None] * exog` where\n `score_factor0` is the score_factor without the residual.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n score_factor = 1 / self.family.link.deriv(mu)\n score_factor /= self.family.variance(mu)\n score_factor *= self.iweights * self.n_trials\n\n if not scale == 1:\n score_factor /= scale\n\n return score_factor[:, None] * self.exog\n\n def score_test(self, params_constrained, k_constraints=None,\n exog_extra=None, observed=True):\n \"\"\"score test for restrictions or for omitted variables\n\n The covariance matrix for the score is based on the Hessian, i.e.\n observed information matrix or optionally on the expected information\n matrix..\n\n Parameters\n ----------\n params_constrained : array_like\n estimated parameter of the restricted model. This can be the\n parameter estimate for the current when testing for omitted\n variables.\n k_constraints : int or None\n Number of constraints that were used in the estimation of params\n restricted relative to the number of exog in the model.\n This must be provided if no exog_extra are given. If exog_extra is\n not None, then k_constraints is assumed to be zero if it is None.\n exog_extra : None or array_like\n Explanatory variables that are jointly tested for inclusion in the\n model, i.e. omitted variables.\n observed : bool\n If True, then the observed Hessian is used in calculating the\n covariance matrix of the score. If false then the expected\n information matrix is used.\n\n Returns\n -------\n chi2_stat : float\n chisquare statistic for the score test\n p-value : float\n P-value of the score test based on the chisquare distribution.\n df : int\n Degrees of freedom used in the p-value calculation. This is equal\n to the number of constraints.\n\n Notes\n -----\n not yet verified for case with scale not equal to 1.\n \"\"\"\n\n if exog_extra is None:\n if k_constraints is None:\n raise ValueError('if exog_extra is None, then k_constraints'\n 'needs to be given')\n\n score = self.score(params_constrained)\n hessian = self.hessian(params_constrained, observed=observed)\n\n else:\n # exog_extra = np.asarray(exog_extra)\n if k_constraints is None:\n k_constraints = 0\n\n ex = np.column_stack((self.exog, exog_extra))\n k_constraints += ex.shape[1] - self.exog.shape[1]\n\n score_factor = self.score_factor(params_constrained)\n score = (score_factor[:, None] * ex).sum(0)\n hessian_factor = self.hessian_factor(params_constrained,\n observed=observed)\n hessian = -np.dot(ex.T * hessian_factor, ex)\n\n from scipy import stats\n # TODO check sign, why minus?\n chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))\n pval = stats.chi2.sf(chi2stat, k_constraints)\n # return a stats results instance instead? 
Contrast?\n return chi2stat, pval, k_constraints\n\n def _update_history(self, tmp_result, mu, history):\n \"\"\"\n Helper method to update history during iterative fit.\n \"\"\"\n history['params'].append(tmp_result.params)\n history['deviance'].append(self.family.deviance(self.endog, mu,\n self.var_weights,\n self.freq_weights,\n self.scale))\n return history\n\n def estimate_scale(self, mu):\n \"\"\"\n Estimate the dispersion/scale.\n\n Type of scale can be chose in the fit method.\n\n Parameters\n ----------\n mu : ndarray\n mu is the mean response estimate\n\n Returns\n -------\n Estimate of scale\n\n Notes\n -----\n The default scale for Binomial, Poisson and Negative Binomial\n families is 1. The default for the other families is Pearson's\n Chi-Square estimate.\n\n See Also\n --------\n statsmodels.genmod.generalized_linear_model.GLM.fit\n \"\"\"\n if not self.scaletype:\n if isinstance(self.family, (families.Binomial, families.Poisson,\n families.NegativeBinomial)):\n return 1.\n else:\n return self._estimate_x2_scale(mu)\n\n if isinstance(self.scaletype, float):\n return np.array(self.scaletype)\n\n if isinstance(self.scaletype, str):\n if self.scaletype.lower() == 'x2':\n return self._estimate_x2_scale(mu)\n elif self.scaletype.lower() == 'dev':\n return (self.family.deviance(self.endog, mu, self.var_weights,\n self.freq_weights, 1.) /\n (self.df_resid))\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n\n def _estimate_x2_scale(self, mu):\n resid = np.power(self.endog - mu, 2) * self.iweights\n return np.sum(resid / self.family.variance(mu)) / self.df_resid\n\n def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):\n \"\"\"\n Tweedie specific function to estimate scale and the variance parameter.\n The variance parameter is also referred to as p, xi, or shape.\n\n Parameters\n ----------\n mu : array_like\n Fitted mean response variable\n method : str, defaults to 'brentq'\n Scipy optimizer used to solve the Pearson equation. Only brentq\n currently supported.\n low : float, optional\n Low end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 1.01.\n high : float, optional\n High end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 5.\n\n Returns\n -------\n power : float\n The estimated shape or power.\n \"\"\"\n if method == 'brentq':\n from scipy.optimize import brentq\n\n def psi_p(power, mu):\n scale = ((self.iweights * (self.endog - mu) ** 2 /\n (mu ** power)).sum() / self.df_resid)\n return (np.sum(self.iweights * ((self.endog - mu) ** 2 /\n (scale * (mu ** power)) - 1) *\n np.log(mu)) / self.freq_weights.sum())\n power = brentq(psi_p, low, high, args=(mu))\n else:\n raise NotImplementedError('Only brentq can currently be used')\n return power\n\n def predict(self, params, exog=None, exposure=None, offset=None,\n linear=False):\n \"\"\"\n Return predicted values for a design matrix\n\n Parameters\n ----------\n params : array_like\n Parameters / coefficients of a GLM.\n exog : array_like, optional\n Design / exogenous data. Is exog is None, model exog is used.\n exposure : array_like, optional\n Exposure time values, only can be used with the log link\n function. See notes for details.\n offset : array_like, optional\n Offset values. 
See notes for details.\n linear : bool\n If True, returns the linear predicted values. If False,\n returns the value of the inverse of the model's link function at\n the linear predicted values.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n Any `exposure` and `offset` provided here take precedence over\n the `exposure` and `offset` used in the model fit. If `exog`\n is passed as an argument here, then any `exposure` and\n `offset` values in the fit will be ignored.\n\n Exposure values must be strictly positive.\n \"\"\"\n\n # Use fit offset if appropriate\n if offset is None and exog is None and hasattr(self, 'offset'):\n offset = self.offset\n elif offset is None:\n offset = 0.\n\n if exposure is not None and not isinstance(self.family.link,\n families.links.Log):\n raise ValueError(\"exposure can only be used with the log link \"\n \"function\")\n\n # Use fit exposure if appropriate\n if exposure is None and exog is None and hasattr(self, 'exposure'):\n # Already logged\n exposure = self.exposure\n elif exposure is None:\n exposure = 0.\n else:\n exposure = np.log(exposure)\n\n if exog is None:\n exog = self.exog\n\n linpred = np.dot(exog, params) + offset + exposure\n if linear:\n return linpred\n else:\n return self.family.fitted(linpred)\n\n def get_distribution(self, params, scale=1., exog=None, exposure=None,\n offset=None):\n \"\"\"\n Return a random number generator for the predictive distribution.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n scale : scalar\n The scale parameter.\n exog : array_like\n The predictor variable matrix.\n\n Returns\n -------\n gen\n Frozen random number generator object. Use the ``rvs`` method to\n generate random values.\n\n Notes\n -----\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. 
If any other value is used for ``n``, misleading\n results will be produced.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n fit = self.predict(params, exog, exposure, offset, linear=False)\n\n import scipy.stats.distributions as dist\n\n if isinstance(self.family, families.Gaussian):\n return dist.norm(loc=fit, scale=np.sqrt(scale))\n\n elif isinstance(self.family, families.Binomial):\n return dist.binom(n=1, p=fit)\n\n elif isinstance(self.family, families.Poisson):\n return dist.poisson(mu=fit)\n\n elif isinstance(self.family, families.Gamma):\n alpha = fit / float(scale)\n return dist.gamma(alpha, scale=scale)\n\n else:\n raise ValueError(\"get_distribution not implemented for %s\" %\n self.family.name)\n\n def _setup_binomial(self):\n # this checks what kind of data is given for Binomial.\n # family will need a reference to endog if this is to be removed from\n # preprocessing\n self.n_trials = np.ones((self.endog.shape[0])) # For binomial\n if isinstance(self.family, families.Binomial):\n tmp = self.family.initialize(self.endog, self.freq_weights)\n self.endog = tmp[0]\n self.n_trials = tmp[1]\n self._init_keys.append('n_trials')\n\n def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,\n full_output=True, disp=False, max_start_irls=3, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is family-specific and is given by the\n ``family.starting_mu(endog)``. If start_params is given then the\n initial mean will be calculated as ``np.dot(exog, start_params)``.\n maxiter : int, optional\n Default is 100.\n method : str\n Default is 'IRLS' for iteratively reweighted least squares.\n Otherwise gradient optimization is used.\n tol : float\n Convergence tolerance. Default is 1e-8.\n scale : str or float, optional\n `scale` can be 'X2', 'dev', or a float\n The default value is None, which uses `X2` for Gamma, Gaussian,\n and Inverse Gaussian.\n `X2` is Pearson's chi-square divided by `df_resid`.\n The default is 1 for the Binomial and Poisson families.\n `dev` is the deviance divided by df_resid\n cov_type : str\n The type of parameter estimate covariance matrix to compute.\n cov_kwds : dict-like\n Extra arguments for calculating the covariance of the parameter\n estimates.\n use_t : bool\n If True, the Student t-distribution is used for inference.\n full_output : bool, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n Not used if methhod is IRLS.\n disp : bool, optional\n Set to True to print convergence messages. Not used if method is\n IRLS.\n max_start_irls : int\n The number of IRLS iterations used to obtain starting\n values for gradient optimization. Only relevant if\n `method` is set to something other than 'IRLS'.\n atol : float, optional\n (available with IRLS fits) The absolute tolerance criterion that\n must be satisfied. Defaults to ``tol``. Convergence is attained\n when: :math:`rtol * prior + atol > abs(current - prior)`\n rtol : float, optional\n (available with IRLS fits) The relative tolerance criterion that\n must be satisfied. 
Defaults to 0 which means ``rtol`` is not used.\n Convergence is attained when:\n :math:`rtol * prior + atol > abs(current - prior)`\n tol_criterion : str, optional\n (available with IRLS fits) Defaults to ``'deviance'``. Can\n optionally be ``'params'``.\n wls_method : str, optional\n (available with IRLS fits) options are 'lstsq', 'pinv' and 'qr'\n specifies which linear algebra function to use for the irls\n optimization. Default is `lstsq` which uses the same underlying\n svd based approach as 'pinv', but is faster during iterations.\n 'lstsq' and 'pinv' regularize the estimate in singular and\n near-singular cases by truncating small singular values based\n on `rcond` of the respective numpy.linalg function. 'qr' is\n only valid for cases that are not singular nor near-singular.\n optim_hessian : {'eim', 'oim'}, optional\n (available with scipy optimizer fits) When 'oim'--the default--the\n observed Hessian is used in fitting. 'eim' is the expected Hessian.\n This may provide more stable fits, but adds assumption that the\n Hessian is correctly specified.\n\n Notes\n -----\n If method is 'IRLS', then an additional keyword 'attach_wls' is\n available. This is currently for internal use only and might change\n in future versions. If attach_wls' is true, then the final WLS\n instance of the IRLS iteration is attached to the results instance\n as `results_wls` attribute.\n \"\"\"\n if isinstance(scale, str):\n scale = scale.lower()\n if scale not in (\"x2\", \"dev\"):\n raise ValueError(\n \"scale must be either X2 or dev when a string.\"\n )\n elif scale is not None:\n # GH-6627\n try:\n scale = float(scale)\n except Exception as exc:\n raise type(exc)(\n \"scale must be a float if given and no a string.\"\n )\n self.scaletype = scale\n\n if method.lower() == \"irls\":\n if cov_type.lower() == 'eim':\n cov_type = 'nonrobust'\n return self._fit_irls(start_params=start_params, maxiter=maxiter,\n tol=tol, scale=scale, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t, **kwargs)\n else:\n self._optim_hessian = kwargs.get('optim_hessian')\n self._tmp_like_exog = np.empty_like(self.exog, dtype=float)\n fit_ = self._fit_gradient(start_params=start_params,\n method=method,\n maxiter=maxiter,\n tol=tol, scale=scale,\n full_output=full_output,\n disp=disp, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t,\n max_start_irls=max_start_irls,\n **kwargs)\n del self._optim_hessian\n del self._tmp_like_exog\n return fit_\n\n def _fit_gradient(self, start_params=None, method=\"newton\",\n maxiter=100, tol=1e-8, full_output=True,\n disp=True, scale=None, cov_type='nonrobust',\n cov_kwds=None, use_t=None, max_start_irls=3,\n **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family iteratively\n using the scipy gradient optimizers.\n \"\"\"\n\n # fix scale during optimization, see #4616\n scaletype = self.scaletype\n self.scaletype = 1.\n\n if (max_start_irls > 0) and (start_params is None):\n irls_rslt = self._fit_irls(start_params=start_params,\n maxiter=max_start_irls,\n tol=tol, scale=1., cov_type='nonrobust',\n cov_kwds=None, use_t=None,\n **kwargs)\n start_params = irls_rslt.params\n del irls_rslt\n\n rslt = super(GLM, self).fit(start_params=start_params, tol=tol,\n maxiter=maxiter, full_output=full_output,\n method=method, disp=disp, **kwargs)\n\n # reset scaletype to original\n self.scaletype = scaletype\n\n mu = self.predict(rslt.params)\n scale = self.estimate_scale(mu)\n\n if rslt.normalized_cov_params is None:\n cov_p = None\n else:\n cov_p = rslt.normalized_cov_params / 
scale\n\n if cov_type.lower() == 'eim':\n oim = False\n cov_type = 'nonrobust'\n else:\n oim = True\n\n try:\n cov_p = np.linalg.inv(-self.hessian(rslt.params, observed=oim)) / scale\n except LinAlgError:\n warn('Inverting hessian failed, no bse or cov_params '\n 'available', HessianInversionWarning)\n cov_p = None\n\n results_class = getattr(self, '_results_class', GLMResults)\n results_class_wrapper = getattr(self, '_results_class_wrapper', GLMResultsWrapper)\n glm_results = results_class(self, rslt.params,\n cov_p,\n scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n # TODO: iteration count is not always available\n history = {'iteration': 0}\n if full_output:\n glm_results.mle_retvals = rslt.mle_retvals\n if 'iterations' in rslt.mle_retvals:\n history['iteration'] = rslt.mle_retvals['iterations']\n glm_results.method = method\n glm_results.fit_history = history\n\n return results_class_wrapper(glm_results)\n\n def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None,\n use_t=None, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family using\n iteratively reweighted least squares (IRLS).\n \"\"\"\n attach_wls = kwargs.pop('attach_wls', False)\n atol = kwargs.get('atol')\n rtol = kwargs.get('rtol', 0.)\n tol_criterion = kwargs.get('tol_criterion', 'deviance')\n wls_method = kwargs.get('wls_method', 'lstsq')\n atol = tol if atol is None else atol\n\n endog = self.endog\n wlsexog = self.exog\n if start_params is None:\n start_params = np.zeros(self.exog.shape[1])\n mu = self.family.starting_mu(self.endog)\n lin_pred = self.family.predict(mu)\n else:\n lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure\n mu = self.family.fitted(lin_pred)\n self.scale = self.estimate_scale(mu)\n dev = self.family.deviance(self.endog, mu, self.var_weights,\n self.freq_weights, self.scale)\n if np.isnan(dev):\n raise ValueError(\"The first guess on the deviance function \"\n \"returned a nan. 
This could be a boundary \"\n \" problem and should be reported.\")\n\n # first guess on the deviance is assumed to be scaled by 1.\n # params are none to start, so they line up with the deviance\n history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])\n converged = False\n criterion = history[tol_criterion]\n # This special case is used to get the likelihood for a specific\n # params vector.\n if maxiter == 0:\n mu = self.family.fitted(lin_pred)\n self.scale = self.estimate_scale(mu)\n wls_results = lm.RegressionResults(self, start_params, None)\n iteration = 0\n for iteration in range(maxiter):\n self.weights = (self.iweights * self.n_trials *\n self.family.weights(mu))\n wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)\n - self._offset_exposure)\n wls_mod = reg_tools._MinimalWLS(wlsendog, wlsexog,\n self.weights, check_endog=True,\n check_weights=True)\n wls_results = wls_mod.fit(method=wls_method)\n lin_pred = np.dot(self.exog, wls_results.params)\n lin_pred += self._offset_exposure\n mu = self.family.fitted(lin_pred)\n history = self._update_history(wls_results, mu, history)\n self.scale = self.estimate_scale(mu)\n if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):\n msg = \"Perfect separation detected, results not available\"\n raise PerfectSeparationError(msg)\n converged = _check_convergence(criterion, iteration + 1, atol,\n rtol)\n if converged:\n break\n self.mu = mu\n\n if maxiter > 0: # Only if iterative used\n wls_method2 = 'pinv' if wls_method == 'lstsq' else wls_method\n wls_model = lm.WLS(wlsendog, wlsexog, self.weights)\n wls_results = wls_model.fit(method=wls_method2)\n\n glm_results = GLMResults(self, wls_results.params,\n wls_results.normalized_cov_params,\n self.scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n glm_results.method = \"IRLS\"\n glm_results.mle_settings = {}\n glm_results.mle_settings['wls_method'] = wls_method\n glm_results.mle_settings['optimizer'] = glm_results.method\n if (maxiter > 0) and (attach_wls is True):\n glm_results.results_wls = wls_results\n history['iteration'] = iteration + 1\n glm_results.fit_history = history\n glm_results.converged = converged\n return GLMResultsWrapper(glm_results)\n\n def fit_regularized(self, method=\"elastic_net\", alpha=0.,\n start_params=None, refit=False,\n opt_method=\"bfgs\", **kwargs):\n r\"\"\"\n Return a regularized fit to a linear regression model.\n\n Parameters\n ----------\n method : {'elastic_net'}\n Only the `elastic_net` approach is currently implemented.\n alpha : scalar or array_like\n The penalty weight. If a scalar, the same penalty weight\n applies to all variables in the model. If a vector, it\n must have the same length as `params`, and contains a\n penalty weight for each coefficient.\n start_params : array_like\n Starting values for `params`.\n refit : bool\n If True, the model is refit using only the variables that\n have non-zero coefficients in the regularized fit. The\n refitted model is not regularized.\n opt_method : string\n The method used for numerical optimization.\n **kwargs\n Additional keyword arguments used when fitting the model.\n\n Returns\n -------\n GLMResults\n An array or a GLMResults object, same type returned by `fit`.\n\n Notes\n -----\n The penalty is the ``elastic net`` penalty, which is a\n combination of L1 and L2 penalties.\n\n The function that is minimized is:\n\n .. 
math::\n\n -loglike/n + alpha*((1-L1\\_wt)*|params|_2^2/2 + L1\\_wt*|params|_1)\n\n where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.\n\n Post-estimation results are based on the same data used to\n select variables, hence may be subject to overfitting biases.\n\n The elastic_net method uses the following keyword arguments:\n\n maxiter : int\n Maximum number of iterations\n L1_wt : float\n Must be in [0, 1]. The L1 penalty has weight L1_wt and the\n L2 penalty has weight 1 - L1_wt.\n cnvrg_tol : float\n Convergence threshold for maximum parameter change after\n one sweep through all coefficients.\n zero_tol : float\n Coefficients below this threshold are treated as zero.\n \"\"\"\n\n if kwargs.get(\"L1_wt\", 1) == 0:\n return self._fit_ridge(alpha, start_params, opt_method)\n\n from statsmodels.base.elastic_net import fit_elasticnet\n\n if method != \"elastic_net\":\n raise ValueError(\"method for fit_regularied must be elastic_net\")\n\n defaults = {\"maxiter\": 50, \"L1_wt\": 1, \"cnvrg_tol\": 1e-10,\n \"zero_tol\": 1e-10}\n defaults.update(kwargs)\n\n result = fit_elasticnet(self, method=method,\n alpha=alpha,\n start_params=start_params,\n refit=refit,\n **defaults)\n\n self.mu = self.predict(result.params)\n self.scale = self.estimate_scale(self.mu)\n\n if not result.converged:\n msg = \"Elastic net fitting did not converge\"\n warn(msg)\n\n return result\n\n def _fit_ridge(self, alpha, start_params, method):\n\n if start_params is None:\n start_params = np.zeros(self.exog.shape[1])\n\n def fun(x):\n return -(self.loglike(x) / self.nobs - np.sum(alpha * x**2) / 2)\n\n def grad(x):\n return -(self.score(x) / self.nobs - alpha * x)\n\n from scipy.optimize import minimize\n from statsmodels.base.elastic_net import (RegularizedResults,\n RegularizedResultsWrapper)\n\n mr = minimize(fun, start_params, jac=grad, method=method)\n params = mr.x\n\n if not mr.success:\n import warnings\n ngrad = np.sqrt(np.sum(mr.jac**2))\n msg = \"GLM ridge optimization may have failed, |grad|=%f\" % ngrad\n warnings.warn(msg)\n\n results = RegularizedResults(self, params)\n results = RegularizedResultsWrapper(results)\n\n return results\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"fit the model subject to linear equality constraints\n\n The constraints are of the form `R params = q`\n where R is the constraint_matrix and q is the vector of\n constraint_values.\n\n The estimation creates a new model with transformed design matrix,\n exog, and converts the results back to the original parameterization.\n\n\n Parameters\n ----------\n constraints : formula expression or tuple\n If it is a tuple, then the constraint needs to be given by two\n arrays (constraint_matrix, constraint_value), i.e. (R, q).\n Otherwise, the constraints can be given as strings or list of\n strings.\n see t_test for details\n start_params : None or array_like\n starting values for the optimization. 
`start_params` needs to be\n given in the original parameter space and are internally\n transformed.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the transformed model.\n\n Returns\n -------\n results : Results instance\n \"\"\"\n\n from patsy import DesignInfo\n from statsmodels.base._constraints import (fit_constrained,\n LinearConstraints)\n\n # same pattern as in base.LikelihoodModel.t_test\n lc = DesignInfo(self.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n # TODO: add start_params option, need access to tranformation\n # fit_constrained needs to do the transformation\n params, cov, res_constr = fit_constrained(self, R, q,\n start_params=start_params,\n fit_kwds=fit_kwds)\n # create dummy results Instance, TODO: wire up properly\n res = self.fit(start_params=params, maxiter=0) # we get a wrapper back\n res._results.params = params\n res._results.cov_params_default = cov\n cov_type = fit_kwds.get('cov_type', 'nonrobust')\n if cov_type != 'nonrobust':\n res._results.normalized_cov_params = cov / res_constr.scale\n else:\n res._results.normalized_cov_params = None\n res._results.scale = res_constr.scale\n k_constr = len(q)\n res._results.df_resid += k_constr\n res._results.df_model -= k_constr\n res._results.constraints = LinearConstraints.from_patsy(lc)\n res._results.k_constr = k_constr\n res._results.results_constrained = res_constr\n return res\n\n\nclass GLMResults(base.LikelihoodModelResults):\n \"\"\"\n Class to contain GLM results.\n\n GLMResults inherits from statsmodels.LikelihoodModelResults\n\n Attributes\n ----------\n df_model : float\n See GLM.df_model\n df_resid : float\n See GLM.df_resid\n fit_history : dict\n Contains information about the iterations. Its keys are `iterations`,\n `deviance` and `params`.\n model : class instance\n Pointer to GLM model instance that called fit.\n nobs : float\n The number of observations n.\n normalized_cov_params : ndarray\n See GLM docstring\n params : ndarray\n The coefficients of the fitted model. Note that interpretation\n of the coefficients often depends on the distribution family and the\n data.\n pvalues : ndarray\n The two-tailed p-values for the parameters.\n scale : float\n The estimate of the scale / dispersion for the model fit.\n See GLM.fit and GLM.estimate_scale for more information.\n stand_errors : ndarray\n The standard errors of the fitted GLM. 
#TODO still named bse\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModelResults\n \"\"\"\n\n def __init__(self, model, params, normalized_cov_params, scale,\n cov_type='nonrobust', cov_kwds=None, use_t=None):\n super(GLMResults, self).__init__(\n model,\n params,\n normalized_cov_params=normalized_cov_params,\n scale=scale)\n self.family = model.family\n self._endog = model.endog\n self.nobs = model.endog.shape[0]\n self._freq_weights = model.freq_weights\n self._var_weights = model.var_weights\n self._iweights = model.iweights\n if isinstance(self.family, families.Binomial):\n self._n_trials = self.model.n_trials\n else:\n self._n_trials = 1\n self.df_resid = model.df_resid\n self.df_model = model.df_model\n self._cache = {}\n # are these intermediate results needed or can we just\n # call the model's attributes?\n\n # for remove data and pickle without large arrays\n self._data_attr.extend(['results_constrained', '_freq_weights',\n '_var_weights', '_iweights'])\n self._data_in_cache.extend(['null', 'mu'])\n self._data_attr_model = getattr(self, '_data_attr_model', [])\n self._data_attr_model.append('mu')\n\n # robust covariance\n from statsmodels.base.covtype import get_robustcov_results\n if use_t is None:\n self.use_t = False # TODO: class default\n else:\n self.use_t = use_t\n\n # temporary warning\n ct = (cov_type == 'nonrobust') or (cov_type.upper().startswith('HC'))\n if self.model._has_freq_weights and not ct:\n import warnings\n from statsmodels.tools.sm_exceptions import SpecificationWarning\n warnings.warn('cov_type not fully supported with freq_weights',\n SpecificationWarning)\n\n if self.model._has_var_weights and not ct:\n import warnings\n from statsmodels.tools.sm_exceptions import SpecificationWarning\n warnings.warn('cov_type not fully supported with var_weights',\n SpecificationWarning)\n\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description': 'Standard Errors assume that the' +\n ' covariance matrix of the errors is correctly ' +\n 'specified.'}\n\n else:\n if cov_kwds is None:\n cov_kwds = {}\n get_robustcov_results(self, cov_type=cov_type, use_self=True,\n use_t=use_t, **cov_kwds)\n\n @cached_data\n def resid_response(self):\n \"\"\"\n Response residuals. The response residuals are defined as\n `endog` - `fittedvalues`\n \"\"\"\n return self._n_trials * (self._endog-self.mu)\n\n @cached_data\n def resid_pearson(self):\n \"\"\"\n Pearson residuals. The Pearson residuals are defined as\n (`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution\n specific variance function. See statsmodels.families.family and\n statsmodels.families.varfuncs for more information.\n \"\"\"\n return (np.sqrt(self._n_trials) * (self._endog-self.mu) *\n np.sqrt(self._var_weights) /\n np.sqrt(self.family.variance(self.mu)))\n\n @cached_data\n def resid_working(self):\n \"\"\"\n Working residuals. The working residuals are defined as\n `resid_response`/link'(`mu`). See statsmodels.family.links for the\n derivatives of the link functions. They are defined analytically.\n \"\"\"\n # Isn't self.resid_response is already adjusted by _n_trials?\n val = (self.resid_response * self.family.link.deriv(self.mu))\n val *= self._n_trials\n return val\n\n @cached_data\n def resid_anscombe(self):\n \"\"\"\n Anscombe residuals. See statsmodels.families.family for distribution-\n specific Anscombe residuals. Currently, the unscaled residuals are\n provided. 
In a future version, the scaled residuals will be provided.\n \"\"\"\n import warnings\n warnings.warn('Anscombe residuals currently unscaled. After the 0.12 '\n 'release, they will be scaled.', category=FutureWarning)\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n\n @cached_data\n def resid_anscombe_scaled(self):\n \"\"\"\n Scaled Anscombe residuals. See statsmodels.families.family for\n distribution-specific Anscombe residuals.\n \"\"\"\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=self.scale)\n\n @cached_data\n def resid_anscombe_unscaled(self):\n \"\"\"\n Unscaled Anscombe residuals. See statsmodels.families.family for\n distribution-specific Anscombe residuals.\n \"\"\"\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n\n @cached_data\n def resid_deviance(self):\n \"\"\"\n Deviance residuals. See statsmodels.families.family for distribution-\n specific deviance residuals.\n \"\"\"\n dev = self.family.resid_dev(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n return dev\n\n @cached_value\n def pearson_chi2(self):\n \"\"\"\n Pearson's Chi-Squared statistic is defined as the sum of the squares\n of the Pearson residuals.\n \"\"\"\n chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)\n chisq *= self._iweights * self._n_trials\n chisqsum = np.sum(chisq)\n return chisqsum\n\n @cached_data\n def fittedvalues(self):\n \"\"\"\n The estimated mean response.\n\n This is the value of the inverse of the link function at\n lin_pred, where lin_pred is the linear predicted value\n obtained by multiplying the design matrix by the coefficient\n vector.\n \"\"\"\n return self.mu\n\n @cached_data\n def mu(self):\n \"\"\"\n See GLM docstring.\n \"\"\"\n return self.model.predict(self.params)\n\n @cache_readonly\n def null(self):\n \"\"\"\n Fitted values of the null model\n \"\"\"\n endog = self._endog\n model = self.model\n exog = np.ones((len(endog), 1))\n\n kwargs = model._get_init_kwds()\n kwargs.pop('family')\n if hasattr(self, '_offset_exposure'):\n return GLM(endog, exog, family=self.family,\n **kwargs).fit().fittedvalues\n else:\n # correct if fitted is identical across observations\n wls_model = lm.WLS(endog, exog,\n weights=self._iweights * self._n_trials)\n return wls_model.fit().fittedvalues\n\n @cache_readonly\n def deviance(self):\n \"\"\"\n See statsmodels.families.family for the distribution-specific deviance\n functions.\n \"\"\"\n return self.family.deviance(self._endog, self.mu, self._var_weights,\n self._freq_weights)\n\n @cache_readonly\n def null_deviance(self):\n \"\"\"The value of the deviance function for the model fit with a constant\n as the only regressor.\"\"\"\n return self.family.deviance(self._endog, self.null, self._var_weights,\n self._freq_weights)\n\n @cache_readonly\n def llnull(self):\n \"\"\"\n Log-likelihood of the model fit with a constant as the only regressor\n \"\"\"\n return self.family.loglike(self._endog, self.null,\n var_weights=self._var_weights,\n freq_weights=self._freq_weights,\n scale=self.scale)\n\n @cached_value\n def llf(self):\n \"\"\"\n Value of the loglikelihood function evalued at params.\n See statsmodels.families.family for distribution-specific\n loglikelihoods.\n \"\"\"\n _modelfamily = self.family\n if (isinstance(self.family, families.Gaussian) and\n isinstance(self.family.link, families.links.Power) and\n 
(self.family.link.power == 1.)):\n scale = (np.power(self._endog - self.mu, 2) * self._iweights).sum()\n scale /= self.model.wnobs\n else:\n scale = self.scale\n val = _modelfamily.loglike(self._endog, self.mu,\n var_weights=self._var_weights,\n freq_weights=self._freq_weights,\n scale=scale)\n return val\n\n @cached_value\n def aic(self):\n \"\"\"\n Akaike Information Criterion\n -2 * `llf` + 2*(`df_model` + 1)\n \"\"\"\n return -2 * self.llf + 2 * (self.df_model + 1)\n\n @cached_value\n def bic(self):\n \"\"\"\n Bayes Information Criterion\n `deviance` - `df_resid` * log(`nobs`)\n \"\"\"\n return (self.deviance -\n (self.model.wnobs - self.df_model - 1) *\n np.log(self.model.wnobs))\n\n @Appender(pred.get_prediction_glm.__doc__)\n def get_prediction(self, exog=None, exposure=None, offset=None,\n transform=True, linear=False,\n row_labels=None):\n\n import statsmodels.regression._prediction as linpred\n\n pred_kwds = {'exposure': exposure, 'offset': offset, 'linear': True}\n\n # two calls to a get_prediction duplicates exog generation if patsy\n res_linpred = linpred.get_prediction(self, exog=exog,\n transform=transform,\n row_labels=row_labels,\n pred_kwds=pred_kwds)\n\n pred_kwds['linear'] = False\n res = pred.get_prediction_glm(self, exog=exog, transform=transform,\n row_labels=row_labels,\n linpred=res_linpred,\n link=self.model.family.link,\n pred_kwds=pred_kwds)\n\n return res\n\n def get_hat_matrix_diag(self, observed=True):\n \"\"\"\n Compute the diagonal of the hat matrix\n\n Parameters\n ----------\n observed : bool\n If true, then observed hessian is used in the hat matrix\n computation. If false, then the expected hessian is used.\n In the case of a canonical link function both are the same.\n\n Returns\n -------\n hat_matrix_diag : ndarray\n The diagonal of the hat matrix computed from the observed\n or expected hessian.\n \"\"\"\n weights = self.model.hessian_factor(self.params, observed=observed)\n wexog = np.sqrt(weights)[:, None] * self.model.exog\n\n hd = (wexog * np.linalg.pinv(wexog).T).sum(1)\n return hd\n\n def get_influence(self, observed=True):\n \"\"\"\n Get an instance of GLMInfluence with influence and outlier measures\n\n Parameters\n ----------\n observed : bool\n If true, then observed hessian is used in the hat matrix\n computation. If false, then the expected hessian is used.\n In the case of a canonical link function both are the same.\n\n Returns\n -------\n infl : GLMInfluence instance\n The instance has methods to calculate the main influence and\n outlier measures as attributes.\n\n See Also\n --------\n statsmodels.stats.outliers_influence.GLMInfluence\n \"\"\"\n from statsmodels.stats.outliers_influence import GLMInfluence\n\n weights = self.model.hessian_factor(self.params, observed=observed)\n weights_sqrt = np.sqrt(weights)\n wexog = weights_sqrt[:, None] * self.model.exog\n wendog = weights_sqrt * self.model.endog\n\n # using get_hat_matrix_diag has duplicated computation\n hat_matrix_diag = self.get_hat_matrix_diag(observed=observed)\n infl = GLMInfluence(self, endog=wendog, exog=wexog,\n resid=self.resid_pearson,\n hat_matrix_diag=hat_matrix_diag)\n return infl\n\n @Appender(base.LikelihoodModelResults.remove_data.__doc__)\n def remove_data(self):\n # GLM has alias/reference in result instance\n self._data_attr.extend([i for i in self.model._data_attr\n if '_data.' 
not in i])\n super(self.__class__, self).remove_data()\n\n # TODO: what are these in results?\n self._endog = None\n self._freq_weights = None\n self._var_weights = None\n self._iweights = None\n self._n_trials = None\n\n @Appender(_plot_added_variable_doc % {'extra_params_doc': ''})\n def plot_added_variable(self, focus_exog, resid_type=None,\n use_glm_weights=True, fit_kwargs=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_added_variable\n\n fig = plot_added_variable(self, focus_exog,\n resid_type=resid_type,\n use_glm_weights=use_glm_weights,\n fit_kwargs=fit_kwargs, ax=ax)\n\n return fig\n\n @Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})\n def plot_partial_residuals(self, focus_exog, ax=None):\n\n from statsmodels.graphics.regressionplots import plot_partial_residuals\n\n return plot_partial_residuals(self, focus_exog, ax=ax)\n\n @Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})\n def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_ceres_residuals\n\n return plot_ceres_residuals(self, focus_exog, frac,\n cond_means=cond_means, ax=ax)\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"\n Summarize the Regression Results\n\n Parameters\n ----------\n yname : str, optional\n Default is `y`\n xname : list[str], optional\n Names for the exogenous variables, default is `var_#` for ## in\n the number of regressors. Must match the number of parameters in\n the model\n title : str, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary results\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Model Family:', [self.family.__class__.__name__]),\n ('Link Function:', [self.family.link.__class__.__name__]),\n ('Method:', [self.method]),\n ('Date:', None),\n ('Time:', None),\n ('No. Iterations:',\n [\"%d\" % self.fit_history['iteration']]),\n ]\n\n top_right = [('No. Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None),\n ('Scale:', [\"%#8.5g\" % self.scale]),\n ('Log-Likelihood:', None),\n ('Deviance:', [\"%#8.5g\" % self.deviance]),\n ('Pearson chi2:', [\"%#6.3g\" % self.pearson_chi2])\n ]\n\n if hasattr(self, 'cov_type'):\n top_left.append(('Covariance Type:', [self.cov_type]))\n\n if title is None:\n title = \"Generalized Linear Model Regression Results\"\n\n # create summary tables\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n if hasattr(self, 'constraints'):\n smry.add_extra_txt(['Model has been estimated subject to linear '\n 'equality constraints.'])\n return smry\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental summary for regression Results\n\n Parameters\n ----------\n yname : str\n Name of the dependent variable (optional)\n xname : list[str], optional\n Names for the exogenous variables, default is `var_#` for ## in\n the number of regressors. 
Must match the number of parameters in\n the model\n title : str, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format : str\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary results\n \"\"\"\n self.method = 'IRLS'\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n if hasattr(self, 'constraints'):\n smry.add_text('Model has been estimated subject to linear '\n 'equality constraints.')\n\n return smry\n\n\nclass GLMResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\n 'resid_anscombe': 'rows',\n 'resid_deviance': 'rows',\n 'resid_pearson': 'rows',\n 'resid_response': 'rows',\n 'resid_working': 'rows'\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\n\n\nwrap.populate_wrapper(GLMResultsWrapper, GLMResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n data = sm.datasets.longley.load(as_pandas=False)\n # data.exog = add_constant(data.exog)\n GLMmod = GLM(data.endog, data.exog).fit()\n GLMT = GLMmod.summary(returns='tables')\n # GLMT[0].extend_right(GLMT[1])\n # print(GLMT[0])\n # print(GLMT[2])\n GLMTp = GLMmod.summary(title='Test GLM')\n \"\"\"\nFrom Stata\n. webuse beetle\n. glm r i.beetle ldose, family(binomial n) link(cloglog)\n\nIteration 0: log likelihood = -79.012269\nIteration 1: log likelihood = -76.94951\nIteration 2: log likelihood = -76.945645\nIteration 3: log likelihood = -76.945645\n\nGeneralized linear models No. of obs = 24\nOptimization : ML Residual df = 20\n Scale parameter = 1\nDeviance = 73.76505595 (1/df) Deviance = 3.688253\nPearson = 71.8901173 (1/df) Pearson = 3.594506\n\nVariance function: V(u) = u*(1-u/n) [Binomial]\nLink function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]\n\n AIC = 6.74547\nLog likelihood = -76.94564525 BIC = 10.20398\n\n------------------------------------------------------------------------------\n | OIM\n r | Coef. Std. Err. z P>|z| [95% Conf. Interval]\n-------------+----------------------------------------------------------------\n beetle |\n 2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783\n 3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867\n |\n ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658\n _cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116\n------------------------------------------------------------------------------\n\"\"\"\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.linalg.matrix_rank", "numpy.asarray", "scipy.stats.distributions.poisson", "numpy.exp", "scipy.stats.distributions.binom", "scipy.stats.distributions.gamma", "numpy.allclose", "numpy.empty_like", "numpy.column_stack", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.power", "numpy.isnan", "scipy.optimize.minimize", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.ones", "numpy.linalg.pinv", "numpy.shape", "scipy.optimize.brentq", "scipy.stats.chi2.sf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SepandKashani/audio_tools
[ "e606e4e05ef571968ccd862a1f65698c3eb9f78c" ]
[ "audio_tools/network.py" ]
[ "import itertools\nimport pickle\nimport queue\nimport socket\nimport threading\nimport time\n\nimport numpy as np\n\nimport audio_tools.interface as ati\nimport audio_tools.util.interop as atui\n\n\nclass PacketServer(ati.PacketStream):\n \"\"\"\n Route packets from a PacketStream to several PacketClients through a TCP interface.\n\n This class is designed to dispatch available packets to each destination as soon as possible.\n\n Architecture\n ------------\n PacketServer is multi-threaded to efficiently connect the local PacketStream to remote\n PacketClients via:\n * A NetworkListener process which monitors all incoming connection requests;\n * A LocalStreamer process which fan-outs local packets to NetworkStreamer processes; and\n * NetworkStreamer processes (one per PacketClient) which transmit local packets to remote consumers.\n\n When receiving a new connection request, the NetworkListener spawns a new NetworkStreamer to\n handle the connection. Shared ressources are accessed through a lock when required.\n\n Note\n ----\n * PacketServer.stop() is not instantaneous due to internal timeouts to avoid thread deadlocks.\n * Some NetworkStreamers may not end after calling PacketServer.stop(). This can happen if the\n remote connection was not made through a PacketClient. This is a non-issue: the OS will\n force-close such zombie connections after a while.\n \"\"\"\n\n def __init__(self, stream: ati.PacketStream, port: int):\n \"\"\"\n Parameters\n ----------\n stream: PacketStream\n Packet source.\n port: int\n TCP listening port.\n \"\"\"\n super().__init__(dtype=None, sample_rate=None) # not yet known\n self._stream = stream\n self._port = port\n\n self._thread = {} # dict[int, threading.Thread]\n self._pkt_q = {} # dict[int, queue.Queue]\n self._cid = itertools.count(start=2)\n self._q_lck = threading.Lock() # thread synchronization to modify [_thread, _pkt_q, _cid]\n self._active = threading.Event() # thread start/stop notification\n\n def start(self):\n if self.active():\n pass\n else:\n if not self._stream.active(): # initiate acquisition\n self._stream.start()\n self._dtype = self._stream.dtype\n self._sample_rate = self._stream.sample_rate\n self._active.set()\n\n # create/launch non-NetworkStreamer threads\n self._thread.update(\n {\n 0: PacketServer._LocalStreamer(self),\n 1: PacketServer._NetworkListener(self),\n }\n )\n for t_id in {0, 1}:\n self._thread[t_id].start()\n\n def stop(self):\n self._stream.stop() # stop acquisition\n self._stream.clear()\n\n self._active.clear() # gracefully terminate threads\n pkt_size, *_ = self.dtype[\"data\"].shape # smpl/pkt\n pkt_rate = self.sample_rate / pkt_size # pkt/s\n for t in list(self._thread.values()): # guarantees static dict-view.\n # The only threads which may hang are NetworkStreamers. 
(See Notes above.)\n # Wait a reasonable amount of time for most lingering packets to be sent.\n t.join(timeout=30 / pkt_rate)\n with self._q_lck: # Discard all state related to dead threads (if any).\n for t_id in [_[0] for _ in self._thread.items() if not _[1].is_alive()]:\n self._thread.pop(t_id, None)\n self._pkt_q.pop(t_id, None)\n\n def get(self) -> np.ndarray:\n raise NotImplementedError(\"Operation not supported.\")\n\n def clear(self):\n raise NotImplementedError(\"Operation not supported.\")\n\n def __len__(self) -> int:\n raise NotImplementedError(\"Operation not supported.\")\n\n def active(self) -> bool:\n return self._active.is_set()\n\n class _LocalStreamer(threading.Thread):\n def __init__(self, srvr: \"PacketServer\"):\n super().__init__()\n self._srvr = srvr\n\n def run(self):\n pkt_size, *_ = self._srvr.dtype[\"data\"].shape # smpl/pkt\n pkt_rate = self._srvr.sample_rate / pkt_size # pkt/s\n stream = self._srvr._stream\n\n while self._srvr.active():\n if len(stream) > 0: # required to avoid PacketStream.get()'s blocking behaviour.\n pkt = stream.get()\n for q in self._srvr._pkt_q.values():\n # Note: if no client available, then stream packets are 'lost'. This is\n # intended behaviour.\n q.put(pkt)\n else:\n # No data available at source, but new packet imminent -> stall briefly.\n time.sleep(0.5 / pkt_rate)\n\n class _NetworkStreamer(threading.Thread):\n def __init__(self, srvr: \"PacketServer\", skt: socket.socket, cid: int):\n super().__init__()\n self._srvr = srvr\n self._skt = skt\n self._cid = cid\n\n def run(self):\n try:\n self._send_header()\n self._stream_audio()\n except Exception:\n pass\n finally:\n self._cleanup()\n\n def _send_header(self):\n # Transmit all metadata required for PacketClient to decode audio packets. The header is\n # pickle-encoded, prepended with a >u4 describing its length in bytes. We use pickle in\n # place of JSON since dtype descriptors are sensitive to the sequence type used.\n metadata = pickle.dumps(\n {\n \"sample_rate\": self._srvr.sample_rate,\n \"dtype_descr\": atui.dtype_to_descr(self._srvr.dtype),\n },\n protocol=4,\n )\n header = len(metadata).to_bytes(4, byteorder=\"big\", signed=False) + metadata\n self._skt.sendall(header)\n\n def _stream_audio(self):\n q = self._srvr._pkt_q[self._cid]\n pkt_size, *_ = self._srvr.dtype[\"data\"].shape # smpl/pkt\n pkt_rate = self._srvr.sample_rate / pkt_size # pkt/s\n\n skt_alive = True\n while self._srvr.active() and skt_alive:\n try:\n pkt = q.get(timeout=10 / pkt_rate)\n q.task_done()\n self._skt.sendall(bytes(pkt))\n except queue.Empty:\n # No packet was in the queue despite waiting significantly longer than the\n # packet arrival rate. Delay reason unknown, but the thread should still run\n # because PacketClient.get() is designed to block until a packet is available.\n # The thread should only be taken down if:\n # * PacketServer.stop() is called;\n # * the socket is broken/closed. (Ex: PacketClient.stop() called.)\n pass\n except OSError:\n # Something went wrong with sendall(). 
Whatever the cause, the socket is no\n # longer in a useable state -> end connection.\n skt_alive = False\n\n def _cleanup(self): # cleanup connection + shared resources\n with self._srvr._q_lck:\n self._srvr._thread.pop(self._cid, None)\n\n q = self._srvr._pkt_q.pop(self._cid, None)\n if q is not None:\n try:\n while True:\n q.get(block=False)\n q.task_done()\n except queue.Empty:\n pass\n self._skt.close()\n\n class _NetworkListener(threading.Thread):\n def __init__(self, srvr: \"PacketServer\"):\n super().__init__()\n self._srvr = srvr\n\n def run(self):\n with socket.socket() as skt:\n skt.bind((\"\", self._srvr._port))\n skt.listen(5)\n\n # Activate non-blocking mode. (Required to avoid socket.accept() freeze.)\n # A conservative timeout period is chosen to avoid wasting resources here.\n pkt_size, *_ = self._srvr.dtype[\"data\"].shape # smpl/pkt\n pkt_rate = self._srvr.sample_rate / pkt_size # pkt/s\n skt.settimeout(20 / pkt_rate)\n\n while self._srvr.active():\n try:\n conn, addr = skt.accept()\n with self._srvr._q_lck:\n cid = next(self._srvr._cid)\n t = PacketServer._NetworkStreamer(self._srvr, conn, cid)\n\n self._srvr._pkt_q[cid] = queue.Queue()\n self._srvr._thread[cid] = t\n t.start()\n except OSError:\n pass # skt.accept() just timed-out -> not a problem.\n\n\nclass PacketClient(ati.PacketStream):\n \"\"\"\n Get packets from a PacketServer.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\"\n Parameters\n ----------\n host: str\n PacketServer hostname/IP-address.\n port: int\n PacketServer port.\n \"\"\"\n super().__init__(dtype=None, sample_rate=None) # not yet known\n self._host = host\n self._port = port\n\n self._thread = None\n self._pkt_q = queue.Queue()\n self._active = threading.Event() # thread start/stop synchronization\n\n def start(self):\n if self.active():\n pass\n else:\n conn = socket.socket()\n conn.connect((self._host, self._port))\n\n self._active.set()\n self._thread = PacketClient._NetworkStreamer(self, conn)\n self._thread.start()\n\n # PacketStream interface constraint: stall until dtype/sample_rate known\n while any([self.dtype is None, self.sample_rate is None]):\n time.sleep(5e-3)\n\n def stop(self):\n self._active.clear() # gracefully terminate acquisition\n\n def get(self) -> np.ndarray:\n return self._pkt_q.get()\n\n def clear(self):\n try:\n while True:\n self._pkt_q.get(block=False)\n self._pkt_q.task_done()\n except queue.Empty:\n pass\n\n def __len__(self) -> int:\n return self._pkt_q.qsize()\n\n def active(self) -> bool:\n return self._active.is_set()\n\n class _NetworkStreamer(threading.Thread):\n def __init__(self, client: \"PacketClient\", skt: socket.socket):\n super().__init__()\n self._client = client\n self._skt = skt\n\n def run(self):\n try:\n self._decode_header()\n self._stream_audio()\n except Exception:\n pass\n finally:\n self._cleanup()\n\n def _decode_header(self):\n data = b\"\"\n N_left = lambda: 4 - len(data)\n while N_left() > 0:\n data += self._skt.recv(N_left())\n N_byte = int.from_bytes(data, byteorder=\"big\", signed=False)\n\n data = b\"\"\n N_left = lambda: N_byte - len(data)\n while N_left() > 0:\n data += self._skt.recv(N_left())\n metadata = pickle.loads(data)\n\n # Set stream properties\n self._client._dtype = atui.descr_to_dtype(metadata[\"dtype_descr\"])\n self._client._sample_rate = metadata[\"sample_rate\"]\n\n def _stream_audio(self):\n skt_alive = True\n while self._client.active() and skt_alive:\n pkt = self._channel2pkt()\n if pkt is not None:\n self._client._pkt_q.put(pkt.copy())\n else:\n # 
connection closed -> stop acquisition\n skt_alive = False\n\n def _channel2pkt(self) -> np.ndarray:\n # Obtain a full packet from the channel, or None if infeasible.\n data = b\"\"\n N_left = lambda: self._client.dtype.itemsize - len(data)\n while True:\n d = self._skt.recv(N_left())\n data += d\n if N_left() == 0:\n pkt = np.frombuffer(data, dtype=self._client.dtype)\n return pkt\n elif len(d) == 0:\n return None\n\n def _cleanup(self):\n self._skt.close()\n\n # reset state to that after PacketClient.__init__().\n self._client._active.clear()\n self._client._dtype = None\n self._client._sample_rate = None\n self._client._thread = None\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spsancti/albumentations
[ "dd0c5dbf1b28d7e78f1fbd93139089e04144cfd3" ]
[ "albumentations/augmentations/bbox_utils.py" ]
[ "from __future__ import division\nfrom albumentations.core.utils import DataProcessor\n\nimport numpy as np\n\n__all__ = [\n \"normalize_bbox\",\n \"denormalize_bbox\",\n \"normalize_bboxes\",\n \"denormalize_bboxes\",\n \"calculate_bbox_area\",\n \"filter_bboxes_by_visibility\",\n \"convert_bbox_to_albumentations\",\n \"convert_bbox_from_albumentations\",\n \"convert_bboxes_to_albumentations\",\n \"convert_bboxes_from_albumentations\",\n \"BboxProcessor\",\n]\n\n\nclass BboxProcessor(DataProcessor):\n @property\n def default_data_name(self):\n return \"bboxes\"\n\n def ensure_data_valid(self, data):\n for data_name in self.data_fields:\n data_exists = data_name in data and len(data[data_name])\n if data_exists and len(data[data_name][0]) < 5:\n if self.params.label_fields is None:\n raise ValueError(\n \"Please specify 'label_fields' in 'bbox_params' or add labels to the end of bbox \"\n \"because bboxes must have labels\"\n )\n if self.params.label_fields:\n if not all(i in data.keys() for i in self.params.label_fields):\n raise ValueError(\"Your 'label_fields' are not valid - them must have same names as params in dict\")\n\n def filter(self, data, rows, cols):\n return filter_bboxes(\n data, rows, cols, min_area=self.params.min_area, min_visibility=self.params.min_visibility\n )\n\n def check(self, data, rows, cols):\n return check_bboxes(data)\n\n def convert_from_albumentations(self, data, rows, cols):\n return convert_bboxes_from_albumentations(data, self.params.format, rows, cols, check_validity=True)\n\n def convert_to_albumentations(self, data, rows, cols):\n return convert_bboxes_to_albumentations(data, self.params.format, rows, cols, check_validity=True)\n\n\ndef normalize_bbox(bbox, rows, cols):\n \"\"\"Normalize coordinates of a bounding box. Divide x-coordinates by image width and y-coordinates\n by image height.\n\n Args:\n bbox (tuple): Denormalized bounding box `(x_min, y_min, x_max, y_max)`.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n tuple: Normalized bounding box `(x_min, y_min, x_max, y_max)`.\n\n Raises:\n ValueError: If rows or cols is less or equal zero\n\n \"\"\"\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n\n if rows <= 0:\n raise ValueError(\"Argument rows must be positive integer\")\n if cols <= 0:\n raise ValueError(\"Argument cols must be positive integer\")\n\n x_min, x_max = x_min / cols, x_max / cols\n y_min, y_max = y_min / rows, y_max / rows\n\n return (x_min, y_min, x_max, y_max) + tail\n\n\ndef denormalize_bbox(bbox, rows, cols):\n \"\"\"Denormalize coordinates of a bounding box. Multiply x-coordinates by image width and y-coordinates\n by image height. 
This is an inverse operation for :func:`~albumentations.augmentations.bbox.normalize_bbox`.\n\n Args:\n bbox (tuple): Normalized bounding box `(x_min, y_min, x_max, y_max)`.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n tuple: Denormalized bounding box `(x_min, y_min, x_max, y_max)`.\n\n Raises:\n ValueError: If rows or cols is less or equal zero\n\n \"\"\"\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n\n if rows <= 0:\n raise ValueError(\"Argument rows must be positive integer\")\n if cols <= 0:\n raise ValueError(\"Argument cols must be positive integer\")\n\n x_min, x_max = x_min * cols, x_max * cols\n y_min, y_max = y_min * rows, y_max * rows\n\n return (x_min, y_min, x_max, y_max) + tail\n\n\ndef normalize_bboxes(bboxes, rows, cols):\n \"\"\"Normalize a list of bounding boxes.\n\n Args:\n bboxes (List[tuple]): Denormalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n List[tuple]: Normalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n\n \"\"\"\n return [normalize_bbox(bbox, rows, cols) for bbox in bboxes]\n\n\ndef denormalize_bboxes(bboxes, rows, cols):\n \"\"\"Denormalize a list of bounding boxes.\n\n Args:\n bboxes (List[tuple]): Normalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n List[tuple]: Denormalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n\n \"\"\"\n return [denormalize_bbox(bbox, rows, cols) for bbox in bboxes]\n\n\ndef calculate_bbox_area(bbox, rows, cols):\n \"\"\"Calculate the area of a bounding box in pixels.\n\n Args:\n bbox (tuple): A bounding box `(x_min, y_min, x_max, y_max)`.\n rows (int): Image height.\n cols (int): Image width.\n\n Return:\n int: Area of a bounding box in pixels.\n\n \"\"\"\n bbox = denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox[:4]\n area = (x_max - x_min) * (y_max - y_min)\n return area\n\n\ndef filter_bboxes_by_visibility(\n original_shape, bboxes, transformed_shape, transformed_bboxes, threshold=0.0, min_area=0.0\n):\n \"\"\"Filter bounding boxes and return only those boxes whose visibility after transformation is above\n the threshold and minimal area of bounding box in pixels is more then min_area.\n\n Args:\n original_shape (tuple): Original image shape `(height, width)`.\n bboxes (List[tuple]): Original bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n transformed_shape (tuple): Transformed image shape `(height, width)`.\n transformed_bboxes (List[tuple]): Transformed bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n threshold (float): visibility threshold. 
Should be a value in the range [0.0, 1.0].\n min_area (float): Minimal area threshold.\n\n Returns:\n List[tuple]: Filtered bounding boxes `[(x_min, y_min, x_max, y_max)]`.\n\n \"\"\"\n img_height, img_width = original_shape[:2]\n transformed_img_height, transformed_img_width = transformed_shape[:2]\n\n visible_bboxes = []\n for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):\n if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):\n continue\n bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)\n if transformed_bbox_area < min_area:\n continue\n visibility = transformed_bbox_area / bbox_area\n if visibility >= threshold:\n visible_bboxes.append(transformed_bbox)\n return visible_bboxes\n\n\ndef convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity=False):\n \"\"\"Convert a bounding box from a format specified in `source_format` to the format used by albumentations:\n normalized coordinates of top-left and bottom-right corners of the bounding box in a form of\n `(x_min, y_min, x_max, y_max)` e.g. `(0.15, 0.27, 0.67, 0.5)`.\n\n Args:\n bbox (tuple): A bounding box tuple.\n source_format (str): format of the bounding box. Should be 'coco', 'pascal_voc', or 'yolo'.\n check_validity (bool): Check if all boxes are valid boxes.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n tuple: A bounding box `(x_min, y_min, x_max, y_max)`.\n\n Note:\n The `coco` format of a bounding box looks like `(x_min, y_min, width, height)`, e.g. (97, 12, 150, 200).\n The `pascal_voc` format of a bounding box looks like `(x_min, y_min, x_max, y_max)`, e.g. (97, 12, 247, 212).\n The `yolo` format of a bounding box looks like `(x, y, width, height)`, e.g. (0.3, 0.1, 0.05, 0.07);\n where `x`, `y` coordinates of the center of the box, all values normalized to 1 by image height and width.\n\n Raises:\n ValueError: if `target_format` is not equal to `coco` or `pascal_voc`, ot `yolo`.\n ValueError: If in YOLO format all labels not in range (0, 1).\n\n \"\"\"\n if source_format not in {\"coco\", \"pascal_voc\", \"yolo\"}:\n raise ValueError(\n \"Unknown source_format {}. 
Supported formats are: 'coco', 'pascal_voc' and 'yolo'\".format(source_format)\n )\n if isinstance(bbox, np.ndarray):\n bbox = bbox.tolist()\n\n if source_format == \"coco\":\n (x_min, y_min, width, height), tail = bbox[:4], tuple(bbox[4:])\n x_max = x_min + width\n y_max = y_min + height\n elif source_format == \"yolo\":\n # https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/scripts/voc_label.py#L12\n bbox, tail = bbox[:4], tuple(bbox[4:])\n _bbox = np.array(bbox[:4])\n if check_validity and np.any((_bbox <= 0) | (_bbox > 1)):\n raise ValueError(\"In YOLO format all coordinates must be float and in range (0, 1]\")\n\n x, y, w, h = bbox\n\n w_half, h_half = w / 2, h / 2\n x_min = x - w_half\n y_min = y - h_half\n x_max = x_min + w\n y_max = y_min + h\n else:\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n\n bbox = (x_min, y_min, x_max, y_max) + tail\n\n if source_format != \"yolo\":\n bbox = normalize_bbox(bbox, rows, cols)\n if check_validity:\n check_bbox(bbox)\n return bbox\n\n\ndef convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity=False):\n \"\"\"Convert a bounding box from the format used by albumentations to a format, specified in `target_format`.\n\n Args:\n bbox (tuple): An albumentation bounding box `(x_min, y_min, x_max, y_max)`.\n target_format (str): required format of the output bounding box. Should be 'coco', 'pascal_voc' or 'yolo'.\n rows (int): Image height.\n cols (int): Image width.\n check_validity (bool): Check if all boxes are valid boxes.\n\n Returns:\n tuple: A bounding box.\n\n Note:\n The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].\n The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].\n The `yolo` format of a bounding box looks like `[x, y, width, height]`, e.g. [0.3, 0.1, 0.05, 0.07].\n\n Raises:\n ValueError: if `target_format` is not equal to `coco`, `pascal_voc` or `yolo`.\n\n \"\"\"\n if target_format not in {\"coco\", \"pascal_voc\", \"yolo\"}:\n raise ValueError(\n \"Unknown target_format {}. Supported formats are: 'coco', 'pascal_voc' and 'yolo'\".format(target_format)\n )\n if check_validity:\n check_bbox(bbox)\n\n if target_format != \"yolo\":\n bbox = denormalize_bbox(bbox, rows, cols)\n if target_format == \"coco\":\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n width = x_max - x_min\n height = y_max - y_min\n bbox = (x_min, y_min, width, height) + tail\n elif target_format == \"yolo\":\n (x_min, y_min, x_max, y_max), tail = bbox[:4], bbox[4:]\n x = (x_min + x_max) / 2.0\n y = (y_min + y_max) / 2.0\n w = x_max - x_min\n h = y_max - y_min\n bbox = (x, y, w, h) + tail\n return bbox\n\n\ndef convert_bboxes_to_albumentations(bboxes, source_format, rows, cols, check_validity=False):\n \"\"\"Convert a list bounding boxes from a format specified in `source_format` to the format used by albumentations\"\"\"\n return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]\n\n\ndef convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):\n \"\"\"Convert a list of bounding boxes from the format used by albumentations to a format, specified\n in `target_format`.\n\n Args:\n bboxes (List[tuple]): List of albumentation bounding box `(x_min, y_min, x_max, y_max)`.\n target_format (str): required format of the output bounding box. 
Should be 'coco', 'pascal_voc' or 'yolo'.\n rows (int): Image height.\n cols (int): Image width.\n check_validity (bool): Check if all boxes are valid boxes.\n\n Returns:\n list[tuple]: List of bounding box.\n\n \"\"\"\n return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]\n\n\ndef check_bbox(bbox):\n \"\"\"Check if bbox boundaries are in range 0, 1 and minimums are lesser then maximums\"\"\"\n for name, value in zip([\"x_min\", \"y_min\", \"x_max\", \"y_max\"], bbox[:4]):\n if not 0 <= value <= 1 and not np.isclose(value, 0) and not np.isclose(value, 1):\n raise ValueError(\n \"Expected {name} for bbox {bbox} \"\n \"to be in the range [0.0, 1.0], got {value}.\".format(bbox=bbox, name=name, value=value)\n )\n x_min, y_min, x_max, y_max = bbox[:4]\n if x_max <= x_min:\n raise ValueError(\"x_max is less than or equal to x_min for bbox {bbox}.\".format(bbox=bbox))\n if y_max <= y_min:\n raise ValueError(\"y_max is less than or equal to y_min for bbox {bbox}.\".format(bbox=bbox))\n\n\ndef check_bboxes(bboxes):\n \"\"\"Check if bboxes boundaries are in range 0, 1 and minimums are lesser then maximums\"\"\"\n for bbox in bboxes:\n check_bbox(bbox)\n\n\ndef filter_bboxes(bboxes, rows, cols, min_area=0.0, min_visibility=0.0):\n \"\"\"Remove bounding boxes that either lie outside of the visible area by more then min_visibility\n or whose area in pixels is under the threshold set by `min_area`. Also it crops boxes to final image size.\n\n Args:\n bboxes (List[tuple]): List of albumentation bounding box `(x_min, y_min, x_max, y_max)`.\n rows (int): Image height.\n cols (int): Image width.\n min_area (float): Minimum area of a bounding box. All bounding boxes whose visible area in pixels.\n is less than this value will be removed. Default: 0.0.\n min_visibility (float): Minimum fraction of area for a bounding box to remain this box in list. Default: 0.0.\n\n Returns:\n List[tuple]: List of bounding box.\n\n \"\"\"\n resulting_boxes = []\n for bbox in bboxes:\n transformed_box_area = calculate_bbox_area(bbox, rows, cols)\n bbox, tail = tuple(np.clip(bbox[:4], 0, 1.0)), tuple(bbox[4:])\n clipped_box_area = calculate_bbox_area(bbox, rows, cols)\n if not transformed_box_area or clipped_box_area / transformed_box_area <= min_visibility:\n continue\n else:\n bbox = tuple(np.clip(bbox[:4], 0, 1.0))\n if calculate_bbox_area(bbox, rows, cols) <= min_area:\n continue\n resulting_boxes.append(bbox + tail)\n return resulting_boxes\n\n\ndef union_of_bboxes(height, width, bboxes, erosion_rate=0.0):\n \"\"\"Calculate union of bounding boxes.\n\n Args:\n height (float): Height of image or space.\n width (float): Width of image or space.\n bboxes (List[tuple]): List like bounding boxes. Format is `[(x_min, y_min, x_max, y_max)]`.\n erosion_rate (float): How much each bounding box can be shrinked, useful for erosive cropping.\n Set this in range [0, 1]. 0 will not be erosive at all, 1.0 can make any bbox to lose its volume.\n\n Returns:\n tuple: A bounding box `(x_min, y_min, x_max, y_max)`.\n\n \"\"\"\n x1, y1 = width, height\n x2, y2 = 0, 0\n for bbox in bboxes:\n x_min, y_min, x_max, y_max = bbox[:4]\n w, h = x_max - x_min, y_max - y_min\n lim_x1, lim_y1 = x_min + erosion_rate * w, y_min + erosion_rate * h\n lim_x2, lim_y2 = x_max - erosion_rate * w, y_max - erosion_rate * h\n x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])\n x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])\n return x1, y1, x2, y2\n" ]
[ [ "numpy.min", "numpy.clip", "numpy.max", "numpy.any", "numpy.array", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rafaelols/airflow
[ "8e4af5fb576a9568af476c0607819649b724adea" ]
[ "dags/treinos_igti/treino05.py" ]
[ "from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport random\nimport zipfile\nimport os\nimport psycopg2\nfrom sqlalchemy import create_engine\nimport json\n\n\n\n# ctes\ndata_path = '/usr/local/airflow/data/microdados_enade_2019/2019/3.DADOS/'\narquivo = data_path + 'microdados_enade_2019.txt'\n\n# Default args definition\ndefault_args = {\n 'owner': 'Rafael',\n 'depends_on_past': False,\n 'start_date': datetime(2020, 12, 1, 21, 50),\n 'email': ['[email protected]', '[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n# 'retries': 1,\n# 'Retry_delay': timedelta(minutes=1)\n}\n\n# Dag definition\ndag = DAG(\n 'treino-05',\n description=\"Escrever em DW\",\n default_args = default_args,\n schedule_interval=None\n)\n\nstart_preprocessing = BashOperator(\n task_id='start_preprocessing',\n bash_command='echo \"Starting Preprocessing\"',\n dag=dag\n)\n\nbash_command = \"\"\"\nif ! [ -f /usr/local/airflow/data/microdados_enade_2019.zip ]; then \n curl http://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip -o /usr/local/airflow/data/microdados_enade_2019.zip\nfi\n\"\"\"\nget_data = BashOperator(\n task_id='get-data',\n bash_command=bash_command,\n dag=dag\n)\n\ndef unzip_file():\n if not os.path.exists('/usr/local/airflow/data/microdados_enade_2019/'):\n with zipfile.ZipFile('/usr/local/airflow/data/microdados_enade_2019.zip', 'r') as zipped:\n zipped.extractall('/usr/local/airflow/data')\n\nunzip_data = PythonOperator(\n task_id='unzip_data',\n python_callable=unzip_file,\n dag=dag\n)\n\ndef aplica_filtros():\n cols = ['CO_GRUPO', 'TP_SEXO', 'NU_IDADE', 'NT_GER', 'NT_FG', 'NT_CE',\n 'QE_I01', 'QE_I02', 'QE_I04', 'QE_I05', 'QE_I08']\n enade = pd.read_csv(arquivo, sep=';', decimal=',', usecols=cols)\n enade = enade.loc[\n (enade.NU_IDADE > 20) &\n (enade.NU_IDADE < 40) &\n (enade.NT_GER > 0)\n ]\n enade.to_csv(data_path + 'enade_filtrado.csv', index=False)\n\ntask_aplica_filtro = PythonOperator(\n task_id='aplica_filtro',\n python_callable=aplica_filtros,\n dag=dag\n)\n\n# Idade centralizada na média\n# Idade centralizada ao quadrado\n\ndef constroi_idade_centralizada():\n idade = pd.read_csv(data_path + 'enade_filtrado.csv', usecols=['NU_IDADE'])\n idade['idadecent'] = idade.NU_IDADE - idade.NU_IDADE.mean()\n idade[['idadecent']].to_csv(data_path + 'idadecent.csv', index=False)\n\ndef constroi_idade_cent_quad():\n idadecent = pd.read_csv(data_path + 'idadecent.csv')\n idadecent['idade2'] = idadecent.idadecent ** 2\n idadecent[['idade2']].to_csv(data_path + 'idadequadrado.csv', index=False)\n\ntask_idade_cent = PythonOperator(\n task_id='constroi_idade_centralizada',\n python_callable=constroi_idade_centralizada,\n dag=dag\n)\n\ntask_idade_quad = PythonOperator(\n task_id='constroi_idade_ao_quadrado',\n python_callable=constroi_idade_cent_quad,\n dag=dag\n)\n\ndef constroi_est_civil():\n filtro = pd.read_csv(data_path + 'enade_filtrado.csv', usecols=['QE_I01'])\n filtro['estcivil'] = filtro.QE_I01.replace({\n 'A': 'Solteiro',\n 'B': 'Casado',\n 'C': 'Separado',\n 'D': 'Viuvo',\n 'E': 'Outro'\n })\n filtro[['estcivil']].to_csv(data_path + 'estcivil.csv', index=False)\n\ntask_est_civil = PythonOperator(\n task_id = 'constroi_est_civil',\n python_callable=constroi_est_civil,\n dag=dag\n)\n\ndef constroi_cor():\n filtro = pd.read_csv(data_path + 'enade_filtrado.csv', 
usecols=['QE_I02'])\n filtro['cor'] = filtro.QE_I02.replace({\n 'A': 'Branca',\n 'B': 'Preta',\n 'C': 'Amarela',\n 'D': 'Parda',\n 'E': 'Indígena',\n 'F': '',\n ' ': ''\n })\n filtro[['cor']].to_csv(data_path + 'cor.csv', index=False)\n\n\ntask_cor = PythonOperator(\n task_id='constroi_cor_da_pele',\n python_callable=constroi_cor,\n dag=dag\n)\n\n# Task de JOIN\ndef join_data():\n filtro = pd.read_csv(data_path + 'enade_filtrado.csv')\n idadecent = pd.read_csv(data_path + 'idadecent.csv')\n idadeaoquadrado = pd.read_csv(data_path + 'idadequadrado.csv')\n estcivil = pd.read_csv(data_path + 'estcivil.csv')\n cor = pd.read_csv(data_path + 'cor.csv')\n\n final = pd.concat([\n filtro, idadecent, idadeaoquadrado, estcivil, cor\n ], axis=1)\n\n final.to_csv(data_path + 'enade_tratado.csv', index=False)\n print(final)\n\ntask_join = PythonOperator(\n task_id = 'join_data',\n python_callable=join_data,\n dag=dag\n)\n\ndef escreve_dw():\n # vaccess\n with open('/usr/local/airflow/data/vaccess_pg.txt') as access_file:\n vaccess = json.load(access_file)\n\n final = pd.read_csv(data_path + 'enade_tratado.csv')\n conn_str = 'postgresql+psycopg2://'+vaccess['user']+':'+vaccess['password']+'@'+vaccess['host:port']+'/'+vaccess['dbname']\n engine_pg = create_engine(conn_str)\n final.to_sql(\n 'tratado', \n con=engine_pg, \n index=False, \n if_exists='append'\n )\n\n\ntask_escreve_dw = PythonOperator(\n task_id = 'escreve_dw',\n python_callable=escreve_dw,\n dag=dag\n)\n\nstart_preprocessing >> get_data >> unzip_data >> task_aplica_filtro\ntask_aplica_filtro >> [task_idade_cent, task_est_civil, task_cor]\n\ntask_idade_quad.set_upstream(task_idade_cent)\n\ntask_join.set_upstream([\n task_est_civil, task_cor, task_idade_quad\n])\n\ntask_join >> task_escreve_dw" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Jacarlianda/FinMind
[ "181e478727c7cda498da2b42495e2a6fea9688e3" ]
[ "BackTesting/demo.py" ]
[ "import pandas as pd\nimport requests\n\n\n# class name,必須跟檔案名一致,例如 class demo,檔名也是 demo.py\nclass demo:\n def __init__(self,\n stock_price,\n **kwargs, ):\n # -------------------------------------------------------------------\n # 此區塊請勿更動\n stock_price = stock_price.sort_values('date')\n # 股價\n self.stock_price = stock_price\n # 融資融券\n self.MarginPurchaseShortSale = kwargs.get(\"MarginPurchaseShortSale\", pd.DataFrame())\n # 三大法人買賣\n self.InstitutionalInvestorsBuySell = kwargs.get(\"InstitutionalInvestorsBuySell\", pd.DataFrame())\n # 外資持股\n self.Shareholding = kwargs.get(\"Shareholding\", pd.DataFrame())\n # 此區塊請勿更動\n # -------------------------------------------------------------------\n\n def trade(self, date):\n ''' \n 此區塊,可進行資料處理、做技術指標,寫自己的策略,\n 寫你自己的策略, 必須 return : 1 (買) or -1 (賣) or 0 (不操作)\n 根據時間date,回傳當下要進行什麼操作 ( 買/賣/不操作 )\n '''\n # example\n from random import randint\n\n x = randint(1, 10)\n x = x % 3\n if x == 1:\n return 1\n elif x == 2:\n return -1\n elif x == 0:\n return 0\n\n\ndef test():\n '''\n 測試\n '''\n stock_id = '2330'\n date = '2018-01-01'\n\n url = 'http://finmindapi.servebeer.com/api/data'\n form_data = {'dataset': 'TaiwanStockPrice',\n 'stock_id': stock_id,\n 'date': date}\n\n res = requests.post(url, verify=True, data=form_data)\n\n temp = res.json()\n stock_price = pd.DataFrame(temp['data'])\n\n form_data = {'dataset': 'TaiwanStockMarginPurchaseShortSale',\n 'stock_id': stock_id,\n 'date': date}\n res = requests.post(\n url, verify=True,\n data=form_data)\n\n temp = res.json()\n MarginPurchaseShortSale = pd.DataFrame(temp['data'])\n\n form_data = {'dataset': 'InstitutionalInvestorsBuySell',\n 'stock_id': stock_id,\n 'date': date}\n res = requests.post(\n url, verify=True,\n data=form_data)\n\n temp = res.json()\n InstitutionalInvestorsBuySell = pd.DataFrame(temp['data'])\n\n form_data = {'dataset': 'Shareholding',\n 'stock_id': stock_id,\n 'date': date}\n res = requests.post(\n url, verify=True,\n data=form_data)\n\n temp = res.json()\n Shareholding = pd.DataFrame(temp['data'])\n\n self = demo(\n stock_price=stock_price,\n MarginPurchaseShortSale=MarginPurchaseShortSale,\n InstitutionalInvestorsBuySell=InstitutionalInvestorsBuySell,\n Shareholding=Shareholding, )\n\n self.trade('2019-05-03')\n self.trade('2019-05-05')\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
titu1994/tf-eager-examples
[ "c95a02a96fab794331afa49a1d0ce684fb3340b8" ]
[ "scripts/04_02_cnn_block.py" ]
[ "import os\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import mnist\nfrom tensorflow.contrib.eager.python import tfe\n\n# enable eager mode\ntf.enable_eager_execution()\ntf.set_random_seed(0)\nnp.random.seed(0)\n\nif not os.path.exists('weights/'):\n os.makedirs('weights/')\n\n# constants\nbatch_size = 128\nepochs = 8\nnum_classes = 10\n\n# dataset loading\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = x_train.reshape((-1, 28, 28, 1))\nx_test = x_test.reshape((-1, 28, 28, 1))\n\n# one hot encode the labels. convert back to numpy as we cannot use a combination of numpy\n# and tensors as input to keras\ny_train_ohe = tf.one_hot(y_train, depth=num_classes).numpy()\ny_test_ohe = tf.one_hot(y_test, depth=num_classes).numpy()\n\nprint('x train', x_train.shape)\nprint('y train', y_train_ohe.shape)\nprint('x test', x_test.shape)\nprint('y test', y_test_ohe.shape)\n\n\nclass ConvBnReluBlock(tf.keras.Model):\n def __init__(self, filters, kernel, strides):\n super(ConvBnReluBlock, self).__init__()\n self.cnn = tf.keras.layers.Conv2D(filters, (kernel, kernel), strides=(strides, strides), kernel_initializer='he_normal')\n self.bn = tf.keras.layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n x = self.cnn(inputs)\n x = self.bn(x)\n x = tf.nn.relu(x)\n return x\n\n\nclass CNN(tf.keras.Model):\n def __init__(self, num_classes):\n super(CNN, self).__init__()\n self.block1 = ConvBnReluBlock(16, kernel=5, strides=2)\n self.block2 = ConvBnReluBlock(32, kernel=5, strides=2)\n self.pool = tf.keras.layers.GlobalAveragePooling2D()\n self.classifier = tf.keras.layers.Dense(num_classes)\n\n def call(self, inputs, training=None, mask=None):\n x = self.block1(inputs)\n x = self.block2(x)\n x = self.pool(x)\n output = self.classifier(x)\n\n # softmax op does not exist on the gpu, so always use cpu\n with tf.device('/cpu:0'):\n output = tf.nn.softmax(output)\n\n return output\n\n\ndevice = '/cpu:0' if tfe.num_gpus() == 0 else '/gpu:0'\n\nwith tf.device(device):\n # build model and optimizer\n model = CNN(num_classes)\n model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # suggested fix ; can be incorporated inside `_eager_set_inputs` or `_set_input`\n # Fix = Use exactly one sample from the provided input dataset to determine input/output shape/s for the model\n dummy_x = tf.zeros((1, 28, 28, 1))\n model._set_inputs(dummy_x)\n\n # train\n model.fit(x_train, y_train_ohe, batch_size=batch_size, epochs=epochs,\n validation_data=(x_test, y_test_ohe), verbose=1)\n\n # evaluate on test set\n scores = model.evaluate(x_test, y_test_ohe, batch_size, verbose=1)\n print(\"Final test loss and accuracy :\", scores)\n\n saver = tfe.Saver(model.variables)\n saver.save('weights/04_02_cnn/weights.ckpt')" ]
[ [ "tensorflow.nn.relu", "tensorflow.device", "tensorflow.enable_eager_execution", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.nn.softmax", "numpy.random.seed", "tensorflow.zeros", "tensorflow.contrib.eager.python.tfe.Saver", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.one_hot", "tensorflow.train.AdamOptimizer", "tensorflow.set_random_seed", "tensorflow.python.keras.datasets.mnist.load_data", "tensorflow.contrib.eager.python.tfe.num_gpus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
bme-chatbots/dialogue-generation
[ "4106f41d6841bcbca8555a20c0977b93d7848249" ]
[ "src/train.py" ]
[ "\"\"\"\n@author: Patrik Purgai\n@copyright: Copyright 2019, dialogue-generation\n@license: MIT\n@email: [email protected]\n@date: 2019.07.12.\n\"\"\"\n\n# pylint: disable=import-error\n# pylint: disable=no-name-in-module\n# pylint: disable=no-member\n# pylint: disable=not-callable\n# pylint: disable=used-before-assignment\n \nimport sys\nimport json\nimport torch\nimport random\nimport argparse\nimport logging\nimport os\n\nimport numpy as np\n\nfrom contextlib import contextmanager\nfrom tabulate import tabulate\nfrom tensorboardX import SummaryWriter\n\nfrom collections import (\n OrderedDict, defaultdict)\n\nfrom math import ceil\nfrom datetime import datetime\nfrom statistics import mean\nfrom functools import partial\n\ntry:\n from apex import amp\n APEX_INSTALLED = True\nexcept ImportError:\n APEX_INSTALLED = False\n\nfrom torch.nn.functional import (\n softmax, log_softmax,\n nll_loss, cross_entropy)\n\nfrom torch.distributed import (\n all_reduce, ReduceOp, barrier)\n\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.nn.parallel import (\n DistributedDataParallel)\n\nfrom transformers import AdamW\n\nfrom os.path import (\n exists, join,\n abspath, dirname)\n\n# HACK to enable launching with\n# python src/train.py\nPROJECT_PATH = join(abspath(dirname(__file__)), '..')\nif PROJECT_PATH not in sys.path:\n sys.path.append(PROJECT_PATH)\n\nfrom src.data import (\n create_dataset,\n setup_data_args,\n create_dummy_batch)\n\nfrom src.model import (\n compute_size,\n create_model,\n setup_model_args)\n\n \ndef setup_train_args():\n \"\"\"\n Sets up the training arguments.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--config',\n type=str,\n default=None,\n help='Path of the config file.')\n parser.add_argument(\n '--max_epochs',\n type=int,\n default=25,\n help='Maximum number of epochs for training.')\n parser.add_argument(\n '--no_cuda',\n action='store_true',\n help='Device for training.')\n # TODO XLNet produces NaN with apex\n parser.add_argument(\n '--fp16',\n action='store_true',\n help='Use fp16 precision training.')\n parser.add_argument(\n '--lr',\n type=float,\n default=1e-5,\n help='Learning rate for the model.')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=64,\n help='Batch size during training.')\n parser.add_argument(\n '--patience',\n type=int,\n default=5,\n help='Number of patience epochs before termination.')\n parser.add_argument(\n '--schedule',\n type=str,\n default='noam',\n choices=['noam', 'noamwd'],\n help='Type of learning rate scheduling.')\n parser.add_argument(\n '--warmup_steps',\n type=int,\n default=16000,\n help='Number of warmup steps.')\n parser.add_argument(\n '--total_steps',\n type=int,\n default=1000000,\n help='Number of optimization steps.')\n parser.add_argument(\n '--grad_accum_steps',\n type=int,\n default=2,\n help='Number of steps for grad accum.')\n parser.add_argument(\n '--local_rank',\n type=int,\n default=-1,\n help='Local rank for the script.')\n parser.add_argument(\n '--notebook',\n action='store_true',\n help='Set true if you are using IPython notebook.')\n parser.add_argument(\n '--clip_grad',\n type=float,\n default=None,\n help='Gradient clipping norm value.')\n parser.add_argument(\n '--seed',\n type=int,\n default=None,\n help='Random seed for training.')\n\n setup_data_args(parser)\n setup_model_args(parser)\n\n return parser.parse_args()\n\n\ndef set_random_seed(args):\n \"\"\"\n Sets the random seed for training.\n \"\"\"\n 
torch.manual_seed(args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n if args.cuda:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef load_state(\n model_dir, model, optimizer, logger, device):\n \"\"\"\n Loads the model and optimizer state.\n \"\"\"\n try:\n model_path = join(model_dir, 'last.pt')\n state_dict = torch.load(\n model_path, map_location=device)\n\n model.load_state_dict(state_dict['model'])\n optimizer.load_state_dict(state_dict['optimizer'])\n\n logger.info('Loading model from {}'.format(\n model_path))\n\n return (\n state_dict['best_valid_loss'],\n state_dict['epoch'],\n state_dict['step']\n )\n\n except FileNotFoundError:\n return np.inf, 0, 0\n\n\ndef create_logger(model_dir):\n \"\"\"\n Creates a logger that outputs information to a\n file and the standard output as well.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s - %(message)s')\n\n # setting up logging to a file\n log_path = join(model_dir, 'training.log')\n file_handler = logging.FileHandler(\n filename=log_path)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger\n\n\ndef create_optimizer(args, parameters):\n \"\"\"\n Creates an adam optimizer.\n \"\"\"\n optimizer = AdamW(\n lr=args.lr,\n params=parameters,\n weight_decay=0.01)\n\n return optimizer\n\n\n# implementation is from DialoGPT repo\ndef noam_decay(step, warmup_steps, d_model):\n \"\"\"\n Learning rate schedule described in\n https://arxiv.org/pdf/1706.03762.pdf.\n \"\"\"\n return (\n d_model ** (-0.5) * min(step ** (-0.5), \n step * warmup_steps**(-1.5)))\n\n\n# implementation is from DialoGPT repo\ndef noamwd_decay(\n step, warmup_steps, d_model, rate=0.5,\n decay_steps=1000, start_step=500):\n \"\"\"\n Learning rate schedule optimized for huge batches.\n \"\"\"\n rate_exp = max(step - start_step + decay_steps, 0) \\\n // decay_steps\n\n return (\n d_model ** (-0.5) * min(step ** (-0.5), \n step * warmup_steps ** (-1.5)) *\n rate ** (rate_exp))\n\n\n# implementation is from DialoGPT repo\ndef set_lr(step, optimizer, schedule, lr,\n warmup_steps, d_model):\n \"\"\"\n Learning rate scheduler that applies either\n noam or noamwd rule.\n \"\"\"\n if schedule == 'noam':\n lr_this_step = lr * 1e4 * \\\n noam_decay(step + 1, warmup_steps, d_model)\n\n elif schedule == 'noamwd':\n lr_this_step = lr * 1e4 * noamwd_decay(\n step + 1, warmup_steps, d_model)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n\n\ndef compute_loss(outputs, targets, ignore_idx):\n \"\"\"\n Computes the loss and accuracy.\n \"\"\"\n logits = outputs[0]\n\n logits_view = logits.view(-1, logits.size(-1))\n targets_view = targets.view(-1)\n\n log_probs = log_softmax(logits_view, dim=-1)\n\n loss = nll_loss(\n log_probs, targets_view,\n ignore_index=ignore_idx,\n reduction='sum')\n\n _, preds = log_probs.max(dim=-1)\n\n # computing accuracy without including the\n # values at the ignore indices\n not_ignore = targets_view.ne(ignore_idx)\n num_targets = not_ignore.long().sum().item()\n\n correct = (targets_view == preds) & not_ignore\n correct = correct.float().sum()\n\n acc = correct / num_targets\n loss = loss / num_targets\n\n ppl = torch.exp(loss).item()\n\n return loss, acc, ppl\n\n\ndef main():\n \"\"\"\n Performs training, validation and testing.\n \"\"\"\n args = setup_train_args()\n\n if args.notebook:\n from tqdm 
import tqdm_notebook as tqdm\n else:\n from tqdm import tqdm\n\n # if config is provided, then load it\n if args.config is not None:\n with open(args.config, 'r') as fh:\n config = json.load(fh)\n\n for arg in config:\n setattr(args, arg, config[arg])\n\n args.cuda = torch.cuda.is_available() \\\n and not args.no_cuda\n\n # setting random seed for reproducibility\n if args.seed:\n set_random_seed(args)\n\n model_dir = join(\n args.model_dir, args.model, args.name)\n\n os.makedirs(model_dir, exist_ok=True)\n logger = create_logger(model_dir=model_dir)\n\n if args.fp16 and not APEX_INSTALLED:\n logger.warn(\n '--fp16 passed but apex is not installed.')\n\n args.fp16 = args.fp16 and APEX_INSTALLED \\\n and args.cuda\n\n master_process = args.local_rank in [0, -1]\n args.distributed = args.local_rank != -1\n\n if args.distributed:\n # use distributed training if local rank is given\n # and GPU training is requested\n torch.cuda.set_device(args.local_rank)\n device = torch.device('cuda', args.local_rank)\n\n torch.distributed.init_process_group(\n backend='nccl',\n init_method='env://',\n rank=args.local_rank)\n\n else:\n device = torch.device(\n 'cuda' if args.cuda else 'cpu')\n\n # creating dataset and storing dataset splits\n # as individual variables for convenience\n\n if args.distributed:\n # creating the dataset and model only on\n # a single process ( downloading )\n if master_process:\n _, tokenizer, _ = create_dataset(\n args, master_process)\n\n vocab_size = len(tokenizer)\n\n create_model(args, model_dir, vocab_size)\n\n # other threads are waiting for the data init\n barrier()\n\n datasets, tokenizer, max_len = create_dataset(\n args=args, master_process=master_process)\n\n pad_idx = tokenizer.convert_tokens_to_ids(\n tokenizer.pad_token)\n vocab_size = len(tokenizer)\n\n model = create_model(args, model_dir, vocab_size)\n model = model.to(device)\n\n # TODO fix xlnet nan with mixed precision\n if 'xlnet' in args.model:\n args.fp16 = False\n\n optimizer = create_optimizer(\n args=args, parameters=model.parameters())\n\n if master_process:\n writer = SummaryWriter(\n logdir=model_dir, flush_secs=100)\n\n # loading previous state of the training\n best_valid_loss, init_epoch, step = load_state(\n model_dir=model_dir, model=model,\n optimizer=optimizer, logger=logger,\n device=device)\n\n if args.fp16:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level='O2')\n\n d_model = model.config.d_model if 'xlnet' in \\\n args.model else model.config.n_embd\n\n if args.distributed:\n model = DistributedDataParallel(\n model, device_ids=[args.local_rank],\n output_device=args.local_rank)\n\n world_size = int(os.environ.get('WORLD_SIZE', 1))\n\n train, valid, test = [\n (split, ceil(\n size / args.batch_size / world_size))\n for split, size in datasets]\n\n # computing the sizes of the dataset splits\n train_dataset, num_train_steps = train\n valid_dataset, num_valid_steps = valid\n test_dataset, num_test_steps = test\n\n patience, skip, loss, accuracy = 0, 1, 0, 0\n\n set_lr_fn = partial(\n set_lr, \n optimizer=optimizer, \n schedule=args.schedule, \n lr=args.lr, \n warmup_steps=args.warmup_steps,\n d_model=d_model)\n\n if master_process:\n # loading history for training logs\n history_path = join(model_dir, 'history.json')\n\n history = defaultdict(list)\n\n # NOTE the hardcoded values to keep track of\n # in the history\n metrics = ['loss', 'acc', 'ppl']\n headers = ['epoch'] + \\\n ['train_' + m for m in metrics] + \\\n ['valid_' + m for m in metrics]\n\n if 
exists(history_path):\n with open(history_path, 'r') as fh:\n history = json.load(fh)\n\n def print_results(results):\n \"\"\"\n Prints the history to the standard output.\n \"\"\"\n data = list(zip(*[history[h] for h in headers]))\n\n table = tabulate(\n tabular_data=data,\n headers=headers,\n floatfmt='.3f')\n\n # computing the tabular table string and\n # printing only the last element\n print(table.split('\\n')[-1])\n\n msg = ', '.join(\n '{}: {}'.format(n, r) for\n n, r in results.items())\n\n logger.info(msg)\n\n def record_history(results):\n \"\"\"\n Records the results and prints them.\n \"\"\"\n # saving history and handling unexpected\n # keyboard interrupt\n for header in headers:\n history[header].append(results[header])\n\n while True:\n try:\n with open(history_path, 'w') as fh:\n json.dump(history, fh)\n break\n except KeyboardInterrupt:\n pass\n\n @contextmanager\n def skip_error():\n \"\"\"\n Convenience function for skipping errors.\n \"\"\"\n nonlocal skip\n\n try:\n # checking out of memory error and\n # proceeding if only a single GPU\n # is used for the training\n yield\n\n except RuntimeError as e:\n if 'out of memory' in str(e):\n if args.distributed:\n raise e\n skip += 1\n\n def reduce_tensor(tensor):\n \"\"\"\n Averages a tensor across gpus.\n \"\"\"\n reduced = tensor.clone()\n all_reduce(reduced, op=ReduceOp.SUM)\n reduced /= world_size\n\n return reduced\n\n def forward_step(batch):\n \"\"\"\n Applies forward pass with the given batch.\n \"\"\"\n inputs, targets = batch\n\n outputs = model(inputs, half=args.fp16)\n\n # converting targets from ndarray\n targets = torch.as_tensor(targets)\n targets = targets.long().to(device)\n\n loss, acc, ppl = compute_loss(\n outputs=outputs,\n targets=targets,\n ignore_idx=pad_idx)\n\n if args.distributed:\n # reducing accuracy accross devices\n # for more accurate logging\n acc = reduce_tensor(acc)\n\n return loss, acc.item(), ppl\n\n def train_step(batch):\n \"\"\"\n Performs a single step of training.\n \"\"\"\n nonlocal step, skip\n\n loss, acc, ppl = forward_step(batch)\n\n if torch.isnan(loss).item():\n # during distributed training NaN\n # values are not handled\n if args.distributed:\n raise ValueError(\n 'NaN values encountered.')\n\n logger.debug('skipping step (nan)')\n # returning None values when a NaN loss\n # is encountered and skipping backprop\n # so model grads will not be corrupted\n\n skip += 1\n return None, None\n\n loss /= args.grad_accum_steps\n\n backward(loss)\n\n if args.clip_grad is not None:\n clip_grad_norm(args.clip_grad)\n\n if step % args.grad_accum_steps == 0:\n set_lr_fn(step)\n optimizer.step()\n optimizer.zero_grad()\n\n if args.distributed:\n # reducing loss accross devices for\n # more accurate logging\n loss = reduce_tensor(loss)\n\n step += 1\n\n return {\n 'loss': loss.item(), \n 'acc': acc, \n 'ppl': ppl\n }\n\n def backward(loss):\n \"\"\"\n Backpropagates the loss in either mixed or\n normal precision mode.\n \"\"\"\n # cuda is required for mixed precision training.\n if args.fp16:\n with amp.scale_loss(\n loss, optimizer) as scaled:\n scaled.backward()\n else:\n loss.backward()\n\n def clip_grad_norm(max_norm):\n \"\"\"\n Applies gradient clipping.\n \"\"\"\n if args.fp16:\n clip_grad_norm_(\n amp.master_params(optimizer), max_norm)\n else:\n clip_grad_norm_(model.parameters(), max_norm)\n\n def evaluate(dataset, num_steps):\n \"\"\"\n Constructs a validation loader and evaluates\n the model.\n \"\"\"\n loop = tqdm(\n dataset(), 'eval',\n num_steps, False,\n disable=not 
master_process)\n\n model.eval()\n\n for batch in loop:\n with skip_error():\n loss, accuracy, ppl = forward_step(batch)\n\n loop.set_postfix(OrderedDict(\n loss=loss.item(), ppl=ppl, acc=accuracy))\n\n yield loss.item(), accuracy, ppl\n\n def save_state(name):\n \"\"\"\n Saves the model and optimizer state.\n \"\"\"\n model_path = join(model_dir, name + '.pt')\n\n state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_valid_loss': best_valid_loss,\n 'valid_loss': valid_loss,\n 'epoch': epoch + 1,\n 'step': step\n }\n\n logger.info('Saving model to {}'.format(model_path))\n # making sure the model saving is not left in a\n # corrupted state after a keyboard interrupt\n while True:\n try:\n torch.save(state, model_path)\n break\n except KeyboardInterrupt:\n pass\n\n if master_process:\n train_args = vars(args)\n logger.info(str(train_args))\n\n print()\n print(tabulate(train_args.items(), tablefmt='presto'))\n print()\n\n try:\n # initializing cuda buffer to avoid OOM errors\n dummy_batch = create_dummy_batch(\n args, ignore_idx=pad_idx)\n\n train_step(dummy_batch)\n\n except (RuntimeError, ValueError) as e:\n if 'out of memory' in str(e):\n msg = 'Not enough memory, there might ' + \\\n 'be several out of memory error during ' + \\\n 'training. To avoid this lower ' + \\\n 'the `--batch_size` or `--max_len`'\n\n if not args.grad_ckpt:\n msg += ', use the `--checkpointed` flag'\n\n if not APEX_INSTALLED:\n msg += ' or install apex for fp16 precision'\n\n logger.info(msg + '.')\n\n if args.distributed:\n return\n\n # creating table of history with correctly\n # arranged values for each header\n if master_process:\n table = list(zip(*[history[h] for h in headers]))\n print(tabulate(table, headers, floatfmt='.3f'))\n\n for epoch in range(init_epoch, args.max_epochs):\n # running training loop\n loop = tqdm(\n train_dataset(), 'train {}'.format(epoch),\n num_train_steps, False,\n disable=not master_process)\n\n train_metrics = defaultdict(list)\n\n model.train()\n\n for batch in loop:\n with skip_error():\n results = train_step(batch)\n\n loss = results['loss']\n if master_process and loss is not None:\n # adding the results to history\n # and logging them to tensorboard\n for metric, value in results.items():\n train_metrics[metric].append(value)\n\n if value == float('inf'):\n value = 1e30\n\n writer.add_scalar(\n 'train/' + metric, value, step)\n\n loop.set_postfix(OrderedDict(\n **results, skip=skip))\n\n train_metrics = {\n 'train_' + metric: mean(values) \n if len(values) > 0 else 0.0\n for metric, values in train_metrics.items()\n }\n\n with torch.no_grad():\n valid_metrics = zip(*evaluate(\n dataset=valid_dataset,\n num_steps=num_valid_steps))\n\n valid_loss, valid_acc, valid_ppl = [\n mean(values) if len(values) > 0 else 0.0\n for values in valid_metrics\n ]\n\n # switching back to training\n model.train()\n\n if master_process:\n results = {'epoch': epoch}\n\n results.update(train_metrics)\n\n results.update({\n 'valid_loss': valid_loss,\n 'valid_acc': valid_acc,\n 'valid_ppl': valid_ppl\n })\n\n record_history(results)\n print_results(results)\n\n # converting ppl to a large number so tensorboard\n # will not throw any warnings during training\n if valid_ppl == float('inf'):\n valid_ppl = 1e30\n\n # logging to tensorboard\n writer.add_scalar('val/loss', valid_loss, step)\n writer.add_scalar('val/acc', valid_acc, step)\n writer.add_scalar('val/ppl', valid_ppl, step)\n\n if master_process:\n save_state(name='last')\n\n if valid_loss < best_valid_loss:\n 
patience = 0\n best_valid_loss = valid_loss\n\n if master_process:\n save_state(name='best')\n\n else:\n patience += 1\n if patience == args.patience:\n # terminate when max patience\n # level is hit\n break\n\n if step == args.total_steps:\n break\n\n if master_process:\n writer.close()\n\n with torch.no_grad():\n test_metrics = zip(*evaluate(\n dataset=test_dataset,\n num_steps=num_test_steps))\n\n test_loss, test_acc, test_ppl = [\n mean(values) if len(values) > 0 else 0.0\n for values in test_metrics\n ]\n\n if master_process:\n logger.info('test loss: {:.4}'.format(\n test_loss))\n\n\nif __name__ == '__main__':\n try:\n main()\n\n except KeyboardInterrupt:\n # exiting training with Ctrl + C\n pass\n" ]
[ [ "torch.distributed.init_process_group", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "numpy.random.seed", "torch.manual_seed", "torch.load", "torch.cuda.set_device", "torch.isnan", "torch.distributed.barrier", "torch.exp", "torch.no_grad", "torch.save", "torch.cuda.is_available", "torch.device", "torch.distributed.all_reduce", "torch.nn.parallel.DistributedDataParallel", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UP-RS-ESP/GEW-DAP05-2018
[ "04ca0327b4a4ea5b6869e3e985672639651771e8" ]
[ "Session_06/fibonacci-python/bench.py" ]
[ "import numpy as np\n\ndef fibf(n):\n a = 0.0\n b = 1.0\n for i in range(n):\n a, b = a+b, a\n\n return a\n\ndef fibr(n):\n if n == 0 or n == 1:\n a = n\n else:\n a = fibr(n-1)+fibr(n-2)\n\n return a\n\ndef main():\n import sys\n from timeit import timeit\n from matplotlib import pyplot as pl\n\n number = 10000\n m = 15\n\n tpr = np.zeros(m)\n tpf = np.zeros(m)\n for i in range(m):\n tpr[i] = timeit('fibr(%i)' % i,\n setup = 'from __main__ import fibr', number = number) / number\n tpf[i] = timeit('fibf(%i)' % i,\n setup = 'from __main__ import fibf', number = number) / number\n\n pl.figure(1, (10.24, 7.68))\n pl.title('Fibonacci benchmark')\n pl.semilogy(tpr, label = 'Python recursion')\n pl.semilogy(tpf, label = 'Python for loop')\n pl.xlabel('Interation / Fibonacci number')\n pl.ylabel('Time [s]')\n pl.legend(loc = 'upper left')\n pl.show()\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "matplotlib.pyplot.semilogy", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yo16/df_overview
[ "6274eb4e4c07e850a67c75834f7f87e86cce926e" ]
[ "src/df_overview.py" ]
[ "# df_overviewモジュール\n# 2018/5/11 yo16\n\nimport numpy as np\nimport pandas as pd\nimport copy\n\n# df_overviewクラス\nclass df_overview(object):\n\t# 分析対象のDataFrame\n\t_df = None\n\t\n\t_debugMode = 1\t\t# [ 0:release | 1:debug ]\n\t\n\t\n\t# -------------------------------------------\n\t# コンストラクタ\n\t# -------------------------------------------\n\tdef __init__(self, df):\n\t\tself._df = copy.deepcopy(df)\n\t\treturn\n\t\n\t\n\t# -------------------------------------------\n\t# 分析内容\n\t# -------------------------------------------\n\t# 列名\n\tdef __anaName(self, c):\n\t\treturn c\n\t\n\t# int か float か stringのいずれであるか調べる\n\tdef __anaType(self, c):\n\t\tif isinstance(self._df[c].iat[0], np.int64):\t\t# numpy.int64は、int?って聞くとFALSEと答えてしまう\n\t\t\treturn 'int'\n\t\tif isinstance(self._df[c].iat[0], float):\n\t\t\treturn 'float'\n\t\treturn 'string'\n\t\t\n\t# 合計\n\tdef __anaTotal(self, c):\n\t\ttotal = 0\n\t\tfor i in range(len(self._df)):\n\t\t\ttotal = total + self._df[c][i]\n\t\t\n\t\treturn total\n\tdef __anaMean(self, c):\n\t\treturn 0\t# めんどくさいので未実装\n\t# 最小値\n\tdef __anaMin(self, c):\n\t\tmin = 0\n\t\tfor i in range(len(self._df)):\n\t\t\tif self._df[c][i] < min:\n\t\t\t\tmin = self._df[c][i]\n\t\treturn min\n\t# 最大値\n\tdef __anaMax(self, c):\n\t\tmax = 0\n\t\tfor i in range(len(self._df)):\n\t\t\tif max < self._df[c][i]:\n\t\t\t\tmax = self._df[c][i]\n\t\treturn max\n\t# ユニークな種類数\n\tdef __anaKind(self, c):\n\t\treturn len(self._df[c].unique())\n\t# ユニークな種類の上位\n\tdef __anaUniqueTop(self, c, n):\n\t\tvc = self._df[c].value_counts(dropna=False)\t\t# NaNも加算\n\t\tvc = vc[:n]\n\t\t# dict型に整形してあげる\n\t\td = {}\n\t\tfor i, v in vc.iteritems():\n\t\t\t d[i] = v\n\t\treturn d\n\t\t\n\t# 型\n\t# 合計(*)\n\t# 平均(*)\n\t# 最小値(*)\n\t# 最大値(*)\n\t# 値の種類数\n\t# (*):数値型のときのみ\n\t_ANALYZE_METHOD = {\t\\\n\t\t'name':\t\t__anaName\t\t\\\n\t,\t'type':\t\t__anaType\t\t\\\n\t,\t'total':\t__anaTotal\t\t\\\n\t,\t'mean':\t\t__anaMean\t\t\\\n\t,\t'min':\t\t__anaMin\t\t\\\n\t,\t'max':\t\t__anaMax\t\t\\\n\t,\t'kind':\t\t__anaKind\t\t\\\n\t,\t'unique':\t__anaUniqueTop\t\\\n\t}\n\t\n\t\n\t# -------------------------------------------\n\t# _dfの列のサマリのDataFrameを返す\n\t# -------------------------------------------\n\tdef cols_summary(self):\n\t\t# 列数\n\t\tself.__debugPrint('shape:(%d,%d)' % self._df.shape)\n\t\t\n\t\t# 戻り値のDataFrame\n\t\tdfRet = pd.DataFrame()\n\t\t\n\t\t# 1列ごとにサマって、dfへ結合していく\n\t\t# 元情報の1列=1行\n\t\tfor c in self._df.columns:\n\t\t\tself.__debugPrint(c)\n\t\t\tdfRet = pd.concat([dfRet,self.__summary_one_column(c)])\n\t\t\n\t\t# 列を並び替える\n\t\tdfRet = dfRet[['列名','型','合計','平均','最小値','最大値','種類数','ユニーク上位']]\n\t\t\n\t\treturn dfRet\n\t\n\t\n\t# -------------------------------------------\n\t# 1列のサマリを得る\n\t# -------------------------------------------\n\tdef __summary_one_column(self, columnName):\n\t\t\n\t\tdictData = {}\n\t\tisNum = False\n\t\tlineNum = len(self._df)\n\t\t\n\t\t# 集計\n\t\t# 列名\n\t\tdictData['列名'] = [self._ANALYZE_METHOD['name'](self, columnName)]\n\t\t# 型\n\t\tdictData['型'] = [self._ANALYZE_METHOD['type'](self, columnName)]\n\t\tisNum = (dictData['型'][0] != 'string')\n\t\t# 合計\n\t\tdictData['合計'] = [self._ANALYZE_METHOD['total'](self, columnName)] if isNum else 0\n\t\t# 平均\n\t\tdictData['平均'] = dictData['合計'][0] / lineNum if isNum else 0\n\t\t# 最小\n\t\tdictData['最小値'] = [self._ANALYZE_METHOD['min'](self, columnName)] if isNum else 0\n\t\t# 最大\n\t\tdictData['最大値'] = [self._ANALYZE_METHOD['max'](self, columnName)] if isNum else 0\n\t\t# 種類数\n\t\tdictData['種類数'] = [self._ANALYZE_METHOD['kind'](self, 
columnName)]\n\t\t# ユニーク上位\n\t\tdictData['ユニーク上位'] = [self._ANALYZE_METHOD['unique'](self, columnName, 10)]\n\t\t\n\t\tdf = pd.DataFrame(dictData)\n\t\t\n\t\treturn df\n\t\n\t\n\t# -------------------------------------------\n\t# デバッグ用print\n\t# -------------------------------------------\n\tdef __debugPrint(self, str):\n\t\tif self._debugMode == 1:\n\t\t\tprint(str)\n\t\treturn\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hopsparser/npdependency
[ "57571dd797281ad6d0a413aa46d797e423a1f7e8" ]
[ "tests/test_parser.py" ]
[ "import pathlib\nimport tempfile\nfrom typing import List, Tuple\n\nimport torch.cuda\n\nimport hypothesis.strategies as st\nimport pytest\nfrom hypothesis import assume, given, settings\nfrom torch.testing import assert_close\n\nfrom hopsparser.parser import BiAffineParser\nfrom hopsparser.deptree import DepGraph\nfrom hopsparser.lexers import LexingError\n\ndevices = [\"cpu\"]\nif torch.cuda.is_available():\n devices.append(\"cuda:0\")\n\n\[email protected](\"source_device\", devices)\[email protected](\"target_device\", devices)\ndef test_initialize_save_load(\n source_device: str,\n target_device: str,\n train_config: pathlib.Path,\n treebank: pathlib.Path,\n):\n source_device_d = torch.device(source_device)\n target_device_d = torch.device(target_device)\n parser = BiAffineParser.initialize(\n config_path=train_config,\n treebank=list(DepGraph.read_conll(open(treebank))),\n )\n parser.to(source_device_d)\n for _, p in parser.named_parameters():\n assert p.device == source_device_d\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_path = pathlib.Path(tmp_dir)\n parser.save(tmp_path, save_weights=True)\n _ = BiAffineParser.load(tmp_path)\n parser.to(target_device_d)\n for _, p in parser.named_parameters():\n assert p.device == target_device_d\n\n\[email protected](scope=\"session\")\ndef parser_and_reload(\n train_config: pathlib.Path,\n treebank: pathlib.Path,\n):\n parser = BiAffineParser.initialize(\n config_path=train_config,\n treebank=list(DepGraph.read_conll(open(treebank))),\n )\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_path = pathlib.Path(tmp_dir)\n parser.save(tmp_path, save_weights=True)\n reloaded = BiAffineParser.load(tmp_path)\n return parser, reloaded\n\n\n# FIXME: this should be generated in hypothesis to allow config variation\[email protected](scope=\"session\")\ndef parser(\n train_config: pathlib.Path,\n treebank: pathlib.Path,\n):\n parser = BiAffineParser.initialize(\n config_path=train_config,\n treebank=list(DepGraph.read_conll(open(treebank))),\n )\n return parser\n\n\[email protected](\"device\", devices)\n@settings(deadline=8192)\n# FIXME: should we really skip control characters and whitespaces? 
We do now because most 🤗\n# tokenizers strip them out instead of rendering them as unk (see also test_lexers)\n@given(\n stable_text=st.lists(\n st.text(alphabet=st.characters(blacklist_categories=[\"Zs\", \"C\"]), min_size=1),\n min_size=1, max_size=32,\n ),\n distractor_text_1=st.lists(\n st.text(alphabet=st.characters(blacklist_categories=[\"Zs\", \"C\"]), min_size=1),\n min_size=1, max_size=32,\n ),\n distractor_text_2=st.lists(\n st.text(alphabet=st.characters(blacklist_categories=[\"Zs\", \"C\"]), min_size=1),\n min_size=1, max_size=32,\n ),\n)\ndef test_batch_invariance(\n device: str,\n parser: BiAffineParser,\n stable_text: List[str],\n distractor_text_1: List[str],\n distractor_text_2: List[str],\n):\n parser = parser.to(device)\n parser.eval()\n try:\n encoded_stable_text = parser.encode_sentence(stable_text, strict=True)\n encoded_distractor_text_1 = parser.encode_sentence(\n distractor_text_1, strict=True\n )\n encoded_distractor_text_2 = parser.encode_sentence(\n distractor_text_2, strict=True\n )\n except LexingError:\n assume(False)\n with torch.no_grad():\n stable_length = len(stable_text) + 1\n batch_stable = parser.batch_sentences([encoded_stable_text])\n text_s1 = [encoded_stable_text, encoded_distractor_text_1]\n text_1s = [encoded_distractor_text_1, encoded_stable_text]\n text_s2 = [encoded_stable_text, encoded_distractor_text_2]\n ref_tagger_scores: torch.Tensor\n ref_arc_scores: torch.Tensor\n ref_lab_scores: torch.Tensor\n ref_tagger_scores, ref_arc_scores, ref_lab_scores = parser(\n batch_stable.encodings, batch_stable.sent_lengths\n )\n for text, idx in ((text_s1, 0), (text_1s, 1), (text_s2, 0)):\n batch = parser.batch_sentences(text)\n tagger_scores: torch.Tensor\n arc_scores: torch.Tensor\n lab_scores: torch.Tensor\n tagger_scores, arc_scores, lab_scores = parser(\n batch.encodings, batch.sent_lengths\n )\n assert_close(\n ref_tagger_scores[0, :stable_length, :],\n tagger_scores[idx, :stable_length, :],\n )\n assert_close(\n ref_arc_scores[0, :stable_length, :stable_length],\n arc_scores[idx, :stable_length, :stable_length],\n )\n assert_close(\n ref_lab_scores[0, :stable_length, :stable_length, :],\n lab_scores[idx, :stable_length, :stable_length, :],\n )\n\n\[email protected](\"device\", devices)\n@settings(deadline=8192)\n# FIXME: should we really skip control characters and whitespaces? We do now because most 🤗\n# tokenizers strip them out instead of rendering them as unk (see also test_lexers)\n@given(\n test_text=st.lists(\n st.text(alphabet=st.characters(blacklist_categories=[\"Zs\", \"C\"]), min_size=1),\n min_size=1,\n ),\n)\ndef test_save_load_idempotency(\n device: str,\n parser_and_reload: Tuple[BiAffineParser, BiAffineParser],\n test_text: List[str],\n):\n\n parser, reloaded = parser_and_reload\n parser = parser.to(device)\n reloaded = reloaded.to(device)\n test_sent_as_batch = [\" \".join(test_text)]\n original_parse = parser.parse(test_sent_as_batch, raw=True, strict=False)\n reloaded_parse = reloaded.parse(test_sent_as_batch, raw=True, strict=False)\n original_parsed_conll = \"\\n\\n\".join(t.to_conllu() for t in original_parse)\n reloaded_parsed_conll = \"\\n\\n\".join(t.to_conllu() for t in reloaded_parse)\n assert reloaded_parsed_conll == original_parsed_conll\n" ]
[ [ "torch.testing.assert_close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pdturney/management-theory
[ "1701a334d00bb6c1a3d6c3995e4fad1383e4c398" ]
[ "table_manager_worker_productivity.py" ]
[ "#\r\n# Table Manager Worker Productivity\r\n#\r\n# Peter Turney, February 28, 2021\r\n#\r\n# Run all 844 fusion seeds and store the results internally in\r\n# a numpy tensor:\r\n#\r\n# tensor = 844 fusions x 1001 time steps x 5 colours\r\n#\r\n# - 844 fusion events from 18 fusion_storage.bin files\r\n# - 1001 times steps in the Management game\r\n# - 5 colours (white, red, orange, blue, green)\r\n#\r\n# - value in tensor cell = count of live cells for given triple\r\n# [fusion_num, step_num, colour_num]\r\n#\r\n# After this tensor has been filled with values, generate\r\n# a table of the form:\r\n#\r\n# <step number> <avg live cells manager-manager> \r\n# <avg live cells manager-worker> <avg live cells worker-worker>\r\n#\r\nimport golly as g\r\nimport model_classes as mclass\r\nimport model_functions as mfunc\r\nimport model_parameters as mparam\r\nimport numpy as np\r\nimport time\r\nimport pickle\r\nimport os\r\nimport re\r\nimport sys\r\n#\r\n# Parameter values for making the table.\r\n#\r\nnum_steps = 1001 # number of time steps in the game\r\nnum_fusions = 844 # fusions contained in 18 fusion pickles\r\nnum_colours = 5 # 5 colours [white, red, blue, orange, green]\r\nnum_types = 3 # manager-manager, manager-worker, worker-worker\r\nnum_files = 18 # 18 fusion pickles\r\n#\r\n# Location of fusion_storage.bin files -- the input pickles.\r\n#\r\nfusion_dir = \"C:/Users/peter/Peter's Projects\" + \\\r\n \"/management-theory/Experiments/exper1\"\r\n#\r\nfusion_files = [] # list of pickles\r\n#\r\nfor i in range(num_files):\r\n fusion_files.append(fusion_dir + \"/run\" + str(i + 1) + \\\r\n \"/fusion_storage.bin\")\r\n#\r\n# TSV (tab separated values) file for storing the table.\r\n#\r\ntable_file = fusion_dir + \"/table_manager_worker_productivity.tsv\"\r\ntable_handle = open(table_file, \"w\")\r\ntable_handle.write(\"step num\\tmanager-manager\\tmanager-worker\" + \\\r\n \"\\tworker-worker\\n\")\r\n#\r\n# Initialize the tensor.\r\n#\r\ntensor = np.zeros([num_fusions, num_steps, num_colours])\r\n#\r\n# Read and process each fusion file one-by-one. 
Each fusion \r\n# file contains several fusion seeds.\r\n#\r\nfusion_num = 0 # fusion_num ranges from 0 to 843\r\n#\r\nfor fusion_file in fusion_files:\r\n fusion_handle = open(fusion_file, \"ab+\")\r\n fusion_handle.seek(0) # start at the beginning of the file\r\n fusion_list = []\r\n # read the pickle file into fusion_list\r\n while True:\r\n try:\r\n part = pickle.load(fusion_handle)\r\n fusion_list.append(part)\r\n except (EOFError, pickle.UnpicklingError):\r\n break\r\n fusion_handle.close()\r\n # iterate through the fusion events in the current fusion file\r\n # -- read four items at a time\r\n for (s2, s3, s4, n) in zip(*[iter(fusion_list)] * 4):\r\n part1 = s2\r\n part2 = s3\r\n # part1 and part2 (s2 and s3) are both using live state 1 (red),\r\n # so we need to convert part2 to live state 2 (blue)\r\n # -- mfunc.change_live_state() makes a copy, so the original is\r\n # not changed\r\n part2 = mfunc.change_live_state(part2, 2)\r\n # join the parts\r\n whole = mfunc.join_seeds(part1, part2)\r\n # initialize Golly\r\n rule_name = \"Management\"\r\n g.setalgo(\"QuickLife\") # use \"HashLife\" or \"QuickLife\"\r\n g.autoupdate(False) # do not update the view unless requested\r\n g.new(rule_name) # initialize cells to state 0\r\n g.setrule(rule_name) # make an infinite plane\r\n # initialize the counts for the five states:\r\n # [white, red, blue, orange, green]\r\n start_size = [0, 0, 0, 0, 0] \r\n end_size = [0, 0, 0, 0, 0]\r\n # copy whole into Golly \r\n for x in range(whole.xspan):\r\n for y in range(whole.yspan):\r\n state = whole.cells[x][y]\r\n g.setcell(x, y, state)\r\n # update start_size and end_size\r\n start_size[state] += 1\r\n end_size[state] += 1\r\n # record the initial growth (time step 0) in the tensor\r\n # -- the intitial growth is necessarily zero for all colours\r\n step_num = 0\r\n for colour_num in range(num_colours):\r\n tensor[fusion_num, step_num, colour_num] = 0\r\n # iterate over the number of time steps\r\n for step_num in range(1, num_steps):\r\n g.run(1)\r\n g.update()\r\n # update end_size\r\n boundary = g.getrect()\r\n if (len(boundary) == 0): # if no live cells ...\r\n end_size = [0, 0, 0, 0, 0]\r\n else:\r\n cell_list = g.getcells(boundary)\r\n # if cell_list ends in 0, then delete the 0 -- note that stateN\r\n # will never be zero, since dead cells (state 0) are not included\r\n # in cell_list\r\n if (cell_list[-1] == 0):\r\n cell_list.pop()\r\n # end_size = [white, red, blue, orange, green]\r\n end_size = [0, 0, 0, 0, 0] # initialize\r\n for (x, y, state) in zip(*[iter(cell_list)] * 3):\r\n end_size[state] += 1 # update count\r\n # update the tensor\r\n for colour_num in range(num_colours):\r\n tensor[fusion_num, step_num, colour_num] = \\\r\n end_size[colour_num] - start_size[colour_num]\r\n # \r\n # increment fusion number\r\n fusion_num += 1\r\n #\r\n#\r\n# Now that we have filled the tensor, we can generate the table:\r\n#\r\n# <step number> <avg live cells manager-manager> \r\n# <avg live cells manager-worker> <avg live cells worker-worker>\r\n#\r\nfor step_num in range(num_steps):\r\n #\r\n mm_count = 0 # manager-manager count (sample size)\r\n mw_count = 0 # manager-worker count (sample size)\r\n ww_count = 0 # worker-worker count (sample size)\r\n #\r\n mm_growth = 0 # manager-manager growth (sum of live cells)\r\n mw_growth = 0 # manager-worker growth (sum of live cells)\r\n ww_growth = 0 # worker-worker growth (sum of live cells)\r\n #\r\n for fusion_num in range(num_fusions):\r\n # \r\n red = tensor[fusion_num, step_num, 1]\r\n blue = 
tensor[fusion_num, step_num, 2]\r\n orange = tensor[fusion_num, step_num, 3]\r\n green = tensor[fusion_num, step_num, 4]\r\n #\r\n # red is a manager = green > red + orange\r\n # red is a worker = green <= red + orange\r\n #\r\n # blue is a manager = orange > blue + green\r\n # blue is a worker = orange <= blue + green\r\n #\r\n # manager-manager relation = red and blue are both managers\r\n # manager-worker relation = one is a manager and the other is a worker\r\n # worker-worker relation = red and blue are both workers\r\n #\r\n red_manager = (green > (red + orange))\r\n blue_manager = (orange > (blue + green))\r\n #\r\n growth = red + blue + orange + green\r\n #\r\n if (red_manager and blue_manager):\r\n mm_count += 1\r\n mm_growth += growth\r\n elif (red_manager and not blue_manager):\r\n mw_count += 1\r\n mw_growth += growth\r\n elif (blue_manager and not red_manager):\r\n mw_count += 1\r\n mw_growth += growth\r\n else:\r\n ww_count += 1\r\n ww_growth += growth\r\n #\r\n #\r\n assert mm_count + mw_count + ww_count == num_fusions\r\n #\r\n if (mm_count > 0):\r\n mm_avg_growth = mm_growth / mm_count\r\n else:\r\n mm_avg_growth = 0\r\n #\r\n if (mw_count > 0):\r\n mw_avg_growth = mw_growth / mw_count\r\n else:\r\n mw_avg_growth = 0\r\n #\r\n if (ww_count > 0):\r\n ww_avg_growth = ww_growth / ww_count\r\n else:\r\n ww_avg_growth = 0\r\n #\r\n table_handle.write(\"{}\\t{:.3f}\\t{:.3f}\\t{:.3f}\\n\".format(step_num,\r\n mm_avg_growth, mw_avg_growth, ww_avg_growth))\r\n #\r\n#\r\ntable_handle.close()\r\n#\r\n#" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ttobisawa/python-bigquery
[ "f55864ec3d6381f2b31598428a64822fdc73cb56" ]
[ "tests/unit/test_table.py" ]
[ "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport logging\nimport re\nimport time\nimport types\nimport unittest\nimport warnings\n\nimport mock\nimport pytest\n\nimport google.api_core.exceptions\nfrom test_utils.imports import maybe_fail_import\n\ntry:\n from google.cloud import bigquery_storage\n from google.cloud.bigquery_storage_v1.services.big_query_read.transports import (\n grpc as big_query_read_grpc_transport,\n )\nexcept ImportError: # pragma: NO COVER\n bigquery_storage = None\n big_query_read_grpc_transport = None\n\ntry:\n import pandas\nexcept (ImportError, AttributeError): # pragma: NO COVER\n pandas = None\n\ntry:\n import geopandas\nexcept (ImportError, AttributeError): # pragma: NO COVER\n geopandas = None\n\ntry:\n import pyarrow\n import pyarrow.types\nexcept ImportError: # pragma: NO COVER\n pyarrow = None\n\ntry:\n from tqdm import tqdm\nexcept (ImportError, AttributeError): # pragma: NO COVER\n tqdm = None\n\nfrom google.cloud.bigquery.dataset import DatasetReference\n\n\ndef _mock_client():\n from google.cloud.bigquery import client\n\n mock_client = mock.create_autospec(client.Client)\n mock_client.project = \"my-project\"\n return mock_client\n\n\nclass _SchemaBase(object):\n def _verify_field(self, field, r_field):\n self.assertEqual(field.name, r_field[\"name\"])\n self.assertEqual(field.field_type, r_field[\"type\"])\n self.assertEqual(field.mode, r_field.get(\"mode\", \"NULLABLE\"))\n\n def _verifySchema(self, schema, resource):\n r_fields = resource[\"schema\"][\"fields\"]\n self.assertEqual(len(schema), len(r_fields))\n\n for field, r_field in zip(schema, r_fields):\n self._verify_field(field, r_field)\n\n\nclass TestEncryptionConfiguration(unittest.TestCase):\n KMS_KEY_NAME = \"projects/1/locations/us/keyRings/1/cryptoKeys/1\"\n\n @staticmethod\n def _get_target_class():\n from google.cloud.bigquery.table import EncryptionConfiguration\n\n return EncryptionConfiguration\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def test_ctor_defaults(self):\n encryption_config = self._make_one()\n self.assertIsNone(encryption_config.kms_key_name)\n\n def test_ctor_with_key(self):\n encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)\n self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)\n\n\nclass TestTableReference(unittest.TestCase):\n @staticmethod\n def _get_target_class():\n from google.cloud.bigquery.table import TableReference\n\n return TableReference\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def test_ctor_defaults(self):\n dataset_ref = DatasetReference(\"project_1\", \"dataset_1\")\n\n table_ref = self._make_one(dataset_ref, \"table_1\")\n self.assertEqual(table_ref.dataset_id, dataset_ref.dataset_id)\n self.assertEqual(table_ref.table_id, \"table_1\")\n\n def test_to_api_repr(self):\n dataset_ref = DatasetReference(\"project_1\", \"dataset_1\")\n table_ref = self._make_one(dataset_ref, 
\"table_1\")\n\n resource = table_ref.to_api_repr()\n\n self.assertEqual(\n resource,\n {\"projectId\": \"project_1\", \"datasetId\": \"dataset_1\", \"tableId\": \"table_1\"},\n )\n\n def test_from_api_repr(self):\n from google.cloud.bigquery.table import TableReference\n\n dataset_ref = DatasetReference(\"project_1\", \"dataset_1\")\n expected = self._make_one(dataset_ref, \"table_1\")\n\n got = TableReference.from_api_repr(\n {\"projectId\": \"project_1\", \"datasetId\": \"dataset_1\", \"tableId\": \"table_1\"}\n )\n\n self.assertEqual(expected, got)\n\n def test_from_string(self):\n cls = self._get_target_class()\n got = cls.from_string(\"string-project.string_dataset.string_table\")\n self.assertEqual(got.project, \"string-project\")\n self.assertEqual(got.dataset_id, \"string_dataset\")\n self.assertEqual(got.table_id, \"string_table\")\n\n def test_from_string_w_prefix(self):\n cls = self._get_target_class()\n got = cls.from_string(\"google.com:string-project.string_dataset.string_table\")\n self.assertEqual(got.project, \"google.com:string-project\")\n self.assertEqual(got.dataset_id, \"string_dataset\")\n self.assertEqual(got.table_id, \"string_table\")\n\n def test_from_string_legacy_string(self):\n cls = self._get_target_class()\n with self.assertRaises(ValueError):\n cls.from_string(\"string-project:string_dataset.string_table\")\n\n def test_from_string_w_incorrect_prefix(self):\n cls = self._get_target_class()\n with self.assertRaises(ValueError):\n cls.from_string(\"google.com.string-project.string_dataset.string_table\")\n\n def test_from_string_not_fully_qualified(self):\n cls = self._get_target_class()\n with self.assertRaises(ValueError):\n cls.from_string(\"string_table\")\n\n with self.assertRaises(ValueError):\n cls.from_string(\"string_dataset.string_table\")\n\n with self.assertRaises(ValueError):\n cls.from_string(\"a.b.c.d\")\n\n def test_from_string_with_default_project(self):\n cls = self._get_target_class()\n got = cls.from_string(\n \"string_dataset.string_table\", default_project=\"default-project\"\n )\n self.assertEqual(got.project, \"default-project\")\n self.assertEqual(got.dataset_id, \"string_dataset\")\n self.assertEqual(got.table_id, \"string_table\")\n\n def test_from_string_ignores_default_project(self):\n cls = self._get_target_class()\n got = cls.from_string(\n \"string-project.string_dataset.string_table\",\n default_project=\"default-project\",\n )\n self.assertEqual(got.project, \"string-project\")\n self.assertEqual(got.dataset_id, \"string_dataset\")\n self.assertEqual(got.table_id, \"string_table\")\n\n def test___eq___wrong_type(self):\n dataset_ref = DatasetReference(\"project_1\", \"dataset_1\")\n table = self._make_one(dataset_ref, \"table_1\")\n other = object()\n self.assertNotEqual(table, other)\n self.assertEqual(table, mock.ANY)\n\n def test___eq___project_mismatch(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n other_dataset = DatasetReference(\"project_2\", \"dataset_1\")\n table = self._make_one(dataset, \"table_1\")\n other = self._make_one(other_dataset, \"table_1\")\n self.assertNotEqual(table, other)\n\n def test___eq___dataset_mismatch(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n other_dataset = DatasetReference(\"project_1\", \"dataset_2\")\n table = self._make_one(dataset, \"table_1\")\n other = self._make_one(other_dataset, \"table_1\")\n self.assertNotEqual(table, other)\n\n def test___eq___table_mismatch(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n table = 
self._make_one(dataset, \"table_1\")\n other = self._make_one(dataset, \"table_2\")\n self.assertNotEqual(table, other)\n\n def test___eq___equality(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n table = self._make_one(dataset, \"table_1\")\n other = self._make_one(dataset, \"table_1\")\n self.assertEqual(table, other)\n\n def test___hash__set_equality(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n table1 = self._make_one(dataset, \"table1\")\n table2 = self._make_one(dataset, \"table2\")\n set_one = {table1, table2}\n set_two = {table1, table2}\n self.assertEqual(set_one, set_two)\n\n def test___hash__not_equals(self):\n dataset = DatasetReference(\"project_1\", \"dataset_1\")\n table1 = self._make_one(dataset, \"table1\")\n table2 = self._make_one(dataset, \"table2\")\n set_one = {table1}\n set_two = {table2}\n self.assertNotEqual(set_one, set_two)\n\n def test___repr__(self):\n dataset = DatasetReference(\"project1\", \"dataset1\")\n table1 = self._make_one(dataset, \"table1\")\n expected = (\n \"TableReference(DatasetReference('project1', 'dataset1'), \" \"'table1')\"\n )\n self.assertEqual(repr(table1), expected)\n\n def test___str__(self):\n dataset = DatasetReference(\"project1\", \"dataset1\")\n table1 = self._make_one(dataset, \"table1\")\n self.assertEqual(str(table1), \"project1.dataset1.table1\")\n\n\nclass TestTable(unittest.TestCase, _SchemaBase):\n\n PROJECT = \"prahj-ekt\"\n DS_ID = \"dataset-name\"\n TABLE_NAME = \"table-name\"\n KMS_KEY_NAME = \"projects/1/locations/us/keyRings/1/cryptoKeys/1\"\n\n @staticmethod\n def _get_target_class():\n from google.cloud.bigquery.table import Table\n\n return Table\n\n def _make_one(self, *args, **kw):\n if len(args) == 0:\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n args = (table_ref,)\n\n return self._get_target_class()(*args, **kw)\n\n def _setUpConstants(self):\n import datetime\n from google.cloud._helpers import UTC\n\n self.WHEN_TS = 1437767599.006\n self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)\n self.ETAG = \"ETAG\"\n self.TABLE_FULL_ID = \"%s:%s.%s\" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)\n self.RESOURCE_URL = \"http://example.com/path/to/resource\"\n self.NUM_BYTES = 12345\n self.NUM_ROWS = 67\n self.NUM_EST_BYTES = 1234\n self.NUM_EST_ROWS = 23\n\n def _make_resource(self):\n self._setUpConstants()\n return {\n \"creationTime\": self.WHEN_TS * 1000,\n \"tableReference\": {\n \"projectId\": self.PROJECT,\n \"datasetId\": self.DS_ID,\n \"tableId\": self.TABLE_NAME,\n },\n \"schema\": {\n \"fields\": [\n {\"name\": \"full_name\", \"type\": \"STRING\", \"mode\": \"REQUIRED\"},\n {\"name\": \"age\", \"type\": \"INTEGER\", \"mode\": \"REQUIRED\"},\n ]\n },\n \"etag\": \"ETAG\",\n \"id\": self.TABLE_FULL_ID,\n \"lastModifiedTime\": self.WHEN_TS * 1000,\n \"location\": \"US\",\n \"selfLink\": self.RESOURCE_URL,\n \"numRows\": self.NUM_ROWS,\n \"numBytes\": self.NUM_BYTES,\n \"type\": \"TABLE\",\n \"streamingBuffer\": {\n \"estimatedRows\": str(self.NUM_EST_ROWS),\n \"estimatedBytes\": str(self.NUM_EST_BYTES),\n \"oldestEntryTime\": self.WHEN_TS * 1000,\n },\n \"externalDataConfiguration\": {\n \"sourceFormat\": \"CSV\",\n \"csvOptions\": {\"allowJaggedRows\": True, \"encoding\": \"encoding\"},\n },\n \"labels\": {\"x\": \"y\"},\n }\n\n def _verifyReadonlyResourceProperties(self, table, resource):\n if \"creationTime\" in resource:\n self.assertEqual(table.created, self.WHEN)\n else:\n 
self.assertIsNone(table.created)\n\n if \"etag\" in resource:\n self.assertEqual(table.etag, self.ETAG)\n else:\n self.assertIsNone(table.etag)\n\n if \"numRows\" in resource:\n self.assertEqual(table.num_rows, self.NUM_ROWS)\n else:\n self.assertIsNone(table.num_rows)\n\n if \"numBytes\" in resource:\n self.assertEqual(table.num_bytes, self.NUM_BYTES)\n else:\n self.assertIsNone(table.num_bytes)\n\n if \"selfLink\" in resource:\n self.assertEqual(table.self_link, self.RESOURCE_URL)\n else:\n self.assertIsNone(table.self_link)\n\n if \"streamingBuffer\" in resource:\n self.assertEqual(table.streaming_buffer.estimated_rows, self.NUM_EST_ROWS)\n self.assertEqual(table.streaming_buffer.estimated_bytes, self.NUM_EST_BYTES)\n self.assertEqual(table.streaming_buffer.oldest_entry_time, self.WHEN)\n else:\n self.assertIsNone(table.streaming_buffer)\n\n self.assertEqual(table.full_table_id, self.TABLE_FULL_ID)\n self.assertEqual(\n table.table_type, \"TABLE\" if \"view\" not in resource else \"VIEW\"\n )\n\n def _verifyResourceProperties(self, table, resource):\n\n self._verifyReadonlyResourceProperties(table, resource)\n\n if \"expirationTime\" in resource:\n self.assertEqual(table.expires, self.EXP_TIME)\n else:\n self.assertIsNone(table.expires)\n\n self.assertEqual(table.description, resource.get(\"description\"))\n self.assertEqual(table.friendly_name, resource.get(\"friendlyName\"))\n self.assertEqual(table.location, resource.get(\"location\"))\n\n if \"view\" in resource:\n self.assertEqual(table.view_query, resource[\"view\"][\"query\"])\n self.assertEqual(\n table.view_use_legacy_sql, resource[\"view\"].get(\"useLegacySql\", True)\n )\n else:\n self.assertIsNone(table.view_query)\n self.assertIsNone(table.view_use_legacy_sql)\n\n if \"schema\" in resource:\n self._verifySchema(table.schema, resource)\n else:\n self.assertEqual(table.schema, [])\n\n if \"externalDataConfiguration\" in resource:\n edc = table.external_data_configuration\n self.assertEqual(edc.source_format, \"CSV\")\n self.assertEqual(edc.options.allow_jagged_rows, True)\n\n if \"labels\" in resource:\n self.assertEqual(table.labels, {\"x\": \"y\"})\n else:\n self.assertEqual(table.labels, {})\n\n if \"encryptionConfiguration\" in resource:\n self.assertIsNotNone(table.encryption_configuration)\n self.assertEqual(\n table.encryption_configuration.kms_key_name,\n resource[\"encryptionConfiguration\"][\"kmsKeyName\"],\n )\n else:\n self.assertIsNone(table.encryption_configuration)\n\n def test_ctor(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n self.assertEqual(table.table_id, self.TABLE_NAME)\n self.assertEqual(table.project, self.PROJECT)\n self.assertEqual(table.dataset_id, self.DS_ID)\n self.assertEqual(table.reference.table_id, self.TABLE_NAME)\n self.assertEqual(table.reference.project, self.PROJECT)\n self.assertEqual(table.reference.dataset_id, self.DS_ID)\n self.assertEqual(\n table.path,\n \"/projects/%s/datasets/%s/tables/%s\"\n % (self.PROJECT, self.DS_ID, self.TABLE_NAME),\n )\n self.assertEqual(table.schema, [])\n\n self.assertIsNone(table.created)\n self.assertIsNone(table.etag)\n self.assertIsNone(table.modified)\n self.assertIsNone(table.num_bytes)\n self.assertIsNone(table.num_rows)\n self.assertIsNone(table.self_link)\n self.assertIsNone(table.full_table_id)\n self.assertIsNone(table.table_type)\n self.assertIsNone(table.description)\n self.assertIsNone(table.expires)\n self.assertIsNone(table.friendly_name)\n 
self.assertIsNone(table.location)\n self.assertIsNone(table.view_query)\n self.assertIsNone(table.view_use_legacy_sql)\n self.assertIsNone(table.external_data_configuration)\n self.assertEqual(table.labels, {})\n self.assertIsNone(table.encryption_configuration)\n self.assertIsNone(table.time_partitioning)\n self.assertIsNone(table.clustering_fields)\n\n def test_ctor_w_schema(self):\n from google.cloud.bigquery.schema import SchemaField\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n full_name = SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\")\n age = SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\")\n table = self._make_one(table_ref, schema=[full_name, age])\n\n self.assertEqual(table.schema, [full_name, age])\n\n def test_ctor_string(self):\n table = self._make_one(\"some-project.some_dset.some_tbl\")\n self.assertEqual(table.project, \"some-project\")\n self.assertEqual(table.dataset_id, \"some_dset\")\n self.assertEqual(table.table_id, \"some_tbl\")\n\n def test_ctor_tablelistitem(self):\n from google.cloud.bigquery.table import Table, TableListItem\n\n import datetime\n from google.cloud._helpers import _millis, UTC\n\n self.WHEN_TS = 1437767599.125\n self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)\n\n project = \"test-project\"\n dataset_id = \"test_dataset\"\n table_id = \"coffee_table\"\n resource = {\n \"creationTime\": self.WHEN_TS * 1000,\n \"expirationTime\": _millis(self.EXP_TIME),\n \"kind\": \"bigquery#table\",\n \"id\": \"{}:{}.{}\".format(project, dataset_id, table_id),\n \"tableReference\": {\n \"projectId\": project,\n \"datasetId\": dataset_id,\n \"tableId\": table_id,\n },\n \"friendlyName\": \"Mahogany Coffee Table\",\n \"type\": \"TABLE\",\n \"timePartitioning\": {\n \"type\": \"DAY\",\n \"field\": \"mycolumn\",\n \"expirationMs\": \"10000\",\n },\n \"labels\": {\"some-stuff\": \"this-is-a-label\"},\n \"clustering\": {\"fields\": [\"string\"]},\n }\n\n table_list_item = TableListItem(resource)\n table = Table(table_list_item)\n\n self.assertIsNone(table.created)\n self.assertEqual(table.reference.project, project)\n self.assertEqual(table.reference.dataset_id, dataset_id)\n self.assertEqual(table.reference.table_id, table_id)\n\n def test_ctor_string_wo_project_id(self):\n with pytest.raises(ValueError):\n # Project ID is missing.\n self._make_one(\"some_dset.some_tbl\")\n\n def test_num_bytes_getter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n # Check with no value set.\n self.assertIsNone(table.num_bytes)\n\n num_bytes = 1337\n # Check with integer value set.\n table._properties = {\"numBytes\": num_bytes}\n self.assertEqual(table.num_bytes, num_bytes)\n\n # Check with a string value set.\n table._properties = {\"numBytes\": str(num_bytes)}\n self.assertEqual(table.num_bytes, num_bytes)\n\n # Check with invalid int value.\n table._properties = {\"numBytes\": \"x\"}\n with self.assertRaises(ValueError):\n getattr(table, \"num_bytes\")\n\n def test_num_rows_getter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n # Check with no value set.\n self.assertIsNone(table.num_rows)\n\n num_rows = 42\n # Check with integer value set.\n table._properties = {\"numRows\": num_rows}\n self.assertEqual(table.num_rows, num_rows)\n\n # Check with a string value set.\n table._properties = 
{\"numRows\": str(num_rows)}\n self.assertEqual(table.num_rows, num_rows)\n\n # Check with invalid int value.\n table._properties = {\"numRows\": \"x\"}\n with self.assertRaises(ValueError):\n getattr(table, \"num_rows\")\n\n def test__eq__wrong_type(self):\n table = self._make_one(\"project_foo.dataset_bar.table_baz\")\n\n class TableWannabe:\n pass\n\n not_a_table = TableWannabe()\n not_a_table._properties = table._properties\n\n assert table != not_a_table # Can't fake it.\n\n def test__eq__same_table_basic(self):\n table_1 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_2 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n assert table_1 == table_2\n\n def test__eq__same_table_multiple_properties(self):\n from google.cloud.bigquery import SchemaField\n\n table_1 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_1.require_partition_filter = True\n table_1.labels = {\"first\": \"one\", \"second\": \"two\"}\n\n table_1.schema = [\n SchemaField(\"name\", \"STRING\", \"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", \"NULLABLE\"),\n ]\n\n table_2 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_2.require_partition_filter = True\n table_2.labels = {\"first\": \"one\", \"second\": \"two\"}\n table_2.schema = [\n SchemaField(\"name\", \"STRING\", \"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", \"NULLABLE\"),\n ]\n\n assert table_1 == table_2\n\n def test__eq__same_table_property_different(self):\n table_1 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_1.description = \"This is table baz\"\n\n table_2 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_2.description = \"This is also table baz\"\n\n assert table_1 == table_2 # Still equal, only table reference is important.\n\n def test__eq__different_table(self):\n table_1 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_2 = self._make_one(\"project_foo.dataset_bar.table_baz_2\")\n\n assert table_1 != table_2\n\n def test_hashable(self):\n table_1 = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_1.description = \"This is a table\"\n\n table_1b = self._make_one(\"project_foo.dataset_bar.table_baz\")\n table_1b.description = \"Metadata is irrelevant for hashes\"\n\n assert hash(table_1) == hash(table_1b)\n\n def test_schema_setter_non_sequence(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(TypeError):\n table.schema = object()\n\n def test_schema_setter_invalid_field(self):\n from google.cloud.bigquery.schema import SchemaField\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n full_name = SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\")\n with self.assertRaises(ValueError):\n table.schema = [full_name, object()]\n\n def test_schema_setter_valid_fields(self):\n from google.cloud.bigquery.schema import SchemaField\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n full_name = SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\")\n age = SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\")\n table.schema = [full_name, age]\n self.assertEqual(table.schema, [full_name, age])\n\n def test_schema_setter_invalid_mapping_representation(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = 
dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n full_name = {\"name\": \"full_name\", \"type\": \"STRING\", \"mode\": \"REQUIRED\"}\n invalid_field = {\"name\": \"full_name\", \"typeooo\": \"STRING\", \"mode\": \"REQUIRED\"}\n with self.assertRaises(Exception):\n table.schema = [full_name, invalid_field]\n\n def test_schema_setter_valid_mapping_representation(self):\n from google.cloud.bigquery.schema import SchemaField\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n full_name = {\"name\": \"full_name\", \"type\": \"STRING\", \"mode\": \"REQUIRED\"}\n job_status = {\n \"name\": \"is_employed\",\n \"type\": \"STRUCT\",\n \"mode\": \"NULLABLE\",\n \"fields\": [\n {\"name\": \"foo\", \"type\": \"DATE\", \"mode\": \"NULLABLE\"},\n {\"name\": \"bar\", \"type\": \"BYTES\", \"mode\": \"REQUIRED\"},\n ],\n }\n\n table.schema = [full_name, job_status]\n\n expected_schema = [\n SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\n \"is_employed\",\n \"STRUCT\",\n mode=\"NULLABLE\",\n fields=[\n SchemaField(\"foo\", \"DATE\", mode=\"NULLABLE\"),\n SchemaField(\"bar\", \"BYTES\", mode=\"REQUIRED\"),\n ],\n ),\n ]\n self.assertEqual(table.schema, expected_schema)\n\n def test_props_set_by_server(self):\n import datetime\n from google.cloud._helpers import UTC\n from google.cloud._helpers import _millis\n\n CREATED = datetime.datetime(2015, 7, 29, 12, 13, 22, tzinfo=UTC)\n MODIFIED = datetime.datetime(2015, 7, 29, 14, 47, 15, tzinfo=UTC)\n TABLE_FULL_ID = \"%s:%s.%s\" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)\n URL = \"http://example.com/projects/%s/datasets/%s/tables/%s\" % (\n self.PROJECT,\n self.DS_ID,\n self.TABLE_NAME,\n )\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table._properties[\"creationTime\"] = _millis(CREATED)\n table._properties[\"etag\"] = \"ETAG\"\n table._properties[\"lastModifiedTime\"] = _millis(MODIFIED)\n table._properties[\"numBytes\"] = 12345\n table._properties[\"numRows\"] = 66\n table._properties[\"selfLink\"] = URL\n table._properties[\"id\"] = TABLE_FULL_ID\n table._properties[\"type\"] = \"TABLE\"\n\n self.assertEqual(table.created, CREATED)\n self.assertEqual(table.etag, \"ETAG\")\n self.assertEqual(table.modified, MODIFIED)\n self.assertEqual(table.num_bytes, 12345)\n self.assertEqual(table.num_rows, 66)\n self.assertEqual(table.self_link, URL)\n self.assertEqual(table.full_table_id, TABLE_FULL_ID)\n self.assertEqual(table.table_type, \"TABLE\")\n\n def test_snapshot_definition_not_set(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n assert table.snapshot_definition is None\n\n def test_snapshot_definition_set(self):\n from google.cloud._helpers import UTC\n from google.cloud.bigquery.table import SnapshotDefinition\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table._properties[\"snapshotDefinition\"] = {\n \"baseTableReference\": {\n \"projectId\": \"project_x\",\n \"datasetId\": \"dataset_y\",\n \"tableId\": \"table_z\",\n },\n \"snapshotTime\": \"2010-09-28T10:20:30.123Z\",\n }\n\n snapshot = table.snapshot_definition\n\n assert isinstance(snapshot, SnapshotDefinition)\n assert snapshot.base_table_reference.path == (\n 
\"/projects/project_x/datasets/dataset_y/tables/table_z\"\n )\n assert snapshot.snapshot_time == datetime.datetime(\n 2010, 9, 28, 10, 20, 30, 123000, tzinfo=UTC\n )\n\n def test_description_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.description = 12345\n\n def test_description_setter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.description = \"DESCRIPTION\"\n self.assertEqual(table.description, \"DESCRIPTION\")\n\n def test_expires_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.expires = object()\n\n def test_expires_setter(self):\n import datetime\n from google.cloud._helpers import UTC\n\n WHEN = datetime.datetime(2015, 7, 28, 16, 39, tzinfo=UTC)\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.expires = WHEN\n self.assertEqual(table.expires, WHEN)\n\n def test_friendly_name_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.friendly_name = 12345\n\n def test_friendly_name_setter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.friendly_name = \"FRIENDLY\"\n self.assertEqual(table.friendly_name, \"FRIENDLY\")\n\n def test_view_query_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.view_query = 12345\n\n def test_view_query_setter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.view_query = \"select * from foo\"\n self.assertEqual(table.view_query, \"select * from foo\")\n self.assertEqual(table.view_use_legacy_sql, False)\n\n table.view_use_legacy_sql = True\n self.assertEqual(table.view_use_legacy_sql, True)\n\n def test_view_query_deleter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.view_query = \"select * from foo\"\n del table.view_query\n self.assertIsNone(table.view_query)\n self.assertIsNone(table.view_use_legacy_sql)\n\n def test_view_use_legacy_sql_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.view_use_legacy_sql = 12345\n\n def test_view_use_legacy_sql_setter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.view_use_legacy_sql = True\n table.view_query = \"select * from foo\"\n self.assertEqual(table.view_use_legacy_sql, True)\n self.assertEqual(table.view_query, \"select * from foo\")\n\n def test_external_data_configuration_setter(self):\n from google.cloud.bigquery.external_config import 
ExternalConfig\n\n external_config = ExternalConfig(\"CSV\")\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table.external_data_configuration = external_config\n\n self.assertEqual(\n table.external_data_configuration.source_format,\n external_config.source_format,\n )\n\n def test_external_data_configuration_setter_none(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table.external_data_configuration = None\n\n self.assertIsNone(table.external_data_configuration)\n\n def test_external_data_configuration_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.external_data_configuration = 12345\n\n def test_labels_update_in_place(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n del table._properties[\"labels\"] # don't start w/ existing dict\n labels = table.labels\n labels[\"foo\"] = \"bar\" # update in place\n self.assertEqual(table.labels, {\"foo\": \"bar\"})\n\n def test_labels_setter_bad_value(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n with self.assertRaises(ValueError):\n table.labels = 12345\n\n def test_mview_query(self):\n table = self._make_one()\n self.assertIsNone(table.mview_query)\n table.mview_query = \"SELECT name, SUM(number) FROM dset.tbl GROUP BY 1\"\n self.assertEqual(\n table.mview_query, \"SELECT name, SUM(number) FROM dset.tbl GROUP BY 1\"\n )\n del table.mview_query\n self.assertIsNone(table.mview_query)\n\n def test_mview_last_refresh_time(self):\n table = self._make_one()\n self.assertIsNone(table.mview_last_refresh_time)\n table._properties[\"materializedView\"] = {\n \"lastRefreshTime\": \"1606751842496\",\n }\n self.assertEqual(\n table.mview_last_refresh_time,\n datetime.datetime(\n 2020, 11, 30, 15, 57, 22, 496000, tzinfo=datetime.timezone.utc\n ),\n )\n\n def test_mview_enable_refresh(self):\n table = self._make_one()\n self.assertIsNone(table.mview_enable_refresh)\n table.mview_enable_refresh = True\n self.assertTrue(table.mview_enable_refresh)\n table.mview_enable_refresh = False\n self.assertFalse(table.mview_enable_refresh)\n table.mview_enable_refresh = None\n self.assertIsNone(table.mview_enable_refresh)\n\n def test_mview_refresh_interval(self):\n table = self._make_one()\n self.assertIsNone(table.mview_refresh_interval)\n table.mview_refresh_interval = datetime.timedelta(minutes=30)\n self.assertEqual(table.mview_refresh_interval, datetime.timedelta(minutes=30))\n self.assertEqual(\n table._properties[\"materializedView\"][\"refreshIntervalMs\"], \"1800000\"\n )\n table.mview_refresh_interval = None\n self.assertIsNone(table.mview_refresh_interval)\n\n def test_from_string(self):\n cls = self._get_target_class()\n got = cls.from_string(\"string-project.string_dataset.string_table\")\n self.assertEqual(got.project, \"string-project\")\n self.assertEqual(got.dataset_id, \"string_dataset\")\n self.assertEqual(got.table_id, \"string_table\")\n self.assertEqual(\n str(got.reference), \"string-project.string_dataset.string_table\"\n )\n\n def test_from_string_legacy_string(self):\n cls = self._get_target_class()\n with 
self.assertRaises(ValueError):\n cls.from_string(\"string-project:string_dataset.string_table\")\n\n def test_from_string_not_fully_qualified(self):\n cls = self._get_target_class()\n with self.assertRaises(ValueError):\n cls.from_string(\"string_dataset.string_table\")\n\n def test_from_api_repr_missing_identity(self):\n self._setUpConstants()\n RESOURCE = {}\n klass = self._get_target_class()\n with self.assertRaises(KeyError):\n klass.from_api_repr(RESOURCE)\n\n def test_from_api_repr_bare(self):\n self._setUpConstants()\n RESOURCE = {\n \"id\": \"%s:%s.%s\" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),\n \"tableReference\": {\n \"projectId\": self.PROJECT,\n \"datasetId\": self.DS_ID,\n \"tableId\": self.TABLE_NAME,\n },\n \"type\": \"TABLE\",\n }\n klass = self._get_target_class()\n table = klass.from_api_repr(RESOURCE)\n self.assertEqual(table.table_id, self.TABLE_NAME)\n self._verifyResourceProperties(table, RESOURCE)\n\n def test_from_api_repr_w_properties(self):\n import datetime\n from google.cloud._helpers import UTC\n from google.cloud._helpers import _millis\n\n RESOURCE = self._make_resource()\n RESOURCE[\"view\"] = {\"query\": \"select fullname, age from person_ages\"}\n RESOURCE[\"type\"] = \"VIEW\"\n RESOURCE[\"location\"] = \"EU\"\n self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)\n RESOURCE[\"expirationTime\"] = _millis(self.EXP_TIME)\n klass = self._get_target_class()\n table = klass.from_api_repr(RESOURCE)\n self._verifyResourceProperties(table, RESOURCE)\n\n def test_from_api_repr_w_partial_streamingbuffer(self):\n import datetime\n from google.cloud._helpers import UTC\n from google.cloud._helpers import _millis\n\n RESOURCE = self._make_resource()\n self.OLDEST_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)\n RESOURCE[\"streamingBuffer\"] = {\"oldestEntryTime\": _millis(self.OLDEST_TIME)}\n klass = self._get_target_class()\n table = klass.from_api_repr(RESOURCE)\n self.assertIsNotNone(table.streaming_buffer)\n self.assertIsNone(table.streaming_buffer.estimated_rows)\n self.assertIsNone(table.streaming_buffer.estimated_bytes)\n self.assertEqual(table.streaming_buffer.oldest_entry_time, self.OLDEST_TIME)\n # Another partial construction\n RESOURCE[\"streamingBuffer\"] = {\"estimatedRows\": 1}\n klass = self._get_target_class()\n table = klass.from_api_repr(RESOURCE)\n self.assertIsNotNone(table.streaming_buffer)\n self.assertEqual(table.streaming_buffer.estimated_rows, 1)\n self.assertIsNone(table.streaming_buffer.estimated_bytes)\n self.assertIsNone(table.streaming_buffer.oldest_entry_time)\n\n def test_from_api_with_encryption(self):\n self._setUpConstants()\n RESOURCE = {\n \"id\": \"%s:%s.%s\" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),\n \"tableReference\": {\n \"projectId\": self.PROJECT,\n \"datasetId\": self.DS_ID,\n \"tableId\": self.TABLE_NAME,\n },\n \"encryptionConfiguration\": {\"kmsKeyName\": self.KMS_KEY_NAME},\n \"type\": \"TABLE\",\n }\n klass = self._get_target_class()\n table = klass.from_api_repr(RESOURCE)\n self._verifyResourceProperties(table, RESOURCE)\n\n def test_to_api_repr_w_custom_field(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table._properties[\"newAlphaProperty\"] = \"unreleased property\"\n resource = table.to_api_repr()\n\n exp_resource = {\n \"tableReference\": table_ref.to_api_repr(),\n \"labels\": {},\n \"newAlphaProperty\": \"unreleased property\",\n }\n self.assertEqual(resource, 
exp_resource)\n\n def test__build_resource_w_custom_field(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table._properties[\"newAlphaProperty\"] = \"unreleased property\"\n resource = table._build_resource([\"newAlphaProperty\"])\n\n exp_resource = {\"newAlphaProperty\": \"unreleased property\"}\n self.assertEqual(resource, exp_resource)\n\n def test__build_resource_w_custom_field_not_in__properties(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table = self._make_one(dataset.table(self.TABLE_NAME))\n table.bad = \"value\"\n with self.assertRaises(ValueError):\n table._build_resource([\"bad\"])\n\n def test_range_partitioning(self):\n from google.cloud.bigquery.table import RangePartitioning\n from google.cloud.bigquery.table import PartitionRange\n\n table = self._make_one(\"proj.dset.tbl\")\n assert table.range_partitioning is None\n\n table.range_partitioning = RangePartitioning(\n field=\"col1\", range_=PartitionRange(start=-512, end=1024, interval=128)\n )\n assert table.range_partitioning.field == \"col1\"\n assert table.range_partitioning.range_.start == -512\n assert table.range_partitioning.range_.end == 1024\n assert table.range_partitioning.range_.interval == 128\n\n table.range_partitioning = None\n assert table.range_partitioning is None\n\n def test_range_partitioning_w_wrong_type(self):\n object_under_test = self._make_one(\"proj.dset.tbl\")\n with pytest.raises(ValueError, match=\"RangePartitioning\"):\n object_under_test.range_partitioning = object()\n\n def test_require_partitioning_filter(self):\n table = self._make_one(\"proj.dset.tbl\")\n assert table.require_partition_filter is None\n table.require_partition_filter = True\n assert table.require_partition_filter\n table.require_partition_filter = False\n assert table.require_partition_filter is not None\n assert not table.require_partition_filter\n table.require_partition_filter = None\n assert table.require_partition_filter is None\n\n def test_time_partitioning_getter(self):\n from google.cloud.bigquery.table import TimePartitioning\n from google.cloud.bigquery.table import TimePartitioningType\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table._properties[\"timePartitioning\"] = {\n \"type\": \"DAY\",\n \"field\": \"col1\",\n \"expirationMs\": \"123456\",\n \"requirePartitionFilter\": False,\n }\n self.assertIsInstance(table.time_partitioning, TimePartitioning)\n self.assertEqual(table.time_partitioning.type_, TimePartitioningType.DAY)\n self.assertEqual(table.time_partitioning.field, \"col1\")\n self.assertEqual(table.time_partitioning.expiration_ms, 123456)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertFalse(table.time_partitioning.require_partition_filter)\n\n assert len(warned) == 1\n self.assertIs(warned[0].category, PendingDeprecationWarning)\n\n def test_time_partitioning_getter_w_none(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table._properties[\"timePartitioning\"] = None\n self.assertIsNone(table.time_partitioning)\n\n del table._properties[\"timePartitioning\"]\n self.assertIsNone(table.time_partitioning)\n\n def test_time_partitioning_getter_w_empty(self):\n from google.cloud.bigquery.table import TimePartitioning\n\n dataset = DatasetReference(self.PROJECT, 
self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n # Even though there are required properties according to the API\n # specification, sometimes time partitioning is populated as an empty\n # object. See internal bug 131167013.\n table._properties[\"timePartitioning\"] = {}\n self.assertIsInstance(table.time_partitioning, TimePartitioning)\n self.assertIsNone(table.time_partitioning.type_)\n self.assertIsNone(table.time_partitioning.field)\n self.assertIsNone(table.time_partitioning.expiration_ms)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertIsNone(table.time_partitioning.require_partition_filter)\n\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_time_partitioning_setter(self):\n from google.cloud.bigquery.table import TimePartitioning\n from google.cloud.bigquery.table import TimePartitioningType\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n time_partitioning = TimePartitioning(type_=TimePartitioningType.HOUR)\n\n table.time_partitioning = time_partitioning\n\n self.assertEqual(table.time_partitioning.type_, TimePartitioningType.HOUR)\n # Both objects point to the same properties dict\n self.assertIs(\n table._properties[\"timePartitioning\"], time_partitioning._properties\n )\n\n time_partitioning.expiration_ms = 10000\n\n # Changes to TimePartitioning object are reflected in Table properties\n self.assertEqual(\n table.time_partitioning.expiration_ms, time_partitioning.expiration_ms\n )\n\n def test_time_partitioning_setter_bad_type(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n with self.assertRaises(ValueError):\n table.time_partitioning = {\"timePartitioning\": {\"type\": \"DAY\"}}\n\n def test_time_partitioning_setter_none(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table.time_partitioning = None\n\n self.assertIsNone(table.time_partitioning)\n\n def test_partitioning_type_setter(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertIsNone(table.partitioning_type)\n\n table.partitioning_type = TimePartitioningType.DAY\n\n self.assertEqual(table.partitioning_type, \"DAY\")\n\n self.assertEqual(len(warned), 3)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_partitioning_type_setter_w_time_partitioning_set(self):\n from google.cloud.bigquery.table import TimePartitioning\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.time_partitioning = TimePartitioning()\n\n with warnings.catch_warnings(record=True) as warned:\n table.partitioning_type = \"NEW_FAKE_TYPE\"\n\n self.assertEqual(table.partitioning_type, \"NEW_FAKE_TYPE\")\n\n self.assertEqual(len(warned), 2)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_partitioning_expiration_setter_w_time_partitioning_set(self):\n from google.cloud.bigquery.table import TimePartitioning\n\n dataset = 
DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n table.time_partitioning = TimePartitioning()\n\n with warnings.catch_warnings(record=True) as warned:\n table.partition_expiration = 100000\n\n self.assertEqual(table.partition_expiration, 100000)\n\n self.assertEqual(len(warned), 2)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_partition_expiration_setter(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertIsNone(table.partition_expiration)\n\n table.partition_expiration = 100\n\n self.assertEqual(table.partition_expiration, 100)\n # defaults to 'DAY' when expiration is set and type is not set\n self.assertEqual(table.partitioning_type, \"DAY\")\n\n self.assertEqual(len(warned), 4)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_clustering_fields_setter_w_fields(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n fields = [\"email\", \"phone\"]\n\n table.clustering_fields = fields\n self.assertEqual(table.clustering_fields, fields)\n self.assertEqual(table._properties[\"clustering\"], {\"fields\": fields})\n\n def test_clustering_fields_setter_w_none(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n fields = [\"email\", \"phone\"]\n\n table._properties[\"clustering\"] = {\"fields\": fields}\n table.clustering_fields = None\n self.assertIsNone(table.clustering_fields)\n self.assertTrue(\"clustering\" in table._properties) # None stored explicitly\n\n def test_clustering_fields_setter_w_none_noop(self):\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n\n table.clustering_fields = None\n self.assertIsNone(table.clustering_fields)\n self.assertTrue(\"clustering\" in table._properties) # None stored explicitly\n\n def test_encryption_configuration_setter(self):\n # Previously, the EncryptionConfiguration class was in the table module, not the\n # encryption_configuration module. 
It was moved to support models encryption.\n # This test import from the table module to ensure that the previous location\n # continues to function as an alias.\n from google.cloud.bigquery.table import EncryptionConfiguration\n\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = self._make_one(table_ref)\n encryption_configuration = EncryptionConfiguration(\n kms_key_name=self.KMS_KEY_NAME\n )\n table.encryption_configuration = encryption_configuration\n self.assertEqual(table.encryption_configuration.kms_key_name, self.KMS_KEY_NAME)\n table.encryption_configuration = None\n self.assertIsNone(table.encryption_configuration)\n\n def test___repr__(self):\n from google.cloud.bigquery.table import TableReference\n\n dataset = DatasetReference(\"project1\", \"dataset1\")\n table1 = self._make_one(TableReference(dataset, \"table1\"))\n expected = (\n \"Table(TableReference(\"\n \"DatasetReference('project1', 'dataset1'), \"\n \"'table1'))\"\n )\n self.assertEqual(repr(table1), expected)\n\n\nclass Test_row_from_mapping(unittest.TestCase, _SchemaBase):\n\n PROJECT = \"prahj-ekt\"\n DS_ID = \"dataset-name\"\n TABLE_NAME = \"table-name\"\n\n def _call_fut(self, mapping, schema):\n from google.cloud.bigquery.table import _row_from_mapping\n\n return _row_from_mapping(mapping, schema)\n\n def test__row_from_mapping_wo_schema(self):\n from google.cloud.bigquery.table import Table, _TABLE_HAS_NO_SCHEMA\n\n MAPPING = {\"full_name\": \"Phred Phlyntstone\", \"age\": 32}\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n table = Table(table_ref)\n\n with self.assertRaises(ValueError) as exc:\n self._call_fut(MAPPING, table.schema)\n\n self.assertEqual(exc.exception.args, (_TABLE_HAS_NO_SCHEMA,))\n\n def test__row_from_mapping_w_invalid_schema(self):\n from google.cloud.bigquery.schema import SchemaField\n from google.cloud.bigquery.table import Table\n\n MAPPING = {\n \"full_name\": \"Phred Phlyntstone\",\n \"age\": 32,\n \"colors\": [\"red\", \"green\"],\n \"bogus\": \"WHATEVER\",\n }\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n full_name = SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\")\n age = SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\")\n colors = SchemaField(\"colors\", \"DATETIME\", mode=\"REPEATED\")\n bogus = SchemaField(\"joined\", \"STRING\", mode=\"BOGUS\")\n table = Table(table_ref, schema=[full_name, age, colors, bogus])\n\n with self.assertRaises(ValueError) as exc:\n self._call_fut(MAPPING, table.schema)\n\n self.assertIn(\"Unknown field mode: BOGUS\", str(exc.exception))\n\n def test__row_from_mapping_w_schema(self):\n from google.cloud.bigquery.schema import SchemaField\n from google.cloud.bigquery.table import Table\n\n MAPPING = {\n \"full_name\": \"Phred Phlyntstone\",\n \"age\": 32,\n \"colors\": [\"red\", \"green\"],\n \"extra\": \"IGNORED\",\n }\n dataset = DatasetReference(self.PROJECT, self.DS_ID)\n table_ref = dataset.table(self.TABLE_NAME)\n full_name = SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\")\n age = SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\")\n colors = SchemaField(\"colors\", \"DATETIME\", mode=\"REPEATED\")\n joined = SchemaField(\"joined\", \"STRING\", mode=\"NULLABLE\")\n table = Table(table_ref, schema=[full_name, age, colors, joined])\n\n self.assertEqual(\n self._call_fut(MAPPING, table.schema),\n (\"Phred Phlyntstone\", 32, [\"red\", \"green\"], None),\n 
)\n\n\nclass TestTableListItem(unittest.TestCase):\n @staticmethod\n def _get_target_class():\n from google.cloud.bigquery.table import TableListItem\n\n return TableListItem\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def _setUpConstants(self):\n from google.cloud._helpers import UTC\n\n self.WHEN_TS = 1437767599.125\n self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)\n self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)\n\n def test_ctor(self):\n from google.cloud._helpers import _millis\n\n self._setUpConstants()\n project = \"test-project\"\n dataset_id = \"test_dataset\"\n table_id = \"coffee_table\"\n resource = {\n \"creationTime\": self.WHEN_TS * 1000,\n \"expirationTime\": _millis(self.EXP_TIME),\n \"kind\": \"bigquery#table\",\n \"id\": \"{}:{}.{}\".format(project, dataset_id, table_id),\n \"tableReference\": {\n \"projectId\": project,\n \"datasetId\": dataset_id,\n \"tableId\": table_id,\n },\n \"friendlyName\": \"Mahogany Coffee Table\",\n \"type\": \"TABLE\",\n \"timePartitioning\": {\n \"type\": \"DAY\",\n \"field\": \"mycolumn\",\n \"expirationMs\": \"10000\",\n },\n \"labels\": {\"some-stuff\": \"this-is-a-label\"},\n \"clustering\": {\"fields\": [\"string\"]},\n }\n\n table = self._make_one(resource)\n\n self.assertEqual(table.created, self.WHEN)\n self.assertEqual(table.expires, self.EXP_TIME)\n self.assertEqual(table.project, project)\n self.assertEqual(table.dataset_id, dataset_id)\n self.assertEqual(table.table_id, table_id)\n self.assertEqual(\n table.full_table_id, \"{}:{}.{}\".format(project, dataset_id, table_id)\n )\n self.assertEqual(table.reference.project, project)\n self.assertEqual(table.reference.dataset_id, dataset_id)\n self.assertEqual(table.reference.table_id, table_id)\n self.assertEqual(table.friendly_name, \"Mahogany Coffee Table\")\n self.assertEqual(table.table_type, \"TABLE\")\n self.assertEqual(table.time_partitioning.type_, \"DAY\")\n self.assertEqual(table.time_partitioning.expiration_ms, 10000)\n self.assertEqual(table.time_partitioning.field, \"mycolumn\")\n self.assertEqual(table.labels[\"some-stuff\"], \"this-is-a-label\")\n self.assertIsNone(table.view_use_legacy_sql)\n self.assertEqual(table.clustering_fields, [\"string\"])\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertEqual(table.partitioning_type, \"DAY\")\n self.assertEqual(table.partition_expiration, 10000)\n\n self.assertEqual(len(warned), 2)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_ctor_view(self):\n project = \"test-project\"\n dataset_id = \"test_dataset\"\n table_id = \"just_looking\"\n resource = {\n \"kind\": \"bigquery#table\",\n \"id\": \"{}:{}.{}\".format(project, dataset_id, table_id),\n \"tableReference\": {\n \"projectId\": project,\n \"datasetId\": dataset_id,\n \"tableId\": table_id,\n },\n \"type\": \"VIEW\",\n }\n\n table = self._make_one(resource)\n self.assertEqual(table.project, project)\n self.assertEqual(table.dataset_id, dataset_id)\n self.assertEqual(table.table_id, table_id)\n self.assertEqual(\n table.full_table_id, \"{}:{}.{}\".format(project, dataset_id, table_id)\n )\n self.assertEqual(table.reference.project, project)\n self.assertEqual(table.reference.dataset_id, dataset_id)\n self.assertEqual(table.reference.table_id, table_id)\n self.assertEqual(table.table_type, \"VIEW\")\n # Server default for useLegacySql is True.\n self.assertTrue(table.view_use_legacy_sql)\n\n def 
test_ctor_missing_properties(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"testproject\",\n \"datasetId\": \"testdataset\",\n \"tableId\": \"testtable\",\n }\n }\n table = self._make_one(resource)\n self.assertEqual(table.project, \"testproject\")\n self.assertEqual(table.dataset_id, \"testdataset\")\n self.assertEqual(table.table_id, \"testtable\")\n self.assertIsNone(table.created)\n self.assertIsNone(table.expires)\n self.assertIsNone(table.clustering_fields)\n self.assertIsNone(table.full_table_id)\n self.assertIsNone(table.friendly_name)\n self.assertIsNone(table.table_type)\n self.assertIsNone(table.time_partitioning)\n self.assertEqual(table.labels, {})\n self.assertIsNone(table.view_use_legacy_sql)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertIsNone(table.partitioning_type)\n self.assertIsNone(table.partition_expiration)\n\n self.assertEqual(len(warned), 2)\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_ctor_wo_project(self):\n resource = {\n \"tableReference\": {\"datasetId\": \"testdataset\", \"tableId\": \"testtable\"}\n }\n with self.assertRaises(ValueError):\n self._make_one(resource)\n\n def test_ctor_wo_dataset(self):\n resource = {\n \"tableReference\": {\"projectId\": \"testproject\", \"tableId\": \"testtable\"}\n }\n with self.assertRaises(ValueError):\n self._make_one(resource)\n\n def test_ctor_wo_table(self):\n resource = {\n \"tableReference\": {\"projectId\": \"testproject\", \"datasetId\": \"testdataset\"}\n }\n with self.assertRaises(ValueError):\n self._make_one(resource)\n\n def test_ctor_wo_reference(self):\n with self.assertRaises(ValueError):\n self._make_one({})\n\n def test_labels_update_in_place(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"testproject\",\n \"datasetId\": \"testdataset\",\n \"tableId\": \"testtable\",\n }\n }\n table = self._make_one(resource)\n labels = table.labels\n labels[\"foo\"] = \"bar\" # update in place\n self.assertEqual(table.labels, {\"foo\": \"bar\"})\n\n def test_to_api_repr(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"testproject\",\n \"datasetId\": \"testdataset\",\n \"tableId\": \"testtable\",\n }\n }\n table = self._make_one(resource)\n self.assertEqual(table.to_api_repr(), resource)\n\n def test__eq__wrong_type(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n table = self._make_one(resource)\n\n class FakeTableListItem:\n project = \"project_foo\"\n dataset_id = \"dataset_bar\"\n table_id = \"table_baz\"\n\n not_a_table = FakeTableListItem()\n\n assert table != not_a_table # Can't fake it.\n\n def test__eq__same_table(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n table_1 = self._make_one(resource)\n table_2 = self._make_one(resource)\n\n assert table_1 == table_2\n\n def test__eq__same_table_property_different(self):\n table_ref_resource = {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n\n resource_1 = {\"tableReference\": table_ref_resource, \"friendlyName\": \"Table One\"}\n table_1 = self._make_one(resource_1)\n\n resource_2 = {\"tableReference\": table_ref_resource, \"friendlyName\": \"Table Two\"}\n table_2 = self._make_one(resource_2)\n\n assert table_1 == table_2 # Still equal, only table reference is important.\n\n 
def test__eq__different_table(self):\n resource_1 = {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n table_1 = self._make_one(resource_1)\n\n resource_2 = {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_quux\",\n }\n }\n table_2 = self._make_one(resource_2)\n\n assert table_1 != table_2\n\n def test_hashable(self):\n resource = {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n table_item = self._make_one(resource)\n table_item_2 = self._make_one(resource)\n\n assert hash(table_item) == hash(table_item_2)\n\n\nclass TestTableClassesInterchangeability:\n @staticmethod\n def _make_table(*args, **kwargs):\n from google.cloud.bigquery.table import Table\n\n return Table(*args, **kwargs)\n\n @staticmethod\n def _make_table_ref(*args, **kwargs):\n from google.cloud.bigquery.table import TableReference\n\n return TableReference(*args, **kwargs)\n\n @staticmethod\n def _make_table_list_item(*args, **kwargs):\n from google.cloud.bigquery.table import TableListItem\n\n return TableListItem(*args, **kwargs)\n\n def test_table_eq_table_ref(self):\n\n table = self._make_table(\"project_foo.dataset_bar.table_baz\")\n dataset_ref = DatasetReference(\"project_foo\", \"dataset_bar\")\n table_ref = self._make_table_ref(dataset_ref, \"table_baz\")\n\n assert table == table_ref\n assert table_ref == table\n\n def test_table_eq_table_list_item(self):\n table = self._make_table(\"project_foo.dataset_bar.table_baz\")\n table_list_item = self._make_table_list_item(\n {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n )\n\n assert table == table_list_item\n assert table_list_item == table\n\n def test_table_ref_eq_table_list_item(self):\n\n dataset_ref = DatasetReference(\"project_foo\", \"dataset_bar\")\n table_ref = self._make_table_ref(dataset_ref, \"table_baz\")\n table_list_item = self._make_table_list_item(\n {\n \"tableReference\": {\n \"projectId\": \"project_foo\",\n \"datasetId\": \"dataset_bar\",\n \"tableId\": \"table_baz\",\n }\n }\n )\n\n assert table_ref == table_list_item\n assert table_list_item == table_ref\n\n\nclass TestSnapshotDefinition:\n @staticmethod\n def _get_target_class():\n from google.cloud.bigquery.table import SnapshotDefinition\n\n return SnapshotDefinition\n\n @classmethod\n def _make_one(cls, *args, **kwargs):\n klass = cls._get_target_class()\n return klass(*args, **kwargs)\n\n def test_ctor_empty_resource(self):\n instance = self._make_one(resource={})\n assert instance.base_table_reference is None\n assert instance.snapshot_time is None\n\n def test_ctor_full_resource(self):\n from google.cloud._helpers import UTC\n from google.cloud.bigquery.table import TableReference\n\n resource = {\n \"baseTableReference\": {\n \"projectId\": \"my-project\",\n \"datasetId\": \"your-dataset\",\n \"tableId\": \"our-table\",\n },\n \"snapshotTime\": \"2005-06-07T19:35:02.123Z\",\n }\n instance = self._make_one(resource)\n\n expected_table_ref = TableReference.from_string(\n \"my-project.your-dataset.our-table\"\n )\n assert instance.base_table_reference == expected_table_ref\n\n expected_time = datetime.datetime(2005, 6, 7, 19, 35, 2, 123000, tzinfo=UTC)\n assert instance.snapshot_time == expected_time\n\n\nclass TestRow(unittest.TestCase):\n def test_row(self):\n from 
google.cloud.bigquery.table import Row\n\n VALUES = (1, 2, 3)\n row = Row(VALUES, {\"a\": 0, \"b\": 1, \"c\": 2})\n self.assertEqual(row.a, 1)\n self.assertEqual(row[1], 2)\n self.assertEqual(row[\"c\"], 3)\n self.assertEqual(len(row), 3)\n self.assertEqual(row.values(), VALUES)\n self.assertEqual(set(row.keys()), set({\"a\": 1, \"b\": 2, \"c\": 3}.keys()))\n self.assertEqual(set(row.items()), set({\"a\": 1, \"b\": 2, \"c\": 3}.items()))\n self.assertEqual(row.get(\"a\"), 1)\n self.assertEqual(row.get(\"d\"), None)\n self.assertEqual(row.get(\"d\", \"\"), \"\")\n self.assertEqual(row.get(\"d\", default=\"\"), \"\")\n self.assertEqual(repr(row), \"Row((1, 2, 3), {'a': 0, 'b': 1, 'c': 2})\")\n self.assertFalse(row != row)\n self.assertFalse(row == 3)\n with self.assertRaises(AttributeError):\n row.z\n with self.assertRaises(KeyError):\n row[\"z\"]\n\n\nclass Test_EmptyRowIterator(unittest.TestCase):\n def _make_one(self):\n from google.cloud.bigquery.table import _EmptyRowIterator\n\n return _EmptyRowIterator()\n\n def test_total_rows_eq_zero(self):\n row_iterator = self._make_one()\n self.assertEqual(row_iterator.total_rows, 0)\n\n @mock.patch(\"google.cloud.bigquery.table.pyarrow\", new=None)\n def test_to_arrow_error_if_pyarrow_is_none(self):\n row_iterator = self._make_one()\n with self.assertRaises(ValueError):\n row_iterator.to_arrow()\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow(self):\n row_iterator = self._make_one()\n tbl = row_iterator.to_arrow()\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 0)\n\n @mock.patch(\"google.cloud.bigquery.table.pandas\", new=None)\n def test_to_dataframe_error_if_pandas_is_none(self):\n row_iterator = self._make_one()\n with self.assertRaises(ValueError):\n row_iterator.to_dataframe()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe(self):\n row_iterator = self._make_one()\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 0) # verify the number of rows\n\n @mock.patch(\"google.cloud.bigquery.table.pandas\", new=None)\n def test_to_dataframe_iterable_error_if_pandas_is_none(self):\n row_iterator = self._make_one()\n with self.assertRaises(ValueError):\n row_iterator.to_dataframe_iterable()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_iterable(self):\n row_iterator = self._make_one()\n df_iter = row_iterator.to_dataframe_iterable()\n\n result = list(df_iter)\n\n self.assertEqual(len(result), 1)\n df = result[0]\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 0) # Verify the number of rows.\n self.assertEqual(len(df.columns), 0)\n\n @mock.patch(\"google.cloud.bigquery.table.geopandas\", new=None)\n def test_to_geodataframe_if_geopandas_is_none(self):\n row_iterator = self._make_one()\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\n \"The geopandas library is not installed, please install \"\n \"geopandas to use the to_geodataframe() function.\"\n ),\n ):\n row_iterator.to_geodataframe(create_bqstorage_client=False)\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe(self):\n row_iterator = self._make_one()\n df = row_iterator.to_geodataframe(create_bqstorage_client=False)\n self.assertIsInstance(df, geopandas.GeoDataFrame)\n self.assertEqual(len(df), 0) # verify the number of rows\n self.assertEqual(df.crs.srs, \"EPSG:4326\")\n 
self.assertEqual(df.crs.name, \"WGS 84\")\n\n\nclass TestRowIterator(unittest.TestCase):\n def _class_under_test(self):\n from google.cloud.bigquery.table import RowIterator\n\n return RowIterator\n\n def _make_one(\n self,\n client=None,\n api_request=None,\n path=None,\n schema=None,\n table=None,\n **kwargs\n ):\n from google.cloud.bigquery.table import TableReference\n\n if client is None:\n client = _mock_client()\n\n if api_request is None:\n api_request = mock.sentinel.api_request\n\n if path is None:\n path = \"/foo\"\n\n if schema is None:\n schema = []\n\n if table is None:\n table = TableReference.from_string(\"my-project.my_dataset.my_table\")\n\n return self._class_under_test()(\n client, api_request, path, schema, table=table, **kwargs\n )\n\n def _make_one_from_data(self, schema=(), rows=()):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [SchemaField(*a) for a in schema]\n rows = [{\"f\": [{\"v\": v} for v in row]} for row in rows]\n\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n return self._make_one(_mock_client(), api_request, path, schema)\n\n def test_constructor(self):\n from google.cloud.bigquery.table import _item_to_row\n from google.cloud.bigquery.table import _rows_page_start\n\n client = _mock_client()\n path = \"/some/path\"\n iterator = self._make_one(client=client, path=path)\n\n # Objects are set without copying.\n self.assertIs(iterator.client, client)\n self.assertIs(iterator.item_to_value, _item_to_row)\n self.assertIs(iterator._page_start, _rows_page_start)\n # Properties have the expect value.\n self.assertEqual(iterator.extra_params, {})\n self.assertEqual(iterator._items_key, \"rows\")\n self.assertIsNone(iterator.max_results)\n self.assertEqual(iterator.path, path)\n self.assertFalse(iterator._started)\n self.assertIsNone(iterator.total_rows)\n # Changing attributes.\n self.assertEqual(iterator.page_number, 0)\n self.assertIsNone(iterator.next_page_token)\n self.assertEqual(iterator.num_results, 0)\n\n def test_constructor_with_table(self):\n from google.cloud.bigquery.table import Table\n\n table = Table(\"proj.dset.tbl\")\n iterator = self._make_one(table=table, total_rows=100)\n self.assertIs(iterator._table, table)\n self.assertEqual(iterator.total_rows, 100)\n\n def test_constructor_with_dict_schema(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n {\"name\": \"full_name\", \"type\": \"STRING\", \"mode\": \"REQUIRED\"},\n {\"name\": \"age\", \"type\": \"INT64\", \"mode\": \"NULLABLE\"},\n ]\n\n iterator = self._make_one(schema=schema)\n\n expected_schema = [\n SchemaField(\"full_name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INT64\", mode=\"NULLABLE\"),\n ]\n self.assertEqual(iterator.schema, expected_schema)\n\n def test_iterate(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n self.assertEqual(row_iterator.num_results, 0)\n\n rows_iter = iter(row_iterator)\n\n val1 = next(rows_iter)\n self.assertEqual(val1.name, \"Phred Phlyntstone\")\n self.assertEqual(row_iterator.num_results, 1)\n\n val2 = next(rows_iter)\n 
self.assertEqual(val2.name, \"Bharney Rhubble\")\n self.assertEqual(row_iterator.num_results, 2)\n\n with self.assertRaises(StopIteration):\n next(rows_iter)\n\n api_request.assert_called_once_with(method=\"GET\", path=path, query_params={})\n\n def test_iterate_with_cached_first_page(self):\n from google.cloud.bigquery.schema import SchemaField\n\n first_page = {\n \"rows\": [\n {\"f\": [{\"v\": \"Whillma Phlyntstone\"}, {\"v\": \"27\"}]},\n {\"f\": [{\"v\": \"Bhetty Rhubble\"}, {\"v\": \"28\"}]},\n ],\n \"pageToken\": \"next-page\",\n }\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(\n _mock_client(), api_request, path, schema, first_page_response=first_page\n )\n rows = list(row_iterator)\n self.assertEqual(len(rows), 4)\n self.assertEqual(rows[0].age, 27)\n self.assertEqual(rows[1].age, 28)\n self.assertEqual(rows[2].age, 32)\n self.assertEqual(rows[3].age, 33)\n\n api_request.assert_called_once_with(\n method=\"GET\", path=path, query_params={\"pageToken\": \"next-page\"}\n )\n\n def test_page_size(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n\n row_iterator = self._make_one(\n _mock_client(), api_request, path, schema, page_size=4\n )\n row_iterator._get_next_page_response()\n\n api_request.assert_called_once_with(\n method=\"GET\",\n path=path,\n query_params={\"maxResults\": row_iterator._page_size},\n )\n\n def test__is_completely_cached_returns_false_without_first_page(self):\n iterator = self._make_one(first_page_response=None)\n self.assertFalse(iterator._is_completely_cached())\n\n def test__is_completely_cached_returns_false_with_page_token(self):\n first_page = {\"pageToken\": \"next-page\"}\n iterator = self._make_one(first_page_response=first_page)\n self.assertFalse(iterator._is_completely_cached())\n\n def test__is_completely_cached_returns_true(self):\n first_page = {\"rows\": []}\n iterator = self._make_one(first_page_response=first_page)\n self.assertTrue(iterator._is_completely_cached())\n\n def test__validate_bqstorage_returns_false_when_completely_cached(self):\n first_page = {\"rows\": []}\n iterator = self._make_one(first_page_response=first_page)\n self.assertFalse(\n iterator._validate_bqstorage(\n bqstorage_client=None, create_bqstorage_client=True\n )\n )\n\n def test__validate_bqstorage_returns_false_if_max_results_set(self):\n iterator = self._make_one(\n max_results=10, first_page_response=None # not cached\n )\n result = iterator._validate_bqstorage(\n bqstorage_client=None, create_bqstorage_client=True\n )\n self.assertFalse(result)\n\n def test__validate_bqstorage_returns_false_if_missing_dependency(self):\n iterator = self._make_one(first_page_response=None) # not cached\n\n def fail_bqstorage_import(name, globals, locals, fromlist, level):\n # NOTE: *very* simplified, assuming a straightforward absolute import\n return \"bigquery_storage\" in name or (\n fromlist is not None 
and \"bigquery_storage\" in fromlist\n )\n\n no_bqstorage = maybe_fail_import(predicate=fail_bqstorage_import)\n\n with no_bqstorage:\n result = iterator._validate_bqstorage(\n bqstorage_client=None, create_bqstorage_client=True\n )\n\n self.assertFalse(result)\n\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test__validate_bqstorage_returns_false_w_warning_if_obsolete_version(self):\n from google.cloud.bigquery.exceptions import LegacyBigQueryStorageError\n\n iterator = self._make_one(first_page_response=None) # not cached\n\n patcher = mock.patch(\n \"google.cloud.bigquery.table._helpers.BQ_STORAGE_VERSIONS.verify_version\",\n side_effect=LegacyBigQueryStorageError(\"BQ Storage too old\"),\n )\n with patcher, warnings.catch_warnings(record=True) as warned:\n result = iterator._validate_bqstorage(\n bqstorage_client=None, create_bqstorage_client=True\n )\n\n self.assertFalse(result)\n\n matching_warnings = [\n warning for warning in warned if \"BQ Storage too old\" in str(warning)\n ]\n assert matching_warnings, \"Obsolete dependency warning not raised.\"\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n SchemaField(\n \"child\",\n \"RECORD\",\n mode=\"REPEATED\",\n fields=[\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ],\n ),\n ]\n rows = [\n {\n \"f\": [\n {\"v\": \"Bharney Rhubble\"},\n {\"v\": \"33\"},\n {\n \"v\": [\n {\"v\": {\"f\": [{\"v\": \"Whamm-Whamm Rhubble\"}, {\"v\": \"3\"}]}},\n {\"v\": {\"f\": [{\"v\": \"Hoppy\"}, {\"v\": \"1\"}]}},\n ]\n },\n ]\n },\n {\n \"f\": [\n {\"v\": \"Wylma Phlyntstone\"},\n {\"v\": \"29\"},\n {\n \"v\": [\n {\"v\": {\"f\": [{\"v\": \"Bepples Phlyntstone\"}, {\"v\": \"0\"}]}},\n {\"v\": {\"f\": [{\"v\": \"Dino\"}, {\"v\": \"4\"}]}},\n ]\n },\n ]\n },\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n tbl = row_iterator.to_arrow(create_bqstorage_client=False)\n\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 2)\n\n # Check the schema.\n self.assertEqual(tbl.schema[0].name, \"name\")\n self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))\n self.assertEqual(tbl.schema[1].name, \"age\")\n self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))\n child_field = tbl.schema[2]\n self.assertEqual(child_field.name, \"child\")\n self.assertTrue(pyarrow.types.is_list(child_field.type))\n self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))\n self.assertEqual(child_field.type.value_type[0].name, \"name\")\n self.assertEqual(child_field.type.value_type[1].name, \"age\")\n\n # Check the data.\n tbl_data = tbl.to_pydict()\n names = tbl_data[\"name\"]\n ages = tbl_data[\"age\"]\n children = tbl_data[\"child\"]\n self.assertEqual(names, [\"Bharney Rhubble\", \"Wylma Phlyntstone\"])\n self.assertEqual(ages, [33, 29])\n self.assertEqual(\n children,\n [\n [\n {\"name\": \"Whamm-Whamm Rhubble\", \"age\": 3},\n {\"name\": \"Hoppy\", \"age\": 1},\n ],\n [{\"name\": \"Bepples Phlyntstone\", \"age\": 0}, {\"name\": \"Dino\", \"age\": 4}],\n ],\n )\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow_w_nulls(self):\n from 
google.cloud.bigquery.schema import SchemaField\n\n schema = [SchemaField(\"name\", \"STRING\"), SchemaField(\"age\", \"INTEGER\")]\n rows = [\n {\"f\": [{\"v\": \"Donkey\"}, {\"v\": 32}]},\n {\"f\": [{\"v\": \"Diddy\"}, {\"v\": 29}]},\n {\"f\": [{\"v\": \"Dixie\"}, {\"v\": None}]},\n {\"f\": [{\"v\": None}, {\"v\": 111}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n tbl = row_iterator.to_arrow(create_bqstorage_client=False)\n\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 4)\n\n # Check the schema.\n self.assertEqual(tbl.schema[0].name, \"name\")\n self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))\n self.assertEqual(tbl.schema[1].name, \"age\")\n self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))\n\n # Check the data.\n tbl_data = tbl.to_pydict()\n names = tbl_data[\"name\"]\n ages = tbl_data[\"age\"]\n self.assertEqual(names, [\"Donkey\", \"Diddy\", \"Dixie\", None])\n self.assertEqual(ages, [32, 29, None, 111])\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow_w_unknown_type(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n SchemaField(\"sport\", \"UNKNOWN_TYPE\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}, {\"v\": \"volleyball\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}, {\"v\": \"basketball\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with warnings.catch_warnings(record=True) as warned:\n tbl = row_iterator.to_arrow(create_bqstorage_client=False)\n\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 2)\n\n # Check the schema.\n self.assertEqual(tbl.schema[0].name, \"name\")\n self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))\n self.assertEqual(tbl.schema[1].name, \"age\")\n self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))\n self.assertEqual(tbl.schema[2].name, \"sport\")\n\n # Check the data.\n tbl_data = tbl.to_pydict()\n names = tbl_data[\"name\"]\n ages = tbl_data[\"age\"]\n sports = tbl_data[\"sport\"]\n self.assertEqual(names, [\"Bharney Rhubble\", \"Wylma Phlyntstone\"])\n self.assertEqual(ages, [33, 29])\n self.assertEqual(sports, [\"volleyball\", \"basketball\"])\n\n self.assertEqual(len(warned), 1)\n warning = warned[0]\n self.assertTrue(\"sport\" in str(warning))\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow_w_empty_table(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n SchemaField(\n \"child\",\n \"RECORD\",\n mode=\"REPEATED\",\n fields=[\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ],\n ),\n ]\n rows = []\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n tbl = row_iterator.to_arrow(create_bqstorage_client=False)\n\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 0)\n\n # Check the schema.\n self.assertEqual(tbl.schema[0].name, \"name\")\n 
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))\n self.assertEqual(tbl.schema[1].name, \"age\")\n self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))\n child_field = tbl.schema[2]\n self.assertEqual(child_field.name, \"child\")\n self.assertTrue(pyarrow.types.is_list(child_field.type))\n self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))\n self.assertEqual(child_field.type.value_type[0].name, \"name\")\n self.assertEqual(child_field.type.value_type[1].name, \"age\")\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_arrow_max_results_w_explicit_bqstorage_client_warning(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n mock_client = _mock_client()\n mock_bqstorage_client = mock.sentinel.bq_storage_client\n\n row_iterator = self._make_one(\n client=mock_client,\n api_request=api_request,\n path=path,\n schema=schema,\n max_results=42,\n )\n\n with warnings.catch_warnings(record=True) as warned:\n row_iterator.to_arrow(bqstorage_client=mock_bqstorage_client)\n\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n self.assertEqual(len(matches), 1, msg=\"User warning was not emitted.\")\n self.assertIn(\n __file__, str(matches[0]), msg=\"Warning emitted with incorrect stacklevel\"\n )\n mock_client._ensure_bqstorage_client.assert_not_called()\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_arrow_max_results_w_create_bqstorage_client_no_warning(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n mock_client = _mock_client()\n\n row_iterator = self._make_one(\n client=mock_client,\n api_request=api_request,\n path=path,\n schema=schema,\n max_results=42,\n )\n\n with warnings.catch_warnings(record=True) as warned:\n row_iterator.to_arrow(create_bqstorage_client=True)\n\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n self.assertFalse(matches)\n mock_client._ensure_bqstorage_client.assert_not_called()\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_arrow_w_bqstorage(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = 
mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n streams = [\n # Use two streams we want to check frames are read from each stream.\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n ]\n session = bigquery_storage.types.ReadSession(streams=streams)\n arrow_schema = pyarrow.schema(\n [\n pyarrow.field(\"colA\", pyarrow.int64()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.float64()),\n pyarrow.field(\"colB\", pyarrow.string()),\n ]\n )\n session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n expected_num_rows = 2\n expected_num_columns = 3\n page_items = [\n pyarrow.array([1, -1]),\n pyarrow.array([2.0, 4.0]),\n pyarrow.array([\"abc\", \"def\"]),\n ]\n\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_arrow.return_value = pyarrow.RecordBatch.from_arrays(\n page_items, schema=arrow_schema\n )\n mock_pages = (mock_page, mock_page, mock_page)\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n schema = [\n schema.SchemaField(\"colA\", \"INTEGER\"),\n schema.SchemaField(\"colC\", \"FLOAT\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n actual_tbl = row_iterator.to_arrow(bqstorage_client=bqstorage_client)\n\n # Are the columns in the expected order?\n self.assertEqual(actual_tbl.num_columns, expected_num_columns)\n self.assertEqual(actual_tbl.schema[0].name, \"colA\")\n self.assertEqual(actual_tbl.schema[1].name, \"colC\")\n self.assertEqual(actual_tbl.schema[2].name, \"colB\")\n\n # Have expected number of rows?\n total_pages = len(streams) * len(mock_pages)\n total_rows = expected_num_rows * total_pages\n self.assertEqual(actual_tbl.num_rows, total_rows)\n\n # Don't close the client if it was passed in.\n bqstorage_client._transport.grpc_channel.close.assert_not_called()\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_arrow_w_bqstorage_creates_client(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n mock_client = _mock_client()\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n mock_client._ensure_bqstorage_client.return_value = bqstorage_client\n session = bigquery_storage.types.ReadSession()\n bqstorage_client.create_read_session.return_value = session\n row_iterator = mut.RowIterator(\n mock_client,\n None, # api_request: ignored\n None, # path: ignored\n [\n schema.SchemaField(\"colA\", \"STRING\"),\n schema.SchemaField(\"colC\", \"STRING\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ],\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n )\n row_iterator.to_arrow(create_bqstorage_client=True)\n 
mock_client._ensure_bqstorage_client.assert_called_once()\n bqstorage_client._transport.grpc_channel.close.assert_called_once()\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_arrow_ensure_bqstorage_client_wo_bqstorage(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Alice\"}, {\"v\": \"98\"}]},\n {\"f\": [{\"v\": \"Bob\"}, {\"v\": \"99\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n\n mock_client = _mock_client()\n mock_client._ensure_bqstorage_client.return_value = None\n row_iterator = self._make_one(mock_client, api_request, path, schema)\n\n tbl = row_iterator.to_arrow(create_bqstorage_client=True)\n\n # The client attempted to create a BQ Storage client, and even though\n # that was not possible, results were still returned without errors.\n mock_client._ensure_bqstorage_client.assert_called_once()\n self.assertIsInstance(tbl, pyarrow.Table)\n self.assertEqual(tbl.num_rows, 2)\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_arrow_w_bqstorage_no_streams(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n session = bigquery_storage.types.ReadSession()\n arrow_schema = pyarrow.schema(\n [\n pyarrow.field(\"colA\", pyarrow.string()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.string()),\n pyarrow.field(\"colB\", pyarrow.string()),\n ]\n )\n session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()\n bqstorage_client.create_read_session.return_value = session\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n [\n schema.SchemaField(\"colA\", \"STRING\"),\n schema.SchemaField(\"colC\", \"STRING\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ],\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n )\n\n actual_table = row_iterator.to_arrow(bqstorage_client=bqstorage_client)\n self.assertEqual(actual_table.num_columns, 3)\n self.assertEqual(actual_table.num_rows, 0)\n self.assertEqual(actual_table.schema[0].name, \"colA\")\n self.assertEqual(actual_table.schema[1].name, \"colC\")\n self.assertEqual(actual_table.schema[2].name, \"colB\")\n\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @unittest.skipIf(tqdm is None, \"Requires `tqdm`\")\n @mock.patch(\"tqdm.tqdm_gui\")\n @mock.patch(\"tqdm.tqdm_notebook\")\n @mock.patch(\"tqdm.tqdm\")\n def test_to_arrow_progress_bar(self, tqdm_mock, tqdm_notebook_mock, tqdm_gui_mock):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n\n progress_bars = (\n (\"tqdm\", tqdm_mock),\n (\"tqdm_notebook\", tqdm_notebook_mock),\n (\"tqdm_gui\", tqdm_gui_mock),\n )\n\n for 
progress_bar_type, progress_bar_mock in progress_bars:\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n tbl = row_iterator.to_arrow(\n progress_bar_type=progress_bar_type, create_bqstorage_client=False,\n )\n\n progress_bar_mock.assert_called()\n progress_bar_mock().update.assert_called()\n progress_bar_mock().close.assert_called_once()\n self.assertEqual(tbl.num_rows, 4)\n\n @mock.patch(\"google.cloud.bigquery.table.pyarrow\", new=None)\n def test_to_arrow_w_pyarrow_none(self):\n schema = []\n rows = []\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with self.assertRaises(ValueError):\n row_iterator.to_arrow()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_iterable(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n\n path = \"/foo\"\n api_request = mock.Mock(\n side_effect=[\n {\n \"rows\": [{\"f\": [{\"v\": \"Bengt\"}, {\"v\": \"32\"}]}],\n \"pageToken\": \"NEXTPAGE\",\n },\n {\"rows\": [{\"f\": [{\"v\": \"Sven\"}, {\"v\": \"33\"}]}]},\n ]\n )\n\n row_iterator = self._make_one(\n _mock_client(), api_request, path, schema, page_size=1, max_results=5\n )\n dfs = row_iterator.to_dataframe_iterable()\n\n self.assertIsInstance(dfs, types.GeneratorType)\n\n df_1 = next(dfs)\n self.assertIsInstance(df_1, pandas.DataFrame)\n self.assertEqual(df_1.name.dtype.name, \"object\")\n self.assertEqual(df_1.age.dtype.name, \"int64\")\n self.assertEqual(len(df_1), 1) # verify the number of rows\n self.assertEqual(\n df_1[\"name\"][0], \"Bengt\"\n ) # verify the first value of 'name' column\n self.assertEqual(df_1[\"age\"][0], 32) # verify the first value of 'age' column\n\n df_2 = next(dfs)\n self.assertEqual(len(df_2), 1) # verify the number of rows\n self.assertEqual(df_2[\"name\"][0], \"Sven\")\n self.assertEqual(df_2[\"age\"][0], 33)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_iterable_with_dtypes(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n\n path = \"/foo\"\n api_request = mock.Mock(\n side_effect=[\n {\n \"rows\": [{\"f\": [{\"v\": \"Bengt\"}, {\"v\": \"32\"}]}],\n \"pageToken\": \"NEXTPAGE\",\n },\n {\"rows\": [{\"f\": [{\"v\": \"Sven\"}, {\"v\": \"33\"}]}]},\n ]\n )\n\n row_iterator = self._make_one(\n _mock_client(), api_request, path, schema, page_size=1, max_results=5\n )\n dfs = row_iterator.to_dataframe_iterable(dtypes={\"age\": \"int32\"})\n\n self.assertIsInstance(dfs, types.GeneratorType)\n\n df_1 = next(dfs)\n self.assertIsInstance(df_1, pandas.DataFrame)\n self.assertEqual(df_1.name.dtype.name, \"object\")\n self.assertEqual(df_1.age.dtype.name, \"int32\")\n self.assertEqual(len(df_1), 1) # verify the number of rows\n self.assertEqual(\n df_1[\"name\"][0], \"Bengt\"\n ) # verify the first value of 'name' column\n self.assertEqual(df_1[\"age\"][0], 32) # verify the first value of 'age' column\n\n df_2 = next(dfs)\n self.assertEqual(len(df_2), 1) # verify the number of rows\n self.assertEqual(df_2[\"name\"][0], \"Sven\")\n self.assertEqual(df_2[\"age\"][0], 33)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires 
`google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_iterable_w_bqstorage(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n arrow_fields = [\n pyarrow.field(\"colA\", pyarrow.int64()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.float64()),\n pyarrow.field(\"colB\", pyarrow.utf8()),\n ]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n streams = [\n # Use two streams we want to check frames are read from each stream.\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n ]\n session = bigquery_storage.types.ReadSession(\n streams=streams,\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n page_dataframe = pandas.DataFrame(\n {\"colA\": [1, -1], \"colC\": [2.0, 4.0], \"colB\": [\"abc\", \"def\"]},\n )\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_dataframe.return_value = page_dataframe\n mock_pages = (mock_page, mock_page, mock_page)\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n schema = [\n schema.SchemaField(\"colA\", \"IGNORED\"),\n schema.SchemaField(\"colC\", \"IGNORED\"),\n schema.SchemaField(\"colB\", \"IGNORED\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n got = list(\n row_iterator.to_dataframe_iterable(bqstorage_client=bqstorage_client)\n )\n\n # Have expected number of rows?\n total_pages = len(streams) * len(mock_pages)\n self.assertEqual(len(got), total_pages)\n\n # Don't close the client if it was passed in.\n bqstorage_client._transport.grpc_channel.close.assert_not_called()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_iterable_w_bqstorage_max_results_warning(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n\n iterator_schema = [\n schema.SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n schema.SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n path = \"/foo\"\n api_request = mock.Mock(\n side_effect=[\n {\n \"rows\": [{\"f\": [{\"v\": \"Bengt\"}, {\"v\": \"32\"}]}],\n \"pageToken\": \"NEXTPAGE\",\n },\n {\"rows\": [{\"f\": [{\"v\": \"Sven\"}, {\"v\": \"33\"}]}]},\n ]\n )\n row_iterator = mut.RowIterator(\n _mock_client(),\n api_request,\n path,\n iterator_schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=iterator_schema,\n max_results=25,\n )\n\n with 
warnings.catch_warnings(record=True) as warned:\n dfs = row_iterator.to_dataframe_iterable(bqstorage_client=bqstorage_client)\n\n # Was a warning emitted?\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n assert len(matches) == 1, \"User warning was not emitted.\"\n assert __file__ in str(matches[0]), \"Warning emitted with incorrect stacklevel\"\n\n # Basic check of what we got as a result.\n dataframes = list(dfs)\n assert len(dataframes) == 2\n assert isinstance(dataframes[0], pandas.DataFrame)\n assert isinstance(dataframes[1], pandas.DataFrame)\n\n @mock.patch(\"google.cloud.bigquery.table.pandas\", new=None)\n def test_to_dataframe_iterable_error_if_pandas_is_none(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with pytest.raises(ValueError, match=\"pandas\"):\n row_iterator.to_dataframe_iterable()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 4) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"age\"]) # verify the column names\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertEqual(df.age.dtype.name, \"int64\")\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_timestamp_out_of_pyarrow_bounds(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [SchemaField(\"some_timestamp\", \"TIMESTAMP\")]\n rows = [\n {\"f\": [{\"v\": \"81953424000000000\"}]}, # 4567-01-01 00:00:00 UTC\n {\"f\": [{\"v\": \"253402214400000000\"}]}, # 9999-12-31 00:00:00 UTC\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n tzinfo = datetime.timezone.utc\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 2) # verify the number of rows\n self.assertEqual(list(df.columns), [\"some_timestamp\"])\n self.assertEqual(\n list(df[\"some_timestamp\"]),\n [\n datetime.datetime(4567, 1, 1, tzinfo=tzinfo),\n datetime.datetime(9999, 12, 31, tzinfo=tzinfo),\n ],\n )\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(pyarrow is None, 
\"Requires `pyarrow`\")\n def test_to_dataframe_datetime_out_of_pyarrow_bounds(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [SchemaField(\"some_datetime\", \"DATETIME\")]\n rows = [\n {\"f\": [{\"v\": \"4567-01-01T00:00:00\"}]},\n {\"f\": [{\"v\": \"9999-12-31T00:00:00\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 2) # verify the number of rows\n self.assertEqual(list(df.columns), [\"some_datetime\"])\n self.assertEqual(\n list(df[\"some_datetime\"]),\n [datetime.datetime(4567, 1, 1), datetime.datetime(9999, 12, 31)],\n )\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(tqdm is None, \"Requires `tqdm`\")\n @mock.patch(\"tqdm.tqdm_gui\")\n @mock.patch(\"tqdm.tqdm_notebook\")\n @mock.patch(\"tqdm.tqdm\")\n def test_to_dataframe_progress_bar(\n self, tqdm_mock, tqdm_notebook_mock, tqdm_gui_mock\n ):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n\n progress_bars = (\n (\"tqdm\", tqdm_mock),\n (\"tqdm_notebook\", tqdm_notebook_mock),\n (\"tqdm_gui\", tqdm_gui_mock),\n )\n\n for progress_bar_type, progress_bar_mock in progress_bars:\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n df = row_iterator.to_dataframe(\n progress_bar_type=progress_bar_type, create_bqstorage_client=False,\n )\n\n progress_bar_mock.assert_called()\n progress_bar_mock().update.assert_called()\n progress_bar_mock().close.assert_called_once()\n self.assertEqual(len(df), 4)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @mock.patch(\"google.cloud.bigquery._tqdm_helpers.tqdm\", new=None)\n def test_to_dataframe_no_tqdm_no_progress_bar(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with warnings.catch_warnings(record=True) as warned:\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n user_warnings = [\n warning for warning in warned if warning.category is UserWarning\n ]\n self.assertEqual(len(user_warnings), 0)\n self.assertEqual(len(df), 4)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @mock.patch(\"google.cloud.bigquery._tqdm_helpers.tqdm\", new=None)\n def test_to_dataframe_no_tqdm(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n 
SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with warnings.catch_warnings(record=True) as warned:\n df = row_iterator.to_dataframe(\n progress_bar_type=\"tqdm\", create_bqstorage_client=False,\n )\n\n user_warnings = [\n warning for warning in warned if warning.category is UserWarning\n ]\n self.assertEqual(len(user_warnings), 1)\n\n # Even though the progress bar won't show, downloading the dataframe\n # should still work.\n self.assertEqual(len(df), 4)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(tqdm is None, \"Requires `tqdm`\")\n @mock.patch(\"tqdm.tqdm_gui\", new=None) # will raise TypeError on call\n @mock.patch(\"tqdm.tqdm_notebook\", new=None) # will raise TypeError on call\n @mock.patch(\"tqdm.tqdm\", new=None) # will raise TypeError on call\n def test_to_dataframe_tqdm_error(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n {\"f\": [{\"v\": \"Wylma Phlyntstone\"}, {\"v\": \"29\"}]},\n {\"f\": [{\"v\": \"Bhettye Rhubble\"}, {\"v\": \"27\"}]},\n ]\n path = \"/foo\"\n\n for progress_bar_type in (\"tqdm\", \"tqdm_notebook\", \"tqdm_gui\"):\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with warnings.catch_warnings(record=True) as warned:\n df = row_iterator.to_dataframe(\n progress_bar_type=progress_bar_type, create_bqstorage_client=False,\n )\n\n self.assertEqual(len(df), 4) # all should be well\n\n # Warn that a progress bar was requested, but creating the tqdm\n # progress bar failed.\n for warning in warned:\n self.assertIs(warning.category, UserWarning)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_w_empty_results(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n api_request = mock.Mock(return_value={\"rows\": []})\n row_iterator = self._make_one(_mock_client(), api_request, schema=schema)\n\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 0) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"age\"]) # verify the column names\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_w_various_types_nullable(self):\n import datetime\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"start_timestamp\", \"TIMESTAMP\"),\n SchemaField(\"seconds\", \"INT64\"),\n SchemaField(\"miles\", \"FLOAT64\"),\n SchemaField(\"payment_type\", \"STRING\"),\n SchemaField(\"complete\", \"BOOL\"),\n SchemaField(\"date\", \"DATE\"),\n ]\n row_data = [\n [None, None, None, None, None, None],\n [\"1433836800000000\", \"420\", \"1.1\", u\"Cash\", 
\"true\", \"1999-12-01\"],\n [\"1387811700000000\", \"2580\", \"17.7\", u\"Cash\", \"false\", \"1953-06-14\"],\n [\"1385565300000000\", \"2280\", \"4.4\", u\"Credit\", \"true\", \"1981-11-04\"],\n ]\n rows = [{\"f\": [{\"v\": field} for field in row]} for row in row_data]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n df = row_iterator.to_dataframe(create_bqstorage_client=False)\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 4) # verify the number of rows\n exp_columns = [field.name for field in schema]\n self.assertEqual(list(df), exp_columns) # verify the column names\n\n for index, row in df.iterrows():\n if index == 0:\n self.assertTrue(row.isnull().all())\n else:\n self.assertIsInstance(row.start_timestamp, pandas.Timestamp)\n self.assertIsInstance(row.seconds, float)\n self.assertIsInstance(row.payment_type, str)\n self.assertIsInstance(row.complete, bool)\n self.assertIsInstance(row.date, datetime.date)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_column_dtypes(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"start_timestamp\", \"TIMESTAMP\"),\n SchemaField(\"seconds\", \"INT64\"),\n SchemaField(\"miles\", \"FLOAT64\"),\n SchemaField(\"km\", \"FLOAT64\"),\n SchemaField(\"payment_type\", \"STRING\"),\n SchemaField(\"complete\", \"BOOL\"),\n SchemaField(\"date\", \"DATE\"),\n ]\n row_data = [\n [\"1433836800000000\", \"420\", \"1.1\", \"1.77\", u\"Cash\", \"true\", \"1999-12-01\"],\n [\n \"1387811700000000\",\n \"2580\",\n \"17.7\",\n \"28.5\",\n u\"Cash\",\n \"false\",\n \"1953-06-14\",\n ],\n [\"1385565300000000\", \"2280\", \"4.4\", \"7.1\", u\"Credit\", \"true\", \"1981-11-04\"],\n ]\n rows = [{\"f\": [{\"v\": field} for field in row]} for row in row_data]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n df = row_iterator.to_dataframe(\n dtypes={\"km\": \"float16\"}, create_bqstorage_client=False,\n )\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 3) # verify the number of rows\n exp_columns = [field.name for field in schema]\n self.assertEqual(list(df), exp_columns) # verify the column names\n\n self.assertEqual(df.start_timestamp.dtype.name, \"datetime64[ns, UTC]\")\n self.assertEqual(df.seconds.dtype.name, \"int64\")\n self.assertEqual(df.miles.dtype.name, \"float64\")\n self.assertEqual(df.km.dtype.name, \"float16\")\n self.assertEqual(df.payment_type.dtype.name, \"object\")\n self.assertEqual(df.complete.dtype.name, \"bool\")\n self.assertEqual(df.date.dtype.name, \"object\")\n\n @mock.patch(\"google.cloud.bigquery.table.pandas\", new=None)\n def test_to_dataframe_error_if_pandas_is_none(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n row_iterator = self._make_one(_mock_client(), api_request, path, schema)\n\n with self.assertRaises(ValueError):\n row_iterator.to_dataframe()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n 
@mock.patch(\"google.cloud.bigquery.table.shapely\", new=None)\n def test_to_dataframe_error_if_shapely_is_none(self):\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\n \"The shapely library is not installed, please install \"\n \"shapely to use the geography_as_object option.\"\n ),\n ):\n self._make_one_from_data().to_dataframe(geography_as_object=True)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_max_results_w_bqstorage_warning(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n bqstorage_client = mock.Mock()\n\n row_iterator = self._make_one(\n client=_mock_client(),\n api_request=api_request,\n path=path,\n schema=schema,\n max_results=42,\n )\n\n with warnings.catch_warnings(record=True) as warned:\n row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n self.assertEqual(len(matches), 1, msg=\"User warning was not emitted.\")\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_max_results_w_explicit_bqstorage_client_warning(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n mock_client = _mock_client()\n mock_bqstorage_client = mock.sentinel.bq_storage_client\n\n row_iterator = self._make_one(\n client=mock_client,\n api_request=api_request,\n path=path,\n schema=schema,\n max_results=42,\n )\n\n with warnings.catch_warnings(record=True) as warned:\n row_iterator.to_dataframe(bqstorage_client=mock_bqstorage_client)\n\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n self.assertEqual(len(matches), 1, msg=\"User warning was not emitted.\")\n self.assertIn(\n __file__, str(matches[0]), msg=\"Warning emitted with incorrect stacklevel\"\n )\n mock_client._ensure_bqstorage_client.assert_not_called()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_max_results_w_create_bqstorage_client_no_warning(self):\n from google.cloud.bigquery.schema import SchemaField\n\n schema = [\n SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\"),\n SchemaField(\"age\", \"INTEGER\", mode=\"REQUIRED\"),\n ]\n rows = [\n {\"f\": [{\"v\": \"Phred Phlyntstone\"}, {\"v\": \"32\"}]},\n {\"f\": [{\"v\": \"Bharney Rhubble\"}, {\"v\": \"33\"}]},\n ]\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": rows})\n mock_client = _mock_client()\n\n row_iterator = self._make_one(\n client=mock_client,\n api_request=api_request,\n path=path,\n schema=schema,\n max_results=42,\n )\n\n with warnings.catch_warnings(record=True) as warned:\n 
row_iterator.to_dataframe(create_bqstorage_client=True)\n\n matches = [\n warning\n for warning in warned\n if warning.category is UserWarning\n and \"cannot use bqstorage_client\" in str(warning).lower()\n and \"REST\" in str(warning)\n ]\n self.assertFalse(matches)\n mock_client._ensure_bqstorage_client.assert_not_called()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_dataframe_w_bqstorage_creates_client(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n mock_client = _mock_client()\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n mock_client._ensure_bqstorage_client.return_value = bqstorage_client\n session = bigquery_storage.types.ReadSession()\n bqstorage_client.create_read_session.return_value = session\n row_iterator = mut.RowIterator(\n mock_client,\n None, # api_request: ignored\n None, # path: ignored\n [\n schema.SchemaField(\"colA\", \"STRING\"),\n schema.SchemaField(\"colC\", \"STRING\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ],\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n )\n row_iterator.to_dataframe(create_bqstorage_client=True)\n mock_client._ensure_bqstorage_client.assert_called_once()\n bqstorage_client._transport.grpc_channel.close.assert_called_once()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_dataframe_w_bqstorage_no_streams(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n session = bigquery_storage.types.ReadSession()\n bqstorage_client.create_read_session.return_value = session\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n api_request=None,\n path=None,\n schema=[\n schema.SchemaField(\"colA\", \"INTEGER\"),\n schema.SchemaField(\"colC\", \"FLOAT\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ],\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n )\n\n got = row_iterator.to_dataframe(bqstorage_client)\n column_names = [\"colA\", \"colC\", \"colB\"]\n self.assertEqual(list(got), column_names)\n self.assertTrue(got.empty)\n\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_w_bqstorage_logs_session(self):\n from google.cloud.bigquery.table import Table\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n session = bigquery_storage.types.ReadSession()\n session.name = \"projects/test-proj/locations/us/sessions/SOMESESSION\"\n bqstorage_client.create_read_session.return_value = session\n mock_logger = mock.create_autospec(logging.Logger)\n row_iterator = self._make_one(\n _mock_client(), table=Table(\"debug-proj.debug_dset.debug_tbl\")\n )\n\n with mock.patch(\"google.cloud.bigquery._pandas_helpers._LOGGER\", mock_logger):\n row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n mock_logger.debug.assert_any_call(\n \"Started reading table 'debug-proj.debug_dset.debug_tbl' \"\n \"with BQ Storage API session 
'projects/test-proj/locations/us/sessions/SOMESESSION'.\"\n )\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_w_bqstorage_empty_streams(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n arrow_fields = [\n pyarrow.field(\"colA\", pyarrow.int64()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.float64()),\n pyarrow.field(\"colB\", pyarrow.utf8()),\n ]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n session = bigquery_storage.types.ReadSession(\n streams=[{\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"}],\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n mock_pages = mock.PropertyMock(return_value=())\n type(mock_rows).pages = mock_pages\n\n # Schema is required when there are no record batches in the stream.\n schema = [\n schema.SchemaField(\"colA\", \"INTEGER\"),\n schema.SchemaField(\"colC\", \"FLOAT\"),\n schema.SchemaField(\"colB\", \"STRING\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n got = row_iterator.to_dataframe(bqstorage_client)\n\n column_names = [\"colA\", \"colC\", \"colB\"]\n self.assertEqual(list(got), column_names)\n self.assertTrue(got.empty)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_w_bqstorage_nonempty(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n arrow_fields = [\n pyarrow.field(\"colA\", pyarrow.int64()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.float64()),\n pyarrow.field(\"colB\", pyarrow.utf8()),\n ]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n streams = [\n # Use two streams we want to check frames are read from each stream.\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n ]\n session = bigquery_storage.types.ReadSession(\n streams=streams,\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n 
page_items = [\n pyarrow.array([1, -1]),\n pyarrow.array([2.0, 4.0]),\n pyarrow.array([\"abc\", \"def\"]),\n ]\n page_record_batch = pyarrow.RecordBatch.from_arrays(\n page_items, schema=arrow_schema\n )\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_arrow.return_value = page_record_batch\n mock_pages = (mock_page, mock_page, mock_page)\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n schema = [\n schema.SchemaField(\"colA\", \"IGNORED\"),\n schema.SchemaField(\"colC\", \"IGNORED\"),\n schema.SchemaField(\"colB\", \"IGNORED\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n # Are the columns in the expected order?\n column_names = [\"colA\", \"colC\", \"colB\"]\n self.assertEqual(list(got), column_names)\n\n # Have expected number of rows?\n total_pages = len(streams) * len(mock_pages)\n total_rows = len(page_items[0]) * total_pages\n self.assertEqual(len(got.index), total_rows)\n\n # Don't close the client if it was passed in.\n bqstorage_client._transport.grpc_channel.close.assert_not_called()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_w_bqstorage_multiple_streams_return_unique_index(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n arrow_fields = [pyarrow.field(\"colA\", pyarrow.int64())]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n streams = [\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n ]\n session = bigquery_storage.types.ReadSession(\n streams=streams,\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n\n page_items = [\n pyarrow.array([1, -1]),\n ]\n page_record_batch = pyarrow.RecordBatch.from_arrays(\n page_items, schema=arrow_schema\n )\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_arrow.return_value = page_record_batch\n mock_pages = (mock_page, mock_page, mock_page)\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n row_iterator = self._make_one(\n schema=[schema.SchemaField(\"colA\", \"IGNORED\")],\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n )\n got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n self.assertEqual(list(got), [\"colA\"])\n total_pages = len(streams) * len(mock_pages)\n total_rows = len(page_items[0]) * total_pages\n self.assertEqual(len(got.index), total_rows)\n self.assertTrue(got.index.is_unique)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(tqdm is None, \"Requires 
`tqdm`\")\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n @mock.patch(\"tqdm.tqdm\")\n def test_to_dataframe_w_bqstorage_updates_progress_bar(self, tqdm_mock):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n # Speed up testing.\n mut._PROGRESS_INTERVAL = 0.01\n\n arrow_fields = [pyarrow.field(\"testcol\", pyarrow.int64())]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n streams = [\n # Use two streams we want to check that progress bar updates are\n # sent from each stream.\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n ]\n session = bigquery_storage.types.ReadSession(\n streams=streams,\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_rowstream.rows.return_value = mock_rows\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n page_items = [-1, 0, 1]\n type(mock_page).num_items = mock.PropertyMock(return_value=len(page_items))\n\n def blocking_to_arrow(*args, **kwargs):\n # Sleep for longer than the waiting interval so that we know we're\n # only reading one page per loop at most.\n time.sleep(2 * mut._PROGRESS_INTERVAL)\n return pyarrow.RecordBatch.from_arrays(\n [pyarrow.array(page_items)], schema=arrow_schema\n )\n\n mock_page.to_arrow.side_effect = blocking_to_arrow\n mock_pages = (mock_page, mock_page, mock_page, mock_page, mock_page)\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n schema = [schema.SchemaField(\"testcol\", \"IGNORED\")]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n row_iterator.to_dataframe(\n bqstorage_client=bqstorage_client, progress_bar_type=\"tqdm\"\n )\n\n # Make sure that this test updated the progress bar once per page from\n # each stream.\n total_pages = len(streams) * len(mock_pages)\n expected_total_rows = total_pages * len(page_items)\n progress_updates = [\n args[0] for args, kwargs in tqdm_mock().update.call_args_list\n ]\n # Should have sent >1 update due to delay in blocking_to_arrow.\n self.assertGreater(len(progress_updates), 1)\n self.assertEqual(sum(progress_updates), expected_total_rows)\n tqdm_mock().close.assert_called_once()\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_w_bqstorage_exits_on_keyboardinterrupt(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n # Speed up testing.\n mut._PROGRESS_INTERVAL = 0.01\n\n arrow_fields = [\n pyarrow.field(\"colA\", pyarrow.int64()),\n # Not alphabetical to test column order.\n pyarrow.field(\"colC\", pyarrow.float64()),\n pyarrow.field(\"colB\", pyarrow.utf8()),\n ]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n bqstorage_client = 
mock.create_autospec(bigquery_storage.BigQueryReadClient)\n session = bigquery_storage.types.ReadSession(\n streams=[\n # Use multiple streams because one will fail with a\n # KeyboardInterrupt, and we want to check that the other streams\n # ends early.\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/5678\"},\n {\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/9999\"},\n ],\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n page_items = [\n pyarrow.array([1, -1]),\n pyarrow.array([2.0, 4.0]),\n pyarrow.array([\"abc\", \"def\"]),\n ]\n\n def blocking_to_arrow(*args, **kwargs):\n # Sleep for longer than the waiting interval so that we know we're\n # only reading one page per loop at most.\n time.sleep(2 * mut._PROGRESS_INTERVAL)\n return pyarrow.RecordBatch.from_arrays(page_items, schema=arrow_schema)\n\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_arrow.side_effect = blocking_to_arrow\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_pages = mock.PropertyMock(return_value=(mock_page, mock_page, mock_page))\n type(mock_rows).pages = mock_pages\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n mock_rowstream.rows.return_value = mock_rows\n\n mock_cancelled_rows = mock.create_autospec(reader.ReadRowsIterable)\n mock_cancelled_pages = mock.PropertyMock(side_effect=KeyboardInterrupt)\n type(mock_cancelled_rows).pages = mock_cancelled_pages\n mock_cancelled_rowstream = mock.create_autospec(reader.ReadRowsStream)\n mock_cancelled_rowstream.rows.return_value = mock_cancelled_rows\n\n bqstorage_client.read_rows.side_effect = (\n mock_rowstream,\n mock_cancelled_rowstream,\n mock_rowstream,\n )\n\n schema = [\n schema.SchemaField(\"colA\", \"IGNORED\"),\n schema.SchemaField(\"colB\", \"IGNORED\"),\n schema.SchemaField(\"colC\", \"IGNORED\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n with pytest.raises(KeyboardInterrupt):\n row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n # Should not have fetched the third page of results because exit_early\n # should have been set.\n self.assertLessEqual(mock_page.to_dataframe.call_count, 2)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n def test_to_dataframe_tabledata_list_w_multiple_pages_return_unique_index(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n iterator_schema = [schema.SchemaField(\"name\", \"STRING\", mode=\"REQUIRED\")]\n path = \"/foo\"\n api_request = mock.Mock(\n side_effect=[\n {\"rows\": [{\"f\": [{\"v\": \"Bengt\"}]}], \"pageToken\": \"NEXTPAGE\"},\n {\"rows\": [{\"f\": [{\"v\": \"Sven\"}]}]},\n ]\n )\n row_iterator = mut.RowIterator(\n _mock_client(),\n api_request,\n path,\n iterator_schema,\n table=mut.Table(\"proj.dset.tbl\"),\n )\n\n df = row_iterator.to_dataframe(\n bqstorage_client=None, create_bqstorage_client=False,\n )\n\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 2)\n self.assertEqual(list(df), [\"name\"])\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertTrue(df.index.is_unique)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, 
\"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_dataframe_w_bqstorage_raises_auth_error(self):\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client.create_read_session.side_effect = google.api_core.exceptions.Forbidden(\n \"TEST BigQuery Storage API not enabled. TEST\"\n )\n path = \"/foo\"\n api_request = mock.Mock(return_value={\"rows\": []})\n row_iterator = mut.RowIterator(\n _mock_client(), api_request, path, [], table=mut.Table(\"proj.dset.tbl\")\n )\n\n with pytest.raises(google.api_core.exceptions.Forbidden):\n row_iterator.to_dataframe(bqstorage_client=bqstorage_client)\n\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_dataframe_w_bqstorage_partition(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n [schema.SchemaField(\"colA\", \"IGNORED\")],\n table=mut.TableReference.from_string(\"proj.dset.tbl$20181225\"),\n )\n\n with pytest.raises(ValueError):\n row_iterator.to_dataframe(bqstorage_client)\n\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n def test_to_dataframe_w_bqstorage_snapshot(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n [schema.SchemaField(\"colA\", \"IGNORED\")],\n table=mut.TableReference.from_string(\"proj.dset.tbl@1234567890000\"),\n )\n\n with pytest.raises(ValueError):\n row_iterator.to_dataframe(bqstorage_client)\n\n @unittest.skipIf(pandas is None, \"Requires `pandas`\")\n @unittest.skipIf(\n bigquery_storage is None, \"Requires `google-cloud-bigquery-storage`\"\n )\n @unittest.skipIf(pyarrow is None, \"Requires `pyarrow`\")\n def test_to_dataframe_concat_categorical_dtype_w_pyarrow(self):\n from google.cloud.bigquery import schema\n from google.cloud.bigquery import table as mut\n from google.cloud.bigquery_storage_v1 import reader\n\n arrow_fields = [\n # Not alphabetical to test column order.\n pyarrow.field(\"col_str\", pyarrow.utf8()),\n # The backend returns strings, and without other info, pyarrow contains\n # string data in categorical columns, too (and not maybe the Dictionary\n # type that corresponds to pandas.Categorical).\n pyarrow.field(\"col_category\", pyarrow.utf8()),\n ]\n arrow_schema = pyarrow.schema(arrow_fields)\n\n # create a mock BQ storage client\n bqstorage_client = mock.create_autospec(bigquery_storage.BigQueryReadClient)\n bqstorage_client._transport = mock.create_autospec(\n big_query_read_grpc_transport.BigQueryReadGrpcTransport\n )\n session = bigquery_storage.types.ReadSession(\n streams=[{\"name\": \"/projects/proj/dataset/dset/tables/tbl/streams/1234\"}],\n arrow_schema={\"serialized_schema\": arrow_schema.serialize().to_pybytes()},\n )\n bqstorage_client.create_read_session.return_value = session\n\n mock_rowstream = mock.create_autospec(reader.ReadRowsStream)\n bqstorage_client.read_rows.return_value = mock_rowstream\n\n # prepare the iterator over mocked rows\n mock_rows = mock.create_autospec(reader.ReadRowsIterable)\n 
mock_rowstream.rows.return_value = mock_rows\n page_items = [\n [\n pyarrow.array([\"foo\", \"bar\", \"baz\"]), # col_str\n pyarrow.array([\"low\", \"medium\", \"low\"]), # col_category\n ],\n [\n pyarrow.array([\"foo_page2\", \"bar_page2\", \"baz_page2\"]), # col_str\n pyarrow.array([\"medium\", \"high\", \"low\"]), # col_category\n ],\n ]\n\n mock_pages = []\n\n for record_list in page_items:\n page_record_batch = pyarrow.RecordBatch.from_arrays(\n record_list, schema=arrow_schema\n )\n mock_page = mock.create_autospec(reader.ReadRowsPage)\n mock_page.to_arrow.return_value = page_record_batch\n mock_pages.append(mock_page)\n\n type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)\n\n schema = [\n schema.SchemaField(\"col_str\", \"IGNORED\"),\n schema.SchemaField(\"col_category\", \"IGNORED\"),\n ]\n\n row_iterator = mut.RowIterator(\n _mock_client(),\n None, # api_request: ignored\n None, # path: ignored\n schema,\n table=mut.TableReference.from_string(\"proj.dset.tbl\"),\n selected_fields=schema,\n )\n\n # run the method under test\n got = row_iterator.to_dataframe(\n bqstorage_client=bqstorage_client,\n dtypes={\n \"col_category\": pandas.core.dtypes.dtypes.CategoricalDtype(\n categories=[\"low\", \"medium\", \"high\"], ordered=False,\n ),\n },\n )\n\n # Are the columns in the expected order?\n column_names = [\"col_str\", \"col_category\"]\n self.assertEqual(list(got), column_names)\n\n # Have expected number of rows?\n total_pages = len(mock_pages) # we have a single stream, thus these two equal\n total_rows = len(page_items[0][0]) * total_pages\n self.assertEqual(len(got.index), total_rows)\n\n # Are column types correct?\n expected_dtypes = [\n pandas.core.dtypes.dtypes.np.dtype(\"O\"), # the default for string data\n pandas.core.dtypes.dtypes.CategoricalDtype(\n categories=[\"low\", \"medium\", \"high\"], ordered=False,\n ),\n ]\n self.assertEqual(list(got.dtypes), expected_dtypes)\n\n # And the data in the categorical column?\n self.assertEqual(\n list(got[\"col_category\"]),\n [\"low\", \"medium\", \"low\", \"medium\", \"high\", \"low\"],\n )\n\n # Don't close the client if it was passed in.\n bqstorage_client._transport.grpc_channel.close.assert_not_called()\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_dataframe_geography_as_object(self):\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"GEOGRAPHY\")),\n (\n (\"foo\", \"Point(0 0)\"),\n (\"bar\", None),\n (\"baz\", \"Polygon((0 0, 0 1, 1 0, 0 0))\"),\n ),\n )\n df = row_iterator.to_dataframe(\n create_bqstorage_client=False, geography_as_object=True,\n )\n self.assertIsInstance(df, pandas.DataFrame)\n self.assertEqual(len(df), 3) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"geog\"]) # verify the column names\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertEqual(df.geog.dtype.name, \"object\")\n self.assertIsInstance(df.geog, pandas.Series)\n self.assertEqual(\n [v.__class__.__name__ for v in df.geog], [\"Point\", \"float\", \"Polygon\"]\n )\n\n @mock.patch(\"google.cloud.bigquery.table.geopandas\", new=None)\n def test_to_geodataframe_error_if_geopandas_is_none(self):\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\n \"The geopandas library is not installed, please install \"\n \"geopandas to use the to_geodataframe() function.\"\n ),\n ):\n self._make_one_from_data().to_geodataframe()\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe(self):\n row_iterator 
= self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"GEOGRAPHY\")),\n (\n (\"foo\", \"Point(0 0)\"),\n (\"bar\", None),\n (\"baz\", \"Polygon((0 0, 0 1, 1 0, 0 0))\"),\n ),\n )\n df = row_iterator.to_geodataframe(create_bqstorage_client=False)\n self.assertIsInstance(df, geopandas.GeoDataFrame)\n self.assertEqual(len(df), 3) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"geog\"]) # verify the column names\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertEqual(df.geog.dtype.name, \"geometry\")\n self.assertIsInstance(df.geog, geopandas.GeoSeries)\n self.assertEqual(list(map(str, df.area)), [\"0.0\", \"nan\", \"0.5\"])\n self.assertEqual(list(map(str, df.geog.area)), [\"0.0\", \"nan\", \"0.5\"])\n self.assertEqual(df.crs.srs, \"EPSG:4326\")\n self.assertEqual(df.crs.name, \"WGS 84\")\n self.assertEqual(df.geog.crs.srs, \"EPSG:4326\")\n self.assertEqual(df.geog.crs.name, \"WGS 84\")\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe_ambiguous_geog(self):\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"GEOGRAPHY\"), (\"geog2\", \"GEOGRAPHY\")), ()\n )\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\n \"There is more than one GEOGRAPHY column in the result. \"\n \"The geography_column argument must be used to specify which \"\n \"one to use to create a GeoDataFrame\"\n ),\n ):\n row_iterator.to_geodataframe(create_bqstorage_client=False)\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe_bad_geography_column(self):\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"GEOGRAPHY\"), (\"geog2\", \"GEOGRAPHY\")), ()\n )\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\n \"The given geography column, xxx, doesn't name\"\n \" a GEOGRAPHY column in the result.\"\n ),\n ):\n row_iterator.to_geodataframe(\n create_bqstorage_client=False, geography_column=\"xxx\"\n )\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe_no_geog(self):\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"STRING\")), ()\n )\n with self.assertRaisesRegex(\n TypeError,\n re.escape(\n \"There must be at least one GEOGRAPHY column\"\n \" to create a GeoDataFrame\"\n ),\n ):\n row_iterator.to_geodataframe(create_bqstorage_client=False)\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n def test_to_geodataframe_w_geography_column(self):\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"geog\", \"GEOGRAPHY\"), (\"geog2\", \"GEOGRAPHY\")),\n (\n (\"foo\", \"Point(0 0)\", \"Point(1 1)\"),\n (\"bar\", None, \"Point(2 2)\"),\n (\"baz\", \"Polygon((0 0, 0 1, 1 0, 0 0))\", \"Point(3 3)\"),\n ),\n )\n df = row_iterator.to_geodataframe(\n create_bqstorage_client=False, geography_column=\"geog\"\n )\n self.assertIsInstance(df, geopandas.GeoDataFrame)\n self.assertEqual(len(df), 3) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"geog\", \"geog2\"]) # verify the column names\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertEqual(df.geog.dtype.name, \"geometry\")\n self.assertEqual(df.geog2.dtype.name, \"object\")\n self.assertIsInstance(df.geog, geopandas.GeoSeries)\n self.assertEqual(list(map(str, df.area)), [\"0.0\", \"nan\", \"0.5\"])\n self.assertEqual(list(map(str, df.geog.area)), [\"0.0\", \"nan\", \"0.5\"])\n self.assertEqual(\n [v.__class__.__name__ for v in df.geog], 
[\"Point\", \"NoneType\", \"Polygon\"]\n )\n\n # Geog2 isn't a GeoSeries, but it contains geomentries:\n self.assertIsInstance(df.geog2, pandas.Series)\n self.assertEqual(\n [v.__class__.__name__ for v in df.geog2], [\"Point\", \"Point\", \"Point\"]\n )\n # and can easily be converted to a GeoSeries\n self.assertEqual(\n list(map(str, geopandas.GeoSeries(df.geog2).area)), [\"0.0\", \"0.0\", \"0.0\"]\n )\n\n @unittest.skipIf(geopandas is None, \"Requires `geopandas`\")\n @mock.patch(\"google.cloud.bigquery.table.RowIterator.to_dataframe\")\n def test_rowiterator_to_geodataframe_delegation(self, to_dataframe):\n \"\"\"\n RowIterator.to_geodataframe just delegates to RowIterator.to_dataframe.\n\n This test just demonstrates that. We don't need to test all the\n variations, which are tested for to_dataframe.\n \"\"\"\n import numpy\n from shapely import wkt\n\n row_iterator = self._make_one_from_data(\n ((\"name\", \"STRING\"), (\"g\", \"GEOGRAPHY\"))\n )\n bqstorage_client = object()\n dtypes = dict(xxx=numpy.dtype(\"int64\"))\n progress_bar_type = \"normal\"\n create_bqstorage_client = False\n date_as_object = False\n geography_column = \"g\"\n\n to_dataframe.return_value = pandas.DataFrame(\n dict(name=[\"foo\"], g=[wkt.loads(\"point(0 0)\")],)\n )\n\n df = row_iterator.to_geodataframe(\n bqstorage_client=bqstorage_client,\n dtypes=dtypes,\n progress_bar_type=progress_bar_type,\n create_bqstorage_client=create_bqstorage_client,\n date_as_object=date_as_object,\n geography_column=geography_column,\n )\n\n to_dataframe.assert_called_once_with(\n bqstorage_client,\n dtypes,\n progress_bar_type,\n create_bqstorage_client,\n date_as_object,\n geography_as_object=True,\n )\n\n self.assertIsInstance(df, geopandas.GeoDataFrame)\n self.assertEqual(len(df), 1) # verify the number of rows\n self.assertEqual(list(df), [\"name\", \"g\"]) # verify the column names\n self.assertEqual(df.name.dtype.name, \"object\")\n self.assertEqual(df.g.dtype.name, \"geometry\")\n self.assertIsInstance(df.g, geopandas.GeoSeries)\n self.assertEqual(list(map(str, df.area)), [\"0.0\"])\n self.assertEqual(list(map(str, df.g.area)), [\"0.0\"])\n self.assertEqual([v.__class__.__name__ for v in df.g], [\"Point\"])\n\n\nclass TestPartitionRange(unittest.TestCase):\n def _get_target_class(self):\n from google.cloud.bigquery.table import PartitionRange\n\n return PartitionRange\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def test_constructor_defaults(self):\n object_under_test = self._make_one()\n assert object_under_test.start is None\n assert object_under_test.end is None\n assert object_under_test.interval is None\n\n def test_constructor_w_properties(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n assert object_under_test.start == 1\n assert object_under_test.end == 10\n assert object_under_test.interval == 2\n\n def test_constructor_w_resource(self):\n object_under_test = self._make_one(\n _properties={\"start\": -1234567890, \"end\": 1234567890, \"interval\": 1000000}\n )\n assert object_under_test.start == -1234567890\n assert object_under_test.end == 1234567890\n assert object_under_test.interval == 1000000\n\n def test___eq___start_mismatch(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n other = self._make_one(start=2, end=10, interval=2)\n self.assertNotEqual(object_under_test, other)\n\n def test___eq___end__mismatch(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n other = self._make_one(start=1, 
end=11, interval=2)\n self.assertNotEqual(object_under_test, other)\n\n def test___eq___interval__mismatch(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n other = self._make_one(start=1, end=11, interval=3)\n self.assertNotEqual(object_under_test, other)\n\n def test___eq___hit(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n other = self._make_one(start=1, end=10, interval=2)\n self.assertEqual(object_under_test, other)\n\n def test__eq___type_mismatch(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n self.assertNotEqual(object_under_test, object())\n self.assertEqual(object_under_test, mock.ANY)\n\n def test_unhashable_object(self):\n object_under_test1 = self._make_one(start=1, end=10, interval=2)\n\n with self.assertRaisesRegex(TypeError, r\".*unhashable type.*\"):\n hash(object_under_test1)\n\n def test_repr(self):\n object_under_test = self._make_one(start=1, end=10, interval=2)\n assert repr(object_under_test) == \"PartitionRange(end=10, interval=2, start=1)\"\n\n\nclass TestRangePartitioning(unittest.TestCase):\n def _get_target_class(self):\n from google.cloud.bigquery.table import RangePartitioning\n\n return RangePartitioning\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def test_constructor_defaults(self):\n object_under_test = self._make_one()\n assert object_under_test.field is None\n assert object_under_test.range_.start is None\n assert object_under_test.range_.end is None\n assert object_under_test.range_.interval is None\n\n def test_constructor_w_properties(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n assert object_under_test.field == \"integer_col\"\n assert object_under_test.range_.start == 1\n assert object_under_test.range_.end == 10\n assert object_under_test.range_.interval == 2\n\n def test_constructor_w_resource(self):\n object_under_test = self._make_one(\n _properties={\n \"field\": \"some_column\",\n \"range\": {\"start\": -1234567890, \"end\": 1234567890, \"interval\": 1000000},\n }\n )\n assert object_under_test.field == \"some_column\"\n assert object_under_test.range_.start == -1234567890\n assert object_under_test.range_.end == 1234567890\n assert object_under_test.range_.interval == 1000000\n\n def test_range_w_wrong_type(self):\n object_under_test = self._make_one()\n with pytest.raises(ValueError, match=\"PartitionRange\"):\n object_under_test.range_ = object()\n\n def test___eq___field_mismatch(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n other = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"float_col\"\n )\n self.assertNotEqual(object_under_test, other)\n\n def test___eq___range__mismatch(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n other = self._make_one(\n range_=PartitionRange(start=2, end=20, interval=2), field=\"float_col\"\n )\n self.assertNotEqual(object_under_test, other)\n\n def test___eq___hit(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n 
other = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n self.assertEqual(object_under_test, other)\n\n def test__eq___type_mismatch(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n self.assertNotEqual(object_under_test, object())\n self.assertEqual(object_under_test, mock.ANY)\n\n def test_unhashable_object(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test1 = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n with self.assertRaisesRegex(TypeError, r\".*unhashable type.*\"):\n hash(object_under_test1)\n\n def test_repr(self):\n from google.cloud.bigquery.table import PartitionRange\n\n object_under_test = self._make_one(\n range_=PartitionRange(start=1, end=10, interval=2), field=\"integer_col\"\n )\n assert (\n repr(object_under_test)\n == \"RangePartitioning(field='integer_col', range_=PartitionRange(end=10, interval=2, start=1))\"\n )\n\n\nclass TestTimePartitioning(unittest.TestCase):\n def _get_target_class(self):\n from google.cloud.bigquery.table import TimePartitioning\n\n return TimePartitioning\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def test_constructor_defaults(self):\n time_partitioning = self._make_one()\n self.assertEqual(time_partitioning.type_, \"DAY\")\n self.assertIsNone(time_partitioning.field)\n self.assertIsNone(time_partitioning.expiration_ms)\n\n def test_constructor_explicit(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n time_partitioning = self._make_one(\n type_=TimePartitioningType.DAY, field=\"name\", expiration_ms=10000\n )\n\n self.assertEqual(time_partitioning.type_, \"DAY\")\n self.assertEqual(time_partitioning.field, \"name\")\n self.assertEqual(time_partitioning.expiration_ms, 10000)\n\n def test_require_partition_filter_warns_deprecation(self):\n object_under_test = self._make_one()\n\n with warnings.catch_warnings(record=True) as warned:\n assert object_under_test.require_partition_filter is None\n object_under_test.require_partition_filter = True\n assert object_under_test.require_partition_filter\n\n assert len(warned) == 3\n for warning in warned:\n self.assertIs(warning.category, PendingDeprecationWarning)\n\n def test_from_api_repr_empty(self):\n klass = self._get_target_class()\n\n # Even though there are required properties according to the API\n # specification, sometimes time partitioning is populated as an empty\n # object. 
See internal bug 131167013.\n api_repr = {}\n time_partitioning = klass.from_api_repr(api_repr)\n\n self.assertIsNone(time_partitioning.type_)\n self.assertIsNone(time_partitioning.field)\n self.assertIsNone(time_partitioning.expiration_ms)\n\n def test_from_api_repr_minimal(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n klass = self._get_target_class()\n api_repr = {\"type\": \"DAY\"}\n time_partitioning = klass.from_api_repr(api_repr)\n\n self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)\n self.assertIsNone(time_partitioning.field)\n self.assertIsNone(time_partitioning.expiration_ms)\n\n def test_from_api_repr_doesnt_override_type(self):\n klass = self._get_target_class()\n api_repr = {\"type\": \"HOUR\"}\n time_partitioning = klass.from_api_repr(api_repr)\n self.assertEqual(time_partitioning.type_, \"HOUR\")\n\n def test_from_api_repr_explicit(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n klass = self._get_target_class()\n api_repr = {\n \"type\": \"DAY\",\n \"field\": \"name\",\n \"expirationMs\": \"10000\",\n \"requirePartitionFilter\": True,\n }\n time_partitioning = klass.from_api_repr(api_repr)\n\n self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)\n self.assertEqual(time_partitioning.field, \"name\")\n self.assertEqual(time_partitioning.expiration_ms, 10000)\n\n with warnings.catch_warnings(record=True) as warned:\n self.assertTrue(time_partitioning.require_partition_filter)\n\n self.assertIs(warned[0].category, PendingDeprecationWarning)\n\n def test_to_api_repr_defaults(self):\n time_partitioning = self._make_one()\n expected = {\"type\": \"DAY\"}\n self.assertEqual(time_partitioning.to_api_repr(), expected)\n\n def test_to_api_repr_explicit(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n time_partitioning = self._make_one(\n type_=TimePartitioningType.DAY, field=\"name\", expiration_ms=10000\n )\n\n with warnings.catch_warnings(record=True) as warned:\n time_partitioning.require_partition_filter = True\n\n self.assertIs(warned[0].category, PendingDeprecationWarning)\n\n expected = {\n \"type\": \"DAY\",\n \"field\": \"name\",\n \"expirationMs\": \"10000\",\n \"requirePartitionFilter\": True,\n }\n self.assertEqual(time_partitioning.to_api_repr(), expected)\n\n def test___eq___wrong_type(self):\n time_partitioning = self._make_one()\n other = object()\n self.assertNotEqual(time_partitioning, other)\n self.assertEqual(time_partitioning, mock.ANY)\n\n def test___eq___type__mismatch(self):\n time_partitioning = self._make_one()\n other = self._make_one(type_=\"HOUR\")\n self.assertNotEqual(time_partitioning, other)\n\n def test___eq___field_mismatch(self):\n time_partitioning = self._make_one(field=\"foo\")\n other = self._make_one(field=\"bar\")\n self.assertNotEqual(time_partitioning, other)\n\n def test___eq___expiration_ms_mismatch(self):\n time_partitioning = self._make_one(field=\"foo\", expiration_ms=100000)\n other = self._make_one(field=\"foo\", expiration_ms=200000)\n self.assertNotEqual(time_partitioning, other)\n\n def test___eq___require_partition_filter_mismatch(self):\n time_partitioning = self._make_one(field=\"foo\", expiration_ms=100000)\n other = self._make_one(field=\"foo\", expiration_ms=100000)\n with warnings.catch_warnings(record=True) as warned:\n time_partitioning.require_partition_filter = True\n other.require_partition_filter = False\n\n assert len(warned) == 2\n for warning in warned:\n self.assertIs(warning.category, 
PendingDeprecationWarning)\n\n self.assertNotEqual(time_partitioning, other)\n\n def test___eq___hit(self):\n time_partitioning = self._make_one(field=\"foo\", expiration_ms=100000)\n other = self._make_one(field=\"foo\", expiration_ms=100000)\n self.assertEqual(time_partitioning, other)\n\n def test___ne___wrong_type(self):\n time_partitioning = self._make_one()\n other = object()\n self.assertNotEqual(time_partitioning, other)\n self.assertEqual(time_partitioning, mock.ANY)\n\n def test___ne___same_value(self):\n time_partitioning1 = self._make_one()\n time_partitioning2 = self._make_one()\n # unittest ``assertEqual`` uses ``==`` not ``!=``.\n comparison_val = time_partitioning1 != time_partitioning2\n self.assertFalse(comparison_val)\n\n def test___ne___different_values(self):\n time_partitioning1 = self._make_one()\n time_partitioning2 = self._make_one(type_=\"HOUR\")\n self.assertNotEqual(time_partitioning1, time_partitioning2)\n\n def test___hash__set_equality(self):\n time_partitioning1 = self._make_one(field=\"foo\")\n time_partitioning2 = self._make_one(field=\"foo\")\n set_one = {time_partitioning1, time_partitioning2}\n set_two = {time_partitioning1, time_partitioning2}\n self.assertEqual(set_one, set_two)\n\n def test___hash__not_equals(self):\n time_partitioning1 = self._make_one(field=\"foo\")\n time_partitioning2 = self._make_one(field=\"bar\")\n set_one = {time_partitioning1}\n set_two = {time_partitioning2}\n self.assertNotEqual(set_one, set_two)\n\n def test___repr___minimal(self):\n time_partitioning = self._make_one()\n expected = \"TimePartitioning(type_='DAY')\"\n self.assertEqual(repr(time_partitioning), expected)\n\n def test___repr___explicit(self):\n from google.cloud.bigquery.table import TimePartitioningType\n\n time_partitioning = self._make_one(\n type_=TimePartitioningType.DAY, field=\"name\", expiration_ms=10000\n )\n expected = \"TimePartitioning(expiration_ms=10000,field='name',type_='DAY')\"\n self.assertEqual(repr(time_partitioning), expected)\n\n def test_set_expiration_w_none(self):\n time_partitioning = self._make_one()\n time_partitioning.expiration_ms = None\n assert time_partitioning._properties[\"expirationMs\"] is None\n\n\[email protected](\n bigquery_storage is None, reason=\"Requires `google-cloud-bigquery-storage`\"\n)\[email protected](\n \"table_path\",\n (\n \"my-project.my_dataset.my_table\",\n \"my-project.my_dataset.my_table$20181225\",\n \"my-project.my_dataset.my_table@1234567890\",\n \"my-project.my_dataset.my_table$20181225@1234567890\",\n ),\n)\ndef test_table_reference_to_bqstorage_v1_stable(table_path):\n from google.cloud.bigquery import table as mut\n\n expected = \"projects/my-project/datasets/my_dataset/tables/my_table\"\n\n for klass in (mut.TableReference, mut.Table, mut.TableListItem):\n got = klass.from_string(table_path).to_bqstorage()\n assert got == expected\n" ]
[ [ "pandas.core.dtypes.dtypes.CategoricalDtype", "pandas.core.dtypes.dtypes.np.dtype", "numpy.dtype", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
aidanscannell/mogpe
[ "25a9af473d73d6fa35bd060bee0eb2c372b995e5" ]
[ "examples/quadcopter-sim/scenario-1/train_from_checkpoint.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\nimport tensorflow as tf\nimport gpflow as gpf\nfrom mogpe.training import train_from_config_and_checkpoint\n\n\ndef load_quadcopter_dataset(filename, standardise=False):\n data = np.load(filename)\n # X = data['x']\n X = data[\"x\"][:, 0:2]\n Y = data[\"y\"]\n # Y = data['y'][:, 0:1]\n print(\"Input data shape: \", X.shape)\n print(\"Output data shape: \", Y.shape)\n\n X = tf.convert_to_tensor(X, dtype=gpf.default_float())\n Y = tf.convert_to_tensor(Y, dtype=gpf.default_float())\n\n # standardise input\n if standardise:\n mean_x, var_x = tf.nn.moments(X, axes=[0])\n mean_y, var_y = tf.nn.moments(Y, axes=[0])\n X = (X - mean_x) / tf.sqrt(var_x)\n Y = (Y - mean_y) / tf.sqrt(var_y)\n data = (X, Y)\n return data\n\n\n# Set path to data set npz file\ndata_file = \"./data/quad_sim_const_action_scenario_1.npz\"\n\n# Set path to training config\nconfig_file = \"./configs/config_2_experts.toml\"\n# config_file = \"./configs/config_3_experts.toml\"\n\n# Load mcycle data set\ndataset = load_quadcopter_dataset(data_file)\nX, Y = dataset\n\n# Parse the toml config file and train\n# trained_model = train_from_config_and_dataset(config_file, dataset)\n\nckpt_dir = \"../../logs/quadcopter-sim/scenario-1/two_experts/01-19-121349\"\ntrained_model = train_from_config_and_checkpoint(config_file, ckpt_dir, dataset)\n\ngpf.utilities.print_summary(trained_model)\n" ]
[ [ "numpy.load", "tensorflow.nn.moments", "tensorflow.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
moritz-raabe/P.808
[ "6f6bcf089d5d14d0248dbedd9b9278389ceee08c" ]
[ "src/master_script.py" ]
[ "\"\"\"\r\n/*---------------------------------------------------------------------------------------------\r\n* Copyright (c) Microsoft Corporation. All rights reserved.\r\n* Licensed under the MIT License. See License.txt in the project root for license information.\r\n*--------------------------------------------------------------------------------------------*/\r\n@author: Babak Naderi\r\n\"\"\"\r\n\r\nimport argparse\r\nimport os\r\nimport configparser as CP\r\nfrom jinja2 import Template\r\nimport pandas as pd\r\nimport create_input as ca\r\nfrom azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService\r\nfrom azure.storage.file import FileService\r\nimport asyncio\r\n\r\nclass ClipsInAzureStorageAccount(object):\r\n def __init__(self, config, alg):\r\n self._account_name = os.path.basename(config['StorageUrl']).split('.')[0]\r\n if '.file.core.windows.net' in config['StorageUrl']:\r\n self._account_type = 'FileStore'\r\n elif '.blob.core.windows.net' in config['StorageUrl']:\r\n self._account_type = 'BlobStore'\r\n self._account_key = config['StorageAccountKey']\r\n self._container = config['Container']\r\n self._alg = alg\r\n self._clips_path = config['Path'].lstrip('/')\r\n self._clip_names = []\r\n self._modified_clip_names = []\r\n self._SAS_token = ''\r\n\r\n @property\r\n def container(self):\r\n return self._container\r\n\r\n @property\r\n def alg(self):\r\n return self._alg\r\n\r\n @property\r\n def clips_path(self):\r\n return self._clips_path\r\n\r\n @property\r\n async def clip_names(self):\r\n if len(self._clip_names) <= 0:\r\n await self.get_clips()\r\n return self._clip_names\r\n\r\n @property\r\n def store_service(self):\r\n if self._account_type == 'FileStore':\r\n return FileService(account_name = self._account_name, account_key = self._account_key)\r\n elif self._account_type == 'BlobStore':\r\n return BlockBlobService(account_name=self._account_name, account_key=self._account_key)\r\n\r\n @property\r\n def modified_clip_names(self):\r\n self._modified_clip_names = [os.path.basename(clip) for clip in self._clip_names]\r\n return self._modified_clip_names\r\n\r\n async def traverse_down_filestore(self, dirname):\r\n files = self.store_service.list_directories_and_files(self.container, os.path.join(self.clips_path, dirname))\r\n await self.retrieve_contents(files, dirname)\r\n\r\n async def retrieve_contents(self, list_generator, dirname=''):\r\n for e in list_generator:\r\n if '.wav' in e.name:\r\n if not dirname:\r\n self._clip_names.append(e.name)\r\n else:\r\n self._clip_names.append(posixpath.join(dirname.lstrip('/'), e.name))\r\n else:\r\n await self.traverse_down_filestore(e.name)\r\n\r\n async def get_clips(self):\r\n if self._account_type == 'FileStore':\r\n files = self.store_service.list_directories_and_files(self.container, self.clips_path)\r\n if not self._SAS_token:\r\n self._SAS_token = self.store_service.generate_share_shared_access_signature(self.container, permission='read', expiry=datetime.datetime(2019, 10, 30, 12, 30), start=datetime.datetime.now())\r\n await self.retrieve_contents(files)\r\n elif self._account_type == 'BlobStore':\r\n blobs = self.store_service.list_blobs(self.container, self.clips_path)\r\n await self.retrieve_contents(blobs)\r\n\r\n def make_clip_url(self, filename):\r\n if self._account_type == 'FileStore':\r\n source_url = self.store_service.make_file_url(self.container, self.clips_path, filename, sas_token=self._SAS_token)\r\n elif self._account_type == 'BlobStore':\r\n source_url = 
self.store_service.make_blob_url(self.container, filename)\r\n return source_url\r\n\r\nclass GoldSamplesInStore(ClipsInAzureStorageAccount):\r\n def __init__(self, config, alg):\r\n super().__init__(config, alg)\r\n self._SAS_token = ''\r\n\r\n async def get_dataframe(self):\r\n clips = await self.clip_names\r\n df = pd.DataFrame(columns=['gold_clips', 'gold_clips_ans'])\r\n clipsList = []\r\n for clip in clips:\r\n clipUrl = self.make_clip_url(clip)\r\n rating = 5\r\n if 'noisy' in clipUrl.lower():\r\n rating = 1\r\n\r\n clipsList.append({'gold_clips':clipUrl, 'gold_clips_ans':rating})\r\n\r\n df = df.append(clipsList)\r\n return df\r\n\r\nclass TrappingSamplesInStore(ClipsInAzureStorageAccount):\r\n async def get_dataframe(self):\r\n clips = await self.clip_names\r\n df = pd.DataFrame(columns=['trapping_clips', 'trapping_ans'])\r\n clipsList = []\r\n for clip in clips:\r\n clipUrl = self.make_clip_url(clip)\r\n rating = 0\r\n if '_bad_' in clip.lower():\r\n rating = 1\r\n elif '_poor_' in clip.lower():\r\n rating = 2\r\n elif '_fair_' in clip.lower():\r\n rating = 3\r\n elif '_good_' in clip.lower():\r\n rating = 4\r\n elif '_excellent_' in clip.lower():\r\n rating = 5\r\n\r\n clipsList.append({'trapping_clips':clipUrl, 'trapping_ans':rating})\r\n\r\n df = df.append(clipsList)\r\n return df\r\n\r\nclass PairComparisonSamplesInStore(ClipsInAzureStorageAccount):\r\n async def get_dataframe(self):\r\n clips = await self.clip_names\r\n pair_a_clips = [self.make_clip_url(clip) for clip in clips if '40S_' in clip]\r\n pair_b_clips = [clip.replace('40S_', '50S_') for clip in pair_a_clips]\r\n\r\n df = pd.DataFrame({'pair_a':pair_a_clips, 'pair_b':pair_b_clips})\r\n return df\r\n\r\n\r\ndef create_analyzer_cfg_acr(cfg, template_path, out_path):\r\n \"\"\"\r\n create cfg file to be used by analyzer script (acr method)\r\n :param cfg:\r\n :param template_path:\r\n :param out_path:\r\n :return:\r\n \"\"\"\r\n print(\"Start creating config file for result_parser\")\r\n config = {}\r\n\r\n config['q_num'] = int(cfg['create_input']['number_of_clips_per_session']) + \\\r\n int(cfg['create_input']['number_of_trapping_per_session']) + \\\r\n int(cfg['create_input']['number_of_gold_clips_per_session'])\r\n\r\n config['max_allowed_hits'] = cfg['acr_html']['allowed_max_hit_in_project']\r\n\r\n config['quantity_hits_more_than'] = cfg['acr_html']['quantity_hits_more_than']\r\n config['quantity_bonus'] = cfg['acr_html']['quantity_bonus']\r\n config['quality_top_percentage'] = cfg['acr_html']['quality_top_percentage']\r\n config['quality_bonus'] = cfg['acr_html']['quality_bonus']\r\n\r\n with open(template_path, 'r') as file:\r\n content = file.read()\r\n file.seek(0)\r\n t = Template(content)\r\n cfg_file = t.render(cfg=config)\r\n\r\n with open(out_path, 'w') as file:\r\n file.write(cfg_file)\r\n file.close()\r\n print(f\" [{out_path}] is created\")\r\n\r\n\r\ndef create_analyzer_cfg_dcr_ccr(cfg, template_path, out_path):\r\n \"\"\"\r\n create cfg file to be used by analyzer script (ccr/dcr method)\r\n :param cfg:\r\n :param template_path:\r\n :param out_path:\r\n :return:\r\n \"\"\"\r\n print(\"Start creating config file for result_parser\")\r\n config = {}\r\n\r\n config['q_num'] = int(cfg['create_input']['number_of_clips_per_session']) + \\\r\n int(cfg['create_input']['number_of_trapping_per_session'])\r\n\r\n config['max_allowed_hits'] = cfg['dcr_ccr_html']['allowed_max_hit_in_project']\r\n\r\n config['quantity_hits_more_than'] = cfg['dcr_ccr_html']['quantity_hits_more_than']\r\n config['quantity_bonus'] 
= cfg['dcr_ccr_html']['quantity_bonus']\r\n config['quality_top_percentage'] = cfg['dcr_ccr_html']['quality_top_percentage']\r\n config['quality_bonus'] = cfg['dcr_ccr_html']['quality_bonus']\r\n\r\n with open(template_path, 'r') as file:\r\n content = file.read()\r\n file.seek(0)\r\n t = Template(content)\r\n cfg_file = t.render(cfg=config)\r\n\r\n with open(out_path, 'w') as file:\r\n file.write(cfg_file)\r\n file.close()\r\n print(f\" [{out_path}] is created\")\r\n\r\n\r\nasync def create_hit_app_ccr_dcr(cfg, template_path, out_path, training_path, cfg_g):\r\n \"\"\"\r\n Create the hit_app (html file) corresponding to this project for ccr and dcr\r\n :param cfg:\r\n :param template_path:\r\n :param out_path:\r\n :return:\r\n \"\"\"\r\n print(\"Start creating custom hit_app (html)\")\r\n\r\n config = {}\r\n config['cookie_name'] = cfg['cookie_name']\r\n config['qual_cookie_name'] = cfg['qual_cookie_name']\r\n config['allowed_max_hit_in_project'] = cfg['allowed_max_hit_in_project']\r\n\r\n config['hit_base_payment'] = cfg['hit_base_payment']\r\n config['quantity_hits_more_than'] = cfg['quantity_hits_more_than']\r\n config['quantity_bonus'] = cfg['quantity_bonus']\r\n config['quality_top_percentage'] = cfg['quality_top_percentage']\r\n config['quality_bonus'] = float(cfg['quality_bonus']) + float(cfg['quantity_bonus'])\r\n config['sum_quantity'] = float(cfg['quantity_bonus']) + float(cfg['hit_base_payment'])\r\n config['sum_quality'] = config['quality_bonus'] + float(cfg['hit_base_payment'])\r\n\r\n # rating urls\r\n rating_urls = []\r\n n_clips = int(cfg_g['number_of_clips_per_session'])\r\n n_traps = int(cfg_g['number_of_trapping_per_session'])\r\n\r\n for i in range(0, n_clips):\r\n rating_urls.append({\"ref\": f\"${{Q{i}_R}}\", \"processed\": f\"${{Q{i}_P}}\"})\r\n\r\n if n_traps > 1:\r\n print(\"more than 1 trapping clips question is not supported. Proceed with 1 trap\")\r\n rating_urls.append({\"ref\": \"${TP}\", \"processed\": \"${TP}\"})\r\n if 'number_of_gold_clips_per_session' in cfg_g:\r\n print(\"Gold clips are not supported for CCR and DCR method. 
Proceed without them\")\r\n config['rating_urls'] = rating_urls\r\n\r\n # training urls\r\n df_train = pd.read_csv(training_path)\r\n train_urls = []\r\n train_ref = None\r\n for index, row in df_train.iterrows():\r\n if train_ref is None:\r\n train_ref = row['training_references']\r\n train_urls.append({\"ref\": f\"{row['training_references']}\", \"processed\": f\"{row['training_clips']}\"})\r\n # add a trapping clips to the training section\r\n train_urls.append({\"ref\": f\"{train_ref}\", \"processed\": f\"{train_ref}\"})\r\n config['training_urls'] = train_urls\r\n config['training_trap_urls'] = train_ref\r\n\r\n with open(template_path, 'r') as file:\r\n content = file.read()\r\n file.seek(0)\r\n t = Template(content)\r\n html = t.render(cfg=config)\r\n\r\n with open(out_path, 'w') as file:\r\n file.write(html)\r\n print(f\" [{out_path}] is created\")\r\n\r\n\r\nasync def create_hit_app_acr(cfg, template_path, out_path, training_path, trap_path, cfg_g, cfg_trapping_store):\r\n \"\"\"\r\n Create the ACR.html file corresponding to this project\r\n :param cfg:\r\n :param template_path:\r\n :param out_path:\r\n :return:\r\n \"\"\"\r\n print(\"Start creating custom acr.html\")\r\n df_trap = pd.DataFrame()\r\n if trap_path and os.path.exists(trap_path):\r\n df_trap = pd.read_csv(trap_path, nrows=1)\r\n else:\r\n trapclipsstore = TrappingSamplesInStore(cfg_trapping_store, 'TrappingQuestions')\r\n df_trap = await trapclipsstore.get_dataframe()\r\n\r\n for index, row in df_trap.iterrows():\r\n trap_url = row['trapping_clips']\r\n trap_ans = row['trapping_ans']\r\n\r\n config = {}\r\n config['cookie_name'] = cfg['cookie_name']\r\n config['qual_cookie_name'] = cfg['qual_cookie_name']\r\n config['allowed_max_hit_in_project'] = cfg['allowed_max_hit_in_project']\r\n config['training_trap_urls'] = trap_url\r\n config['training_trap_ans'] = trap_ans\r\n\r\n config['hit_base_payment'] = cfg['hit_base_payment']\r\n config['quantity_hits_more_than'] = cfg['quantity_hits_more_than']\r\n config['quantity_bonus'] = cfg['quantity_bonus']\r\n config['quality_top_percentage'] = cfg['quality_top_percentage']\r\n config['quality_bonus'] = float(cfg['quality_bonus']) + float(cfg['quantity_bonus'])\r\n config['sum_quantity'] = float(cfg['quantity_bonus']) + float(cfg['hit_base_payment'])\r\n config['sum_quality'] = config['quality_bonus'] + float(cfg['hit_base_payment'])\r\n\r\n df_train = pd.read_csv(training_path)\r\n train = []\r\n for index, row in df_train.iterrows():\r\n train.append(row['training_clips'])\r\n train.append(trap_url)\r\n config['training_urls'] = train\r\n\r\n # rating urls\r\n rating_urls = []\r\n n_clips = int(cfg_g['number_of_clips_per_session'])\r\n n_traps = int(cfg_g['number_of_trapping_per_session'])\r\n n_gold_clips = int(cfg_g['number_of_gold_clips_per_session'])\r\n\r\n for i in range(0, n_clips ):\r\n rating_urls.append('${Q'+str(i)+'}')\r\n if n_traps > 1:\r\n raise Exception(\"more than 1 trapping clips question is not supported.\")\r\n if n_traps == 1:\r\n rating_urls.append('${TP}')\r\n\r\n if n_gold_clips > 1:\r\n raise Exception(\"more than 1 gold question is not supported.\")\r\n if n_gold_clips == 1:\r\n rating_urls.append('${gold_clips}')\r\n\r\n config['rating_urls'] = rating_urls\r\n\r\n with open(template_path, 'r') as file:\r\n content = file.read()\r\n file.seek(0)\r\n t = Template(content)\r\n html = t.render(cfg=config)\r\n\r\n with open(out_path, 'w') as file:\r\n file.write(html)\r\n print(f\" [{out_path}] is created\")\r\n\r\n\r\nasync def 
prepare_csv_for_create_input(cfg, test_method, clips, gold, trapping, general):\r\n \"\"\"\r\n Merge different input files into one dataframe\r\n :param test_method\r\n :param clips:\r\n :param trainings:\r\n :param gold:\r\n :param trapping:\r\n :param general:\r\n :return:\r\n \"\"\"\r\n df_clips = pd.DataFrame()\r\n df_gold = pd.DataFrame()\r\n df_trap = pd.DataFrame()\r\n rating_clips = []\r\n if clips and os.path.exists(clips):\r\n df_clips = pd.read_csv(clips)\r\n else:\r\n rating_clips_stores = cfg.get('RatingClips', 'RatingClipsConfigurations').split(',')\r\n for model in rating_clips_stores:\r\n enhancedClip = ClipsInAzureStorageAccount(cfg[model], model)\r\n eclips = await enhancedClip.clip_names\r\n eclips_urls = [enhancedClip.make_clip_url(clip) for clip in eclips]\r\n\r\n print('length of urls for store [{0}] is [{1}]'.format(model, len(await enhancedClip.clip_names)))\r\n rating_clips = rating_clips + eclips_urls\r\n\r\n df_clips = pd.DataFrame({'rating_clips':rating_clips})\r\n \r\n df_general = pd.read_csv(general)\r\n if test_method == \"acr\":\r\n if gold and os.path.exists(gold):\r\n df_gold = pd.read_csv(gold)\r\n else:\r\n goldclipsstore = GoldSamplesInStore(cfg['GoldenSample'], 'GoldenSample')\r\n df_gold = await goldclipsstore.get_dataframe()\r\n print('total gold clips from store [{0}]'.format(len(await goldclipsstore.clip_names)))\r\n\r\n if trapping and os.path.exists(trapping):\r\n df_trap = pd.read_csv(trapping)\r\n else:\r\n trapclipsstore = TrappingSamplesInStore(cfg['TrappingQuestions'], 'TrappingQuestions')\r\n df_trap = await trapclipsstore.get_dataframe()\r\n print('total trapping clips from store [{0}]'.format(len(await trapclipsstore.clip_names)))\r\n else:\r\n df_gold = None\r\n if not os.path.exists(clips):\r\n testclipsstore = ClipsInAzureStorageAccount(cfg['noisy'], 'noisy')\r\n testclipsurls = [testclipsstore.make_clip_url(clip) for clip in await testclipsstore.clip_names]\r\n print('The total test clips for our study is [{0}]'.format(len(testclipsurls)))\r\n\r\n clipdictList = []\r\n for eclip in rating_clips:\r\n for i, c in enumerate(testclipsurls):\r\n if os.path.basename(c) in eclip:\r\n clipdictList.append({'rating_clips':eclip, 'references':testclipsurls[i]})\r\n break\r\n\r\n df_clips = pd.DataFrame(clipdictList)\r\n df_trap = df_clips[['references']].copy()\r\n df_trap.rename(columns={'references': 'trapping_clips'}, inplace=True)\r\n\r\n result = pd.concat([df_clips, df_gold, df_trap, df_general], axis=1, sort=False)\r\n return result\r\n\r\nasync def main(cfg, test_method, args):\r\n # check assets\r\n general_path = os.path.join(os.path.dirname(__file__), 'assets_master_script/general.csv')\r\n # for acr\r\n acr_template_path = os.path.join(os.path.dirname(__file__), 'P808Template/ACR_template.html')\r\n acr_cfg_template_path = os.path.join(os.path.dirname(__file__),\r\n 'assets_master_script/acr_result_parser_template.cfg')\r\n # for dcr\r\n dcr_template_path = os.path.join(os.path.dirname(__file__), 'P808Template/DCR_template.html')\r\n # for ccr\r\n ccr_template_path = os.path.join(os.path.dirname(__file__), 'P808Template/CCR_template.html')\r\n dcr_ccr_cfg_template_path = os.path.join(os.path.dirname(__file__),\r\n 'assets_master_script/dcr_ccr_result_parser_template.cfg')\r\n\r\n template_path = ''\r\n\r\n assert os.path.exists(general_path), f\"No csv file containing general infos in {general_path}\"\r\n if test_method == \"acr\":\r\n assert os.path.exists(acr_template_path), f\"No html template file found in 
{acr_template_path}\"\r\n assert os.path.exists(acr_cfg_template_path), f\"No cfg template found in {acr_cfg_template_path}\"\r\n template_path = acr_template_path\r\n\r\n if test_method == \"dcr\":\r\n assert os.path.exists(dcr_template_path), f\"No html template file found in {dcr_template_path}\"\r\n assert os.path.exists(dcr_ccr_cfg_template_path), f\"No cfg template found in {dcr_ccr_cfg_template_path}\"\r\n template_path = dcr_template_path\r\n\r\n if test_method == \"ccr\":\r\n assert os.path.exists(ccr_template_path), f\"No html template file found in {ccr_template_path}\"\r\n assert os.path.exists(dcr_ccr_cfg_template_path), f\"No cfg template found in {dcr_ccr_cfg_template_path}\"\r\n template_path = ccr_template_path\r\n\r\n\r\n # create output folder\r\n output_dir = args.project\r\n if not os.path.exists(output_dir):\r\n os.mkdir(output_dir)\r\n # prepare format\r\n df = await prepare_csv_for_create_input(cfg, test_method, args.clips, args.gold_clips, args.trapping_clips, general_path)\r\n\r\n # create inputs\r\n print('Start validating inputs')\r\n ca.validate_inputs(cfg['create_input'], df, test_method)\r\n print('... validation is finished.')\r\n\r\n output_csv_file = os.path.join(output_dir, args.project+'_publish_batch.csv')\r\n ca.create_input_for_mturk(cfg['create_input'], df, test_method, output_csv_file)\r\n\r\n # create hit_app\r\n output_html_file = os.path.join(output_dir, f\"{args.project}_{test_method}.html\")\r\n if test_method == 'acr':\r\n await create_hit_app_acr(cfg['acr_html'], template_path, output_html_file, args.training_clips,\r\n args.trapping_clips, cfg['create_input'], cfg['TrappingQuestions'])\r\n else:\r\n await create_hit_app_ccr_dcr(cfg['dcr_ccr_html'], template_path, output_html_file, args.training_clips,\r\n cfg['create_input'])\r\n\r\n # create a config file for analyzer\r\n output_cfg_file = os.path.join(output_dir, f\"{args.project}_{test_method}_result_parser.cfg\")\r\n if test_method == 'acr':\r\n create_analyzer_cfg_acr(cfg, acr_cfg_template_path, output_cfg_file)\r\n else:\r\n create_analyzer_cfg_dcr_ccr(cfg, dcr_ccr_cfg_template_path, output_cfg_file)\r\n\r\nif __name__ == '__main__':\r\n print(\"Welcome to the Master script for ACR test.\")\r\n parser = argparse.ArgumentParser(description='Master script to prepare the ACR test')\r\n parser.add_argument(\"--project\", help=\"Name of the project\", required=True)\r\n parser.add_argument(\"--cfg\", help=\"Configuration file, see master.cfg\", required=True)\r\n parser.add_argument(\"--method\", required=True,\r\n help=\"one of the test methods: 'acr', 'dcr', or 'ccr'\")\r\n parser.add_argument(\"--clips\", help=\"A csv containing urls of all clips to be rated in column 'rating_clips', in \"\r\n \"case of ccr/dcr it should also contain a column for 'references'\")\r\n parser.add_argument(\"--gold_clips\", help=\"A csv containing urls of all gold clips in column 'gold_clips' and their \"\r\n \"answer in column 'gold_clips_ans'\")\r\n parser.add_argument(\"--training_clips\", help=\"A csv containing urls of all training clips to be rated in training \"\r\n \"section. Column 'training_clips'\", required=True)\r\n parser.add_argument(\"--trapping_clips\", help=\"A csv containing urls of all trapping clips. 
Columns 'trapping_clips'\"\r\n \"and 'trapping_ans'\")\r\n # check input arguments\r\n args = parser.parse_args()\r\n\r\n methods = ['acr', 'dcr', 'ccr']\r\n test_method = args.method.lower()\r\n assert test_method in methods, f\"No such a method supported, please select between 'acr', 'dcr', 'ccr'\"\r\n assert os.path.exists(args.cfg), f\"No config file in {args.cfg}\"\r\n assert os.path.exists(args.training_clips), f\"No csv file containing training clips in {args.training_clips}\"\r\n\r\n cfg = CP.ConfigParser()\r\n cfg._interpolation = CP.ExtendedInterpolation()\r\n cfg.read(args.cfg)\r\n\r\n if args.clips:\r\n assert os.path.exists(args.clips), f\"No csv file containing clips in {args.clips}\"\r\n elif cfg.has_option('RatingClips','RatingClipsConfigurations'):\r\n assert len(cfg['RatingClips']['RatingClipsConfigurations']) > 0, f\"No cloud store for clips specified in config\"\r\n else:\r\n assert True, \"Neither clips file not cloud store provided for rating clips\"\r\n\r\n if test_method == \"acr\":\r\n if args.gold_clips:\r\n assert os.path.exists(args.gold_clips), f\"No csv file containing gold clips in {args.gold_clips}\"\r\n elif cfg.has_option('GoldenSample','Path'):\r\n assert len(cfg['GoldenSample']['Path']) > 0, \"No golden clips store found\"\r\n else:\r\n assert True, \"Neither gold clips file nor store configuration provided\"\r\n\r\n if args.trapping_clips:\r\n assert os.path.exists(args.trapping_clips), f\"No csv file containing trapping clips in {args.trapping_clips}\"\r\n elif cfg.has_option('TrappingQuestions','Path'):\r\n assert len(cfg['TrappingQuestions']['Path']) > 0, \"No golden clips store found\"\r\n else:\r\n assert True, \"Neither Trapping clips file nor store configuration provided\"\r\n\r\n asyncio.run(main(cfg, test_method, args))\r\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
marbiru/gan
[ "c5aa09b0a104f530aea09073cf795d2b3b582218" ]
[ "tensorflow_gan/examples/mnist_estimator/train_experiment_lib.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trains a GANEstimator on MNIST data using `train_and_evaluate`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport numpy as np\nimport PIL\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nimport tensorflow as tf\nimport tensorflow_gan as tfgan\n\nfrom tensorflow_gan.examples.mnist import data_provider\nfrom tensorflow_gan.examples.mnist import networks\nfrom tensorflow_gan.examples.mnist import util\n\n\nHParams = collections.namedtuple('HParams', [\n 'generator_lr', 'discriminator_lr', 'joint_train', 'batch_size',\n 'noise_dims', 'model_dir', 'num_train_steps', 'num_eval_steps',\n 'num_reader_parallel_calls', 'use_dummy_data'\n])\n\n\ndef input_fn(mode, params):\n \"\"\"Input function for GANEstimator.\"\"\"\n if 'batch_size' not in params:\n raise ValueError('batch_size must be in params')\n if 'noise_dims' not in params:\n raise ValueError('noise_dims must be in params')\n bs = params['batch_size']\n nd = params['noise_dims']\n split = 'train' if mode == tf.estimator.ModeKeys.TRAIN else 'test'\n shuffle = (mode == tf.estimator.ModeKeys.TRAIN)\n just_noise = (mode == tf.estimator.ModeKeys.PREDICT)\n\n noise_ds = (tf.data.Dataset.from_tensors(0).repeat()\n .map(lambda _: tf.random.normal([bs, nd])))\n\n if just_noise:\n return noise_ds\n\n if params['use_dummy_data']:\n img = np.zeros((bs, 28, 28, 1), dtype=np.float32)\n images_ds = tf.data.Dataset.from_tensors(img).repeat()\n else:\n images_ds = (data_provider.provide_dataset(\n split, bs, params['num_reader_parallel_calls'],\n shuffle).map(lambda x: x['images'])) # Just take the images.\n\n return tf.data.Dataset.zip((noise_ds, images_ds))\n\n\ndef unconditional_generator(noise, mode):\n \"\"\"MNIST generator with extra argument for tf.Estimator's `mode`.\"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n return networks.unconditional_generator(noise, is_training=is_training)\n\n\ndef get_metrics(gan_model):\n \"\"\"Return metrics for MNIST experiment.\"\"\"\n real_mnist_score = util.mnist_score(gan_model.real_data)\n generated_mnist_score = util.mnist_score(gan_model.generated_data)\n frechet_distance = util.mnist_frechet_distance(\n gan_model.real_data, gan_model.generated_data)\n return {\n 'real_mnist_score': tf.compat.v1.metrics.mean(real_mnist_score),\n 'mnist_score': tf.compat.v1.metrics.mean(generated_mnist_score),\n 'frechet_distance': tf.compat.v1.metrics.mean(frechet_distance),\n }\n\n\ndef make_estimator(hparams):\n return tfgan.estimator.GANEstimator(\n model_dir=hparams.model_dir,\n generator_fn=unconditional_generator,\n discriminator_fn=networks.unconditional_discriminator,\n generator_loss_fn=tfgan.losses.wasserstein_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,\n params=hparams._asdict(),\n 
generator_optimizer=tf.compat.v1.train.AdamOptimizer(\n hparams.generator_lr, 0.5),\n discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(\n hparams.discriminator_lr, 0.5),\n add_summaries=tfgan.estimator.SummaryType.IMAGES,\n get_eval_metric_ops_fn=get_metrics)\n\n\ndef write_predictions_to_disk(predictions, out_dir, current_step):\n \"\"\"Write some inference from the final model to disk.\"\"\"\n grid_shape = (predictions.shape[0] // 10, 10)\n tiled_image = tfgan.eval.python_image_grid(predictions, grid_shape=grid_shape)\n eval_dir = os.path.join(out_dir, 'outputs')\n if not tf.io.gfile.exists(eval_dir):\n tf.io.gfile.makedirs(eval_dir)\n fn = os.path.join(eval_dir, 'unconditional_gan_%ssteps.png' % current_step)\n with tf.io.gfile.GFile(fn, 'w') as f:\n # Convert tiled_image from float32 in [-1, 1] to unit8 [0, 255].\n img_np = np.squeeze((255 / 2.0) * (tiled_image + 1.0), axis=2)\n pil_image = PIL.Image.fromarray(img_np.astype(np.uint8))\n pil_image.convert('RGB').save(f, 'PNG')\n tf.compat.v1.logging.info('Wrote output to: %s', fn)\n\n\ndef train(hparams):\n \"\"\"Trains an MNIST GAN.\n\n Args:\n hparams: An HParams instance containing the hyperparameters for training.\n \"\"\"\n estimator = make_estimator(hparams)\n train_spec = tf.estimator.TrainSpec(\n input_fn=input_fn, max_steps=hparams.num_train_steps)\n eval_spec = tf.estimator.EvalSpec(\n name='default', input_fn=input_fn, steps=hparams.num_eval_steps)\n\n # Run training and evaluation for some steps.\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n # Generate predictions and write them to disk.\n yields_prediction = estimator.predict(input_fn)\n predictions = np.array([next(yields_prediction) for _ in xrange(100)])\n write_predictions_to_disk(predictions, hparams.model_dir,\n hparams.num_train_steps)\n" ]
[ [ "tensorflow.compat.v1.metrics.mean", "tensorflow.data.Dataset.from_tensors", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile", "numpy.squeeze", "tensorflow.random.normal", "tensorflow.io.gfile.makedirs", "tensorflow.data.Dataset.zip", "tensorflow.estimator.TrainSpec", "tensorflow.compat.v1.logging.info", "tensorflow.estimator.EvalSpec", "numpy.zeros", "tensorflow.estimator.train_and_evaluate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chenlheng/pymarl
[ "b6455f939199eec909356608195201d5c80c06b0" ]
[ "src/modules/critics/coma.py" ]
[ "import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass COMACritic(nn.Module):\n def __init__(self, scheme, args):\n super(COMACritic, self).__init__()\n\n self.args = args\n self.n_actions = args.n_actions\n self.n_agents = args.n_agents\n\n input_shape = self._get_input_shape(scheme)\n self.output_type = \"q\"\n\n # Set up network layers\n self.fc1 = nn.Linear(input_shape, 128)\n self.fc2 = nn.Linear(128, 128)\n self.fc3 = nn.Linear(128, self.n_actions)\n\n def forward(self, batch, t=None):\n inputs = self._build_inputs(batch, t=t)\n x = F.relu(self.fc1(inputs))\n x = F.relu(self.fc2(x))\n q = self.fc3(x)\n return q\n\n def _build_inputs(self, batch, t=None):\n bs = batch.batch_size\n max_t = batch.max_seq_length if t is None else 1\n ts = slice(None) if t is None else slice(t, t+1) # if t else all\n inputs = []\n # state\n inputs.append(batch[\"state\"][:, ts].unsqueeze(2).repeat(1, 1, self.n_agents, 1))\n\n # observation\n inputs.append(batch[\"obs\"][:, ts])\n\n # print('obs_shape in _build_inputs():', np.array(batch['obs']).shape) # [bs, t_max, n, ob_size]\n print('sigs_shape in _build_inputs():', np.array(batch['sigs']).shape) # [bs, t_max, n, ob_size]\n # actions (masked out by agent)\n actions = batch[\"actions_onehot\"][:, ts].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)\n agent_mask = (1 - th.eye(self.n_agents, device=batch.device))\n agent_mask = agent_mask.view(-1, 1).repeat(1, self.n_actions).view(self.n_agents, -1)\n inputs.append(actions * agent_mask.unsqueeze(0).unsqueeze(0))\n\n # print('in _build_inputs')\n # print('actions', actions.shape) # [bs, ts, num_agt, num_agt*act_size]\n # print('agent_mask', agent_mask.shape) # [num_agt, num_agt*act_size]\n\n # last actions\n if t == 0:\n inputs.append(th.zeros_like(batch[\"actions_onehot\"][:, 0:1]).view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))\n elif isinstance(t, int):\n inputs.append(batch[\"actions_onehot\"][:, slice(t-1, t)].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))\n else:\n last_actions = th.cat([th.zeros_like(batch[\"actions_onehot\"][:, 0:1]), batch[\"actions_onehot\"][:, :-1]], dim=1)\n last_actions = last_actions.view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)\n inputs.append(last_actions)\n\n inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))\n\n inputs = th.cat([x.reshape(bs, max_t, self.n_agents, -1) for x in inputs], dim=-1)\n\n return inputs\n\n def _get_input_shape(self, scheme):\n # state\n input_shape = scheme[\"state\"][\"vshape\"]\n # observation\n input_shape += scheme[\"obs\"][\"vshape\"]\n # actions and last actions\n input_shape += scheme[\"actions_onehot\"][\"vshape\"][0] * self.n_agents * 2\n # agent id\n input_shape += self.n_agents\n return input_shape" ]
[ [ "torch.nn.Linear", "numpy.array", "torch.eye", "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amyburden/keras-CenterNet
[ "5e78070595f73f80d80295800c8d16aa6507feba" ]
[ "utils/compute_overlap.py" ]
[ "import numpy as np\n\ndef compute_overlap(\n boxes,\n query_boxes\n):\n \"\"\"\n Args\n a: (N, 4) ndarray of float\n b: (K, 4) ndarray of float\n\n Returns\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), dtype=np.float64)\n\n for k in range(K):\n box_area = (\n (query_boxes[k, 2] - query_boxes[k, 0] + 1) *\n (query_boxes[k, 3] - query_boxes[k, 1] + 1)\n )\n for n in range(N):\n iw = (\n min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]) + 1\n )\n if iw > 0:\n ih = (\n min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]) + 1\n )\n if ih > 0:\n ua = np.float64(\n (boxes[n, 2] - boxes[n, 0] + 1) *\n (boxes[n, 3] - boxes[n, 1] + 1) +\n box_area - iw * ih\n )\n overlaps[n, k] = iw * ih / ua\n return overlaps\n" ]
[ [ "numpy.zeros", "numpy.float64" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
data-science-workshops/deep-learning
[ "6f7c4c6aa2cc3cbf781a89e85137ebfc65faa80a" ]
[ "notebooks/utils/callback.py" ]
[ "import numpy as np\nfrom time import time\nfrom keras.callbacks import Callback\n\nclass TimingCallback(Callback):\n def __init__(self):\n super(TimingCallback, self).__init__()\n self.epoch_logs=[]\n self.batch_logs=[]\n self.train_logs=[]\n\n def on_epoch_begin(self, epoch, logs=None):\n self.epoch_starttime = time()\n\n def on_epoch_end(self, epoch, logs=None):\n self.epoch_logs.append(time() - self.epoch_starttime)\n\n def on_batch_begin(self, batch, logs=None):\n self.batch_starttime = time()\n\n def on_batch_end(self, batch, logs=None):\n self.batch_logs.append(time() - self.batch_starttime)\n\n def on_train_begin(self, logs=None):\n self.train_starttime = time()\n\n def on_train_end(self, logs=None):\n self.train_logs.append(time() - self.train_starttime)\n\n @property\n def epoch_mean_time(self):\n return np.array(self.epoch_logs).mean()\n\n @property\n def batch_mean_time(self):\n return np.array(self.batch_logs).mean()\n\n @property\n def train_mean_time(self):\n return np.array(self.train_logs).mean()" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FelixFaber/ASAP
[ "951d9667143095e42f1566816b4ab90d901b56a8", "951d9667143095e42f1566816b4ab90d901b56a8" ]
[ "asaplib/compressor/split.py", "scripts/ridge_regression.py" ]
[ "\"\"\"\nTODO: Module-level description\n\"\"\"\n\nimport collections\nfrom abc import ABCMeta\n\nimport numpy as np\nfrom sklearn.externals.six import with_metaclass\nfrom sklearn.model_selection._split import KFold as _KFold\nfrom sklearn.model_selection._split import ShuffleSplit as _ShuffleSplit\nfrom sklearn.utils import check_random_state\n\n\ndef exponential_split(xmin, xmax, n=5):\n \"\"\"\n Obtain integers that are equally spaced in log space.\n\n Parameters\n ----------\n xmin: lower bound in original space\n xmax: upper bound in original space\n n: integer giving the number of spaces (default is 5)\n\n Returns: numpy array of n evenly spaced points in log space\n -------\n \"\"\"\n\n X = np.zeros(n, dtype=int)\n [lmin, lmax] = [np.log(xmin), np.log(xmax)]\n dl = (lmax - lmin) / (n - 1.)\n X[0] = xmin\n X[-1] = xmax\n for i in range(1, n - 1):\n X[i] = int(np.exp(lmin + dl * i))\n return X\n\n\ndef kernel_random_split(X, y, r=0.05):\n \"\"\"\n\n Parameters\n ----------\n X\n y\n r\n\n Returns\n -------\n\n \"\"\"\n\n if X.shape[0] != X.shape[1]:\n raise ValueError('Kernel matrix is not square')\n if len(X) != len(y):\n raise ValueError('Length of the vector of properties is not the same as the number of samples')\n\n n_sample = len(X)\n all_list = np.arange(n_sample)\n randomchoice = np.random.rand(n_sample)\n test_member_mask = (randomchoice < r)\n train_list = all_list[~test_member_mask]\n test_list = all_list[test_member_mask]\n\n X_train = X[:, train_list][train_list]\n y_train = y[train_list]\n\n X_test = X[:, train_list][test_list]\n y_test = y[test_list]\n\n if len(X_test) < 1:\n raise ValueError(\"No test set selected. Increase sample size and/or test ratio.\")\n\n return X_train, X_test, y_train, y_test, train_list, test_list\n\n\n\"\"\"\nadapted from Felix Musil ML_tools\n\"\"\"\n\n\nclass KFold(_KFold):\n def __init__(self, n_splits=3, shuffle=False, random_state=None):\n super(KFold, self).__init__(n_splits, shuffle, random_state)\n\n def get_params(self):\n params = dict(n_splits=self.n_splits, shuffle=self.shuffle, random_state=self.random_state)\n return params\n\n\nclass ShuffleSplit(_ShuffleSplit):\n def __init__(self, n_splits=10, test_size=\"default\", train_size=None, random_state=None):\n super(ShuffleSplit, self).__init__(n_splits, test_size, train_size, random_state)\n\n def get_params(self):\n params = dict(n_splits=self.n_splits, test_size=self.test_size,\n train_size=self.train_size, random_state=self.random_state)\n return params\n\n\nclass LCSplit(with_metaclass(ABCMeta)):\n def __init__(self, cv, n_repeats=[10], train_sizes=[10], test_size=\"default\", random_state=None, **cvargs):\n if not isinstance(n_repeats, collections.Iterable) or not isinstance(train_sizes, collections.Iterable):\n raise ValueError(\"Number of repetitions or training set sizes must be an iterable.\")\n\n if len(n_repeats) != len(train_sizes):\n raise ValueError(\"Number of repetitions must be equal to length of training set sizes.\")\n\n if any(key in cvargs for key in ('random_state', 'shuffle')):\n raise ValueError(\"cvargs must not contain random_state or shuffle.\")\n\n self.cv = cv\n self.n_repeats = n_repeats\n self.train_sizes = train_sizes\n self.random_state = random_state\n self.cvargs = cvargs\n self.test_size = test_size\n self.n_splits = np.sum(n_repeats)\n\n def get_params(self):\n params = dict(cv=self.cv.get_params(), n_repeats=self.n_repeats, train_sizes=self.train_sizes,\n test_size=self.test_size, random_state=self.random_state, cvargs=self.cvargs)\n return 
params\n\n def split(self, X, y=None, groups=None):\n \"\"\"Generates indices to split data into training and test set.\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, of length n_samples\n The target variable for supervised learning problems.\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n Returns\n -------\n train : ndarray\n The training set indices for that split.\n test : ndarray\n The testing set indices for that split.\n \"\"\"\n\n rng = check_random_state(self.random_state)\n\n for n_repeat, train_size in zip(self.n_repeats, self.train_sizes):\n cv = self.cv(random_state=rng, n_splits=n_repeat, test_size=self.test_size, train_size=train_size,\n **self.cvargs)\n for train_index, test_index in cv.split(X, y, groups):\n yield train_index, test_index\n\n def get_n_splits(self, X=None, y=None, groups=None):\n \"\"\"Returns the number of splitting iterations in the cross-validator\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n ``np.zeros(n_samples)`` may be used as a placeholder.\n y : object\n Always ignored, exists for compatibility.\n ``np.zeros(n_samples)`` may be used as a placeholder.\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n \"\"\"\n rng = check_random_state(self.random_state)\n n_splits = 0\n for n_repeat, train_size in zip(self.n_repeats, self.train_sizes):\n cv = self.cv(random_state=rng, n_splits=n_repeat, test_size=self.test_size, train_size=train_size,\n **self.cvargs)\n n_splits += cv.get_n_splits(X, y, groups)\n return n_splits\n", "#!/usr/bin/python3\n\"\"\"\nTODO: Module-level description\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ase.io import read\nfrom sklearn.model_selection import train_test_split\n\nfrom asaplib.fit import RidgeRegression\nfrom asaplib.fit import get_score\nfrom asaplib.io import str2bool\nfrom asaplib.plot import plot_styles\n\n\ndef main(fmat, fxyz, fy, prefix, scale, test_ratio, jitter, n_sparse, sigma):\n \"\"\"\n\n Parameters\n ----------\n fmat: Location of descriptor matrix file or name of the tags in ase xyz file. You can use gen_descriptors.py to compute it.\n fxyz: Location of xyz file for reading the properties.\n fy: Location of property list (1D-array of floats)\n prefix: filename prefix for learning curve figure\n scale: Scale the coordinates (True/False). 
Scaling highly recommanded.\n test_ratio: train/test ratio\n jitter: jitter level, default is 1e-10\n n_sparse: number of representative samples\n sigma: noise level in kernel ridge regression\n\n Returns\n -------\n\n Learning curve.\n\n \"\"\"\n\n scale = bool(scale)\n\n # try to read the xyz file\n if fxyz != 'none':\n try:\n frames = read(fxyz, ':')\n nframes = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(nframes), 'frames')\n except:\n raise ValueError('Cannot load the xyz file')\n\n desc = []\n ndesc = 0\n # load from xyz file\n if nframes > 1:\n for i, frame in enumerate(frames):\n if fmat in frame.info:\n try:\n desc.append(frame.info[fmat])\n if ndesc > 0 and len(frame.info[fmat]) != ndesc:\n raise ValueError('mismatch of number of descriptors between frames')\n ndesc = len(frame.info[fmat])\n except:\n raise ValueError('Cannot combine the descriptor matrix from the xyz file')\n if desc != [] and np.shape(desc)[1] != nframes:\n desc = np.asmatrix(desc)\n # print(np.shape(desc))\n desc.reshape((ndesc, nframes))\n else:\n # only one frame\n try:\n desc = frames[0].get_array(fmat)\n except:\n ValueError('Cannot read the descriptor matrix from single frame')\n else:\n print(\"Did not provide the xyz file. We can only output descriptor matrix.\")\n output = 'matrix'\n\n # we can also load the descriptor matrix from a standalone file\n if os.path.isfile(fmat):\n try:\n desc = np.genfromtxt(fmat, dtype=float)\n print(\"loaded the descriptor matrix from file: \", fmat)\n except:\n raise ValueError('Cannot load the descriptor matrix from file')\n if len(desc) == 0:\n raise ValueError('Please supply descriptor in a xyz file or a standlone descriptor matrix')\n print(\"shape of the descriptor matrix: \", np.shape(desc), \"number of descriptors: \", np.shape(desc[0]))\n\n # read in the properties to be predicted\n y_all = []\n try:\n y_all = np.genfromtxt(fy, dtype=float)\n except:\n try:\n for frame in frames:\n if fy == 'volume' or fy == 'Volume':\n y_all.append(frame.get_volume() / len(frame.get_positions()))\n elif fy == 'size' or fy == 'Size':\n y_all.append(len(frame.get_positions()))\n else:\n y_all.append(frame.info[fy] / len(frame.get_positions()))\n except:\n raise ValueError('Cannot load the property vector')\n if len(y_all) != nframes:\n raise ValueError('Length of the vector of properties is not the same as number of samples')\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n # add bias\n desc_bias = np.ones((np.shape(desc)[0], np.shape(desc)[1] + 1))\n desc_bias[:, 1:] = desc\n print(np.shape(desc_bias))\n # train test split\n if test_ratio > 0:\n X_train, X_test, y_train, y_test = train_test_split(desc_bias, y_all, test_size=test_ratio, random_state=42)\n else:\n X_train = X_test = desc_bias\n y_train = y_test = y_all\n n_train = len(X_train)\n n_test = len(X_test)\n\n # TODO: add sparsification\n\n rr = RidgeRegression(jitter)\n # fit the model\n rr.fit(X_train, y_train)\n\n # get the predictions for train set\n y_pred = rr.predict(X_train)\n # compute the CV score for the dataset\n print(\"train score: \", get_score(y_pred, y_train))\n # get the predictions for test set\n y_pred_test = rr.predict(X_test)\n # compute the CV score for the dataset\n print(\"test score: \", get_score(y_pred_test, y_test))\n\n plot_styles.set_nice_font()\n fig = plt.figure(figsize=(8 * 2.1, 8))\n ax = fig.add_subplot(121)\n 
ax.plot(y_train, y_pred, 'b.', label='train')\n ax.plot(y_test, y_pred_test, 'r.', label='test')\n ax.legend()\n ax.set_title('Ridge regression for: ' + fy)\n ax.set_xlabel('actual y')\n ax.set_ylabel('predicted y')\n\n # TODO: add learning curve\n\n plt.show()\n fig.savefig('RR_4_' + prefix + '.png')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-fmat', type=str, default='ASAP_desc',\n help='Location of descriptor matrix file or name of the tags in ase xyz file. You can use gen_descriptors.py to compute it.')\n parser.add_argument('-fxyz', type=str, default='none', help='Location of xyz file for reading the properties.')\n parser.add_argument('-fy', type=str, default='none', help='Location of the list of properties (N floats)')\n parser.add_argument('--prefix', type=str, default='ASAP', help='Filename prefix')\n parser.add_argument('--scale', type=str2bool, nargs='?', const=True, default=True,\n help='Scale the coordinates (True/False). Scaling highly recommanded.')\n parser.add_argument('--test', type=float, default=0.05, help='the test ratio')\n parser.add_argument('--jitter', type=float, default=1e-10,\n help='regularizer that improves the stablity of matrix inversion')\n parser.add_argument('--n', type=int, default=-1, help='number of the representative samples')\n parser.add_argument('--sigma', type=float, default=1e-2, help='the noise level of the signal')\n\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n args = parser.parse_args()\n\n main(args.fmat, args.fxyz, args.fy, args.prefix, args.scale, args.test, args.jitter, args.n, args.sigma)\n" ]
[ [ "sklearn.utils.check_random_state", "sklearn.externals.six.with_metaclass", "numpy.log", "numpy.arange", "numpy.random.rand", "numpy.exp", "numpy.zeros", "numpy.sum" ], [ "sklearn.model_selection.train_test_split", "numpy.genfromtxt", "numpy.asmatrix", "numpy.shape", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BourneXu/AntiGPS
[ "69469b5667e39ae4b239cb85359a8c8f94084e95" ]
[ "script/utility.py" ]
[ "# -*- coding:utf-8 -*-\nimport io\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport geocoder\nimport hypertools as hyp\nimport plotly.express as px\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom geopy import distance\nfrom loguru import logger\nfrom dynaconf import settings\nfrom statsmodels.distributions.empirical_distribution import ECDF\n\nNORTHERNMOST = 49.0\nSOUTHERNMOST = 25.0\nEASTERNMOST = -66.0\nWESTERNMOST = -124.0\n\n\nclass Utility:\n @staticmethod\n def plot(x, y, xlabel, ylabel, title, filename=None):\n fig, ax = plt.subplots()\n ax.plot(x, y)\n ax.set(xlabel=xlabel, ylabel=ylabel, title=title)\n if filename:\n fig.savefig(filename)\n plt.show()\n\n @staticmethod\n def image_display(image: bytes):\n image = Image.open(io.BytesIO(image))\n image.show()\n\n @staticmethod\n def image_save(image, filename, google=False):\n if google:\n ## Cut image to remove google watermark\n width, height = image.size\n image = image.crop((0, 0, width, height - 40))\n image.save(filename, format=\"JPEG\")\n\n @staticmethod\n def image_save_byte(image: bytes, filename, google=False):\n image = Image.open(io.BytesIO(image))\n if google:\n ## Cut image to remove google watermark\n width, height = image.size\n image = image.crop((0, 0, width, height - 40))\n image.save(filename, format=\"JPEG\")\n\n @staticmethod\n def plot_cdf(xarray):\n ecdf = ECDF(xarray)\n line = plt.plot(ecdf.x, ecdf.y * 100)\n return line\n\n @staticmethod\n def generateGPS_random(number_of_points):\n coordinate_list = []\n for _ in range(number_of_points):\n lat = round(random.uniform(SOUTHERNMOST, NORTHERNMOST), 6)\n lng = round(random.uniform(EASTERNMOST, WESTERNMOST), 6)\n # gcode = geocoder.mapbox([lat, lng], method=\"reverse\", key=settings.MAPTOKEN)\n # coordinate_list.append(gcode)\n coordinate_list.append((lat, lng))\n return coordinate_list\n\n @staticmethod\n def visualize_map(coords: dict):\n mapbox_access_token = settings.MAPTOKEN\n px.set_mapbox_access_token(mapbox_access_token)\n fig = px.scatter_mapbox(\n pd.DataFrame(coords),\n lat=\"lats\",\n lon=\"lngs\",\n color_continuous_scale=px.colors.cyclical.IceFire,\n zoom=10,\n )\n fig.show()\n\n @staticmethod\n def concat_images_h_resize(im1, im2, resample=Image.BICUBIC, resize_big_image=True):\n if im1.height == im2.height:\n _im1 = im1\n _im2 = im2\n elif ((im1.height > im2.height) and resize_big_image) or (\n (im1.height < im2.height) and not resize_big_image\n ):\n _im1 = im1.resize(\n (int(im1.width * im2.height / im1.height), im2.height), resample=resample\n )\n _im2 = im2\n else:\n _im1 = im1\n _im2 = im2.resize(\n (int(im2.width * im1.height / im2.height), im1.height), resample=resample\n )\n dst = Image.new(\"RGB\", (_im1.width + _im2.width, _im1.height))\n dst.paste(_im1, (0, 0))\n dst.paste(_im2, (_im1.width, 0))\n return dst\n\n @staticmethod\n def distance_route(route):\n dist = 0\n for start, end in zip(route[:-1], route[1:]):\n coor_start, coor_end = (start[\"lat\"], start[\"lng\"]), (end[\"lat\"], end[\"lng\"])\n dist += distance.distance(coor_start, coor_end).m\n return dist\n\n @staticmethod\n def plot_trainingdata(X_train: np.ndarray, y_train, ndims=3, save_path=None):\n ## X_train is 3-D.\n X_train_2d = np.reshape(X_train, (X_train.shape[0], X_train.shape[1] * X_train.shape[2]))\n geo = hyp.plot(X_train_2d, \".\", hue=y_train, ndims=ndims, legend=True, save_path=save_path)\n\n\ndef test_concat_images():\n folder = \"../results/google_img/\"\n image_files = 
os.listdir(folder)\n img_names = set([x.split(\"_\")[0] for x in image_files])\n for img_name in img_names:\n pano_120 = []\n for idx in range(3):\n current_img = folder + img_name + f\"_{idx}.jpg\"\n pano_120.append(Image.open(current_img))\n image, img1, img2 = pano_120\n image = Utility.concat_images_h_resize(image, img1)\n image = Utility.concat_images_h_resize(image, img2)\n filename = folder + img_name + \".jpg\"\n if not os.path.exists(filename):\n Utility.image_save(image, filename, google=True)\n else:\n logger.warning(f\"Image {filename} is existing\")\n\n\nif __name__ == \"__main__\":\n test_concat_images()\n" ]
[ [ "numpy.reshape", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
WarrenWeckesser/voronoiz
[ "aad2b62bda3015bfb998142b8694c555a518d748" ]
[ "examples/make_animation.py" ]
[ "\nimport numpy as np\nfrom voronoiz import voronoi_grid\nfrom numpngw import write_apng\n\n\nxmin = 0\nxmax = 5\nymin = 0\nymax = 5\n\npoints = np.array([[0.00, 0.00],\n [1.00, 4.51],\n [1.20, 0.30],\n [2.50, 2.60],\n [2.40, 0.80],\n [4.40, 3.30],\n [1.95, 3.00],\n [3.71, 1.90],\n [4.50, 3.66],\n [4.67, 0.21]])\n\ngridsize = 299\n\nfor kwargs in [dict(metric='cityblock'),\n dict(metric='minkowski', p=2),\n dict(metric='minkowski', p=4)]:\n imgs = []\n for theta in np.linspace(0, 2*np.pi, 250, endpoint=False):\n # points[0] will travel about a circle.\n points[0] = 2.5 + 1.5*np.array([np.cos(theta), np.sin(theta)])\n img = voronoi_grid(points, xmin, xmax, ymin, ymax,\n gridsize=(gridsize, gridsize),\n **kwargs)\n img = (160//(len(points)+1)*img + 64).astype(np.uint8)\n img[img == 64] = 0\n for x, y in points:\n i = int(gridsize*(x - xmin)/(xmax - xmin))\n j = int(gridsize*(y - ymin)/(ymax - ymin))\n img[j-1:j+2, i-1:i+2] = 255\n img = np.pad(img, pad_width=1, mode='constant', constant_values=255)\n imgs.append(img)\n\n tag = '_'.join(f\"{key}_{value}\" for key, value in kwargs.items())\n write_apng(f'animation_{tag}.png', imgs, delay=100)\n" ]
[ [ "numpy.pad", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bysen32/WS-DAN.PyTorch
[ "de206591f037ea82fc52eaf6915de7f64375e0c9" ]
[ "models/wsdan.py" ]
[ "\"\"\"\nWS-DAN models\n\nHu et al.,\n\"See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification\",\narXiv:1901.09891\n\nCreated: May 04,2019 - Yuchong Gu\nRevised: Dec 03,2019 - Yuchong Gu\n\"\"\"\nimport logging\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport models.vgg as vgg\nimport models.resnet as resnet\nfrom models.inception import inception_v3, BasicConv2d\n\n__all__ = ['WSDAN']\nEPSILON = 1e-12\n\n\n# Bilinear Attention Pooling\nclass BAP(nn.Module):\n def __init__(self, pool='GAP'):\n super(BAP, self).__init__()\n assert pool in ['GAP', 'GMP']\n if pool == 'GAP':\n self.pool = None\n else:\n self.pool = nn.AdaptiveMaxPool2d(1)\n\n def forward(self, features, attentions):\n B, C, H, W = features.size()\n _, M, AH, AW = attentions.size()\n\n # match size\n if AH != H or AW != W:\n attentions = F.upsample_bilinear(attentions, size=(H, W))\n\n # feature_matrix: (B, M, C) -> (B, M * C)\n if self.pool is None:\n feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)\n else:\n feature_matrix = []\n for i in range(M):\n AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)\n feature_matrix.append(AiF)\n feature_matrix = torch.cat(feature_matrix, dim=1)\n\n # sign-sqrt\n feature_matrix = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)\n\n # l2 normalization along dimension M and C\n feature_matrix = F.normalize(feature_matrix, dim=-1)\n return feature_matrix\n\nclass tri_att(nn.Module):\n def __init__(self):\n super(tri_att, self).__init__()\n self.feature_norm = nn.Softmax(dim=2)\n self.bilinear_norm = nn.Softmax(dim=2)\n \n def forward(self, x):\n n = x.size(0)\n c = x.size(1)\n h = x.size(2)\n w = x.size(3)\n f = x.reshape(n, c, -1)\n\n # *7 to obtain an appropriate scale for the input of softmax function.\n f_norm = self.feature_norm(f * 2)\n\n bilinear = f_norm.bmm(f.transpose(1, 2))\n bilinear = self.bilinear_norm(bilinear)\n trilinear_atts = bilinear.bmm(f).view(n, c, h, w).detach()\n # structure_att = torch.sum(trilinear_atts, dim=1, keepdim=True)\n\n # index = torch.randint(c, (n,))\n # detail_att = trilinear_atts[torch.arange(n), index] + 0.01\n \n # return structure_att, detail_att.unsqueeze(1)\n return trilinear_atts\n\n# WS-DAN: Weakly Supervised Data Augmentation Network for FGVC\nclass WSDAN(nn.Module):\n def __init__(self, num_classes, M=32, net='inception_mixed_6e', pretrained=False):\n super(WSDAN, self).__init__()\n self.num_classes = num_classes\n self.M = M\n self.net = net\n\n # Network Initialization\n if 'inception' in net:\n if net == 'inception_mixed_6e':\n self.features = inception_v3(pretrained=pretrained).get_features_mixed_6e()\n self.num_features = 768\n elif net == 'inception_mixed_7c':\n self.features = inception_v3(pretrained=pretrained).get_features_mixed_7c()\n self.num_features = 2048\n else:\n raise ValueError('Unsupported net: %s' % net)\n elif 'vgg' in net:\n self.features = getattr(vgg, net)(pretrained=pretrained).get_features()\n self.num_features = 512\n elif 'resnet' in net:\n self.features = getattr(resnet, net)(pretrained=pretrained).get_features()\n self.num_features = 512 * self.features[-1][-1].expansion\n else:\n raise ValueError('Unsupported net: %s' % net)\n\n # Attention Maps\n self.attentions = BasicConv2d(self.num_features, self.M, kernel_size=1)\n\n # Bilinear Attention Pooling\n self.bap = BAP(pool='GAP')\n # self.tri_att = 
tri_att()\n\n # Classification Layer\n self.fc = nn.Linear(self.M * self.num_features, self.num_classes, bias=False)\n\n logging.info('WSDAN: using {} as feature extractor, num_classes: {}, num_attentions: {}'.format(net, self.num_classes, self.M))\n\n def forward(self, x):\n batch_size = x.size(0)\n\n # Feature Maps, Attention Maps and Feature Matrix\n feature_maps = self.features(x)\n # feature_maps = self.tri_att(feature_maps)\n if self.net != 'inception_mixed_7c':\n attention_maps = self.attentions(feature_maps)\n else:\n attention_maps = feature_maps[:, :self.M, ...]\n feature_matrix = self.bap(feature_maps, attention_maps)\n\n # Classification\n p = self.fc(feature_matrix * 100.)\n\n # p: (B, self.num_classes)\n # feature_matrix: (B, M * C)\n # attention_map: (B, 2, H, W) in training, (B, 1, H, W) in val/testing\n return p, feature_matrix, attention_maps\n\n def load_state_dict(self, state_dict, strict=True):\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in state_dict.items()\n if k in model_dict and model_dict[k].size() == v.size()}\n\n if len(pretrained_dict) == len(state_dict):\n logging.info('%s: All params loaded' % type(self).__name__)\n else:\n logging.info('%s: Some params were not loaded:' % type(self).__name__)\n not_loaded_keys = [k for k in state_dict.keys() if k not in pretrained_dict.keys()]\n logging.info(('%s, ' * (len(not_loaded_keys) - 1) + '%s') % tuple(not_loaded_keys))\n\n model_dict.update(pretrained_dict)\n super(WSDAN, self).load_state_dict(model_dict)\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Softmax", "torch.nn.AdaptiveMaxPool2d", "torch.abs", "torch.cat", "torch.sign", "torch.einsum", "torch.nn.Linear", "torch.nn.functional.upsample_bilinear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FilipeMaia/afnumpy
[ "11958f501f7ddeb88915a44d0fd4914e1779e7dd" ]
[ "afnumpy/core/numeric.py" ]
[ "import arrayfire\nimport numpy\nimport afnumpy\nfrom .. import private_utils as pu\nfrom numpy import newaxis\nimport numbers\nfrom numpy import broadcast\nfrom ..decorators import *\n\ndef concatenate(arrays, axis=0):\n if(len(arrays) < 1):\n raise ValueError('need at least one array to concatenate')\n if(axis > 3):\n raise NotImplementedError('only up to 4 axis as currently supported')\n arr = arrays[0].d_array.copy()\n axis = pu.c2f(arrays[0].shape, axis)\n for a in arrays[1:]:\n arr = arrayfire.join(axis, arr, a.d_array)\n return afnumpy.ndarray(pu.af_shape(arr), dtype=arrays[0].dtype, af_array=arr)\n\ndef roll(a, shift, axis=None):\n shape = a.shape\n if(axis is None):\n axis = 0\n a = a.flatten()\n axis = pu.c2f(a.shape, axis)\n if axis == 0:\n s = arrayfire.shift(a.d_array, shift, 0, 0, 0)\n elif axis == 1:\n s = arrayfire.shift(a.d_array, 0, shift, 0, 0)\n elif axis == 2:\n s = arrayfire.shift(a.d_array, 0, 0, shift, 0)\n elif axis == 3:\n s = arrayfire.shift(a.d_array, 0, 0, 0, shift)\n else:\n raise NotImplementedError\n return afnumpy.ndarray(a.shape, dtype=a.dtype, af_array=s).reshape(shape)\n\ndef rollaxis(a, axis, start=0):\n n = a.ndim\n if axis < 0:\n axis += n\n if start < 0:\n start += n\n msg = 'rollaxis: %s (%d) must be >=0 and < %d'\n if not (0 <= axis < n):\n raise ValueError(msg % ('axis', axis, n))\n if not (0 <= start < n+1):\n raise ValueError(msg % ('start', start, n+1))\n if (axis < start): # it's been removed\n start -= 1\n if axis==start:\n return a\n axes = list(range(0, n))\n axes.remove(axis)\n axes.insert(start, axis)\n return a.transpose(axes)\n\ndef ones(shape, dtype=float, order='C'):\n b = numpy.ones(shape, dtype, order)\n return afnumpy.ndarray(b.shape, b.dtype, buffer=b,order=order)\n\ndef reshape(a, newshape, order='C'):\n return a.reshape(newshape,order)\n\ndef asanyarray(a, dtype=None, order=None):\n return afnumpy.array(a, dtype, copy=False, order=order, subok=True)\n\ndef floor(x, out=None):\n s = arrayfire.floor(x.d_array)\n a = afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)\n if out is not None:\n out[:] = a[:]\n return a\n\ndef ceil(x, out=None):\n s = arrayfire.ceil(x.d_array)\n a = afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)\n if out is not None:\n out[:] = a[:]\n return a\n\ndef abs(x, out=None):\n if not isinstance(x, afnumpy.ndarray):\n return numpy.abs(x, out)\n a = x.__abs__()\n if out is not None:\n out[:] = a\n return a\n\ndef asarray(a, dtype=None, order=None):\n if(isinstance(a, afnumpy.ndarray) and\n (dtype is None or dtype == a.dtype)):\n # special case for performance\n return a\n return afnumpy.array(a, dtype, copy=False, order=order)\n\ndef ascontiguousarray(a, dtype=None):\n return afnumpy.array(a, dtype, copy=False, order='C', ndmin=1)\n\ndef cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):\n if axis is not None:\n axisa, axisb, axisc = (axis,) * 3\n a = asarray(a)\n b = asarray(b)\n # Move working axis to the end of the shape\n a = rollaxis(a, axisa, a.ndim)\n b = rollaxis(b, axisb, b.ndim)\n msg = (\"incompatible dimensions for cross product\\n\"\n \"(dimension must be 2 or 3)\")\n if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):\n raise ValueError(msg)\n\n # Create the output array\n shape = broadcast(a[..., 0], b[..., 0]).shape\n if a.shape[-1] == 3 or b.shape[-1] == 3:\n shape += (3,)\n dtype = afnumpy.promote_types(a.dtype, b.dtype)\n cp = afnumpy.empty(shape, dtype)\n\n # create local aliases for readability\n a0 = a[..., 0]\n a1 = a[..., 1]\n if a.shape[-1] 
== 3:\n a2 = a[..., 2]\n b0 = b[..., 0]\n b1 = b[..., 1]\n if b.shape[-1] == 3:\n b2 = b[..., 2]\n if cp.ndim != 0 and cp.shape[-1] == 3:\n cp0 = cp[..., 0]\n cp1 = cp[..., 1]\n cp2 = cp[..., 2]\n\n if a.shape[-1] == 2:\n if b.shape[-1] == 2:\n # a0 * b1 - a1 * b0\n afnumpy.multiply(a0, b1, out=cp)\n cp -= a1 * b0\n if cp.ndim == 0:\n return cp\n else:\n # This works because we are moving the last axis\n return rollaxis(cp, -1, axisc)\n else:\n # cp0 = a1 * b2 - 0 (a2 = 0)\n # cp1 = 0 - a0 * b2 (a2 = 0)\n # cp2 = a0 * b1 - a1 * b0\n afnumpy.multiply(a1, b2, out=cp0)\n afnumpy.multiply(a0, b2, out=cp1)\n negative(cp1, out=cp1)\n afnumpy.multiply(a0, b1, out=cp2)\n cp2 -= a1 * b0\n elif a.shape[-1] == 3:\n if b.shape[-1] == 3:\n # cp0 = a1 * b2 - a2 * b1\n # cp1 = a2 * b0 - a0 * b2\n # cp2 = a0 * b1 - a1 * b0\n afnumpy.multiply(a1, b2, out=cp0)\n tmp = afnumpy.array(a2 * b1)\n cp0 -= tmp\n afnumpy.multiply(a2, b0, out=cp1)\n afnumpy.multiply(a0, b2, out=tmp)\n cp1 -= tmp\n afnumpy.multiply(a0, b1, out=cp2)\n afnumpy.multiply(a1, b0, out=tmp)\n cp2 -= tmp\n else:\n # cp0 = 0 - a2 * b1 (b2 = 0)\n # cp1 = a2 * b0 - 0 (b2 = 0)\n # cp2 = a0 * b1 - a1 * b0\n afnumpy.multiply(a2, b1, out=cp0)\n negative(cp0, out=cp0)\n afnumpy.multiply(a2, b0, out=cp1)\n afnumpy.multiply(a0, b1, out=cp2)\n cp2 -= a1 * b0\n\n if cp.ndim == 1:\n return cp\n else:\n # This works because we are moving the last axis\n return rollaxis(cp, -1, axisc)\n\n@outufunc\ndef isnan(x):\n if not isinstance(x, afnumpy.ndarray):\n return numpy.isnan(x)\n s = arrayfire.isnan(x.d_array)\n return afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)\n\n@outufunc\ndef isinf(x):\n if not isinstance(x, afnumpy.ndarray):\n return numpy.isinf(x)\n s = arrayfire.isinf(x.d_array)\n return afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)\n\ndef full(shape, fill_value, dtype=None, order='C'):\n \"\"\"\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array The default, `None`, means\n `np.array(fill_value).dtype`.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n\n See Also\n --------\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), np.inf)\n array([[ inf, inf],\n [ inf, inf]])\n >>> np.full((2, 2), 10)\n array([[10, 10],\n [10, 10]])\n\n \"\"\"\n if dtype is None:\n dtype = array(fill_value).dtype\n s = arrayfire.constant(fill_value,*shape[::-1],dtype=pu.typemap(dtype))\n return afnumpy.ndarray(shape, dtype=pu.typemap(s.dtype()), af_array=s)" ]
[ [ "numpy.abs", "numpy.isnan", "numpy.ones", "numpy.broadcast", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jacksonsdean/evolutionary-robotics-
[ "af333afb03dcb3759da233aecd93975dde17df7d" ]
[ "util.py" ]
[ "import math\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport networkx as nx\n# import pygraphviz as pgv \n# from node_functions import *\n# from networkx.drawing.nx_agraph import graphviz_layout\nimport sys\nimport inspect\nimport random\nimport pyrosim.pyrosim as pyrosim\nfrom scikits import bootstrap\n\nimport constants as c\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n \ndef choose_random_function():\n return random.choice(c.activations)\n\n\ndef name_to_fn(name):\n fns = inspect.getmembers(sys.modules[\"node_functions\"])\n fns.extend([(\"\", None)])\n def avg_pixel_distance_fitness():\n pass\n fns.extend([(\"avg_pixel_distance_fitness\", avg_pixel_distance_fitness)])\n return fns[[f[0] for f in fns].index(name)][1]\n \ndef visualize_network(individual, sample_point=[.25]*c.num_sensor_neurons, color_mode=\"L\", visualize_disabled=False, layout='multi', sample=False, show_weights=False, use_inp_bias=False, use_radial_distance=True, save_name=None, extra_text=None, curved=False):\n if(sample):\n individual.eval(sample_point)\n \n # nodes = individual.node_genome\n connections = individual.connection_genome\n\n max_weight = c.max_weight\n\n G = nx.DiGraph()\n function_colors = {}\n # colors = ['r', 'g', 'b', 'c', 'm', 'y', 'orange', 'darkviolet',\n # 'hotpink', 'chocolate', 'lawngreen', 'lightsteelblue']\n colors = ['lightsteelblue'] * len([node.fn for node in individual.node_genome])\n node_labels = {}\n\n node_size = 2000\n # plt.figure(figsize=(int(1+(individual.count_layers())*1.5), 6), frameon=False)\n # plt.figure(figsize=(7, 6), frameon=False)\n plt.subplots_adjust(left=0, bottom=0, right=1.25, top=1.25, wspace=0, hspace=0)\n\n for i, fn in enumerate([node.fn for node in individual.node_genome]):\n function_colors[fn.__name__] = colors[i]\n function_colors[\"identity\"] = colors[0]\n\n fixed_positions={}\n inputs = individual.input_nodes()\n \n for i, node in enumerate(inputs):\n G.add_node(node, color=function_colors[node.fn.__name__], shape='d', layer=(node.layer))\n if node.type == 0:\n node_labels[node] = f\"S{i}:\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\" if node.output!=None else \"\")\n else:\n node_labels[node] = f\"CPG\"\n \n fixed_positions[node] = (-4,((i+1)*2.)/len(inputs))\n\n for node in individual.hidden_nodes():\n G.add_node(node, color=function_colors[node.fn.__name__], shape='o', layer=(node.layer))\n node_labels[node] = f\"{node.id}\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\" if node.output!=None else \"\" )\n\n for i, node in enumerate(individual.output_nodes()):\n title = i\n G.add_node(node, color=function_colors[node.fn.__name__], shape='s', layer=(node.layer))\n node_labels[node] = f\"M{title}:\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\")\n fixed_positions[node] = (4, ((i+1)*2)/len(individual.output_nodes()))\n pos = {}\n # shells = [[node for node in individual.input_nodes()], [node for node in individual.hidden_nodes()], [node for node in individual.output_nodes()]]\n # pos=nx.shell_layout(G, shells, scale=2)\n # pos=nx.shell_layout(G, scale=2)\n # pos=nx.spectral_layout(G, scale=2)\n # pos=graphviz_layout(G, prog='neato') # neato, dot, twopi, circo, fdp, nop, wc, acyclic, gvpr, gvcolor, ccomps, sccmap, tred, sfdp, unflatten.\n fixed_nodes = fixed_positions.keys()\n if(layout=='multi'):\n pos=nx.multipartite_layout(G, scale=4,subset_key='layer')\n elif(layout=='spring'):\n pos=nx.spring_layout(G, scale=4)\n\n plt.figure(figsize=(10, 10))\n # pos = nx.shell_layout(G)\n # pos = 
fixed_positions\n # pos = nx.spring_layout(G, pos=pos, fixed=fixed_nodes,k=.1, scale = 2, iterations=2000)\n # for f, p in fixed_positions.items():\n # pos[f] = (p[0]*20, p[1]*20)\n shapes = set((node[1][\"shape\"] for node in G.nodes(data=True)))\n for shape in shapes:\n this_nodes = [sNode[0] for sNode in filter(\n lambda x: x[1][\"shape\"] == shape, G.nodes(data=True))]\n colors = [nx.get_node_attributes(G, 'color')[cNode] for cNode in this_nodes]\n nx.draw_networkx_nodes(G, pos, node_size=node_size, node_color=colors,\n label=node_labels, node_shape=shape, nodelist=this_nodes)\n\n edge_labels = {}\n for cx in connections:\n if(not visualize_disabled and (not cx.enabled or np.isclose(cx.weight, 0))): continue\n style = ('-', 'k', .5+abs(cx.weight)/max_weight) if cx.enabled else ('--', 'grey', .5+ abs(cx.weight)/max_weight)\n if(cx.enabled and cx.weight<0): style = ('-', 'r', .5+abs(cx.weight)/max_weight)\n if cx.fromNode in G.nodes and cx.toNode in G.nodes:\n G.add_edge(cx.fromNode, cx.toNode, weight=f\"{cx.weight:.4f}\", pos=pos, style=style)\n else:\n print(\"Connection not in graph:\", cx.fromNode.id, \"->\", cx.toNode.id)\n edge_labels[(cx.fromNode, cx.toNode)] = f\"{cx.weight:.3f}\"\n\n\n edge_colors = nx.get_edge_attributes(G,'color').values()\n edge_styles = shapes = set((s[2] for s in G.edges(data='style')))\n # use_curved = show_weights or individual.count_layers()<3\n use_curved = curved\n for s in edge_styles:\n edges = [e for e in filter(\n lambda x: x[2] == s, G.edges(data='style'))]\n nx.draw_networkx_edges(G, pos,\n edgelist=edges,\n arrowsize=25, arrows=True, \n node_size=[node_size]*1000,\n style=s[0],\n edge_color=[s[1]]*1000,\n width =s[2],\n connectionstyle= \"arc3\" if not use_curved else f\"arc3,rad={0.2*random.random()}\",\n # connectionstyle= \"arc3\"\n )\n \n if extra_text is not None:\n plt.text(0.5,0.05, extra_text, horizontalalignment='center', verticalalignment='center', transform=plt.gcf().transFigure)\n \n \n if (show_weights):\n nx.draw_networkx_edge_labels(G, pos, edge_labels, label_pos=.75)\n nx.draw_networkx_labels(G, pos, labels=node_labels)\n plt.tight_layout()\n if save_name is not None:\n plt.savefig(save_name, format=\"PNG\")\n # plt.close()\n else:\n plt.show()\n # plt.close()\n\ndef visualize_hn_phenotype_network(connection_genome, node_genome, sample_point=[.25]*c.num_sensor_neurons, visualize_disabled=False, layout='multi', sample=False, show_weights=False, use_inp_bias=False, use_radial_distance=True, save_name=None, extra_text=None):\n # nodes = individual.node_genome\n connections = connection_genome\n input_nodes = [n for n in node_genome if n.type == 0]\n output_nodes = [n for n in node_genome if n.type == 1]\n hidden_nodes = [n for n in node_genome if n.type == 2]\n max_weight = c.max_weight\n\n G = nx.DiGraph()\n function_colors = {}\n # colors = ['r', 'g', 'b', 'c', 'm', 'y', 'orange', 'darkviolet',\n # 'hotpink', 'chocolate', 'lawngreen', 'lightsteelblue']\n colors = ['lightsteelblue'] * len([node.fn for node in node_genome])\n node_labels = {}\n\n node_size = 2000\n # plt.figure(figsize=(int(1+(individual.count_layers())*1.5), 6), frameon=False)\n # plt.figure(figsize=(7, 6), frameon=False)\n plt.subplots_adjust(left=0, bottom=0, right=1.25, top=1.25, wspace=0, hspace=0)\n\n for i, fn in enumerate([node.fn for node in node_genome]):\n function_colors[fn.__name__] = colors[i]\n function_colors[\"identity\"] = colors[0]\n\n fixed_positions={}\n inputs = input_nodes\n \n for i, node in enumerate(inputs):\n G.add_node(node, 
color=function_colors[node.fn.__name__], shape='d', layer=(node.layer))\n if node.type == 0:\n node_labels[node] = f\"S{i}:\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\" if node.output!=None else \"\")\n else:\n node_labels[node] = f\"CPG\"\n \n fixed_positions[node] = (-4,((i+1)*2.)/len(inputs))\n\n for node in hidden_nodes:\n G.add_node(node, color=function_colors[node.fn.__name__], shape='o', layer=(node.layer))\n node_labels[node] = f\"{node.id}\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\" if node.output!=None else \"\" )\n\n for i, node in enumerate(output_nodes):\n title = i\n G.add_node(node, color=function_colors[node.fn.__name__], shape='s', layer=(node.layer))\n node_labels[node] = f\"M{title}:\\n{node.fn.__name__}\\n\"+(f\"{node.output:.3f}\")\n fixed_positions[node] = (4, ((i+1)*2)/len(output_nodes))\n pos = {}\n # shells = [[node for node in individual.input_nodes()], [node for node in individual.hidden_nodes()], [node for node in individual.output_nodes()]]\n # pos=nx.shell_layout(G, shells, scale=2)\n # pos=nx.shell_layout(G, scale=2)\n # pos=nx.spectral_layout(G, scale=2)\n # pos=graphviz_layout(G, prog='neato') # neato, dot, twopi, circo, fdp, nop, wc, acyclic, gvpr, gvcolor, ccomps, sccmap, tred, sfdp, unflatten.\n fixed_nodes = fixed_positions.keys()\n if(layout=='multi'):\n pos=nx.multipartite_layout(G, scale=4,subset_key='layer')\n elif(layout=='spring'):\n pos=nx.spring_layout(G, scale=4)\n\n plt.figure(figsize=(10, 10))\n # pos = nx.shell_layout(G)\n # pos = fixed_positions\n # pos = nx.spring_layout(G, pos=pos, fixed=fixed_nodes,k=.1, scale = 2, iterations=2000)\n # for f, p in fixed_positions.items():\n # pos[f] = (p[0]*20, p[1]*20)\n shapes = set((node[1][\"shape\"] for node in G.nodes(data=True)))\n for shape in shapes:\n this_nodes = [sNode[0] for sNode in filter(\n lambda x: x[1][\"shape\"] == shape, G.nodes(data=True))]\n colors = [nx.get_node_attributes(G, 'color')[cNode] for cNode in this_nodes]\n nx.draw_networkx_nodes(G, pos, node_size=node_size, node_color=colors,\n label=node_labels, node_shape=shape, nodelist=this_nodes)\n\n edge_labels = {}\n for cx in connections:\n if(not visualize_disabled and (not cx.enabled or np.isclose(cx.weight, 0))): continue\n style = ('-', 'k', .5+abs(cx.weight)/max_weight) if cx.enabled else ('--', 'grey', .5+ abs(cx.weight)/max_weight)\n if(cx.enabled and cx.weight<0): style = ('-', 'r', .5+abs(cx.weight)/max_weight)\n if cx.fromNode in G.nodes and cx.toNode in G.nodes:\n G.add_edge(cx.fromNode, cx.toNode, weight=f\"{cx.weight:.4f}\", pos=pos, style=style)\n else:\n print(\"Connection not in graph:\", cx.fromNode.id, \"->\", cx.toNode.id)\n edge_labels[(cx.fromNode, cx.toNode)] = f\"{cx.weight:.3f}\"\n\n\n edge_colors = nx.get_edge_attributes(G,'color').values()\n edge_styles = shapes = set((s[2] for s in G.edges(data='style')))\n # use_curved = show_weights or individual.count_layers()<3\n\n for s in edge_styles:\n edges = [e for e in filter(\n lambda x: x[2] == s, G.edges(data='style'))]\n nx.draw_networkx_edges(G, pos,\n edgelist=edges,\n arrowsize=25, arrows=True, \n node_size=[node_size]*1000,\n style=s[0],\n edge_color=[s[1]]*1000,\n width =s[2],\n # connectionstyle= \"arc3\" if use_curved else \"arc3,rad=0.2\"\n connectionstyle= \"arc3\"\n )\n \n if extra_text is not None:\n plt.text(0.5,0.05, extra_text, horizontalalignment='center', verticalalignment='center', transform=plt.gcf().transFigure)\n \n \n if (show_weights):\n nx.draw_networkx_edge_labels(G, pos, edge_labels, label_pos=.75)\n 
nx.draw_networkx_labels(G, pos, labels=node_labels)\n plt.tight_layout()\n if save_name is not None:\n plt.savefig(save_name, format=\"PNG\")\n # plt.close()\n else:\n plt.show()\n # plt.close()\n\n\n \"\"\n # labels = nx.get_edge_attributes(G,'weight')\n\n\n\ndef plot_mean_and_bootstrapped_ci_over_time(input_data = None, dataset=None, name = \"change me\", x_label = \"change me\", y_label=\"change me\", y_limit = None, plot_bootstrap = True, show=False, title=None):\n \"\"\"\n \n parameters: \n input_data: (numpy array of shape (max_k, num_repitions)) solution metric to plot\n name: (string) name for legend\n x_label: (string) x axis label\n y_label: (string) y axis label\n \n returns:\n None\n \"\"\"\n fig, ax = plt.subplots() # generate figure and axes\n input_data = [np.array(x) for x in input_data if isinstance(x, list)]\n input_data = np.array(input_data)\n if isinstance(name, str): name = [name]; input_data = [input_data]\n\n # for this_input_data, this_name in zip(input_data, name):\n for index, this_name in enumerate(name):\n # print(\"plotting\",this_name, \"with shape\", dataset[index].shape)\n this_input_data = dataset[index]\n total_generations = this_input_data.shape[1]\n if(plot_bootstrap):\n boostrap_ci_generation_found = np.zeros((2,total_generations))\n for this_gen in range(total_generations):\n boostrap_ci_generation_found[:,this_gen] = bootstrap.ci(this_input_data[:,this_gen], np.mean, alpha=0.05)\n\n\n ax.plot(np.arange(total_generations), np.mean(this_input_data,axis=0), label = this_name) # plot the fitness over time\n if plot_bootstrap:\n ax.fill_between(np.arange(total_generations), boostrap_ci_generation_found[0,:], boostrap_ci_generation_found[1,:],alpha=0.3) # plot, and fill, the confidence interval for fitness over time\n ax.set_xlabel(x_label) # add axes labels\n ax.set_ylabel(y_label)\n if y_limit: ax.set_ylim(y_limit[0],y_limit[1])\n if title is not None:\n plt.title(title)\n else:\n plt.title(y_label)\n plt.legend(loc='best'); # add legend\n if show:\n plt.show() \n\ndef get_best_solution_from_all_runs(results):\n best_fit = -math.inf\n best = None\n run_index = -1\n for i, run in enumerate(results):\n sorted_run = sorted(run, key = lambda x: x.fitness, reverse=True)\n run_best = sorted_run[0]\n if(run_best.fitness > best_fit):\n best_fit = run_best.fitness\n best = run_best\n run_index = i\n return best, run_index\n\n\ndef get_max_number_of_hidden_nodes(population):\n max = 0\n for g in population:\n if len(list(g.hidden_nodes()))> max:\n max = len(list(g.hidden_nodes()))\n return max\n\ndef get_avg_number_of_hidden_nodes(population):\n count = 0\n for g in population:\n count+=len(g.node_genome) - g.n_inputs - g.n_outputs\n return count/len(population)\n\ndef get_max_number_of_connections(population):\n max_count = 0\n for g in population:\n count = len(list(g.enabled_connections()))\n if(count > max_count):\n max_count = count\n return max_count\n\ndef get_min_number_of_connections(population):\n min_count = math.inf\n for g in population:\n count = len(list(g.enabled_connections())) \n if(count < min_count):\n min_count = count\n return min_count\n\ndef get_avg_number_of_connections(population):\n count = 0\n for g in population:\n count+=len(list(g.enabled_connections()))\n return count/len(population)\n \n\ndef generate_body(id):\n pyrosim.Start_URDF(f\"body{id}.urdf\")\n pyrosim.Send_Cube(name=\"Torso\", pos=[0, 0, 1], size=[1, 1, 1], mass=c.torso_weight)\n pyrosim.Send_Joint( name = \"Torso_BackLegRot\" , parent= \"Torso\" , child = \"BackLegRot\" 
, type = \"revolute\", position = [0, -0.5, 1.0], jointAxis = \"0 1 0\")\n pyrosim.Send_Joint( name = \"BackLegRot_BackLeg\" , parent= \"BackLegRot\" , child = \"BackLeg\" , type = \"revolute\" if c.num_motor_neurons > 9 else \"fixed\", position = [0, 0, 0], jointAxis = \"1 0 0\")\n \n pyrosim.Send_Cube(name=\"BackLegRot\", pos=[0.0, -0.5, 0.0], size=[0,0,0], mass=0.0)\n pyrosim.Send_Cube(name=\"BackLeg\", pos=[0.0, -0.5, 0.0], size=[.2, 1., .2], mass=1.0)\n pyrosim.Send_Joint( name = \"Torso_FrontLegRot\" , parent= \"Torso\" , child = \"FrontLegRot\" , type = \"revolute\", position = [0.0, 0.5, 1.0], jointAxis = \"1 0 0\")\n pyrosim.Send_Joint( name =\"FrontLegRot_FrontLeg\" , parent= \"FrontLegRot\" , child = \"FrontLeg\" , type = \"revolute\" if c.num_motor_neurons > 9 else \"fixed\", position = [0.0, 0.0, 0.0], jointAxis = \"0 1 0\")\n pyrosim.Send_Cube(name=\"FrontLegRot\", pos=[0.0, 0.5, 0], size=[0,0,0], mass=0.0)\n pyrosim.Send_Cube(name=\"FrontLeg\", pos=[0.0, 0.5, 0], size=[.2, 1., .2], mass=1.0)\n pyrosim.Send_Cube(name=\"LeftLeg\", pos=[-0.5, 0.0, 0.0], size=[1.0, 0.2, 0.2], mass=1.0)\n pyrosim.Send_Cube(name=\"LeftLegRot\", pos=[-0.5, 0.0, 0.0], size=[0,0,0], mass=0.0)\n pyrosim.Send_Joint( name = \"Torso_LeftLegRot\" , parent= \"Torso\" , child = \"LeftLegRot\" , type = \"revolute\", position = [-0.5, 0, 1.], jointAxis = \"1 0 0\")\n pyrosim.Send_Joint( name = \"LeftLegRot_LeftLeg\" , parent= \"LeftLegRot\" , child = \"LeftLeg\" , type = \"revolute\" if c.num_motor_neurons > 9 else \"fixed\", position = [0,0,0], jointAxis = \"0 1 0\" )\n pyrosim.Send_Cube(name=\"RightLegRot\", pos=[0.5, 0.0, 0.0], size=[0,0,0], mass=0.0)\n pyrosim.Send_Cube(name=\"RightLeg\", pos=[0.5, 0.0, 0.0], size=[1.0, 0.2, 0.2], mass=1.0)\n pyrosim.Send_Joint( name = \"Torso_RightLegRot\" , parent= \"Torso\" , child = \"RightLegRot\" , type = \"revolute\", position = [0.5, 0, 1.], jointAxis = \"1 0 0\")\n pyrosim.Send_Joint( name = \"RightLegRot_RightLeg\" , parent= \"RightLegRot\" , child = \"RightLeg\" , type = \"revolute\" if c.num_motor_neurons > 9 else \"fixed\", position = [0,0,0], jointAxis = \"0 1 0\" )\n pyrosim.Send_Cube(name=\"FrontLowerLeg\", pos=[0.0, 0.0, -.5], size=[.2, .2, 1.], mass=1.0)\n pyrosim.Send_Joint( name = \"FrontLeg_FrontLowerLeg\" , parent= \"FrontLeg\" , child = \"FrontLowerLeg\" , type = \"revolute\", position = [0,1,0], jointAxis = \"1 0 0\")\n pyrosim.Send_Cube(name=\"BackLowerLeg\", pos=[0.0, 0.0, -.5], size=[.2, .2, 1.], mass=1.0)\n pyrosim.Send_Joint( name = \"BackLeg_BackLowerLeg\" , parent= \"BackLeg\" , child = \"BackLowerLeg\" , type = \"revolute\", position = [0,-1,0], jointAxis = \"1 0 0\")\n pyrosim.Send_Cube(name=\"LeftLowerLeg\", pos=[0.0, 0.0, -.5], size=[.2, .2, 1.], mass=1.0)\n pyrosim.Send_Joint( name = \"LeftLeg_LeftLowerLeg\" , parent= \"LeftLeg\" , child = \"LeftLowerLeg\" , type = \"revolute\", position = [-1,0,0], jointAxis = \"0 1 0\")\n pyrosim.Send_Cube(name=\"RightLowerLeg\", pos=[0.0, 0.0, -.5], size=[.2, .2, 1.], mass=1.0)\n pyrosim.Send_Joint( name = \"RightLeg_RightLowerLeg\" , parent= \"RightLeg\" , child = \"RightLowerLeg\" , type = \"revolute\", position = [1,0,0], jointAxis = \"0 1 0\")\n pyrosim.End()\n\ndef generate_brain(id, node_genome, connection_genome):\n pyrosim.Start_NeuralNetwork(f\"brain{id}.nndf\")\n\n # Neurons:\n # -Input\n n = 0\n pyrosim.Send_Touch_Sensor_Neuron(name = n , linkName = \"FrontLowerLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Touch_Sensor_Neuron(name = n , linkName = \"BackLowerLeg\", 
activation=node_genome[n].fn); n+=1\n pyrosim.Send_Touch_Sensor_Neuron(name = n , linkName = \"LeftLowerLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Touch_Sensor_Neuron(name = n , linkName = \"RightLowerLeg\", activation=node_genome[n].fn); n+=1\n \n bodyID = 101 if c.use_obstacles else 1\n\n if (c.use_cpg and c.num_sensor_neurons > 5) or ( not c.use_cpg and c.num_sensor_neurons > 4):\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"BackLegRot_BackLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"FrontLegRot_FrontLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"LeftLegRot_LeftLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"LeftLegRot_LeftLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n\n if (c.use_cpg and c.num_sensor_neurons > 9) or ( not c.use_cpg and c.num_sensor_neurons > 8):\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"Torso_BackLegRot\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"Torso_FrontLegRot\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"Torso_LeftLegRot\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"Torso_RightLegRot\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n\n if (c.use_cpg and c.num_sensor_neurons > 13) or ( not c.use_cpg and c.num_sensor_neurons > 12):\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"BackLeg_BackLowerLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"FrontLeg_FrontLowerLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"LeftLeg_LeftLowerLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n pyrosim.Send_Rotation_Sensor_Neuron(name = n , jointName = \"RightLeg_RightLowerLeg\", bodyID=bodyID, activation=node_genome[n].fn); n+=1\n if (c.use_cpg and c.num_sensor_neurons > 17) or ( not c.use_cpg and c.num_sensor_neurons > 16):\n pyrosim.Send_Base_Velocity_Sensor_Neuron(name = n , bodyID=bodyID, activation=node_genome[n].fn); n+=1\n\n\n if c.use_cpg:\n pyrosim.Send_CPG(name = n, activation=node_genome[n].fn ); n+=1\n\n # -Hidden\n for neuron in node_genome:\n if neuron.type == 2: # Hidden\n pyrosim.Send_Hidden_Neuron(name = neuron.id, activation=neuron.fn)\n \n # -Output\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"Torso_BackLegRot\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"Torso_FrontLegRot\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"Torso_LeftLegRot\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"Torso_RightLegRot\", activation=node_genome[n].fn); n+=1\n \n if (c.use_cpg and c.num_motor_neurons > 5) or (not c.use_cpg and c.num_motor_neurons > 4):\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"BackLeg_BackLowerLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"FrontLeg_FrontLowerLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"LeftLeg_LeftLowerLeg\", 
activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"RightLeg_RightLowerLeg\", activation=node_genome[n].fn); n+=1\n if (c.use_cpg and c.num_motor_neurons > 9) or (not c.use_cpg and c.num_motor_neurons > 8):\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"BackLegRot_BackLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"FrontLegRot_FrontLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"LeftLegRot_LeftLeg\", activation=node_genome[n].fn); n+=1\n pyrosim.Send_Motor_Neuron( name = n , jointName = \"RightLegRot_RightLeg\", activation=node_genome[n].fn); n+=1\n\n\n # Synapses:\n # fully connected:\n for synapse in connection_genome:\n if synapse.enabled:\n pyrosim.Send_Synapse(sourceNeuronName = synapse.fromNode.id, targetNeuronName = synapse.toNode.id, weight = synapse.weight)\n\n pyrosim.End()\n \n while not os.path.exists(f\"brain{id}.nndf\"):\n time.sleep(0.01)\n \n if False:\n num = len([n for n in os.listdir('tmp') if os.path.isfile(n)])\n os.system(f\"copy brain{id}.nndf tmp\\\\{id}.nndf\")\n visualize_network( sample=True, sample_point=[0.1, -0.1, .25, -.25], use_radial_distance=False, save_name=f\"tmp/{id}_{num}.png\", show_weights=False)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.arange", "numpy.isclose", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "numpy.mean", "matplotlib.pyplot.subplots_adjust", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlessandroRigoli/project_vg
[ "cb1323bee60cdb4108fe0aab68791321c7974832" ]
[ "train.py" ]
[ "\nimport math\nimport torch\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport multiprocessing\nfrom os.path import join\nfrom datetime import datetime\nfrom torch.utils.data.dataloader import DataLoader\nfrom os.path import exists\nimport h5py\ntorch.backends.cudnn.benchmark= True # Provides a speedup\n\nimport util\nimport test\nimport parser\nimport commons\nimport network\nimport datasets_ws\n\n#### Initial setup: parser, logging...\nargs = parser.parse_arguments()\nstart_time = datetime.now()\nargs.output_folder = join(\"runs\", args.exp_name, start_time.strftime('%Y-%m-%d_%H-%M-%S'))\ncommons.setup_logging(args.output_folder)\ncommons.make_deterministic(args.seed)\nlogging.info(f\"Arguments: {args}\")\nlogging.info(f\"The outputs are being saved in {args.output_folder}\")\nlogging.info(f\"Using {torch.cuda.device_count()} GPUs and {multiprocessing.cpu_count()} CPUs\")\n\n#### Creation of Datasets\nlogging.debug(f\"Loading dataset Pitts30k from folder {args.datasets_folder}\")\n\ntriplets_ds = datasets_ws.TripletsDataset(args, args.datasets_folder, \"pitts30k\", \"train\", args.negs_num_per_query)\nlogging.info(f\"Train query set: {triplets_ds}\")\n\nval_ds = datasets_ws.BaseDataset(args, args.datasets_folder, \"pitts30k\", \"val\")\nlogging.info(f\"Val set: {val_ds}\")\n\ntest_ds = datasets_ws.BaseDataset(args, args.datasets_folder, \"pitts30k\", \"test\")\nlogging.info(f\"Test set: {test_ds}\")\n\n#### Initialize model\nmodel = network.GeoLocalizationNet(args)\nmodel = model.to(args.device)\n\n#### Setup Optimizer and Loss\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\ncriterion_triplet = nn.TripletMarginLoss(margin=args.margin, p=2, reduction=\"sum\")\n\nbest_r5 = 0\nnot_improved_num = 0\n\nlogging.info(f\"Output dimension of the model is {args.features_dim}\")\n\n#call __init__params if the choosen layer is netVlad\n#run cluster.py before train.py\nif args.type == 'NetVLAD':\n net_vlad = model.aggregation[1]\n #todo: make it dataset independent \n initcache = join(args.datasets_folder, 'centroids', \"pitts30k\" + '_' + str(args.num_clusters) + '_desc_cen.hdf5')\n\n if not exists(initcache):\n raise FileNotFoundError('Could not find clusters, please run cluster.py before proceed')\n exit(-1)\n\n with h5py.File(initcache, mode='r') as h5: \n clsts = h5.get(\"centroids\")[...]\n traindescs = h5.get(\"descriptors\")[...]\n net_vlad.init_params(clsts, traindescs) \n del clsts, traindescs\n model = model.to(args.device)\n\n#resume checkpoint\n\n#### Training loop\nfor epoch_num in range(args.epochs_num):\n logging.info(f\"Start training epoch: {epoch_num:02d}\")\n \n epoch_start_time = datetime.now()\n epoch_losses = np.zeros((0,1), dtype=np.float32)\n \n # How many loops should an epoch last (default is 5000/1000=5)\n loops_num = math.ceil(args.queries_per_epoch / args.cache_refresh_rate)\n for loop_num in range(loops_num):\n logging.debug(f\"Cache: {loop_num} / {loops_num}\")\n \n # Compute triplets to use in the triplet loss\n triplets_ds.is_inference = True\n triplets_ds.compute_triplets(args, model)\n triplets_ds.is_inference = False\n \n triplets_dl = DataLoader(dataset=triplets_ds, num_workers=args.num_workers,\n batch_size=args.train_batch_size,\n collate_fn=datasets_ws.collate_fn,\n pin_memory=(args.device==\"cuda\"),\n drop_last=True)\n \n model = model.train()\n \n # images shape: (train_batch_size*12)*3*H*W ; by default train_batch_size=4, H=480, W=640\n # triplets_local_indexes shape: (train_batch_size*10)*3 ; 
because 10 triplets per query\n for images, triplets_local_indexes, _ in tqdm(triplets_dl, ncols=100):\n \n # Compute features of all images (images contains queries, positives and negatives)\n features = model(images.to(args.device))\n loss_triplet = 0\n \n triplets_local_indexes = torch.transpose(\n triplets_local_indexes.view(args.train_batch_size, args.negs_num_per_query, 3), 1, 0)\n for triplets in triplets_local_indexes:\n queries_indexes, positives_indexes, negatives_indexes = triplets.T\n loss_triplet += criterion_triplet(features[queries_indexes],\n features[positives_indexes],\n features[negatives_indexes])\n del features\n loss_triplet /= (args.train_batch_size * args.negs_num_per_query)\n \n optimizer.zero_grad()\n loss_triplet.backward()\n optimizer.step()\n \n # Keep track of all losses by appending them to epoch_losses\n batch_loss = loss_triplet.item()\n epoch_losses = np.append(epoch_losses, batch_loss)\n del loss_triplet\n \n logging.debug(f\"Epoch[{epoch_num:02d}]({loop_num}/{loops_num}): \" +\n f\"current batch triplet loss = {batch_loss:.4f}, \" +\n f\"average epoch triplet loss = {epoch_losses.mean():.4f}\")\n \n logging.info(f\"Finished epoch {epoch_num:02d} in {str(datetime.now() - epoch_start_time)[:-7]}, \"\n f\"average epoch triplet loss = {epoch_losses.mean():.4f}\")\n \n # Compute recalls on validation set\n recalls, recalls_str = test.test(args, val_ds, model)\n logging.info(f\"Recalls on val set {val_ds}: {recalls_str}\")\n \n is_best = recalls[1] > best_r5\n \n # Save checkpoint, which contains all training parameters\n util.save_checkpoint(args, {\"epoch_num\": epoch_num, \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(), \"recalls\": recalls, \"best_r5\": best_r5,\n \"not_improved_num\": not_improved_num\n }, is_best, filename=\"last_model.pth\")\n \n # If recall@5 did not improve for \"many\" epochs, stop training\n if is_best:\n logging.info(f\"Improved: previous best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}\")\n best_r5 = recalls[1]\n not_improved_num = 0\n else:\n not_improved_num += 1\n logging.info(f\"Not improved: {not_improved_num} / {args.patience}: best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}\")\n if not_improved_num >= args.patience:\n logging.info(f\"Performance did not improve for {not_improved_num} epochs. Stop training.\")\n break\n\n\nlogging.info(f\"Best R@5: {best_r5:.1f}\")\nlogging.info(f\"Trained for {epoch_num+1:02d} epochs, in total in {str(datetime.now() - start_time)[:-7]}\")\n\n#### Test best model on test set\nbest_model_state_dict = torch.load(join(args.output_folder, \"best_model.pth\"))[\"model_state_dict\"]\nmodel.load_state_dict(best_model_state_dict)\n\nrecalls, recalls_str = test.test(args, test_ds, model)\nlogging.info(f\"Recalls on {test_ds}: {recalls_str}\")\n" ]
[ [ "numpy.append", "torch.utils.data.dataloader.DataLoader", "torch.nn.TripletMarginLoss", "torch.cuda.device_count", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
widdowquinn/Notebooks-Bioinformatics
[ "f1d6f89eb629bc30a3e712139a1187e58df47119" ]
[ "helpers/baserate.py" ]
[ "# baserate.py\n#\n# Helper code for the baserate_fallacy.ipynb notebook in the\n# Notebooks-Bioinformatics repo\n\nimport matplotlib.pylab as pylab\n\n\ndef p_correct_given_pos(sens, fpr, b):\n \"\"\"Returns a simple Bayesian probability for the probability\n that a prediction is correct, given that the prediction\n was positive, for the prevailing sensitivity (sens),\n false positive rate (fpr) and base rate of positive \n examples.\n \"\"\"\n assert 0 <= sens <= 1, \"Sensitivity must be in range [0,1]\"\n assert 0 <= fpr <= 1, \"FPR must be in range [0,1]\"\n return sens * b / (sens * b + fpr * (1 - b))\n\n\ndef plot_prob_effector(sens, fpr, xmax=1, baserate=0.1):\n \"\"\"Plots a line graph of P(effector|positive test) against\n the baserate of effectors in the input set to the classifier.\n \n The baserate argument draws an annotation arrow\n indicating P(pos|+ve) at that baserate\n \"\"\"\n assert 0.1 <= xmax <= 1, \"Max x axis value must be in range [0,1]\"\n assert 0.01 <= baserate <= 1, \"Baserate annotation must be in range [0,1]\"\n baserates = pylab.arange(0, 1.05, xmax * 0.005) \n probs = [p_correct_given_pos(sens, fpr, b) for b in baserates]\n pylab.plot(baserates, probs, 'r')\n pylab.title(\"P(eff|pos) vs baserate; sens: %.2f, fpr: %.2f\" % (sens, fpr))\n pylab.ylabel(\"P(effector|positive)\")\n pylab.xlabel(\"effector baserate\")\n pylab.xlim(0, xmax)\n pylab.ylim(0, 1)\n # Add annotation arrow\n xpos, ypos = (baserate, p_correct_given_pos(sens, fpr, baserate))\n if baserate < xmax:\n if xpos > 0.7 * xmax:\n xtextpos = 0.05 * xmax\n else:\n xtextpos = xpos + (xmax-xpos)/5.\n if ypos > 0.5:\n ytextpos = ypos - 0.05\n else:\n ytextpos = ypos + 0.05\n pylab.annotate('baserate: %.2f, P(pos|+ve): %.3f' % (xpos, ypos), \n xy=(xpos, ypos), \n xytext=(xtextpos, ytextpos),\n arrowprops=dict(facecolor='black', shrink=0.05))\n else:\n pylab.text(0.05 * xmax, 0.95, 'baserate: %.2f, P(pos|+ve): %.3f' %\n (xpos, ypos))\n" ]
[ [ "matplotlib.pylab.xlim", "matplotlib.pylab.text", "matplotlib.pylab.title", "matplotlib.pylab.xlabel", "matplotlib.pylab.ylabel", "matplotlib.pylab.plot", "matplotlib.pylab.ylim", "matplotlib.pylab.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shaneding/cudf
[ "bf68778b78be93ad27ad897589b320fa73de096b" ]
[ "python/cudf/cudf/tests/test_binops.py" ]
[ "# Copyright (c) 2018-2021, NVIDIA CORPORATION.\n\nfrom __future__ import division\n\nimport decimal\nimport operator\nimport random\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.core import Series\nfrom cudf.core.index import as_index\nfrom cudf.tests import utils\nfrom cudf.utils.dtypes import (\n BOOL_TYPES,\n DATETIME_TYPES,\n FLOAT_TYPES,\n INTEGER_TYPES,\n NUMERIC_TYPES,\n TIMEDELTA_TYPES,\n)\n\nSTRING_TYPES = {\"str\"}\n\n_binops = [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _binops)\ndef test_series_binop(binop, obj_class):\n nelem = 1000\n arr1 = utils.gen_rand(\"float64\", nelem) * 10000\n # Keeping a low value because CUDA 'pow' has 2 full range error\n arr2 = utils.gen_rand(\"float64\", nelem) * 10\n\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n expect = binop(pd.Series(arr1), pd.Series(arr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n utils.assert_eq(result, expect)\n\n\[email protected](\"binop\", _binops)\ndef test_series_binop_concurrent(binop):\n def func(index):\n arr = np.random.random(100) * 10\n sr = Series(arr)\n\n result = binop(sr.astype(\"int32\"), sr)\n expect = binop(arr.astype(\"int32\"), arr)\n\n np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)\n\n from concurrent.futures import ThreadPoolExecutor\n\n indices = range(10)\n with ThreadPoolExecutor(4) as e: # four processes\n list(e.map(func, indices))\n\n\[email protected](\"use_cudf_scalar\", [False, True])\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem,binop\", list(product([1, 2, 100], _binops)))\ndef test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):\n arr = np.random.random(nelem)\n rhs = random.choice(arr).item()\n\n sr = Series(arr)\n if obj_class == \"Index\":\n sr = as_index(sr)\n\n if use_cudf_scalar:\n result = binop(sr, rhs)\n else:\n result = binop(sr, cudf.Scalar(rhs))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))\n\n\n_bitwise_binops = [operator.and_, operator.or_, operator.xor]\n\n\n_int_types = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _bitwise_binops)\[email protected](\n \"lhs_dtype,rhs_dtype\", list(product(_int_types, _int_types))\n)\ndef test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):\n arr1 = (np.random.random(100) * 100).astype(lhs_dtype)\n sr1 = Series(arr1)\n\n arr2 = (np.random.random(100) * 100).astype(rhs_dtype)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))\n\n\n_logical_binops = [\n (operator.and_, operator.and_),\n (operator.or_, operator.or_),\n (np.logical_and, cudf.logical_and),\n (np.logical_or, cudf.logical_or),\n]\n\n\[email protected](\"lhstype\", _int_types + [np.bool_])\[email protected](\"rhstype\", _int_types + [np.bool_])\[email protected](\"binop,cubinop\", _logical_binops)\ndef 
test_series_logical_binop(lhstype, rhstype, binop, cubinop):\n arr1 = pd.Series(np.random.choice([True, False], 10))\n if lhstype is not np.bool_:\n arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)\n sr1 = Series(arr1)\n\n arr2 = pd.Series(np.random.choice([True, False], 10))\n if rhstype is not np.bool_:\n arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)\n sr2 = Series(arr2)\n\n result = cubinop(sr1, sr2)\n expect = binop(arr1, arr2)\n\n utils.assert_eq(result, expect)\n\n\n_cmpops = [\n operator.lt,\n operator.gt,\n operator.le,\n operator.ge,\n operator.eq,\n operator.ne,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"dtype\", [\"int8\", \"int32\", \"int64\", \"float32\", \"float64\", \"datetime64[ms]\"]\n)\ndef test_series_compare(cmpop, obj_class, dtype):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n arr2 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result1 = cmpop(sr1, sr1)\n result2 = cmpop(sr2, sr2)\n result3 = cmpop(sr1, sr2)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n result3 = Series(result3)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))\n np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))\n np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))\n\n\ndef _series_compare_nulls_typegen():\n tests = []\n tests += list(product(DATETIME_TYPES, DATETIME_TYPES))\n tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))\n tests += list(product(STRING_TYPES, STRING_TYPES))\n\n return tests\n\n\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtypes\", _series_compare_nulls_typegen())\ndef test_series_compare_nulls(cmpop, dtypes):\n ltype, rtype = dtypes\n\n ldata = [1, 2, None, None, 5]\n rdata = [2, 1, None, 4, None]\n\n lser = Series(ldata, dtype=ltype)\n rser = Series(rdata, dtype=rtype)\n\n lmask = ~lser.isnull()\n rmask = ~rser.isnull()\n\n expect_mask = np.logical_and(lmask, rmask)\n expect = cudf.Series([None] * 5, dtype=\"bool\")\n expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])\n\n got = cmpop(lser, rser)\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"obj\", [pd.Series([\"a\", \"b\", None, \"d\", \"e\", None], dtype=\"string\"), \"a\"]\n)\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"cmp_obj\",\n [pd.Series([\"b\", \"a\", None, \"d\", \"f\", None], dtype=\"string\"), \"a\"],\n)\ndef test_string_series_compare(obj, cmpop, cmp_obj):\n\n g_obj = obj\n if isinstance(g_obj, pd.Series):\n g_obj = Series.from_pandas(g_obj)\n g_cmp_obj = cmp_obj\n if isinstance(g_cmp_obj, pd.Series):\n g_cmp_obj = Series.from_pandas(g_cmp_obj)\n got = cmpop(g_obj, g_cmp_obj)\n expected = cmpop(obj, cmp_obj)\n\n if isinstance(expected, pd.Series):\n expected = cudf.from_pandas(expected)\n\n utils.assert_eq(expected, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem\", [1, 2, 100])\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtype\", utils.NUMERIC_TYPES + [\"datetime64[ms]\"])\[email protected](\"use_cudf_scalar\", [True, False])\ndef test_series_compare_scalar(\n nelem, cmpop, obj_class, dtype, use_cudf_scalar\n):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n rhs = random.choice(arr1).item()\n\n if 
use_cudf_scalar:\n rhs = cudf.Scalar(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n\n result1 = cmpop(sr1, rhs)\n result2 = cmpop(rhs, sr1)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))\n np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))\n\n\n_nulls = [\"none\", \"some\"]\n\n\[email protected](\"nelem\", [1, 7, 8, 9, 32, 64, 128])\[email protected](\"lhs_nulls,rhs_nulls\", list(product(_nulls, _nulls)))\ndef test_validity_add(nelem, lhs_nulls, rhs_nulls):\n np.random.seed(0)\n # LHS\n lhs_data = np.random.random(nelem)\n if lhs_nulls == \"some\":\n lhs_mask = utils.random_bitmask(nelem)\n lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]\n lhs_null_count = utils.count_zero(lhs_bitmask)\n assert lhs_null_count >= 0\n lhs = Series.from_masked_array(lhs_data, lhs_mask)\n assert lhs.null_count == lhs_null_count\n else:\n lhs = Series(lhs_data)\n # RHS\n rhs_data = np.random.random(nelem)\n if rhs_nulls == \"some\":\n rhs_mask = utils.random_bitmask(nelem)\n rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]\n rhs_null_count = utils.count_zero(rhs_bitmask)\n assert rhs_null_count >= 0\n rhs = Series.from_masked_array(rhs_data, rhs_mask)\n assert rhs.null_count == rhs_null_count\n else:\n rhs = Series(rhs_data)\n # Result\n res = lhs + rhs\n if lhs_nulls == \"some\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"some\" and rhs_nulls == \"none\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"none\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_\n )[:nelem]\n # Fill NA values\n na_value = -10000\n got = res.fillna(na_value).to_array()\n expect = lhs_data + rhs_data\n if lhs_nulls == \"some\" or rhs_nulls == \"some\":\n expect[~res_mask] = na_value\n\n np.testing.assert_array_equal(expect, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"binop,lhs_dtype,rhs_dtype\",\n list(\n product(\n [operator.add, operator.mul],\n utils.NUMERIC_TYPES,\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 10\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"cmpop,lhs_dtype,rhs_dtype\",\n list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),\n)\ndef test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 5\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = cmpop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))\n\n\n_reflected_ops = [\n lambda x: 1 + x,\n lambda x: 2 
* x,\n lambda x: 2 - x,\n lambda x: 2 // x,\n lambda x: 2 / x,\n lambda x: 3 + x,\n lambda x: 3 * x,\n lambda x: 3 - x,\n lambda x: 3 // x,\n lambda x: 3 / x,\n lambda x: 3 % x,\n lambda x: -1 + x,\n lambda x: -2 * x,\n lambda x: -2 - x,\n lambda x: -2 // x,\n lambda x: -2 / x,\n lambda x: -3 + x,\n lambda x: -3 * x,\n lambda x: -3 - x,\n lambda x: -3 // x,\n lambda x: -3 / x,\n lambda x: -3 % x,\n lambda x: 0 + x,\n lambda x: 0 * x,\n lambda x: 0 - x,\n lambda x: 0 // x,\n lambda x: 0 / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"func, dtype\", list(product(_reflected_ops, utils.NUMERIC_TYPES))\n)\ndef test_reflected_ops_scalar(func, dtype, obj_class):\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\n_cudf_scalar_reflected_ops = [\n lambda x: cudf.Scalar(1) + x,\n lambda x: cudf.Scalar(2) * x,\n lambda x: cudf.Scalar(2) - x,\n lambda x: cudf.Scalar(2) // x,\n lambda x: cudf.Scalar(2) / x,\n lambda x: cudf.Scalar(3) + x,\n lambda x: cudf.Scalar(3) * x,\n lambda x: cudf.Scalar(3) - x,\n lambda x: cudf.Scalar(3) // x,\n lambda x: cudf.Scalar(3) / x,\n lambda x: cudf.Scalar(3) % x,\n lambda x: cudf.Scalar(-1) + x,\n lambda x: cudf.Scalar(-2) * x,\n lambda x: cudf.Scalar(-2) - x,\n lambda x: cudf.Scalar(-2) // x,\n lambda x: cudf.Scalar(-2) / x,\n lambda x: cudf.Scalar(-3) + x,\n lambda x: cudf.Scalar(-3) * x,\n lambda x: cudf.Scalar(-3) - x,\n lambda x: cudf.Scalar(-3) // x,\n lambda x: cudf.Scalar(-3) / x,\n lambda x: cudf.Scalar(-3) % x,\n lambda x: cudf.Scalar(0) + x,\n lambda x: cudf.Scalar(0) * x,\n lambda x: cudf.Scalar(0) - x,\n lambda x: cudf.Scalar(0) // x,\n lambda x: cudf.Scalar(0) / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"funcs, dtype\",\n list(\n product(\n list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):\n cpu_func, gpu_func = funcs\n\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = gpu_func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = cpu_func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Empty frame on the right side\n pd_frame = binop(pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({}))\n cd_frame = binop(cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({}))\n utils.assert_eq(cd_frame, pd_frame)\n\n # Empty frame on the left side\n pd_frame = pd.DataFrame({}) + pd.DataFrame({\"x\": [1, 2]})\n cd_frame = cudf.DataFrame({}) + cudf.DataFrame({\"x\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # Note: the below rely on a discrepancy between cudf and pandas\n # While pandas inserts columns in alphabetical order, cudf inserts in the\n # order of whichever column comes first. So the following code will not\n # work if the names of columns are reversed i.e. ('y', 'x') != ('x', 'y')\n\n # More rows on the left side\n pd_frame = pd.DataFrame({\"x\": [1, 2, 3]}) + pd.DataFrame({\"y\": [1, 2]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2, 3]}) + cudf.DataFrame({\"y\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # More rows on the right side\n pd_frame = pd.DataFrame({\"x\": [1, 2]}) + pd.DataFrame({\"y\": [1, 2, 3]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2]}) + cudf.DataFrame({\"y\": [1, 2, 3]})\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_same_columns(binop):\n\n # TODO: support `pow()` on NaN values. Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n pd_frame = binop(\n pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({\"x\": [1, 2, 3]})\n )\n cd_frame = binop(\n cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({\"x\": [1, 2, 3]})\n )\n # cast x as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns_with_unaligned_indices(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Test with a RangeIndex\n pdf1 = pd.DataFrame({\"x\": [4, 3, 2, 1], \"y\": [7, 3, 8, 6]})\n # Test with a GenericIndex\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 7], \"y\": [4, 5, 6, 7]}, index=[0, 1, 3, 4]\n )\n # Test with a GenericIndex in a different order\n pdf3 = pd.DataFrame(\n {\"x\": [4, 5, 6, 7], \"y\": [1, 2, 3, 7], \"z\": [0, 5, 3, 7]},\n index=[0, 3, 5, 3],\n )\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2)\n gdf3 = cudf.DataFrame.from_pandas(pdf3)\n\n pd_frame = binop(binop(pdf1, pdf2), pdf3)\n cd_frame = binop(binop(gdf1, gdf2), gdf3)\n # cast x and y as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n cd_frame[\"y\"] = cd_frame[\"y\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\n \"df2\",\n [\n cudf.DataFrame({\"a\": [3, 2, 1]}, index=[3, 2, 1]),\n cudf.DataFrame([3, 2]),\n ],\n)\[email protected](\"binop\", [operator.eq, operator.ne])\ndef test_df_different_index_shape(df2, binop):\n df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])\n\n pdf1 = df1.to_pandas()\n pdf2 = df2.to_pandas()\n\n utils.assert_exceptions_equal(\n lfunc=binop,\n rfunc=binop,\n lfunc_args_and_kwargs=([pdf1, pdf2],),\n rfunc_args_and_kwargs=([df1, df2],),\n )\n\n\[email protected](\"op\", [operator.eq, operator.ne])\ndef test_boolean_scalar_binop(op):\n psr = pd.Series(np.random.choice([True, False], 10))\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(op(psr, True), op(gsr, True))\n utils.assert_eq(op(psr, False), op(gsr, False))\n\n # cuDF scalar\n utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))\n utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))\n\n\n_operators_arithmetic = [\n \"add\",\n \"radd\",\n \"sub\",\n \"rsub\",\n \"mul\",\n \"rmul\",\n \"mod\",\n \"rmod\",\n \"pow\",\n \"rpow\",\n \"floordiv\",\n \"rfloordiv\",\n \"truediv\",\n \"rtruediv\",\n]\n\n_operators_comparison = [\"eq\", \"ne\", \"lt\", \"le\", \"gt\", \"ge\"]\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series(dtype, func, has_nulls, fill_value):\n count = 1000\n gdf_series_a = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n gdf_series_b = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=100\n )\n pdf_series_a = gdf_series_a.to_pandas()\n pdf_series_b = gdf_series_b.to_pandas()\n\n gdf_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_result, gdf_result)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar(\n dtype, func, has_nulls, fill_value, use_cudf_scalar\n):\n count = 1000\n scalar = 59\n gdf_series = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas()\n\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n 
pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_series_result, gdf_series_result)\n\n\n_permu_values = [0, 1, None, np.nan]\n\n\[email protected](\"fill_value\", _permu_values)\[email protected](\"scalar_a\", _permu_values)\[email protected](\"scalar_b\", _permu_values)\[email protected](\"func\", _operators_comparison)\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series_logical(\n dtype, func, scalar_a, scalar_b, fill_value\n):\n\n gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)\n gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)\n\n pdf_series_a = gdf_series_a.to_pandas(nullable=True)\n pdf_series_b = gdf_series_b.to_pandas(nullable=True)\n\n gdf_series_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_series_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n # If fill_value is np.nan, things break down a bit,\n # because setting a NaN into a pandas nullable float\n # array still gets transformed to <NA>. As such,\n # pd_series_with_nulls.fillna(np.nan) has no effect.\n if (\n (pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())\n and np.isscalar(fill_value)\n and np.isnan(fill_value)\n ):\n with pytest.raises(AssertionError):\n utils.assert_eq(expect, got)\n return\n utils.assert_eq(expect, got)\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"func\", _operators_comparison)\[email protected](\"has_nulls\", [True, False])\[email protected](\"scalar\", [-59.0, np.nan, 0, 59.0])\[email protected](\"fill_value\", [None, True, False, 1.0])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar_logical(\n dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar\n):\n gdf_series = utils.gen_rand_series(\n dtype, 1000, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas(nullable=True)\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"nulls\", _nulls)\[email protected](\"fill_value\", [None, 27])\[email protected](\"other\", [\"df\", \"scalar\"])\ndef test_operator_func_dataframe(func, nulls, fill_value, other):\n num_rows = 100\n num_cols = 3\n\n def gen_df():\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n\n cols = np.random.choice(num_cols + 5, num_cols, replace=False)\n\n for i in range(num_cols):\n colname = ascii_lowercase[cols[i]]\n data = utils.gen_rand(\"float64\", num_rows) * 10000\n if nulls == \"some\":\n idx = np.random.choice(\n num_rows, size=int(num_rows / 2), replace=False\n )\n data[idx] = np.nan\n pdf[colname] = data\n return pdf\n\n pdf1 = gen_df()\n pdf2 = gen_df() if other == \"df\" else 59.0\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == \"df\" else 59.0\n\n got = getattr(gdf1, func)(gdf2, fill_value=fill_value)\n expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic + 
_operators_comparison)\[email protected](\"rhs\", [0, 1, 2, 128])\ndef test_binop_bool_uint(func, rhs):\n # TODO: remove this once issue #2172 is resolved\n if func == \"rmod\" or func == \"rfloordiv\":\n return\n psr = pd.Series([True, False, False])\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(\n getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False\n )\n\n\ndef test_series_misc_binop():\n pds = pd.Series([1, 2, 4], name=\"abc xyz\")\n gds = cudf.Series([1, 2, 4], name=\"abc xyz\")\n\n utils.assert_eq(pds + 1, gds + 1)\n utils.assert_eq(1 + pds, 1 + gds)\n\n utils.assert_eq(pds + pds, gds + gds)\n\n pds1 = pd.Series([1, 2, 4], name=\"hello world\")\n gds1 = cudf.Series([1, 2, 4], name=\"hello world\")\n\n utils.assert_eq(pds + pds1, gds + gds1)\n utils.assert_eq(pds1 + pds, gds1 + gds)\n\n utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)\n\n\ndef test_int8_float16_binop():\n a = cudf.Series([1], dtype=\"int8\")\n b = np.float16(2)\n expect = cudf.Series([0.5])\n got = a / b\n utils.assert_eq(expect, got, check_dtype=False)\n\n\[email protected](\"dtype\", [\"int64\", \"float64\", \"str\"])\ndef test_vector_to_none_binops(dtype):\n data = Series([1, 2, 3, None], dtype=dtype)\n\n expect = Series([None] * 4).astype(dtype)\n got = data + None\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"lhs\",\n [\n 1,\n 3,\n 4,\n pd.Series([5, 6, 2]),\n pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),\n 6,\n ],\n)\[email protected](\"rhs\", [1, 3, 4, pd.Series([5, 6, 2])])\[email protected](\n \"ops\",\n [\n (np.remainder, cudf.remainder),\n (np.floor_divide, cudf.floor_divide),\n (np.subtract, cudf.subtract),\n (np.add, cudf.add),\n (np.true_divide, cudf.true_divide),\n (np.multiply, cudf.multiply),\n ],\n)\ndef test_ufunc_ops(lhs, rhs, ops):\n np_op, cu_op = ops\n\n if isinstance(lhs, pd.Series):\n culhs = cudf.from_pandas(lhs)\n else:\n culhs = lhs\n\n if isinstance(rhs, pd.Series):\n curhs = cudf.from_pandas(rhs)\n else:\n curhs = rhs\n\n expect = np_op(lhs, rhs)\n got = cu_op(culhs, curhs)\n if np.isscalar(expect):\n assert got == expect\n else:\n utils.assert_eq(\n expect, got,\n )\n\n\ndef dtype_scalar(val, dtype):\n if dtype == \"str\":\n return str(val)\n dtype = np.dtype(dtype)\n if dtype.type in {np.datetime64, np.timedelta64}:\n res, _ = np.datetime_data(dtype)\n return dtype.type(val, res)\n else:\n return dtype.type(val)\n\n\ndef make_valid_scalar_add_data():\n valid = set()\n\n # to any int, we may add any kind of\n # other int, float, datetime timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES,\n FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # to any float, we may add any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # to any datetime, we may add any int, timedelta, or bool\n valid |= set(\n product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # to any timedelta, we may add any int, datetime, other timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)\n )\n\n # to any bool, we may add any int, float, datetime, timedelta, or bool\n valid |= set(\n product(\n BOOL_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # to any string, we may add any other string\n valid |= {(\"str\", \"str\")}\n\n return sorted(list(valid))\n\n\ndef make_invalid_scalar_add_data():\n invalid = set()\n\n # we can not add a datetime to a float\n invalid |= 
set(product(FLOAT_TYPES, DATETIME_TYPES))\n\n # We can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n # we can not add a float to any datetime\n invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))\n\n # can can not add a datetime to a datetime\n invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))\n\n # can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_valid_scalar_add_data())\ndef test_scalar_add(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n # expect = np.add(lval_host, rval_host)\n expect = lval_host + rval_host\n got = lval_gpu + rval_gpu\n\n assert expect == got.value\n if not dtype_l == dtype_r == \"str\":\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_invalid_scalar_add_data())\ndef test_scalar_add_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu + rval_gpu\n\n\ndef make_scalar_difference_data():\n valid = set()\n\n # from an int, we may subtract any int, float, timedelta,\n # or boolean value\n valid |= set(\n product(\n INTEGER_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any float, we may subtract any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # from any datetime we may subtract any int, datetime, timedelta, or bool\n valid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any timedelta we may subtract any int, timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # from any bool we may subtract any int, float or timedelta\n valid |= set(\n product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_difference_data_invalid():\n invalid = set()\n\n # we can't subtract a datetime from an int\n invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or timedelta from a float\n invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))\n\n # we can't subtract a float from a datetime or timedelta\n invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))\n\n # We can't subtract a datetime from a timedelta\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or bool from a bool\n invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_difference_data())\ndef test_scalar_difference(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host - rval_host\n got = lval_gpu - rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", 
make_scalar_difference_data_invalid()\n)\ndef test_scalar_difference_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu - rval_gpu\n\n\ndef make_scalar_product_data():\n valid = set()\n\n # we can multiply an int, or bool by any int, float, timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # we can multiply any timedelta by any int, or bool\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))\n\n # we can multiply a float by any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_product_data_invalid():\n invalid = set()\n\n # can't multiply ints, floats, datetimes, timedeltas,\n # or bools by datetimes\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES,\n )\n )\n\n # can't multiply datetimes with anything really\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # can't multiply timedeltas by timedeltas\n invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_product_data())\ndef test_scalar_product(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host * rval_host\n got = lval_gpu * rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_product_data_invalid())\ndef test_scalar_product_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu * rval_gpu\n\n\ndef make_scalar_floordiv_data():\n valid = set()\n\n # we can divide ints and floats by other ints, floats, or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can divide timedeltas by ints, floats or other timedeltas\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n # we can divide bools by ints, floats or bools\n valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_floordiv_data_invalid():\n invalid = set()\n\n # we can't divide numeric types into datelike types\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't divide datetime types into anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't divide timedeltas into bools, or datetimes\n invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_floordiv_data())\ndef test_scalar_floordiv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value,
dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host // rval_host\n got = lval_gpu // rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\n \"dtype_l,dtype_r\", make_scalar_floordiv_data_invalid()\n)\ndef test_scalar_floordiv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu // rval_gpu\n\n\ndef make_scalar_truediv_data():\n valid = set()\n\n # we can true divide ints, floats, or bools by other\n # ints, floats or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can true divide timedeltas by ints, floats, or timedeltas\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_truediv_data_invalid():\n invalid = set()\n\n # we can't divide ints, floats or bools by datetimes\n # or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't true divide datetime types by anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't true divide timedeltas by datetimes, bools, or floats\n invalid |= set(\n product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)\n )\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_truediv_data())\ndef test_scalar_truediv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = np.true_divide(lval_host, rval_host)\n got = lval_gpu / rval_gpu\n\n assert expect == got.value\n\n # numpy bug\n\n if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:\n assert expect.dtype == \"float64\" and got.dtype == \"float32\"\n else:\n assert expect.dtype == got.dtype\n # assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_truediv_data_invalid())\ndef test_scalar_truediv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu / rval_gpu\n\n\ndef make_scalar_remainder_data():\n valid = set()\n\n # can mod numeric types with each other\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # can mod timedeltas by other timedeltas\n valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_remainder_data_invalid():\n invalid = set()\n\n # numeric types can't be modded against timedeltas\n # or datetimes.
Also, datetimes can't be modded\n # against datetimes or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetime and timedelta types cant be modded against\n # any numeric types\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # timedeltas cant mod with datetimes\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_remainder_data())\ndef test_scalar_remainder(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host % rval_host\n got = lval_gpu % rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", make_scalar_remainder_data_invalid()\n)\ndef test_scalar_remainder_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu % rval_gpu\n\n\ndef make_scalar_power_data():\n # only numeric values form valid operands for power\n return sorted(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n\ndef make_scalar_power_data_invalid():\n invalid = set()\n\n # datetimes and timedeltas cant go in exponents\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | TIMEDELTA_TYPES\n | DATETIME_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetimes and timedeltas may not be raised to\n # any exponent of any dtype\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n DATETIME_TYPES\n | TIMEDELTA_TYPES\n | INTEGER_TYPES\n | FLOAT_TYPES\n | BOOL_TYPES,\n )\n )\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data())\ndef test_scalar_power(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host ** rval_host\n got = lval_gpu ** rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data_invalid())\ndef test_scalar_power_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu ** rval_gpu\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\[email protected](\"op\", [operator.add, 
operator.sub])\ndef test_datetime_dateoffset_binaryop(\n date_col, n_periods, frequency, dtype, op\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n expect = op(psr, -poffset)\n got = op(gsr, -goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\n \"kwargs\",\n [\n {\"months\": 2, \"years\": 5},\n {\"microseconds\": 1, \"seconds\": 1},\n {\"months\": 2, \"years\": 5, \"seconds\": 923, \"microseconds\": 481},\n pytest.param(\n {\"milliseconds\": 4},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for milliseconds\"\n ),\n ),\n pytest.param(\n {\"milliseconds\": 4, \"years\": 2},\n marks=pytest.mark.xfail(\n reason=\"Pandas construction fails with these keywords\"\n ),\n ),\n pytest.param(\n {\"nanoseconds\": 12},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for nanoseconds\"\n ),\n ),\n ],\n)\[email protected](\"op\", [operator.add, operator.sub])\ndef test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):\n\n gsr = cudf.Series(date_col, dtype=\"datetime64[ns]\")\n psr = gsr.to_pandas()\n\n poffset = pd.DateOffset(**kwargs)\n goffset = cudf.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\ndef test_datetime_dateoffset_binaryop_reflected(\n date_col, n_periods, frequency, dtype\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = poffset + psr\n got = goffset + gsr\n\n utils.assert_eq(expect, got)\n\n with pytest.raises(TypeError):\n poffset - psr\n\n with pytest.raises(TypeError):\n goffset - gsr\n\n\[email protected](\"frame\", [cudf.Series, cudf.Index, cudf.DataFrame])\[email protected](\n \"dtype\", [\"int\", \"str\", \"datetime64[s]\", \"timedelta64[s]\", \"category\"]\n)\ndef test_binops_with_lhs_numpy_scalar(frame, dtype):\n data = [1, 2, 3, 4, 5]\n\n data = (\n frame({\"a\": data}, dtype=dtype)\n if isinstance(frame, cudf.DataFrame)\n else frame(data, dtype=dtype)\n )\n\n if dtype == \"datetime64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"timedelta64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"category\":\n val = np.int64(4)\n else:\n val = np.dtype(dtype).type(4)\n\n expected = val == data.to_pandas()\n got = val == data\n\n # In case of index, expected would be a numpy array\n if isinstance(data, cudf.Index):\n expected = pd.Index(expected)\n\n 
utils.assert_eq(expected, got)\n\n\[email protected](\n \"dtype\",\n [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"float32\",\n \"float64\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"op\", _operators_comparison)\ndef test_binops_with_NA_consistent(dtype, op):\n data = [1, 2, 3]\n sr = cudf.Series(data, dtype=dtype)\n\n result = getattr(sr, op)(cudf.NA)\n if dtype in NUMERIC_TYPES:\n if op == \"ne\":\n expect_all = True\n else:\n expect_all = False\n assert (result == expect_all).all()\n elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES:\n assert result._column.null_count == len(data)\n\n\ndef _decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input], dtype=dtype,\n )\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", \"3.005\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"100.1\", \"200.2\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"99.9\", \"199.8\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.mul,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"3.0\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", \"6.0\"],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", \"40.0\"],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.mul,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-3, precision=4),\n [\"0.343\", \"0.500\"],\n cudf.Decimal64Dtype(scale=3, precision=3),\n [\"343.0\", \"1000.0\"],\n cudf.Decimal64Dtype(scale=0, precision=8),\n ),\n (\n operator.add,\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", None, \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n 
operator.sub,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.mul,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", None],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1800\", \"0.2100\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"100\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.ne,\n [\"0.06\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.ne,\n [\"1.33\", \"1.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1899\", \"1.21\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.ne,\n [\"300\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"110\", \"5500\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"200\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, False],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n 
cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, True],\n bool,\n ),\n ],\n)\ndef test_binops_decimal(args):\n op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args\n\n a = _decimal_series(lhs, l_dtype)\n b = _decimal_series(rhs, r_dtype)\n expect = (\n _decimal_series(expect, expect_dtype)\n if isinstance(expect_dtype, cudf.Decimal64Dtype)\n else cudf.Series(expect, dtype=expect_dtype)\n )\n\n got = op(a, b)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.000\", \"42.001\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100\", \"40\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100\", \"42\", \"24\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"10.1\", \"88\", \"11\", None],\n cudf.Decimal64Dtype(scale=1, precision=3),\n [10, 42, 11, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.000\", \"42\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n 
[\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n ],\n)\[email protected](\"integer_dtype\", cudf.tests.utils.INTEGER_TYPES)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 decimal data setups, with scale from {==0, >0, <0}.\n Decimal precisions are sufficient to hold the digits.\n For each decimal data setup, there is at least one row that lead to one\n of the following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = cudf.Series(rdata, dtype=integer_dtype)\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, 
precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"98\", \"198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"96\", \"196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"-98\", \"-198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n 
[\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"-96\", \"-196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n ],\n)\ndef test_binops_decimal_scalar(args):\n op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args\n\n def decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input],\n dtype=dtype,\n )\n\n lhs = decimal_series(lhs, l_dtype)\n expect = decimal_series(expect, expect_dtype)\n\n if reflect:\n lhs, rhs = rhs, lhs\n\n got = op(lhs, rhs)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n 100,\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, None], dtype=bool),\n cudf.Series([False, True, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.123\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, None], dtype=bool),\n cudf.Series([False, True, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, True, None], dtype=bool),\n cudf.Series([False, True, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n 
cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n ],\n)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_scalar_compare(args, reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 data setups: pyints, Decimal, and\n decimal cudf.Scalar\n For each data setup, there is at least one row that lead to one of the\n following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = rdata\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"dtype\",\n [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"str\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"null_scalar\", [None, cudf.NA, np.datetime64(\"NaT\")])\[email protected](\"cmpop\", _cmpops)\ndef test_column_null_scalar_comparison(dtype, null_scalar, cmpop):\n # This test is meant to validate that comparing\n # a series of any dtype with a null scalar produces\n # a new series where all the elements are <NA>.\n\n if isinstance(null_scalar, np.datetime64):\n if np.dtype(dtype).kind not in \"mM\":\n pytest.skip()\n null_scalar = null_scalar.astype(dtype)\n\n dtype = np.dtype(dtype)\n\n data = [1, 2, 3, 4, 5]\n sr = cudf.Series(data, dtype=dtype)\n result = cmpop(sr, null_scalar)\n\n assert 
result.isnull().all()\n\n\[email protected](\"fn\", [\"eq\", \"ne\", \"lt\", \"gt\", \"le\", \"ge\"])\ndef test_equality_ops_index_mismatch(fn):\n a = cudf.Series(\n [1, 2, 3, None, None, 4], index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n b = cudf.Series(\n [-5, 4, 3, 2, 1, 0, 19, 11],\n index=[\"aa\", \"b\", \"c\", \"d\", \"e\", \"f\", \"y\", \"z\"],\n )\n\n pa = a.to_pandas(nullable=True)\n pb = b.to_pandas(nullable=True)\n expected = getattr(pa, fn)(pb)\n actual = getattr(a, fn)(b).to_pandas(nullable=True)\n\n utils.assert_eq(expected, actual)\n\n\ndef generate_test_null_equals_columnops_data():\n # Generate tuples of:\n # (left_data, right_data, compare_bool\n # where compare_bool is the correct answer to\n # if the columns should compare as null equals\n\n def set_null_cases(column_l, column_r, case):\n if case == \"neither\":\n return column_l, column_r\n elif case == \"left\":\n column_l[1] = None\n elif case == \"right\":\n column_r[1] = None\n elif case == \"both\":\n column_l[1] = None\n column_r[1] = None\n else:\n raise ValueError(\"Unknown null case\")\n return column_l, column_r\n\n null_cases = [\"neither\", \"left\", \"right\", \"both\"]\n data = [1, 2, 3]\n\n results = []\n # TODO: Numeric types can be cross compared as null equal\n for dtype in (\n list(NUMERIC_TYPES)\n + list(DATETIME_TYPES)\n + list(TIMEDELTA_TYPES)\n + list(STRING_TYPES)\n + [\"category\"]\n ):\n for case in null_cases:\n left = cudf.Series(data, dtype=dtype)\n right = cudf.Series(data, dtype=dtype)\n if case in {\"left\", \"right\"}:\n answer = False\n else:\n answer = True\n left, right = set_null_cases(left, right, case)\n results.append((left._column, right._column, answer, case))\n\n return results\n\n\[email protected](\n \"lcol,rcol,ans,case\", generate_test_null_equals_columnops_data()\n)\ndef test_null_equals_columnops(lcol, rcol, ans, case):\n assert lcol._null_equals(rcol).all() == ans\n" ]
[ [ "numpy.true_divide", "numpy.datetime_data", "numpy.random.random", "pandas.Series", "numpy.random.seed", "pandas.DateOffset", "numpy.random.choice", "numpy.isnan", "numpy.float16", "pandas.Index", "pandas.DataFrame", "numpy.dtype", "numpy.testing.assert_array_equal", "numpy.datetime64", "numpy.int64", "numpy.isscalar", "numpy.logical_and", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
CooKey-Monster/spiral
[ "88c81334d684d713fab626b524af0a47075f80c5" ]
[ "nineturn/dtdg/types.py" ]
[ "# Copyright 2022 The Nine Turn Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"common types for the dtdg package.\n\nThis file define the types required for dtdg package\n\"\"\"\nimport copy\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nimport dgl\nimport numpy as np\nfrom dgl import DGLGraph\nfrom dgl import backend as F\nfrom numpy import ndarray\n\nfrom nineturn.core import commonF\nfrom nineturn.core.errors import DimensionError, ValueNotSortedError\nfrom nineturn.core.logger import get_logger\nfrom nineturn.core.utils import get_anchor_position, is_sorted\n\nTIME_D = 0 # position of time in nodes and edges table\nSOURCE = 1 # position of source in edges table\nDESTINATION = 2 # position of destination in edges_table\nFEAT = 'h' # key for edge and node feature in DGLGraph.edata and ndata.\nID_TYPE = 'int64'\nFEATURE_TYPE = \"float32\"\nlogger = get_logger()\n\n\nclass Snapshot:\n \"\"\"A snapshot of a dynamic graph.\n\n The snapshot is usually a tuple (V,X,E,t) where X is the node feature table,\n V is the adjacency matrix, E is the edge feature table with the first entry the source id and the second entry the\n destination id. And edge e in E could have more than two entry. The extra ones would be edge features.\n t is the timestamp when the snapshot was taken. 
For DTDG, this is usually an integer representing the positional\n ordering.\n In this implementation, the graph state (V,X,E) is implemented by a DGLGraph.\n When designing this class, our primary goal is to support the loading of dynamic graph data\n in 'https://snap.stanford.edu/data/',\n \"\"\"\n\n def __init__(self, observation: DGLGraph, t: int):\n \"\"\"A snapshot of a DTDG composed by an instance of DGLGraph as observation and an integer as timestamp.\"\"\"\n self.observation = observation\n self.t = commonF.to_tensor(np.array([t]))\n\n def num_node_features(self) -> int:\n \"\"\"Return the number of node features.\"\"\"\n return self.observation.ndata[FEAT].shape[1]\n\n def num_edge_features(self) -> int:\n \"\"\"Return the number of edge features.\"\"\"\n return self.observation.edata[FEAT].shape[1]\n\n def node_feature(self):\n \"\"\"Return the node features tensor.\"\"\"\n return self.observation.ndata[FEAT]\n\n def edge_feature(self):\n \"\"\"Return the edge feature tensor.\"\"\"\n return self.observation.edata[FEAT]\n\n def num_nodes(self) -> int:\n \"\"\"Return the number of nodes in the snapshot.\"\"\"\n return self.observation.ndata[FEAT].shape[0]\n\n @property\n def device(self):\n \"\"\"Which device it is currently at.\"\"\"\n return self.observation.device\n\n def to(self, device, **kwargs): # pylint: disable_invalide_name\n \"\"\"Move the snapshot to the targeted device (cpu/gpu).\n\n If the graph is already on the specified device, the function directly returns it.\n Otherwise, it returns a cloned graph on the specified device.\n\n Args:\n device : Framework-specific device context object\n The context to move data to (e.g., ``torch.device``).\n kwargs : Key-word arguments.\n Key-word arguments fed to the framework copy function.\n \"\"\"\n if device is None or self.device == device:\n return self\n\n ret = copy.copy(self)\n ret.observation = self.observation.to(device, **kwargs)\n ret.t = F.copy_to(self.t, device, **kwargs)\n return ret\n\n\nclass BatchedSnapshot:\n \"\"\"Mainly to support mini batch training with dgl.\"\"\"\n\n def __init__(self, observation: List[DGLGraph], feature, t: int):\n \"\"\"A snapshot of a DTDG composed by an instance of DGLGraph as observation and an integer as timestamp.\"\"\"\n self.observation = observation\n self.t = t\n self.feature = feature\n\n def num_blocks(self) -> int:\n \"\"\"Return the number of DGLBlocks in the BatchedSnapshot.\"\"\"\n return len(self.observation)\n\n\nclass DiscreteGraph(ABC):\n \"\"\"This is an abstract class for Discrete Time Dynamic Graph collection.\n\n In implementation, we found that there could be four different kinds of DTDG, V-E invariant, V invariant,\n E invariant, and V_E variant DTDG. Even though they all can be expressed as collection of timestamped snapshots.\n However, it is memory inefficient to do so. 
Therefore, we need to have different methods to store different\n type of DTDG, and consequently, different type of DTDG requires different data dispatcher.\n As a subclass of DiscreteGraph, it needs to implement its own data dispatcher to generate snapshots in runtime.\n \"\"\"\n\n @abstractmethod\n def __len__(self) -> int:\n \"\"\"Return the total of observations in the graph.\"\"\"\n pass\n\n @abstractmethod\n def dispatcher(self, t: int) -> Snapshot:\n \"\"\"Return the snapshot observed at the input time index.\"\"\"\n pass\n\n\nclass VEInvariantDTDG(DiscreteGraph):\n \"\"\"V-E invariant DTDG.\n\n V-E invariant DTDG is a DTDG that nodes and edges won't change in terms of features or their existence after they\n are created in the graph.\n \"\"\"\n\n def __init__(self, edges: ndarray, nodes: ndarray, timestamps: ndarray):\n \"\"\"V-E invariant DTDG is stored as an edge table,a node table an the timestamp index.\n\n Args:\n edges: a numpy.ndarray of edges with shape (|E|,3+num_features). Each row is an edge with the first entry to\n be the timestamp index,the second and the third entry to be the source and destination index corresponding\n to the row index in the nodes table.\n nodes: a numpy.ndarray of node features with shape (|V|, 1+num_features). Each row is a node and the first\n entry is the timestamp index.\n timestamps: a numpy.ndarray of timestamps with shape (1, num_observations). This array should be sorted\n asc by the timestamps.\n\n Raises:\n DimensionError if the input edge, nodes or timestamps has a different dimension than expected.\n ValueNotSortedError if the input edges, nodes or timestamps is not sorted based on the time dimension.\n \"\"\"\n self.edge_dimension = 3\n self.node_dimension = 1\n error_message = \"\"\"The second dimension of {entity} should be greater than or equal to {value}.\"\"\"\n not_sorted_error = \"\"\"The input {entity} should be sorted based on the its time index.\"\"\"\n if edges.shape[1] < self.edge_dimension:\n raise DimensionError(error_message.format(entity=\"edges\", value=self.edge_dimension))\n\n if nodes.shape[1] < self.node_dimension:\n raise DimensionError(error_message.format(entity=\"nodes\", value=self.node_dimension))\n\n if not is_sorted(timestamps):\n raise ValueNotSortedError(not_sorted_error.format(entity=\"timestamps\"))\n\n if not is_sorted(edges[:, TIME_D]):\n raise ValueNotSortedError(not_sorted_error.format(entity=\"edges\"))\n\n if not is_sorted(nodes[:, TIME_D]):\n raise ValueNotSortedError(not_sorted_error.format(entity=\"nodes\"))\n\n self.nodes = nodes\n self.edges = edges\n self.timestamps = timestamps\n self._node_time_anchors = get_anchor_position(nodes[:, TIME_D], range(len(self.timestamps)))\n self._edge_time_anchors = get_anchor_position(edges[:, TIME_D], range(len(self.timestamps)))\n\n def dispatcher(self, t: int) -> Snapshot:\n \"\"\"Return a snapshot for the input time index.\"\"\"\n this_edges = self.edges[: self._edge_time_anchors[t], :]\n this_nodes = self.nodes[: self._node_time_anchors[t], :]\n num_nodes = this_nodes.shape[0]\n src = commonF.to_tensor(this_edges[:, SOURCE].astype(ID_TYPE))\n dst = commonF.to_tensor(this_edges[:, DESTINATION].astype(ID_TYPE))\n observation = dgl.graph((src, dst), num_nodes=num_nodes)\n if this_edges.shape[1] > self.edge_dimension:\n observation.edata[FEAT] = commonF.to_tensor(this_edges[:, self.edge_dimension :].astype(FEATURE_TYPE))\n\n if this_nodes.shape[1] > self.node_dimension:\n observation.ndata[FEAT] = commonF.to_tensor(this_nodes[:, self.node_dimension 
:].astype(FEATURE_TYPE))\n\n return Snapshot(observation, t)\n\n def __len__(self) -> int:\n \"\"\"Return the number of snapshots in this DTDG.\"\"\"\n return len(self.timestamps)\n\n\nclass CitationGraph(VEInvariantDTDG):\n \"\"\"Citation graph is a V-E invariant DTDG.\n\n A citation graph is roughly a V-E invariant DTDG, it is different from other kinds of dynamic graph\n in that each node's citation increase over time. Besides citations, other features remain the same.\n \"\"\"\n\n def __init__(self, edges: ndarray, nodes: ndarray, timestamps: ndarray):\n \"\"\"Citation graph is a subclass of VEInvariantDTDG..\n\n Args:\n edges: a numpy.ndarray of edges with shape (|E|,3+num_features). Each row is an edge with the first entry to\n be the timestamp index,the second and the third entry to be the source and destination index corresponding\n to the row index in the nodes table.\n nodes: a numpy.ndarray of node features with shape (|V|, 1+num_features). Each row is a node and the first\n entry is the timestamp index.\n timestamps: a numpy.ndarray of timestamps with shape (1, num_observations). This array should be sorted\n asc by the timestamps.\n\n Raises:\n DimensionError if the input edge, nodes or timestamps has a different dimension than expected.\n ValueNotSortedError if the input edges, nodes or timestamps is not sorted based on the time dimension.\n \"\"\"\n super().__init__(edges, nodes, timestamps)\n\n def dispatcher(self, t: int, add_self_loop: bool = False) -> Snapshot:\n \"\"\"Return a snapshot for the input time index.\n\n For citation graph, the node feature has previous year's citation as the last node feature.\n \"\"\"\n this_edges = self.edges[: self._edge_time_anchors[t], :]\n this_nodes = self.nodes[: self._node_time_anchors[t], :]\n src = commonF.to_tensor(this_edges[:, SOURCE].astype(ID_TYPE))\n dst = commonF.to_tensor(this_edges[:, DESTINATION].astype(ID_TYPE))\n num_nodes = this_nodes.shape[0]\n observation = dgl.graph((src, dst), num_nodes=num_nodes)\n if this_edges.shape[1] > self.edge_dimension:\n observation.edata[FEAT] = commonF.to_tensor(this_edges[:, self.edge_dimension :])\n\n citation = np.zeros(shape=(this_nodes.shape[0], 1))\n if t > 0:\n previous_snapshot = super().dispatcher(t - 1)\n previous_citation = previous_snapshot.observation.in_degrees().numpy()\n citation[: previous_citation.shape[0], 0] = previous_citation\n\n this_nodes = np.hstack((this_nodes, citation))\n observation.ndata[FEAT] = commonF.to_tensor(this_nodes[:, self.node_dimension :].astype(FEATURE_TYPE))\n if add_self_loop:\n observation = observation.add_self_loop()\n return Snapshot(observation, t)\n" ]
[ [ "numpy.hstack", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
devitocodes/joey
[ "bde8eb554d548764a50dd5384d1da9b0f0c9d0a2" ]
[ "joey/layers.py" ]
[ "from abc import abstractmethod\nfrom joey import Layer\nfrom joey import activation\nfrom joey import default_name_allocator as alloc\nfrom joey import default_dim_allocator as dim_alloc\nfrom devito import Grid, Function, Constant, Eq, Inc, \\\n ConditionalDimension\nfrom sympy import exp, Max, And, Min, sign\nimport numpy as np\n\n\nclass Conv(Layer):\n \"\"\"\n A Layer subclass corresponding to a 2D convolution layer (mathematically,\n it performs a cross-correlation operation).\n\n Parameters\n ----------\n kernel_size : (int, int, int)\n The shape of a kernel (represented internally by a NumPy array)\n expressed as (output channels / kernel count, rows, columns).\n input_size : (int, int, int, int)\n The shape of input data expressed as\n (batch size, channels, rows, columns).\n name_allocator_func : zero-argument function, optional\n See Layer.__doc__.\n dim_allocator_func : one-argument function, optional\n See Layer.__doc__.\n stride : (int, int), optional\n Stride of the layer expressed as (rows, columns). The default\n value is (1, 1).\n padding : (int, int), optional\n Padding of the layer expressed as (rows, columns). The default\n value is (0, 0).\n\n Be careful! The current version of Joey supports non-zero padding\n ONLY for standalone layers. When you create a neural network, all\n of its layers must have (0, 0) padding.\n activation : Activation, optional\n See Layer.__doc__. The actual default value is Dummy.\n generate_code : bool, optional\n See Layer.__doc__.\n strict_stride_check : bool, optional\n A boolean indicating whether a strict stride check should be\n performed when instantiating this object. The default value is\n True.\n\n If the check is disabled and the stride turns out to be\n incompatible with the provided kernel, input and padding sizes,\n some parts of input data will not be processed. 
This behaviour\n is intentional, its aim is avoiding any out-of-bounds accesses.\n \"\"\"\n\n def __init__(self, kernel_size, input_size,\n name_allocator_func=alloc, dim_allocator_func=dim_alloc,\n stride=(1, 1), padding=(0, 0),\n activation=None, generate_code=False,\n strict_stride_check=True):\n # Internal kernel size (self._kernel_size) is expressed as\n # (output channels / kernel count, input channels, rows, columns).\n\n self._error_check(kernel_size, input_size, stride, padding,\n strict_stride_check)\n\n self._kernel_size = (kernel_size[0], input_size[1], kernel_size[1],\n kernel_size[2])\n\n self._stride = stride\n self._padding = padding\n\n super().__init__(self._kernel_size, input_size, activation,\n name_allocator_func, dim_allocator_func,\n generate_code)\n\n def _error_check(self, kernel_size, input_size, stride, padding,\n strict_stride_check):\n if input_size is None or len(input_size) != 4:\n raise Exception(\"Input size is incorrect\")\n\n if kernel_size is None or len(kernel_size) != 3:\n raise Exception(\"Kernel size is incorrect\")\n\n if stride is None or len(stride) != 2:\n raise Exception(\"Stride is incorrect\")\n\n if stride[0] < 1 or stride[1] < 1:\n raise Exception(\"Stride cannot be less than 1\")\n\n if padding is None or len(padding) != 2:\n raise Exception(\"Padding is incorrect\")\n\n if padding[0] < 0 or padding[1] < 0:\n raise Exception(\"Padding cannot be negative\")\n\n if strict_stride_check:\n map_height = input_size[2] + 2 * padding[0]\n map_width = input_size[3] + 2 * padding[1]\n _, kernel_height, kernel_width = kernel_size\n\n if (map_height - kernel_height) % stride[0] != 0 or \\\n (map_width - kernel_width) % stride[1] != 0:\n raise Exception(\"Stride \" + str(stride) + \" is not \"\n \"compatible with feature map, kernel and \"\n \"padding sizes. 
If you want to proceed \"\n \"anyway, set strict_stride_check=False when \"\n \"instantiating this object\")\n\n def _allocate(self, kernel_size, input_size, name_allocator_func,\n dim_allocator_func):\n map_height = input_size[2] + 2 * self._padding[0]\n map_width = input_size[3] + 2 * self._padding[1]\n _, _, kernel_height, kernel_width = kernel_size\n\n t1, t2, t3, t4, t5, t6, t7, t8, t9, t10 = dim_allocator_func(10)\n\n gridK = Grid(shape=kernel_size, dimensions=(t1, t2, t3, t4))\n K = Function(name=name_allocator_func(), grid=gridK, space_order=0,\n dtype=np.float64)\n\n gridB = Grid(shape=(input_size[0], input_size[1],\n map_height, map_width),\n dimensions=(t5, t6, t7, t8))\n B = Function(name=name_allocator_func(), grid=gridB, space_order=0,\n dtype=np.float64)\n\n gridR = Grid(shape=(input_size[0], kernel_size[0],\n (map_height - kernel_height + self._stride[0])\n // self._stride[0],\n (map_width - kernel_width + self._stride[1])\n // self._stride[1]),\n dimensions=(t5, t1, t9, t10))\n R = Function(name=name_allocator_func(), grid=gridR, space_order=0,\n dtype=np.float64)\n\n bias_grid = Grid(shape=kernel_size[0],\n dimensions=(t1,))\n bias = Function(name=name_allocator_func(), grid=bias_grid,\n space_order=0, dtype=np.float64)\n\n kernel_grad = Function(name=name_allocator_func(),\n grid=gridK, space_order=0, dtype=np.float64)\n\n output_grad = Function(name=name_allocator_func(),\n grid=gridR,\n space_order=0, dtype=np.float64)\n\n bias_grad = Function(name=name_allocator_func(),\n grid=bias_grid, space_order=0, dtype=np.float64)\n\n return (K, B, R, bias, kernel_grad, output_grad, bias_grad)\n\n def execute(self, input_data, bias, kernel_data=None):\n map_height = input_data.shape[2] + 2 * self._padding[0]\n batch_size, channels, _, _ = input_data.shape\n\n for i in range(batch_size):\n for j in range(channels):\n for k in range(self._padding[0],\n map_height - self._padding[0]):\n self._I.data[i, j, k] = \\\n np.concatenate(([0] * self._padding[1],\n input_data[i, j, k - self._padding[0]],\n [0] * self._padding[1]))\n\n if kernel_data is not None:\n self._K.data[:] = kernel_data\n\n self._bias.data[:] = bias\n\n self._R.data[:] = 0\n\n return super().execute()\n\n def equations(self):\n a, b, c, d = self._R.dimensions\n _, _, kernel_height, kernel_width = self._kernel_size\n batch_size, channels, _, _ = self._I.shape\n e, f, g, h = self._K.dimensions\n\n rhs = self._K[b, f, g, h] * \\\n self._I[a, f, self._stride[0] * c + g,\n self._stride[1] * d + h]\n\n eqs = [Inc(self._R[a, b, c, d], rhs)]\n\n if self._activation is not None:\n eqs.append(Eq(self._R[a, b, c, d],\n self._activation(self._R[a, b, c, d] +\n self._bias[b])))\n else:\n eqs.append(Inc(self._R[a, b, c, d], self._bias[b]))\n\n return (eqs, [])\n\n def backprop_equations(self, prev_layer, next_layer):\n layer = self\n\n kernel_dims = layer.kernel_gradients.dimensions\n bias_dims = layer.bias_gradients.dimensions\n dims = layer.result_gradients.dimensions\n\n eqs = [Inc(layer.bias_gradients[bias_dims[0]],\n layer.result_gradients[dims[0], dims[1], dims[2], dims[3]]),\n Inc(layer.kernel_gradients[kernel_dims[0], kernel_dims[1],\n kernel_dims[2], kernel_dims[3]],\n layer.result_gradients[dims[0],\n kernel_dims[0], dims[2],\n dims[3]] *\n layer.input[dims[0], kernel_dims[1],\n kernel_dims[2] + dims[2],\n kernel_dims[3] + dims[3]])]\n\n _, _, height, width = layer.kernel.shape\n\n if next_layer is not None:\n next_dims = next_layer.result_gradients.dimensions\n\n cd1 = ConditionalDimension(name=alloc(), 
parent=kernel_dims[2],\n condition=And(next_dims[2] - height +\n 1 + kernel_dims[2] >= 0,\n next_dims[2] - height +\n 1 + kernel_dims[2] <\n layer.result_gradients\n .shape[2]))\n cd2 = ConditionalDimension(name=alloc(), parent=kernel_dims[3],\n condition=And(next_dims[3] - width + 1 +\n kernel_dims[3] >= 0,\n next_dims[3] - width + 1 +\n kernel_dims[3] <\n layer.result_gradients\n .shape[3]))\n\n eqs += [Inc(next_layer.result_gradients[next_dims[0],\n next_dims[1],\n next_dims[2],\n next_dims[3]],\n layer.kernel[dims[1], next_dims[1],\n height - kernel_dims[2] - 1,\n width - kernel_dims[3] - 1] *\n layer.result_gradients[next_dims[0],\n dims[1],\n next_dims[2] - height + 1 +\n kernel_dims[2],\n next_dims[3] - width + 1 +\n kernel_dims[3]],\n implicit_dims=(cd1, cd2))] + \\\n next_layer.activation.backprop_eqs(next_layer)\n\n return (eqs, [])\n\n\nclass Pooling(Layer):\n \"\"\"\n A Layer abstract subclass corresponding to a generic pooling layer.\n When you create a subclass of Pooling, you have to implement\n the following methods: equations(), backprop_equations().\n\n Parameters\n ----------\n kernel_size : (int, int)\n The shape of a kernel (represented internally by a NumPy array)\n expressed as (rows, columns).\n input_size : (int, int, int, int)\n The shape of input data expressed as\n (batch size, channels, rows, columns).\n name_allocator_func : zero-argument function, optional\n See Layer.__doc__.\n dim_allocator_func : one-argument function, optional\n See Layer.__doc__.\n stride : (int, int), optional\n Stride of the layer expressed as (rows, columns). The default\n value is (1, 1).\n padding : (int, int), optional\n Padding of the layer expressed as (rows, columns). The default\n value is (0, 0).\n\n Be careful! The current version of Joey supports non-zero padding\n ONLY for standalone layers. When you create a neural network, all\n of its layers must have (0, 0) padding.\n activation : Activation, optional\n See Layer.__doc__. The actual default value is Dummy.\n generate_code : bool, optional\n See Layer.__doc__.\n strict_stride_check : bool, optional\n A boolean indicating whether a strict stride check should be\n performed when instantiating this object. The default value is\n True.\n\n If the check is disabled and the stride turns out to be\n incompatible with the provided kernel, input and padding sizes,\n some parts of input data will not be processed. 
This behaviour\n is intentional, its aim is avoiding any out-of-bounds accesses.\n \"\"\"\n\n def __init__(self, kernel_size, input_size,\n name_allocator_func=alloc, dim_allocator_func=dim_alloc,\n stride=(1, 1), padding=(0, 0), activation=None,\n generate_code=False, strict_stride_check=True):\n # Kernel size is expressed as (rows, columns).\n # Input size is expressed as (batch size, channels, rows, columns).\n\n self._error_check(kernel_size, input_size, stride, padding,\n strict_stride_check)\n\n self._kernel_size = kernel_size\n\n self._stride = stride\n self._padding = padding\n\n super().__init__(kernel_size, input_size, activation,\n name_allocator_func, dim_allocator_func,\n generate_code)\n\n def _error_check(self, kernel_size, input_size, stride, padding,\n strict_stride_check):\n if input_size is None or len(input_size) != 4:\n raise Exception(\"Input size is incorrect\")\n\n if kernel_size is None or len(kernel_size) != 2:\n raise Exception(\"Kernel size is incorrect\")\n\n if stride is None or len(stride) != 2:\n raise Exception(\"Stride is incorrect\")\n\n if stride[0] < 1 or stride[1] < 1:\n raise Exception(\"Stride cannot be less than 1\")\n\n if padding is None or len(padding) != 2:\n raise Exception(\"Padding is incorrect\")\n\n if padding[0] < 0 or padding[1] < 0:\n raise Exception(\"Padding cannot be negative\")\n\n if strict_stride_check:\n map_height = input_size[2] + 2 * padding[0]\n map_width = input_size[3] + 2 * padding[1]\n kernel_height, kernel_width = kernel_size\n\n if (map_height - kernel_height) % stride[0] != 0 or \\\n (map_width - kernel_width) % stride[1] != 0:\n raise Exception(\"Stride \" + str(stride) + \" is not \"\n \"compatible with feature map, kernel and \"\n \"padding sizes. If you want to proceed \"\n \"anyway, set strict_stride_check=False \"\n \"when instantiating this object\")\n\n def _allocate(self, kernel_size, input_size, name_allocator_func,\n dim_allocator_func):\n map_height = input_size[2] + 2 * self._padding[0]\n map_width = input_size[3] + 2 * self._padding[1]\n kernel_height, kernel_width = kernel_size\n\n t1, t2, t3, t4, t5, t6 = dim_allocator_func(6)\n\n gridB = Grid(shape=(input_size[0], input_size[1], map_height,\n map_width),\n dimensions=(t1, t2, t3, t4))\n B = Function(name=name_allocator_func(), grid=gridB, space_order=0,\n dtype=np.float64)\n\n gridR = Grid(shape=(input_size[0], input_size[1],\n (map_height - kernel_height + self._stride[0])\n // self._stride[0],\n (map_width - kernel_width + self._stride[1])\n // self._stride[1]),\n dimensions=(t1, t2, t5, t6))\n\n R = Function(name=name_allocator_func(), grid=gridR, space_order=0,\n dtype=np.float64)\n\n output_grad = Function(name=name_allocator_func(),\n grid=gridR,\n space_order=0, dtype=np.float64)\n\n return (None, B, R, None, None, output_grad, None)\n\n @property\n def stride(self):\n \"\"\"Stride of the layer.\"\"\"\n return self._stride\n\n @property\n def kernel_size(self):\n \"\"\"The kernel size of the layer.\"\"\"\n return self._kernel_size\n\n def execute(self, input_data):\n map_height = input_data.shape[2]\n # Add padding to the start and end of each row\n for image in range(input_data.shape[0]):\n for channel in range(input_data.shape[1]):\n for i in range(self._padding[0],\n map_height - self._padding[0]):\n self._I.data[image, channel, i] = \\\n np.concatenate(([0] * self._padding[1],\n input_data[image, channel,\n i - self._padding[0]],\n [0] * self._padding[1]))\n return super().execute()\n\n @abstractmethod\n def equations(self):\n pass\n\n 
@abstractmethod\n def backprop_equations(self, prev_layer, next_layer):\n pass\n\n\nclass MaxPooling(Pooling):\n \"\"\"\n A Layer/Pooling subclass corresponding to a max pooling layer.\n\n Parameters\n ----------\n See Pooling.__doc__.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._indices = None\n self._forward_tmp_constants = None\n self._backward_tmp_constants = None\n super().__init__(*args, **kwargs)\n\n def equations(self):\n if self._forward_tmp_constants is None:\n self._forward_tmp_constants = \\\n [Constant(name=alloc(), dtype=np.float64)]\n\n if self._indices is None:\n self._indices = \\\n Function(name=alloc(),\n grid=self._R.grid,\n space_order=0,\n dtype=np.int32)\n\n a, b, c, d = self._R.dimensions\n kernel_height, kernel_width = self._kernel_size\n i, j = dim_alloc(2)\n\n args = [(i.name + '_M', kernel_height - 1),\n (j.name + '_M', kernel_width - 1)]\n\n old = self._forward_tmp_constants[0]\n\n cond1 = abs(sign(self._R[a, b, c, d] - old)) * kernel_width * \\\n kernel_height\n cond2 = abs(sign(self._I[a, b, self._stride[0] * c + i,\n self._stride[1] * d + j] -\n self._R[a, b, c, d])) * kernel_width * kernel_height\n\n eqs = [Eq(self._indices, kernel_height * kernel_width),\n Eq(self._R[a, b, c, d], self._I[a, b,\n self._stride[0] * c,\n self._stride[1] * d]),\n Eq(old, self._R[a, b, c, d], implicit_dims=(i, j)),\n Eq(self._R[a, b, c, d], Max(self._R[a, b, c, d],\n self._I[a, b,\n self._stride[0] * c + i,\n self._stride[1] * d + j])),\n Eq(self._indices[a, b, c, d],\n Min(self._indices[a, b, c, d] + cond1,\n i * kernel_width + j + cond2))]\n\n if self._activation is not None:\n eqs.append(Eq(self._R, self._activation(self._R)))\n\n return (eqs, args)\n\n def backprop_equations(self, prev_layer, next_layer):\n if next_layer is None:\n return ([], [])\n\n if self._backward_tmp_constants is None:\n self._backward_tmp_constants = \\\n [Constant(name=alloc(), dtype=np.int32),\n Constant(name=alloc(), dtype=np.int32)]\n\n dims = self._R.dimensions\n stride_rows, stride_cols = self.stride\n\n index = self._indices[dims[0], dims[1], dims[2], dims[3]]\n a = self._backward_tmp_constants[0]\n b = self._backward_tmp_constants[1]\n\n return ([Eq(a, index // 2),\n Eq(b, index % 2),\n Inc(next_layer.result_gradients[dims[0],\n dims[1],\n stride_rows * dims[2] + a,\n stride_cols * dims[3] + b],\n self.result_gradients[dims[0],\n dims[1], dims[2], dims[3]])] +\n next_layer.activation.backprop_eqs(next_layer), [])\n\n\nclass FullyConnected(Layer):\n \"\"\"\n A Layer subclass corresponding to a full connection (FC) layer.\n\n Parameters\n ----------\n weight_size : (int, int)\n The shape of a weight matrix (represented internally by a NumPy array)\n expressed as (rows, columns).\n input_size : (int, int)\n The shape of input data expressed as (rows, columns).\n name_allocator_func : zero-argument function, optional\n See Layer.__doc__.\n dim_allocator_func : one-argument function, optional\n See Layer.__doc__.\n activation : Activation, optional\n See Layer.__doc__. 
The actual default value is Dummy.\n generate_code : bool, optional\n See Layer.__doc__.\n \"\"\"\n\n def __init__(self, weight_size, input_size, name_allocator_func=alloc,\n dim_allocator_func=dim_alloc, activation=None,\n generate_code=False):\n super().__init__(weight_size, input_size, activation,\n name_allocator_func, dim_allocator_func,\n generate_code)\n\n def _allocate(self, weight_size, input_size, name_allocator_func,\n dim_allocator_func):\n t1, t2, t3 = dim_allocator_func(3)\n self._dimensions = (t1, t2, t3)\n\n gridW = Grid(shape=weight_size, dimensions=(t1, t2))\n W = Function(name=name_allocator_func(), grid=gridW, space_order=0,\n dtype=np.float64)\n\n gridV_dimensions = (t2, t3)\n gridR_dimensions = (t1, t3)\n gridR_shape = (weight_size[0], input_size[1])\n\n gridV = Grid(shape=input_size, dimensions=gridV_dimensions)\n V = Function(name=name_allocator_func(), grid=gridV, space_order=0,\n dtype=np.float64)\n\n gridR = Grid(shape=gridR_shape, dimensions=gridR_dimensions)\n R = Function(name=name_allocator_func(), grid=gridR, space_order=0,\n dtype=np.float64)\n\n if self._activation is not None:\n self._T = Function(name=name_allocator_func(), grid=gridR,\n space_order=0, dtype=np.float64)\n\n bias_grid = Grid(shape=weight_size[0],\n dimensions=(t1,))\n bias = Function(name=name_allocator_func(), grid=bias_grid,\n space_order=0, dtype=np.float64)\n\n kernel_grad = Function(name=name_allocator_func(),\n grid=gridW, space_order=0, dtype=np.float64)\n\n output_grad = Function(name=name_allocator_func(),\n grid=gridR, space_order=0,\n dtype=np.float64)\n\n bias_grad = Function(name=name_allocator_func(),\n grid=bias_grid, space_order=0, dtype=np.float64)\n\n return (W, V, R, bias, kernel_grad, output_grad, bias_grad)\n\n def execute(self, input_data, bias, weight_data=None):\n if weight_data is not None:\n self._K.data[:] = weight_data\n\n self._I.data[:] = input_data\n self._bias.data[:] = bias\n\n if self._activation is not None:\n self._T.data[:] = 0\n\n self._R.data[:] = 0\n\n return super().execute()\n\n def equations(self):\n a, b, c = self._dimensions\n\n eqs = [Inc(self._R[a, c], self._K[a, b] * self._I[b, c])]\n\n if self._activation is not None:\n eqs.append(Eq(self._R, self._activation(self._bias[a] + self._R)))\n else:\n eqs.append(Inc(self._R[a, c], self._bias[a]))\n\n return (eqs, [])\n\n def backprop_equations(self, prev_layer, next_layer):\n layer = self\n\n dims = layer.result_gradients.dimensions\n kernel_dims = layer.kernel_gradients.dimensions\n\n if prev_layer is None:\n return ([Inc(layer.bias_gradients, layer.result_gradients),\n Inc(layer.kernel_gradients[kernel_dims[0],\n kernel_dims[1]],\n layer.input[kernel_dims[1],\n dims[1]] *\n layer.result_gradients[kernel_dims[0], dims[1]])], [])\n\n prev_dims = prev_layer.result_gradients.dimensions\n\n return ([Inc(layer.result_gradients[dims[0], dims[1]],\n prev_layer.kernel[prev_dims[0], dims[0]] *\n prev_layer.result_gradients[prev_dims[0], dims[1]])] +\n layer.activation.backprop_eqs(layer) +\n [Inc(layer.bias_gradients, layer.result_gradients),\n Eq(layer.kernel_gradients[kernel_dims[0], kernel_dims[1]],\n layer.kernel_gradients[kernel_dims[0], kernel_dims[1]] +\n layer.input[kernel_dims[1], dims[1]] *\n layer.result_gradients[kernel_dims[0], dims[1]])], [])\n\n\nclass FullyConnectedSoftmax(FullyConnected):\n \"\"\"\n A Layer/FullyConnected subclass corresponding to a full connection (FC)\n layer with the softmax activation.\n\n Parameters\n ----------\n weight_size : (int, int)\n The shape of a weight 
matrix (represented internally by a NumPy array)\n expressed as (rows, columns).\n input_size : (int, int)\n The shape of input data expressed as (rows, columns).\n name_allocator_func : zero-argument function, optional\n See Layer.__doc__.\n dim_allocator_func : one-argument function, optional\n See Layer.__doc__.\n generate_code : bool, optional\n See Layer.__doc__.\n \"\"\"\n\n def __init__(self, weight_size, input_size, name_allocator_func=alloc,\n dim_allocator_func=dim_alloc, generate_code=False):\n self._name_allocator = name_allocator_func\n self._dim_allocator = dim_allocator_func\n super().__init__(weight_size, input_size, name_allocator_func,\n dim_allocator_func, activation.Dummy(), generate_code)\n\n def equations(self):\n a, b, c = self._dimensions\n\n gridC = Grid(shape=self._R.shape[1], dimensions=(c,))\n C = Function(name=self._name_allocator(), grid=gridC, space_order=0,\n dtype=np.float64)\n M = Function(name=self._name_allocator(), grid=gridC, space_order=0,\n dtype=np.float64)\n\n return ([Inc(self._T[a, c], self._K[a, b] * self._I[b, c]),\n Inc(self._T[a, c], self._bias[a]),\n Eq(M[c], Max(*[self._T[i, c]\n for i in range(self._R.shape[0])])),\n Eq(C[c], sum([exp(self._T[i, c] - M[c])\n for i in range(self._R.shape[0])])),\n Eq(self._R[a, b], exp(self._T[a, b] - M[b]) / C[b]),\n Eq(self._T, 0)], [])\n\n\nclass Flat(Layer):\n \"\"\"\n A Layer subclass corresponding to an internal flattening layer turning\n a 4D array into a 2D matrix required by a full connection (FC) layer.\n\n When creating a neural network, you have to put Flat between\n a pooling/convolution layer and an FC layer.\n\n Parameters\n ----------\n input_size : (int, int)\n The shape of input data expressed as (batch size, channels,\n rows, columns).\n\n The output shape will be (channels * rows * columns, batch size).\n name_allocator_func : zero-argument function, optional\n See Layer.__doc__.\n dim_allocator_func : one-argument function, optional\n See Layer.__doc__.\n generate_code : bool, optional\n See Layer.__doc__.\n \"\"\"\n\n def __init__(self, input_size, name_allocator_func=alloc,\n dim_allocator_func=dim_alloc, generate_code=False):\n # Input size is expressed as (batch size, channels, rows, columns).\n\n super().__init__(None, input_size, None, name_allocator_func,\n dim_allocator_func, generate_code)\n\n def _allocate(self, kernel_size, input_size, name_allocator_func,\n dim_allocator_func):\n t1, t2, t3, t4, t5 = dim_allocator_func(5)\n\n gridI = Grid(shape=input_size, dimensions=(t1, t2, t3, t4))\n I = Function(name=name_allocator_func(), grid=gridI, space_order=0,\n dtype=np.float64)\n\n gridR = Grid(shape=(input_size[1]*input_size[2]*input_size[3],\n input_size[0]),\n dimensions=(t5, t1))\n R = Function(name=name_allocator_func(), grid=gridR, space_order=0,\n dtype=np.float64)\n\n output_grad = Function(name=name_allocator_func(),\n grid=gridR,\n space_order=0, dtype=np.float64)\n\n return (None, I, R, None, None, output_grad, None)\n\n def execute(self, input_data):\n self._I.data[:] = input_data\n return super().execute()\n\n def equations(self):\n _, b, c, d = self._I.dimensions\n batch_size, channels, height, width = self._I.shape\n\n return ([Eq(self._R[b * height * width + c * height + d, a],\n self._I[a, b, c, d]) for a in range(batch_size)], [])\n\n def backprop_equations(self, prev_layer, next_layer):\n layer = self\n\n prev_kernel_dims = prev_layer.kernel_gradients.dimensions\n dims = layer.result_gradients.dimensions\n\n batch_size, _, height, width = 
next_layer.result_gradients.shape\n next_dims = next_layer.result_gradients.dimensions\n\n return ([Inc(layer.result_gradients[dims[0], dims[1]],\n prev_layer.kernel[prev_kernel_dims[0], dims[0]] *\n prev_layer.result_gradients[prev_kernel_dims[0],\n dims[1]])] +\n [Eq(next_layer.result_gradients[batch, next_dims[1],\n next_dims[2], next_dims[3]],\n layer.result_gradients[next_dims[1] * height * width +\n next_dims[2] * height +\n next_dims[3], batch])\n for batch in range(batch_size)] +\n next_layer.activation.backprop_eqs(next_layer), [])\n" ]
[ [ "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
henktillman/ray
[ "fe6d7052460203bb7d58d5c6cbc199bd92424809" ]
[ "python/ray/tests/test_advanced_3.py" ]
[ "# coding: utf-8\nimport glob\nimport logging\nimport os\nimport json\nimport sys\nimport socket\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.ray_constants as ray_constants\nimport ray.cluster_utils\nimport ray.test_utils\nfrom ray import resource_spec\nimport setproctitle\n\nfrom ray.test_utils import (check_call_ray, RayTestTimeoutException,\n wait_for_num_actors)\n\nlogger = logging.getLogger(__name__)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.node.unique_id\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.node.unique_id\n\n # This object will be local to one of the raylets. Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.actors()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n assert ray.cluster_resources()[\"CPU\"] == 5\n assert ray.cluster_resources()[\"GPU\"] == 3\n assert ray.cluster_resources()[\"CustomResource\"] == 1\n\n # A driver/worker creates a temporary object during startup. Although the\n # temporary object is freed immediately, in a rare case, we can still find\n # the object ref in GCS because Raylet removes the object ref from GCS\n # asynchronously.\n # Because we can't control when workers create the temporary objects, so\n # We can't assert that `ray.objects()` returns an empty dict. 
Here we just\n # make sure `ray.objects()` succeeds.\n assert len(ray.objects()) >= 0\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n class Actor:\n def __init__(self):\n pass\n\n _ = Actor.remote() # noqa: F841\n # Wait for actor to be created\n wait_for_num_actors(1)\n\n actor_table = ray.actors()\n assert len(actor_table) == 1\n\n actor_info, = actor_table.values()\n assert actor_info[\"JobID\"] == job_id.hex()\n assert \"IPAddress\" in actor_info[\"Address\"]\n assert \"IPAddress\" in actor_info[\"OwnerAddress\"]\n assert actor_info[\"Address\"][\"Port\"] != actor_info[\"OwnerAddress\"][\"Port\"]\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"DriverIPAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError:\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. 
Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray.worker.global_worker.current_job_id\n\n # in worker\n @ray.remote\n def f():\n return ray.worker.global_worker.current_job_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_ref_properties():\n id_bytes = b\"00112233445566778899\"\n object_ref = ray.ObjectRef(id_bytes)\n assert object_ref.binary() == id_bytes\n object_ref = ray.ObjectRef.nil()\n assert object_ref.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(b\"0123456789\")\n object_ref = ray.ObjectRef.from_random()\n assert not object_ref.is_nil()\n assert object_ref.binary() != id_bytes\n id_dumps = pickle.dumps(object_ref)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_ref\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(\n num_cpus=1,\n object_store_memory=int(10**8),\n _internal_config=json.dumps({\n \"object_pinning_enabled\": 0\n }))\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.core_worker.object_exists(x_id)\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName:\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.f()\"\n\n @ray.remote\n def unique_1():\n assert \"unique_1\" in setproctitle.getproctitle()\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n 
ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(\n check_call_ray([\"stack\"], capture_stdout=True))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n if sys.platform != \"win32\":\n random_name = ray.ObjectRef.from_random().hex()\n temp_raylet_socket_dir = os.path.join(ray.utils.get_ray_temp_dir(),\n \"tests\", random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=2, raylet_socket_name=temp_raylet_socket_name)\n\n @ray.remote\n def foo(x):\n time.sleep(1)\n return 2 * x\n\n ray.get([foo.remote(i) for i in range(2)])\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_put_pins_object(ray_start_object_store_memory):\n obj = np.ones(200 * 1024, dtype=np.uint8)\n x_id = ray.put(obj)\n x_binary = x_id.binary()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # x cannot be evicted since x_id pins it\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert (ray.get(x_id) == obj).all()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # now it can be evicted since x_id pins it but x_binary does not\n del x_id\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert not ray.worker.global_worker.core_worker.object_exists(\n ray.ObjectRef(x_binary))\n\n # weakref put\n y_id = ray.put(obj, weakref=True)\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n 
ray.get(y_id)\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_refs, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor:\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n\n\ndef test_invalid_unicode_in_worker_log(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n # Wait till first worker log file is created.\n while True:\n log_file_paths = glob.glob(\"{}/worker*.out\".format(logs_dir))\n if len(log_file_paths) == 0:\n time.sleep(0.2)\n else:\n break\n\n with open(log_file_paths[0], \"wb\") as f:\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.flush()\n\n # Wait till the log monitor reads the file.\n time.sleep(1.0)\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\[email protected](reason=\"This test is too expensive to run.\")\ndef test_move_log_files_to_old(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n @ray.remote\n class Actor:\n def f(self):\n print(\"function f finished\")\n\n # First create a temporary actor.\n actors = [\n Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)\n ]\n ray.get([a.f.remote() for a in actors])\n\n # Make sure no log files are in the \"old\" directory before the actors\n # are killed.\n assert len(glob.glob(\"{}/old/worker*.out\".format(logs_dir))) == 0\n\n # Now kill the actors so the files get moved to logs/old/.\n [a.__ray_terminate__.remote() for a in actors]\n\n while True:\n log_file_paths = glob.glob(\"{}/old/worker*.out\".format(logs_dir))\n if len(log_file_paths) > 0:\n with open(log_file_paths[0], \"r\") as f:\n assert \"function f finished\\n\" in f.readlines()\n break\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\ndef test_lease_request_leak(shutdown_only):\n ray.init(\n num_cpus=1,\n _internal_config=json.dumps({\n 
\"initial_reconstruction_timeout_milliseconds\": 200\n }))\n assert len(ray.objects()) == 0\n\n @ray.remote\n def f(x):\n time.sleep(0.1)\n return\n\n # Submit pairs of tasks. Tasks in a pair can reuse the same worker leased\n # from the raylet.\n tasks = []\n for _ in range(10):\n obj_ref = ray.put(1)\n for _ in range(2):\n tasks.append(f.remote(obj_ref))\n del obj_ref\n ray.get(tasks)\n\n time.sleep(\n 1) # Sleep for an amount longer than the reconstruction timeout.\n assert len(ray.objects()) == 0, ray.objects()\n\n\[email protected](\n \"ray_start_cluster\", [{\n \"num_cpus\": 0,\n \"num_nodes\": 1,\n \"do_init\": False,\n }],\n indirect=True)\ndef test_ray_address_environment_variable(ray_start_cluster):\n address = ray_start_cluster.address\n # In this test we use zero CPUs to distinguish between starting a local\n # ray cluster and connecting to an existing one.\n\n # Make sure we connect to an existing cluster if\n # RAY_ADDRESS is set.\n os.environ[\"RAY_ADDRESS\"] = address\n ray.init()\n assert \"CPU\" not in ray.state.cluster_resources()\n del os.environ[\"RAY_ADDRESS\"]\n ray.shutdown()\n\n # Make sure we start a new cluster if RAY_ADDRESS is not set.\n ray.init()\n assert \"CPU\" in ray.state.cluster_resources()\n ray.shutdown()\n\n\ndef test_gpu_info_parsing():\n info_string = \"\"\"Model: Tesla V100-SXM2-16GB\nIRQ: 107\nGPU UUID: GPU-8eaaebb8-bb64-8489-fda2-62256e821983\nVideo BIOS: 88.00.4f.00.09\nBus Type: PCIe\nDMA Size: 47 bits\nDMA Mask: 0x7fffffffffff\nBus Location: 0000:00:1e.0\nDevice Minor: 0\nBlacklisted: No\n \"\"\"\n constraints_dict = resource_spec._constraints_from_gpu_info(info_string)\n expected_dict = {\n \"{}V100\".format(ray_constants.RESOURCE_CONSTRAINT_PREFIX): 1\n }\n assert constraints_dict == expected_dict\n\n info_string = \"\"\"Model: Tesla T4\nIRQ: 10\nGPU UUID: GPU-415fe7a8-f784-6e3d-a958-92ecffacafe2\nVideo BIOS: 90.04.84.00.06\nBus Type: PCIe\nDMA Size: 47 bits\nDMA Mask: 0x7fffffffffff\nBus Location: 0000:00:1b.0\nDevice Minor: 0\nBlacklisted: No\n \"\"\"\n constraints_dict = resource_spec._constraints_from_gpu_info(info_string)\n expected_dict = {\n \"{}T4\".format(ray_constants.RESOURCE_CONSTRAINT_PREFIX): 1\n }\n assert constraints_dict == expected_dict\n\n assert resource_spec._constraints_from_gpu_info(None) == {}\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
atharva-diwan/PolyLaneNet
[ "c89500428ddd72e7c3027955d88fd074603f48e0" ]
[ "lib/datasets/lane_dataset.py" ]
[ "import cv2\nimport numpy as np\nimport imgaug.augmenters as iaa\nfrom imgaug.augmenters import Resize\nfrom torchvision.transforms import ToTensor\nfrom torch.utils.data.dataset import Dataset\nfrom imgaug.augmentables.lines import LineString, LineStringsOnImage\n\nfrom .elas import ELAS\nfrom .llamas import LLAMAS\nfrom .tusimple import TuSimple\nfrom .nolabel_dataset import NoLabelDataset\n\nGT_COLOR = (255, 0, 0)\nPRED_HIT_COLOR = (0, 255, 0)\nPRED_MISS_COLOR = (0, 0, 255)\nIMAGENET_MEAN = np.array([0.485, 0.456, 0.406])\nIMAGENET_STD = np.array([0.229, 0.224, 0.225])\n\n\nclass LaneDataset(Dataset):\n def __init__(self,\n dataset='tusimple',\n augmentations=None,\n normalize=False,\n split='train',\n img_size=(360, 640),\n aug_chance=1.,\n **kwargs):\n super(LaneDataset, self).__init__()\n if dataset == 'tusimple':\n self.dataset = TuSimple(split=split, **kwargs)\n elif dataset == 'llamas':\n self.dataset = LLAMAS(split=split, **kwargs)\n elif dataset == 'elas':\n self.dataset = ELAS(split=split, **kwargs)\n elif dataset == 'nolabel_dataset':\n self.dataset = NoLabelDataset(**kwargs)\n else:\n raise NotImplementedError()\n\n self.transform_annotations()\n self.img_h, self.img_w = img_size\n\n if augmentations is not None:\n # add augmentations\n augmentations = [getattr(iaa, aug['name'])(**aug['parameters'])\n for aug in augmentations] # add augmentation\n\n self.normalize = normalize\n transformations = iaa.Sequential([Resize({'height': self.img_h, 'width': self.img_w})])\n self.to_tensor = ToTensor()\n self.transform = iaa.Sequential([iaa.Sometimes(then_list=augmentations, p=aug_chance), transformations])\n self.max_lanes = self.dataset.max_lanes\n\n def transform_annotation(self, anno, img_wh=None):\n if img_wh is None:\n img_h = self.dataset.get_img_heigth(anno['path'])\n img_w = self.dataset.get_img_width(anno['path'])\n else:\n img_w, img_h = img_wh\n\n old_lanes = anno['lanes']\n categories = anno['categories'] if 'categories' in anno else [1] * len(old_lanes)\n old_lanes = zip(old_lanes, categories)\n old_lanes = filter(lambda x: len(x[0]) > 0, old_lanes)\n lanes = np.ones((self.dataset.max_lanes, 1 + 2 + 2 * self.dataset.max_points), dtype=np.float32) * -1e5\n lanes[:, 0] = 0\n old_lanes = sorted(old_lanes, key=lambda x: x[0][0][0])\n for lane_pos, (lane, category) in enumerate(old_lanes):\n lower, upper = lane[0][1], lane[-1][1]\n xs = np.array([p[0] for p in lane]) / img_w\n ys = np.array([p[1] for p in lane]) / img_h\n lanes[lane_pos, 0] = category\n lanes[lane_pos, 1] = lower / img_h\n lanes[lane_pos, 2] = upper / img_h\n lanes[lane_pos, 3:3 + len(xs)] = xs\n lanes[lane_pos, (3 + self.dataset.max_points):(3 + self.dataset.max_points + len(ys))] = ys\n\n new_anno = {\n 'path': anno['path'],\n 'label': lanes,\n 'old_anno': anno,\n 'categories': [cat for _, cat in old_lanes]\n }\n\n return new_anno\n\n @property\n def annotations(self):\n return self.dataset.annotations\n\n def transform_annotations(self):\n print('Transforming annotations...')\n self.dataset.annotations = np.array(list(map(self.transform_annotation, self.dataset.annotations)))\n print('Done.')\n\n def draw_annotation(self, idx, pred=None, img=None, cls_pred=None):\n if img is None:\n img, label, _ = self.__getitem__(idx, transform=True)\n # Tensor to opencv image\n img = img.permute(1, 2, 0).numpy()\n # Unnormalize\n if self.normalize:\n img = img * np.array(IMAGENET_STD) + np.array(IMAGENET_MEAN)\n img = (img * 255).astype(np.uint8)\n else:\n _, label, _ = self.__getitem__(idx)\n\n img_h, img_w, _ = 
img.shape\n\n # Draw label\n for i, lane in enumerate(label):\n if lane[0] == 0: # Skip invalid lanes\n continue\n lane = lane[3:] # remove conf, upper and lower positions\n xs = lane[:len(lane) // 2]\n ys = lane[len(lane) // 2:]\n ys = ys[xs >= 0]\n xs = xs[xs >= 0]\n\n # draw GT points\n for p in zip(xs, ys):\n p = (int(p[0] * img_w), int(p[1] * img_h))\n img = cv2.circle(img, p, 5, color=GT_COLOR, thickness=-1)\n\n # draw GT lane ID\n cv2.putText(img,\n str(i), (int(xs[0] * img_w), int(ys[0] * img_h)),\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=1,\n color=(0, 255, 0))\n\n if pred is None:\n return img\n\n # Draw predictions\n pred = pred[pred[:, 0] != 0] # filter invalid lanes\n matches, accs, _ = self.dataset.get_metrics(pred, idx)\n overlay = img.copy()\n for i, lane in enumerate(pred):\n if matches[i]:\n color = PRED_HIT_COLOR\n else:\n color = PRED_MISS_COLOR\n lane = lane[1:] # remove conf\n lower, upper = lane[0], lane[1]\n lane = lane[2:] # remove upper, lower positions\n\n # generate points from the polynomial\n ys = np.linspace(lower, upper, num=100)\n points = np.zeros((len(ys), 2), dtype=np.int32)\n points[:, 1] = (ys * img_h).astype(int)\n points[:, 0] = (np.polyval(lane, ys) * img_w).astype(int)\n points = points[(points[:, 0] > 0) & (points[:, 0] < img_w)]\n\n # draw lane with a polyline on the overlay\n for current_point, next_point in zip(points[:-1], points[1:]):\n overlay = cv2.line(overlay, tuple(current_point), tuple(next_point), color=color, thickness=2)\n\n # draw class icon\n if cls_pred is not None and len(points) > 0:\n class_icon = self.dataset.get_class_icon(cls_pred[i])\n class_icon = cv2.resize(class_icon, (32, 32))\n mid = tuple(points[len(points) // 2] - 60)\n x, y = mid\n\n img[y:y + class_icon.shape[0], x:x + class_icon.shape[1]] = class_icon\n\n # draw lane ID\n if len(points) > 0:\n cv2.putText(img, str(i), tuple(points[0]), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=1, color=color)\n\n # draw lane accuracy\n if len(points) > 0:\n cv2.putText(img,\n '{:.2f}'.format(accs[i] * 100),\n tuple(points[len(points) // 2] - 30),\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=.75,\n color=color)\n # Add lanes overlay\n w = 0.6\n img = ((1. 
- w) * img + w * overlay).astype(np.uint8)\n\n return img\n\n def lane_to_linestrings(self, lanes):\n lines = []\n for lane in lanes:\n lines.append(LineString(lane))\n\n return lines\n\n def linestrings_to_lanes(self, lines):\n lanes = []\n for line in lines:\n lanes.append(line.coords)\n\n return lanes\n\n def __getitem__(self, idx, transform=True):\n item = self.dataset[idx]\n img = cv2.imread(item['path'])\n label = item['label']\n if transform:\n line_strings = self.lane_to_linestrings(item['old_anno']['lanes'])\n line_strings = LineStringsOnImage(line_strings, shape=img.shape)\n img, line_strings = self.transform(image=img, line_strings=line_strings)\n line_strings.clip_out_of_image_()\n new_anno = {'path': item['path'], 'lanes': self.linestrings_to_lanes(line_strings)}\n new_anno['categories'] = item['categories']\n label = self.transform_annotation(new_anno, img_wh=(self.img_w, self.img_h))['label']\n\n img = img / 255.\n if self.normalize:\n img = (img - IMAGENET_MEAN) / IMAGENET_STD\n img = self.to_tensor(img.astype(np.float32))\n return (img, label, idx)\n\n def __len__(self):\n return len(self.dataset)\n\n\ndef main():\n import torch\n from lib.config import Config\n np.random.seed(0)\n torch.manual_seed(0)\n cfg = Config('config.yaml')\n train_dataset = cfg.get_dataset('train')\n for idx in range(len(train_dataset)):\n img = train_dataset.draw_annotation(idx)\n cv2.imshow('sample', img)\n cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linspace", "numpy.random.seed", "torch.manual_seed", "numpy.ones", "numpy.array", "numpy.polyval" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
noamkatzir/palm-hand-reading
[ "1a405759c03218fc74d661805bced8e4f4a92e74" ]
[ "tests/test5.py" ]
[ "__author__ = 'noam'\n\nimport os\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef sortObjects(contours):\n areas = []\n for i in xrange(len(contours)):\n M = cv2.moments(contours[i])\n centroid_x = int(M['m10']/M['m00'])\n centroid_y = int(M['m01']/M['m00'])\n # cv2.circle(lefthand, (centroid_x, centroid_y), 10, (255, 0, 0),-1)\n\n areas.append([cv2.contourArea(contours[i]), i, centroid_x, centroid_y])\n\n arr= np.array(areas)\n return arr[arr[:, 0].argsort()[::-1]]\n\nimagesPath = '../images/preprocessed/'\n# lefthand = cv2.imread(imagesPath+'noam_left_hand_30.5.15_02062015_0001.png')\nlefthand = cv2.imread(imagesPath+'noam_left_hand_6.12.08_02062015.png')\n\nlefthand_imgray = cv2.cvtColor(lefthand,cv2.COLOR_BGR2GRAY)\n\nsmall = cv2.resize(lefthand_imgray, (0, 0), fx=0.5**6, fy=0.5**6)\n\nsmall = cv2.resize(small, (lefthand_imgray.shape[1], lefthand.shape[0]),interpolation=cv2.INTER_CUBIC)\nret, threshold1 = cv2.threshold(small,245,255,cv2.THRESH_BINARY_INV)\ncontours, hierarchy = cv2.findContours(threshold1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\nmask = np.zeros(threshold1.shape,np.uint8)\n\n\n\nfor i in xrange(len(contours)):\n area = cv2.contourArea(contours[i])\n if(area > 70000): #in origin it was 300000, but it start ignoring fingers\n cv2.drawContours(mask, contours,i,255,-1)\n\nret, threshold2 = cv2.threshold(mask, 0, 256, cv2.THRESH_BINARY)\ncontours2, hierarchy2 = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\nmask = mask.astype('uint8')\ncv2.drawContours(lefthand,contours2,-1,(0,255,0),3)\n\nobjects = sortObjects(contours2)\n\ncenter = np.array([objects[0][2], objects[0][3]])\nfor i in xrange(1,len(objects)):\n dist = np.linalg.norm(np.array([objects[i][2], objects[i][3]]) - center)\n if dist > 2500:\n continue\n\n cv2.line(lefthand, (int(objects[0][2]), int(objects[0][3])), (int(objects[i][2]), int(objects[i][3])), (0,255,0),5)\n\n cv2.putText(lefthand,'{}'.format(dist),(int(objects[i][2]), int(objects[i][3])), cv2.FONT_HERSHEY_SIMPLEX, 4, 1,20)\n print('distance between center and {} is {}'.format(i, dist))\n\nplt.figure(1)\nplt.imshow(mask, cmap='gray')\nplt.figure(2)\nplt.imshow(lefthand, cmap='gray')\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wuhuzi/tensor2robot
[ "4b8d40244e2618dd5f46bfed6698d5ed812847cc" ]
[ "bin/run_t2r_trainer.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Tensor2Robot Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as python3\n\"\"\"Binary for training TFModels with Estimator API.\"\"\"\n\nfrom absl import app\nfrom absl import flags\nimport gin\nfrom tensor2robot.utils import train_eval\nimport tensorflow.compat.v1 as tf\n\n\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n gin.parse_config_files_and_bindings(FLAGS.gin_configs, FLAGS.gin_bindings)\n train_eval.train_eval_model()\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n" ]
[ [ "tensorflow.compat.v1.logging.set_verbosity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simonvh/gimmemotifs
[ "e40ab914a93210864c358b39ae677ac0792a80f2" ]
[ "test/test_rank.py" ]
[ "import unittest\nimport tempfile\nimport os\nimport pandas as pd\nfrom gimmemotifs.rank import rankagg, _rankagg_stuart\n\n\nclass TestRank(unittest.TestCase):\n \"\"\"A test class to test rank aggregation\"\"\"\n\n def setUp(self):\n self.data_dir = \"test/data/rank\"\n self.fname = os.path.join(self.data_dir, \"ranked.txt\")\n self.rank_in = os.path.join(self.data_dir, \"rank_input.txt\")\n self.rank_out = os.path.join(self.data_dir, \"rank_output.txt\")\n\n def test1_rankagg(self):\n \"\"\"Test rank aggregation\"\"\"\n df = pd.read_csv(self.fname, index_col=0, sep=\"\\t\")\n result = rankagg(df, method=\"stuart\")\n self.assertEqual(\"AP2\", result.sort_values(\"score\").index[-1])\n result = rankagg(df, method=\"int_stouffer\")\n self.assertEqual(\"AP2\", result.sort_values(\"z-score\").index[-1])\n\n def test2_rankagg(self):\n \"\"\"Test Python implementation of rank aggregation\"\"\"\n df = pd.read_csv(self.rank_in, index_col=0, sep=\"\\t\")\n result = _rankagg_stuart(df)[\"score\"].values\n ref = pd.read_csv(self.rank_out, index_col=0, sep=\"\\t\")[\"score\"].values\n for v1, v2 in zip(ref, result):\n self.assertAlmostEqual(v1, v2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
justaddcoffee/dipper
[ "085c1601ae5b88848ddcdad0d52387e8f450b1be" ]
[ "dipper/sources/SGD.py" ]
[ "import logging\nimport pandas as pd\n\nfrom dipper.sources.Source import Source\nfrom dipper.models.assoc.Association import Assoc\nfrom dipper.models.Model import Model\nfrom dipper.models.Reference import Reference\nfrom ontobio.ontol_factory import OntologyFactory\n\n\n__author__ = 'timputman'\n\nLOG = logging.getLogger(__name__)\n\n\nclass SGD(Source):\n \"\"\"\n Ingest of Saccharomyces Genome Database (SGD) phenotype associations\n\n \"\"\"\n SGD_BASE = 'https://downloads.yeastgenome.org/curation/literature/'\n files = {\n 'sgd_phenotype': {\n 'file': 'phenotype_data.tab',\n 'url': SGD_BASE + 'phenotype_data.tab',\n 'columns': [\n 'Feature Name',\n 'Feature Type',\n 'Gene Name',\n 'SGDID',\n 'Reference',\n 'Experiment Type',\n 'Mutant Type',\n 'Allele',\n 'Strain Background',\n 'Phenotype',\n 'Chemical',\n 'Condition',\n 'Details',\n 'Reporter'\n ]\n\n },\n }\n\n def __init__(self,\n graph_type,\n are_bnodes_skolemized,\n data_release_version=None):\n super().__init__(\n graph_type=graph_type,\n are_bnodes_skized=are_bnodes_skolemized,\n data_release_version=data_release_version,\n name='sgd',\n ingest_title='Saccharomyces Genome Database',\n ingest_url='https://www.yeastgenome.org/',\n ingest_logo='source-sgd.png',\n license_url='https://sites.google.com/view/yeastgenome-help/about',\n data_rights=None,\n file_handle=None\n )\n\n self.apo_term_id = SGD.make_apo_map()\n\n def fetch(self, is_dl_forced=False):\n \"\"\"\n Override Source.fetch()\n Fetches resources from yeast_genome_database\n using the yeast_genome_doenload site.\n\n\n Args:\n :param is_dl_forced (bool): Force download\n Returns:\n :return None\n \"\"\"\n self.get_files(is_dl_forced)\n return\n\n def parse(self, limit=None):\n \"\"\"\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n \"\"\"\n if limit is not None:\n LOG.info(\"Only parsing first %d rows\", limit)\n\n sgd_file = '/'.join((self.rawdir, self.files['sgd_phenotype']['file']))\n columns = [\n 'Feature Name', 'Feature Type', 'Gene Name', 'SGDID', 'Reference',\n 'Experiment Type', 'Mutant Type', 'Allele', 'Strain Background',\n 'Phenotype', 'Chemical', 'Condition', 'Details', 'Reporter']\n sgd_df = pd.read_csv(sgd_file, sep='\\t', names=columns)\n records = sgd_df.to_dict(orient='records')\n for index, assoc in enumerate(records):\n if isinstance(assoc['Gene Name'], str):\n if limit is not None and index > limit:\n break\n self.make_association(assoc)\n\n return\n\n def make_association(self, record):\n \"\"\"\n contstruct the association\n :param record:\n :return: modeled association of genotype to mammalian??? 
phenotype\n \"\"\"\n # prep record\n # remove description and mapp Experiment Type to apo term\n experiment_type = record['Experiment Type'].split('(')[0]\n experiment_type = experiment_type.split(',')\n record['experiment_type'] = list()\n for exp_type in experiment_type:\n exp_type = exp_type.lstrip().rstrip()\n record['experiment_type'].append(\n {\n 'id': self.apo_term_id[exp_type],\n 'term': exp_type,\n })\n sgd_phenotype = record['Phenotype']\n pheno_obj = {\n 'entity': {\n 'term': None,\n 'apo_id': None\n },\n 'quality': {\n 'term': None,\n 'apo_id': None\n },\n 'has_quality': False # descriptive and don't bother looking for a quality\n }\n phenotype = record['Phenotype']\n if ':' in phenotype:\n pheno_obj['has_quality'] = True\n ent_qual = sgd_phenotype.split(': ')\n entity = ent_qual[0]\n quality = ent_qual[1]\n pheno_obj['entity']['term'] = entity\n pheno_obj['entity']['apo_id'] = self.apo_term_id[entity]\n pheno_obj['quality']['term'] = quality\n pheno_obj['quality']['apo_id'] = self.apo_term_id[quality]\n else:\n pheno_obj['entity']['term'] = phenotype\n pheno_obj['entity']['apo_id'] = self.apo_term_id[phenotype]\n record['pheno_obj'] = pheno_obj\n\n # begin modeling\n model = Model(self.graph)\n\n # define the triple\n gene = 'SGD:{}'.format(record['SGDID'])\n relation = self.globaltt['has phenotype']\n\n if record['pheno_obj']['has_quality']:\n pheno_label = '{0}:{1}'.format(\n record['pheno_obj']['entity']['term'],\n record['pheno_obj']['quality']['term'])\n pheno_id = 'MONARCH:{0}{1}'.format(\n record['pheno_obj']['entity']['apo_id'].replace(':', '_'),\n record['pheno_obj']['quality']['apo_id'].replace(':', '_')\n )\n g2p_assoc = Assoc(\n self.graph, self.name, sub=gene, obj=pheno_id, pred=relation)\n else:\n pheno_label = record['pheno_obj']['entity']['term']\n pheno_id = record['pheno_obj']['entity']['apo_id']\n g2p_assoc = Assoc(\n self.graph, self.name, sub=gene, obj=pheno_id, pred=relation)\n assoc_id = g2p_assoc.make_association_id(\n 'yeastgenome.org', gene, relation, pheno_id)\n g2p_assoc.set_association_id(assoc_id=assoc_id)\n\n # add to graph to mint assoc id\n g2p_assoc.add_association_to_graph()\n\n model.addLabel(subject_id=gene, label=record['Gene Name'])\n\n # add the association triple\n model.addTriple(subject_id=gene, predicate_id=relation, obj=pheno_id)\n\n model.addTriple(\n subject_id=pheno_id,\n predicate_id=self.globaltt['subclass_of'],\n obj=self.globaltt['phenotype'])\n\n # label nodes\n # pheno label\n\n model.addLabel(subject_id=pheno_id, label=pheno_label)\n\n g2p_assoc.description = self._make_description(record)\n\n # add the references\n references = record['Reference']\n references = references.replace(' ', '')\n references = references.split('|')\n\n # created Ref prefix in curie map to route to proper reference URL in SGD\n if len(references) > 0:\n # make first ref in list the source\n g2p_assoc.add_source(identifier=references[0])\n ref_model = Reference(\n self.graph, references[0],\n self.globaltt['publication']\n )\n ref_model.addRefToGraph()\n\n if len(references) > 1:\n # create equivalent source for any other refs in list\n for ref in references[1:]:\n model.addSameIndividual(sub=references[0], obj=ref)\n\n # add experiment type as evidence\n for exp_type in record['experiment_type']:\n g2p_assoc.add_evidence(exp_type['id'])\n model.addLabel(subject_id=exp_type['id'], label=exp_type['term'])\n\n try:\n g2p_assoc.add_association_to_graph()\n except Exception as e:\n print(e)\n return\n\n @staticmethod\n def make_apo_map():\n # load 
apo for term mapping\n ofactory = OntologyFactory()\n apo_ont = ofactory.create(\"apo\")\n apo_nodes = apo_ont.nodes()\n # dict schema { 'term': 'apo_id' }\n apo_term_id = dict()\n for node in apo_nodes:\n label = apo_ont.label(node)\n apo_term_id[label] = node\n return apo_term_id\n\n @staticmethod\n def _make_description(record):\n return \" | \".join([\n 'genomic_background: {}'.format(record['Strain Background']),\n 'allele: {}'.format(record['Allele']),\n 'chemical: {}'.format(record['Chemical']),\n 'condition: {}'.format(record['Condition']),\n 'details: {}'.format(record['Details']),\n 'feature_name: {}'.format(record['Feature Name']),\n 'gene_name: {}'.format(record['Gene Name']),\n 'mutant_type: {}'.format(record['Mutant Type']),\n 'reporter: {}'.format(record['Reporter']),\n ]).strip()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
terryli710/COVID_19_Rapid_Triage_Risk_Predictor
[ "ccf737806d914f390d21b441d18688630b4fa6f9" ]
[ "radiomics_patch/imageoperations.py" ]
[ "from __future__ import print_function\n\nimport logging\n\nimport numpy\nimport pywt\nimport SimpleITK as sitk\nimport six\nfrom six.moves import range\n\nlogger = logging.getLogger(__name__)\n\n\ndef getMask(mask, **kwargs):\n \"\"\"\n Function to get the correct mask. Includes enforcing a correct pixel data type (UInt32).\n\n Also supports extracting the mask for a segmentation (stored as SimpleITK Vector image) if necessary.\n In this case, the mask at index ``label_channel`` is extracted. The resulting 3D volume is then treated as it were a\n scalar input volume (i.e. with the region of interest defined by voxels with value matching ``label``).\n\n Finally, checks if the mask volume contains an ROI identified by ``label``. Raises a value error if the label is not\n present (including a list of valid labels found).\n\n :param mask: SimpleITK Image object representing the mask. Can be a vector image to allow for overlapping masks.\n :param kwargs: keyword arguments. If argument ``label_channel`` is present, this is used to select the channel.\n Otherwise label_channel ``0`` is assumed.\n :return: SimpleITK.Image with pixel type UInt32 representing the mask volume\n \"\"\"\n global logger\n label = kwargs.get('label', 1)\n label_channel = kwargs.get('label_channel', 0)\n if 'vector' in mask.GetPixelIDTypeAsString().lower():\n logger.debug('Mask appears to be a segmentation object (=stored as vector image).')\n n_components = mask.GetNumberOfComponentsPerPixel()\n assert label_channel < n_components, \\\n \"Mask %i requested, but segmentation object only contains %i objects\" % (label_channel, n_components)\n\n logger.info('Extracting mask at index %i', label_channel)\n selector = sitk.VectorIndexSelectionCastImageFilter()\n selector.SetIndex(label_channel)\n mask = selector.Execute(mask)\n\n logger.debug('Force casting mask to UInt32 to ensure correct datatype.')\n mask = sitk.Cast(mask, sitk.sitkUInt32)\n\n labels = numpy.unique(sitk.GetArrayFromImage(mask))\n if len(labels) == 1:\n raise ValueError('No labels found in this mask (i.e. nothing is segmented)!')\n if label not in labels:\n raise ValueError('Label (%g) not present in mask. Choose from %s' % (label, labels[labels != 0]))\n\n return mask\n\n\ndef getBinEdges(parameterValues, **kwargs):\n r\"\"\"\n Calculate and return the histogram using parameterValues (1D array of all segmented voxels in the image).\n\n **Fixed bin width:**\n\n Returns the bin edges, a list of the edges of the calculated bins, length is N(bins) + 1. Bins are defined such, that\n the bin edges are equally spaced from zero, and that the leftmost edge :math:`\\leq \\min(X_{gl})`. These bin edges\n represent the half-open ranges of each bin :math:`[\\text{lower_edge}, \\text{upper_edge})` and result in gray value\n discretization as follows:\n\n .. math::\n X_{b, i} = \\lfloor \\frac{X_{gl, i}}{W} \\rfloor - \\lfloor \\frac {\\min(X_{gl})}{W} \\rfloor + 1\n\n Here, :math:`X_{gl, i}` and :math:`X_{b, i}` are gray level intensities before and after discretization, respectively.\n :math:`{W}` is the bin width value (specfied in ``binWidth`` parameter). 
The first part of the formula ensures that\n the bins are equally spaced from 0, whereas the second part ensures that the minimum gray level intensity inside the\n ROI after binning is always 1.\n\n In the case where the maximum gray level intensity is equally dividable by the binWidth, i.e.\n :math:`\\max(X_{gl}) \\mod W = 0`, this will result in that maximum gray level being assigned to bin\n :math:`[\\max(X_{gl}), \\max(X_{gl}) + W)`, which is consistent with numpy.digitize, but different from the behaviour\n of numpy.histogram, where the final bin has a closed range, including the maximum gray level, i.e.\n :math:`[\\max(X_{gl}) - W, \\max(X_{gl})]`.\n\n .. note::\n This method is slightly different from the fixed bin size discretization method described by IBSI. The two most\n notable differences are 1) that PyRadiomics uses a floor division (and adds 1), as opposed to a ceiling division and\n 2) that in PyRadiomics, bins are always equally spaced from 0, as opposed to equally spaced from the minimum\n gray level intensity.\n\n *Example: for a ROI with values ranging from 54 to 166, and a bin width of 25, the bin edges will be [50, 75, 100,\n 125, 150, 175].*\n\n This value can be directly passed to ``numpy.histogram`` to generate a histogram or ``numpy.digitize`` to discretize\n the ROI gray values. See also :py:func:`binImage()`.\n\n **Fixed bin Count:**\n\n .. math::\n X_{b, i} = \\left\\{ {\\begin{array}{lcl}\n \\lfloor N_b\\frac{(X_{gl, i} - \\min(X_{gl})}{\\max(X_{gl}) - \\min(X_{gl})} \\rfloor + 1 &\n \\mbox{for} & X_{gl, i} < \\max(X_{gl}) \\\\\n N_b & \\mbox{for} & X_{gl, i} = \\max(X_{gl}) \\end{array}} \\right.\n\n Here, :math:`N_b` is the number of bins to use, as defined in ``binCount``.\n\n References\n\n - Leijenaar RTH, Nalbantov G, Carvalho S, et al. The effect of SUV discretization in quantitative FDG-PET Radiomics:\n the need for standardized methodology in tumor texture analysis. Sci Rep. 2015;5(August):11075.\n \"\"\"\n global logger\n binWidth = kwargs.get('binWidth', 25)\n binCount = kwargs.get('binCount')\n\n if binCount is not None:\n binEdges = numpy.histogram(parameterValues, binCount)[1]\n binEdges[-1] += 1 # Ensures that the maximum value is included in the topmost bin when using numpy.digitize\n else:\n minimum = min(parameterValues)\n maximum = max(parameterValues)\n\n # Start binning form the first value lesser than or equal to the minimum value and evenly dividable by binwidth\n lowBound = minimum - (minimum % binWidth)\n # Add + 2* binwidth to ensure the maximum value is included in the range generated by numpy.arange, and that values\n # equal to highbound are binned into a separate bin by numpy.histogram (This ensures ALL bins are half open, as\n # numpy.histogram treats the last bin as a closed interval. Moreover, this ensures consistency with numpy.digitize,\n # which will assign len(bins) + 1 to values equal to rightmost bin edge, treating all bins as half-open)\n highBound = maximum + 2 * binWidth\n\n binEdges = numpy.arange(lowBound, highBound, binWidth)\n\n # if min(parameterValues) % binWidth = 0 and min(parameterValues) = max(parameterValues), binEdges will only contain\n # 1 value. If this is the case (flat region) ensure that numpy.histogram creates 1 bin (requires 2 edges). 
For\n # numpy.histogram, a binCount (1) would also suffice, however, this is not accepted by numpy.digitize, which also uses\n # binEdges calculated by this function.\n if len(binEdges) == 1: # Flat region, ensure that there is 1 bin\n binEdges = [binEdges[0] - .5, binEdges[0] + .5] # Simulates binEdges returned by numpy.histogram if bins = 1\n\n logger.debug('Calculated %d bins for bin width %g with edges: %s)', len(binEdges) - 1, binWidth, binEdges)\n\n return binEdges # numpy.histogram(parameterValues, bins=binedges)\n\n\ndef binImage(parameterMatrix, parameterMatrixCoordinates=None, **kwargs):\n r\"\"\"\n Discretizes the parameterMatrix (matrix representation of the gray levels in the ROI) using the binEdges calculated\n using :py:func:`getBinEdges`. Only voxels defined by parameterMatrixCoordinates (defining the segmentation) are used\n for calculation of histogram and subsequently discretized. Voxels outside segmentation are left unchanged.\n \"\"\"\n global logger\n logger.debug('Discretizing gray levels inside ROI')\n\n discretizedParameterMatrix = numpy.zeros(parameterMatrix.shape, dtype='int')\n if parameterMatrixCoordinates is None:\n binEdges = getBinEdges(parameterMatrix.flatten(), **kwargs)\n discretizedParameterMatrix = numpy.digitize(parameterMatrix, binEdges)\n else:\n binEdges = getBinEdges(parameterMatrix[parameterMatrixCoordinates], **kwargs)\n discretizedParameterMatrix[parameterMatrixCoordinates] = numpy.digitize(parameterMatrix[parameterMatrixCoordinates], binEdges)\n\n return discretizedParameterMatrix.astype('int'), binEdges\n\n\ndef checkMask(imageNode, maskNode, **kwargs):\n \"\"\"\n Checks whether the Region of Interest (ROI) defined in the mask size and dimensions match constraints, specified in\n settings. The following checks are performed.\n\n 1. Check whether the mask corresponds to the image (i.e. has a similar size, spacing, direction and origin). **N.B.\n This check is performed by SimpleITK, if it fails, an error is logged, with additional error information from\n SimpleITK logged with level DEBUG (i.e. logging-level has to be set to debug to store this information in the log\n file).** The tolerance can be increased using the ``geometryTolerance`` parameter. Alternatively, if the\n ``correctMask`` parameter is ``True``, PyRadiomics will check if the mask contains a valid ROI (inside image\n physical area) and if so, resample the mask to image geometry. See :ref:`radiomics-settings-label` for more info.\n\n 2. Check if the label is present in the mask\n 3. Count the number of dimensions in which the size of the ROI > 1 (i.e. does the ROI represent a single voxel (0), a\n line (1), a surface (2) or a volume (3)) and compare this to the minimum number of dimension required (specified in\n ``minimumROIDimensions``).\n 4. Optional. Check if there are at least N voxels in the ROI. N is defined in ``minimumROISize``, this test is skipped\n if ``minimumROISize = None``.\n\n This function returns a tuple of two items. The first item is the bounding box of the mask. The second item is the\n mask that has been corrected by resampling to the input image geometry (if that resampling was successful).\n\n If a check fails, a ValueError is raised. No features will be extracted for this mask.\n If the mask passes all tests, this function returns the bounding box, which is used in the :py:func:`cropToTumorMask`\n function.\n\n The bounding box is calculated during (1.) and used for the subsequent checks. 
The bounding box is\n calculated by SimpleITK.LabelStatisticsImageFilter() and returned as a tuple of indices: (L_x, U_x, L_y, U_y, L_z,\n U_z), where 'L' and 'U' are lower and upper bound, respectively, and 'x', 'y' and 'z' the three image dimensions.\n\n By reusing the bounding box calculated here, calls to SimpleITK.LabelStatisticsImageFilter() are reduced, improving\n performance.\n\n Uses the following settings:\n\n - minimumROIDimensions [1]: Integer, range 1-3, specifies the minimum dimensions (1D, 2D or 3D, respectively).\n Single-voxel segmentations are always excluded.\n - minimumROISize [None]: Integer, > 0, specifies the minimum number of voxels required. Test is skipped if\n this parameter is set to None.\n\n .. note::\n\n If the first check fails there are generally 2 possible causes:\n\n 1. The image and mask are matched, but there is a slight difference in origin, direction or spacing. The exact\n cause, difference and used tolerance are stored with level DEBUG in a log (if enabled). For more information on\n setting up logging, see \":ref:`setting up logging <radiomics-logging-label>`\" and the helloRadiomics examples\n (located in the ``pyradiomics/examples`` folder). This problem can be fixed by changing the global tolerance\n (``geometryTolerance`` parameter) or enabling mask correction (``correctMask`` parameter).\n 2. The image and mask do not match, but the ROI contained within the mask does represent a physical volume\n contained within the image. If this is the case, resampling is needed to ensure matching geometry between image\n and mask before features can be extracted. This can be achieved by enabling mask correction using the\n ``correctMask`` parameter.\n \"\"\"\n global logger\n\n correctedMask = None\n\n label = kwargs.get('label', 1)\n minDims = kwargs.get('minimumROIDimensions', 2)\n minSize = kwargs.get('minimumROISize', None)\n\n logger.debug('Checking mask with label %d', label)\n logger.debug('Calculating bounding box')\n # Determine bounds\n lsif = sitk.LabelStatisticsImageFilter()\n try:\n lsif.Execute(imageNode, maskNode)\n\n # If lsif fails, and mask is corrected, it includes a check whether the label is present. Therefore, perform\n # this test here only if lsif does not fail on the first attempt.\n if label not in lsif.GetLabels():\n raise ValueError('Label (%g) not present in mask' % label)\n except RuntimeError as e:\n # If correctMask = True, try to resample the mask to the image geometry, otherwise return None (\"fail\")\n if not kwargs.get('correctMask', False):\n if \"Both images for LabelStatisticsImageFilter don't match type or dimension!\" in e.args[0]:\n logger.debug('Additional information on error.', exc_info=True)\n raise ValueError('Image/Mask datatype or size mismatch. Potential fix: enable correctMask, see '\n 'Documentation:Usage:Customizing the Extraction:Settings:correctMask for more information')\n elif \"Inputs do not occupy the same physical space!\" in e.args[0]:\n logger.debug('Additional information on error.', exc_info=True)\n raise ValueError('Image/Mask geometry mismatch. 
Potential fix: increase tolerance using geometryTolerance, '\n 'see Documentation:Usage:Customizing the Extraction:Settings:geometryTolerance for more '\n 'information')\n else:\n raise e # unhandled error\n\n logger.warning('Image/Mask geometry mismatch, attempting to correct Mask')\n\n correctedMask = _correctMask(imageNode, maskNode, **kwargs) # Raises Value error if ROI outside image physical space\n\n # Resampling successful, try to calculate boundingbox\n try:\n lsif.Execute(imageNode, correctedMask)\n except RuntimeError:\n logger.debug('Bounding box calculation with resampled mask failed', exc_info=True)\n raise ValueError('Calculation of bounding box failed, for more information run with DEBUG logging and check log')\n\n # LBound and UBound of the bounding box, as (L_X, U_X, L_Y, U_Y, L_Z, U_Z)\n boundingBox = numpy.array(lsif.GetBoundingBox(label))\n\n logger.debug('Checking minimum number of dimensions requirements (%d)', minDims)\n ndims = numpy.sum((boundingBox[1::2] - boundingBox[0::2] + 1) > 1) # UBound - LBound + 1 = Size\n if ndims == 0:\n raise ValueError('mask only contains 1 segmented voxel! Cannot extract features for a single voxel.')\n elif ndims < minDims:\n raise ValueError('mask has too few dimensions (number of dimensions %d, minimum required %d)' % (ndims, minDims))\n\n if minSize is not None:\n logger.debug('Checking minimum size requirements (minimum size: %d)', minSize)\n roiSize = lsif.GetCount(label)\n if roiSize <= minSize:\n raise ValueError('Size of the ROI is too small (minimum size: %g, ROI size: %g' % (minSize, roiSize))\n\n return boundingBox, correctedMask\n\n\ndef _correctMask(imageNode, maskNode, **kwargs):\n \"\"\"\n If the mask geometry does not match the image geometry, this function can be used to resample the mask to the image\n physical space.\n\n First, the mask is checked for a valid ROI (i.e. maskNode contains an ROI with the given label value, which does not\n include areas outside of the physical image bounds).\n\n If the ROI is valid, the maskNode is resampled using the imageNode as a reference image and a nearest neighbor\n interpolation.\n\n If the ROI is valid, the resampled mask is returned, otherwise ``None`` is returned.\n \"\"\"\n global logger\n logger.debug('Resampling mask to image geometry')\n\n _checkROI(imageNode, maskNode, **kwargs) # Raises a value error if ROI is invalid\n\n rif = sitk.ResampleImageFilter()\n rif.SetReferenceImage(imageNode)\n rif.SetInterpolator(sitk.sitkNearestNeighbor)\n\n logger.debug('Resampling...')\n\n return rif.Execute(maskNode)\n\n\ndef _checkROI(imageNode, maskNode, **kwargs):\n \"\"\"\n Check whether maskNode contains a valid ROI defined by label:\n\n 1. Check whether the label value is present in the maskNode.\n 2. Check whether the ROI defined by the label does not include an area outside the physical area of the image.\n\n For the second check, a tolerance of 1e-3 is allowed.\n\n If the ROI is valid, the bounding box (lower bounds, followd by size in all dimensions (X, Y, Z ordered)) is\n returned. 
Otherwise, a ValueError is raised.\n \"\"\"\n global logger\n label = kwargs.get('label', 1)\n\n logger.debug('Checking ROI validity')\n\n # Determine bounds of cropped volume in terms of original Index coordinate space\n lssif = sitk.LabelShapeStatisticsImageFilter()\n lssif.Execute(maskNode)\n\n logger.debug('Checking if label %d is persent in the mask', label)\n if label not in lssif.GetLabels():\n raise ValueError('Label (%d) not present in mask', label)\n\n # LBound and size of the bounding box, as (L_X, L_Y, [L_Z], S_X, S_Y, [S_Z])\n bb = numpy.array(lssif.GetBoundingBox(label))\n Nd = maskNode.GetDimension()\n\n # Determine if the ROI is within the physical space of the image\n\n logger.debug('Comparing physical space of bounding box to physical space of image')\n # Step 1: Get the origin and UBound corners of the bounding box in physical space\n # The additional 0.5 represents the difference between the voxel center and the voxel corner\n # Upper bound index of ROI = bb[:Nd] + bb[Nd:] - 1 (LBound + Size - 1), .5 is added to get corner\n ROIBounds = (maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] - .5), # Origin\n maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] + bb[Nd:] - 0.5)) # UBound\n # Step 2: Translate the ROI physical bounds to the image coordinate space\n ROIBounds = (imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[0]), # Origin\n imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[1]))\n\n logger.debug('ROI bounds (image coordinate space): %s', ROIBounds)\n\n # Check if any of the ROI bounds are outside the image indices (i.e. -0.5 < ROI < Im.Size -0.5)\n # The additional 0.5 is to allow for different spacings (defines the edges, not the centers of the edge-voxels\n tolerance = 1e-3 # Define a tolerance to correct for machine precision errors\n if numpy.any(numpy.min(ROIBounds, axis=0) < (- .5 - tolerance)) or \\\n numpy.any(numpy.max(ROIBounds, axis=0) > (numpy.array(imageNode.GetSize()) - .5 + tolerance)):\n raise ValueError('Bounding box of ROI is larger than image space:\\n\\t'\n 'ROI bounds (x, y, z image coordinate space) %s\\n\\tImage Size %s' %\n (ROIBounds, imageNode.GetSize()))\n\n logger.debug('ROI valid, calculating resampling grid')\n\n return bb\n\n\ndef cropToTumorMask(imageNode, maskNode, boundingBox, **kwargs):\n \"\"\"\n Create a sitkImage of the segmented region of the image based on the input label.\n\n Create a sitkImage of the labelled region of the image, cropped to have a\n cuboid shape equal to the ijk boundaries of the label.\n\n :param boundingBox: The bounding box used to crop the image. 
This is the bounding box as returned by\n :py:func:`checkMask`.\n :param label: [1], value of the label, onto which the image and mask must be cropped.\n :return: Cropped image and mask (SimpleITK image instances).\n\n \"\"\"\n global logger\n padDistance = kwargs.get('padDistance', 0)\n\n size = numpy.array(maskNode.GetSize())\n\n ijkMinBounds = boundingBox[0::2] - padDistance\n ijkMaxBounds = size - boundingBox[1::2] - padDistance - 1\n\n # Ensure cropped area is not outside original image bounds\n ijkMinBounds = numpy.maximum(ijkMinBounds, 0)\n ijkMaxBounds = numpy.maximum(ijkMaxBounds, 0)\n\n # Crop Image\n logger.debug('Cropping to size %s', (boundingBox[1::2] - boundingBox[0::2]) + 1)\n cif = sitk.CropImageFilter()\n try:\n cif.SetLowerBoundaryCropSize(ijkMinBounds)\n cif.SetUpperBoundaryCropSize(ijkMaxBounds)\n except TypeError:\n # newer versions of SITK/python want a tuple or list\n cif.SetLowerBoundaryCropSize(ijkMinBounds.tolist())\n cif.SetUpperBoundaryCropSize(ijkMaxBounds.tolist())\n croppedImageNode = cif.Execute(imageNode)\n croppedMaskNode = cif.Execute(maskNode)\n\n return croppedImageNode, croppedMaskNode\n\n\ndef resampleImage(imageNode, maskNode, **kwargs):\n \"\"\"\n Resamples image and mask to the specified pixel spacing (The default interpolator is Bspline).\n\n Resampling can be enabled using the settings 'interpolator' and 'resampledPixelSpacing' in the parameter file or as\n part of the settings passed to the feature extractor. See also\n :ref:`feature extractor <radiomics-featureextractor-label>`.\n\n 'imageNode' and 'maskNode' are SimpleITK Objects, and 'resampledPixelSpacing' is the output pixel spacing (sequence of\n 3 elements).\n\n If only in-plane resampling is required, set the output pixel spacing for the out-of-plane dimension (usually the last\n dimension) to 0. Spacings with a value of 0 are replaced by the spacing as it is in the original mask.\n\n Only part of the image and labelmap are resampled. The resampling grid is aligned to the input origin, but only voxels\n covering the area of the image ROI (defined by the bounding box) and the padDistance are resampled. This results in a\n resampled and partially cropped image and mask. Additional padding is required as some filters also sample voxels\n outside of segmentation boundaries. For feature calculation, image and mask are cropped to the bounding box without\n any additional padding, as the feature classes do not need the gray level values outside the segmentation.\n\n The resampling grid is calculated using only the input mask. Even when image and mask have different directions, both\n the cropped image and mask will have the same direction (equal to direction of the mask). Spacing and size are\n determined by settings and bounding box of the ROI.\n\n .. note::\n Before resampling the bounds of the non-padded ROI are compared to the bounds. If the ROI bounding box includes\n areas outside of the physical space of the image, an error is logged and (None, None) is returned. No features will\n be extracted. This enables the input image and mask to have different geometry, so long as the ROI defines an area\n within the image.\n\n .. note::\n The additional padding is adjusted, so that only the physical space within the mask is resampled. This is done to\n prevent resampling outside of the image. Please note that this assumes the image and mask to image the same physical\n space. 
If this is not the case, it is possible that voxels outside the image are included in the resampling grid,\n these will be assigned a value of 0. It is therefore recommended, but not enforced, to use an input mask which has\n the same or a smaller physical space than the image.\n \"\"\"\n global logger\n resampledPixelSpacing = kwargs['resampledPixelSpacing']\n interpolator = kwargs.get('interpolator', sitk.sitkBSpline)\n padDistance = kwargs.get('padDistance', 5)\n label = kwargs.get('label', 1)\n\n logger.debug('Resampling image and mask')\n\n if imageNode is None or maskNode is None:\n raise ValueError('Requires both image and mask to resample')\n\n maskSpacing = numpy.array(maskNode.GetSpacing())\n imageSpacing = numpy.array(imageNode.GetSpacing())\n\n Nd_resampled = len(resampledPixelSpacing)\n Nd_mask = len(maskSpacing)\n assert Nd_resampled == Nd_mask, \\\n 'Wrong dimensionality (%i-D) of resampledPixelSpacing!, %i-D required' % (Nd_resampled, Nd_mask)\n\n # If spacing for a direction is set to 0, use the original spacing (enables \"only in-slice\" resampling)\n logger.debug('Where resampled spacing is set to 0, set it to the original spacing (mask)')\n resampledPixelSpacing = numpy.array(resampledPixelSpacing)\n resampledPixelSpacing = numpy.where(resampledPixelSpacing == 0, maskSpacing, resampledPixelSpacing)\n\n # Check if the maskNode contains a valid ROI. If ROI is valid, the bounding box needed to calculate the resampling\n # grid is returned.\n bb = _checkROI(imageNode, maskNode, **kwargs)\n\n # Do not resample in those directions where labelmap spans only one slice.\n maskSize = numpy.array(maskNode.GetSize())\n resampledPixelSpacing = numpy.where(bb[Nd_mask:] != 1, resampledPixelSpacing, maskSpacing)\n\n # If current spacing is equal to resampledPixelSpacing, no interpolation is needed\n # Tolerance = 1e-5 + 1e-8*abs(resampledSpacing)\n logger.debug('Comparing resampled spacing to original spacing (image')\n if numpy.allclose(imageSpacing, resampledPixelSpacing):\n logger.info('New spacing equal to original image spacing, just resampling the mask')\n\n # Ensure that image and mask geometry match\n rif = sitk.ResampleImageFilter()\n rif.SetReferenceImage(imageNode)\n rif.SetInterpolator(sitk.sitkNearestNeighbor)\n maskNode = rif.Execute(maskNode)\n\n # re-calculate the bounding box of the mask\n lssif = sitk.LabelShapeStatisticsImageFilter()\n lssif.Execute(maskNode)\n bb = numpy.array(lssif.GetBoundingBox(label))\n\n low_up_bb = numpy.empty(Nd_mask * 2, dtype=int)\n low_up_bb[::2] = bb[:Nd_mask]\n low_up_bb[1::2] = bb[:Nd_mask] + bb[Nd_mask:] - 1\n return cropToTumorMask(imageNode, maskNode, low_up_bb, **kwargs)\n\n spacingRatio = maskSpacing / resampledPixelSpacing\n\n # Determine bounds of cropped volume in terms of new Index coordinate space,\n # round down for lowerbound and up for upperbound to ensure entire segmentation is captured (prevent data loss)\n # Pad with an extra .5 to prevent data loss in case of upsampling. For Ubound this is (-1 + 0.5 = -0.5)\n bbNewLBound = numpy.floor((bb[:Nd_mask] - 0.5) * spacingRatio - padDistance)\n bbNewUBound = numpy.ceil((bb[:Nd_mask] + bb[Nd_mask:] - 0.5) * spacingRatio + padDistance)\n\n # Ensure resampling is not performed outside bounds of original image\n maxUbound = numpy.ceil(maskSize * spacingRatio) - 1\n bbNewLBound = numpy.where(bbNewLBound < 0, 0, bbNewLBound)\n bbNewUBound = numpy.where(bbNewUBound > maxUbound, maxUbound, bbNewUBound)\n\n # Calculate the new size. 
Cast to int to prevent error in sitk.\n newSize = numpy.array(bbNewUBound - bbNewLBound + 1, dtype='int').tolist()\n\n # Determine continuous index of bbNewLBound in terms of the original Index coordinate space\n bbOriginalLBound = bbNewLBound / spacingRatio\n\n # Origin is located in center of first voxel, e.g. 1/2 of the spacing\n # from Corner, which corresponds to 0 in the original Index coordinate space.\n # The new spacing will be in 0 the new Index coordinate space. Here we use continuous\n # index to calculate where the new 0 of the new Index coordinate space (of the original volume\n # in terms of the original spacing, and add the minimum bounds of the cropped area to\n # get the new Index coordinate space of the cropped volume in terms of the original Index coordinate space.\n # Then use the ITK functionality to bring the continuous index into the physical space (mm)\n newOriginIndex = numpy.array(.5 * (resampledPixelSpacing - maskSpacing) / maskSpacing)\n newCroppedOriginIndex = newOriginIndex + bbOriginalLBound\n newOrigin = maskNode.TransformContinuousIndexToPhysicalPoint(newCroppedOriginIndex)\n\n imagePixelType = imageNode.GetPixelID()\n maskPixelType = maskNode.GetPixelID()\n\n direction = numpy.array(maskNode.GetDirection())\n\n logger.info('Applying resampling from spacing %s and size %s to spacing %s and size %s',\n maskSpacing, maskSize, resampledPixelSpacing, newSize)\n\n try:\n if isinstance(interpolator, six.string_types):\n interpolator = getattr(sitk, interpolator)\n except Exception:\n logger.warning('interpolator \"%s\" not recognized, using sitkBSpline', interpolator)\n interpolator = sitk.sitkBSpline\n\n rif = sitk.ResampleImageFilter()\n\n rif.SetOutputSpacing(resampledPixelSpacing)\n rif.SetOutputDirection(direction)\n rif.SetSize(newSize)\n rif.SetOutputOrigin(newOrigin)\n\n logger.debug('Resampling image')\n rif.SetOutputPixelType(imagePixelType)\n rif.SetInterpolator(interpolator)\n resampledImageNode = rif.Execute(imageNode)\n\n logger.debug('Resampling mask')\n rif.SetOutputPixelType(maskPixelType)\n rif.SetInterpolator(sitk.sitkNearestNeighbor)\n resampledMaskNode = rif.Execute(maskNode)\n\n return resampledImageNode, resampledMaskNode\n\n\ndef normalizeImage(image, **kwargs):\n r\"\"\"\n Normalizes the image by centering it at the mean with standard deviation. Normalization is based on all gray values in\n the image, not just those inside the segementation.\n\n :math:`f(x) = \\frac{s(x - \\mu_x)}{\\sigma_x}`\n\n Where:\n\n - :math:`x` and :math:`f(x)` are the original and normalized intensity, respectively.\n - :math:`\\mu_x` and :math:`\\sigma_x` are the mean and standard deviation of the image instensity values.\n - :math:`s` is an optional scaling defined by ``scale``. By default, it is set to 1.\n\n Optionally, outliers can be removed, in which case values for which :math:`x > \\mu_x + n\\sigma_x` or\n :math:`x < \\mu_x - n\\sigma_x` are set to :math:`\\mu_x + n\\sigma_x` and :math:`\\mu_x - n\\sigma_x`, respectively.\n Here, :math:`n>0` and defined by ``outliers``. 
This, in turn, is controlled by the ``removeOutliers`` parameter.\n Removal of outliers is done after the values of the image are normalized, but before ``scale`` is applied.\n \"\"\"\n global logger\n scale = kwargs.get('normalizeScale', 1)\n outliers = kwargs.get('removeOutliers')\n\n logger.debug('Normalizing image with scale %d', scale)\n image = sitk.Normalize(image)\n\n if outliers is not None:\n logger.debug('Removing outliers > %g standard deviations', outliers)\n imageArr = sitk.GetArrayFromImage(image)\n\n imageArr[imageArr > outliers] = outliers\n imageArr[imageArr < -outliers] = -outliers\n\n newImage = sitk.GetImageFromArray(imageArr)\n newImage.CopyInformation(image)\n image = newImage\n\n image *= scale\n\n return image\n\n\ndef resegmentMask(imageNode, maskNode, **kwargs):\n r\"\"\"\n Resegment the Mask based on the range specified by the threshold(s) in ``resegmentRange``. Either 1 or 2 thresholds\n can be defined. In case of 1 threshold, all values equal to or higher than that threshold are included. If there are\n 2 thresholds, all voxels with a value inside the closed-range defined by these thresholds is included\n (i.e. a voxels is included if :math:`T_{lower} \\leq X_gl \\leq T_{upper}`).\n The resegmented mask is therefore always equal or smaller in size than the original mask.\n In the case where either resegmentRange or resegmentMode contains illigal values, a ValueError is raised.\n\n There are 3 modes for defining the threshold:\n\n 1. absolute (default): The values in resegmentRange define as absolute values (i.e. corresponding to the gray values\n in the image\n 2. relative: The values in resegmentRange define the threshold as relative to the maximum value found in the ROI.\n (e.g. 0.5 indicates a threshold at 50% of maximum gray value)\n 3. sigma: The threshold is defined as the number of sigma from the mean. (e.g. resegmentRange [-3, 3] will include\n all voxels that have a value that differs 3 or less standard deviations from the mean).\n\n \"\"\"\n global logger\n resegmentRange = kwargs['resegmentRange']\n resegmentMode = kwargs.get('resegmentMode', 'absolute')\n label = kwargs.get('label', 1)\n\n if resegmentRange is None:\n raise ValueError('resegmentRange is None.')\n if len(resegmentRange) == 0 or len(resegmentRange) > 2:\n raise ValueError('Length %i is not allowed for resegmentRange' % len(resegmentRange))\n\n logger.debug('Resegmenting mask (range %s, mode %s)', resegmentRange, resegmentMode)\n\n im_arr = sitk.GetArrayFromImage(imageNode)\n ma_arr = (sitk.GetArrayFromImage(maskNode) == label) # boolean array\n\n oldSize = numpy.sum(ma_arr)\n\n if resegmentMode == 'absolute':\n logger.debug('Resegmenting in absolute mode')\n thresholds = sorted(resegmentRange)\n elif resegmentMode == 'relative':\n max_gl = numpy.max(im_arr[ma_arr])\n logger.debug('Resegmenting in relative mode, max %g', max_gl)\n thresholds = [max_gl * th for th in sorted(resegmentRange)]\n elif resegmentMode == 'sigma':\n mean_gl = numpy.mean(im_arr[ma_arr])\n sd_gl = numpy.std(im_arr[ma_arr])\n logger.debug('Resegmenting in sigma mode, mean %g, std %g', mean_gl, sd_gl)\n thresholds = [mean_gl + sd_gl * th for th in sorted(resegmentRange)]\n else:\n raise ValueError('Resegment mode %s not recognized.' 
% resegmentMode)\n\n # Apply lower threshold\n logger.debug('Applying lower threshold (%g)', thresholds[0])\n ma_arr[ma_arr] = im_arr[ma_arr] >= thresholds[0]\n\n # If 2 thresholds are defined, also apply an upper threshold\n if len(thresholds) == 2:\n logger.debug('Applying upper threshold (%g)', thresholds[1])\n ma_arr[ma_arr] = im_arr[ma_arr] <= thresholds[1]\n\n roiSize = numpy.sum(ma_arr)\n\n if roiSize <= 1:\n raise ValueError(\"Resegmentation excluded too many voxels with label %i (retained %i voxel(s))! \"\n \"Cannot extract features\" % (label, roiSize))\n\n # Transform the boolean array back to an image with the correct voxels set to the label value\n newMask_arr = numpy.zeros(ma_arr.shape, dtype='int')\n newMask_arr[ma_arr] = label\n\n newMask = sitk.GetImageFromArray(newMask_arr)\n newMask.CopyInformation(maskNode)\n logger.debug('Resegmentation complete, new size: %d voxels (excluded %d voxels)', roiSize, oldSize - roiSize)\n\n return newMask\n\n\ndef getOriginalImage(inputImage, inputMask, **kwargs):\n \"\"\"\n This function does not apply any filter, but returns the original image. This function is needed to\n dynamically expose the original image as a valid image type.\n\n :return: Yields original image, 'original' and ``kwargs``\n \"\"\"\n global logger\n logger.debug('Yielding original image')\n yield inputImage, 'original', kwargs\n\n\ndef getLoGImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Applies a Laplacian of Gaussian filter to the input image and yields a derived image for each sigma value specified.\n\n A Laplacian of Gaussian image is obtained by convolving the image with the second derivative (Laplacian) of a Gaussian\n kernel.\n\n The Gaussian kernel is used to smooth the image and is defined as\n\n .. math::\n\n G(x, y, z, \\sigma) = \\frac{1}{(\\sigma \\sqrt{2 \\pi})^3}e^{-\\frac{x^2 + y^2 + z^2}{2\\sigma^2}}\n\n The Gaussian kernel is convolved by the laplacian kernel :math:`\\nabla^2G(x, y, z)`, which is sensitive to areas with\n rapidly changing intensities, enhancing edges. The width of the filter in the Gaussian kernel is determined by\n :math:`\\sigma` and can be used to emphasize more fine (low :math:`\\sigma` values) or coarse (high :math:`\\sigma`\n values) textures.\n\n .. warning::\n\n The LoG filter implemented in PyRadiomics is a 3D LoG filter, and therefore requires 3D input. Features using a\n single slice (2D) segmentation can still be extracted, but the input image *must* be a 3D image, with a minimum size\n in all dimensions :math:`\\geq \\sigma`. If input image is too small, a warning is logged and :math:`\\sigma` value is\n skipped. Moreover, the image size *must* be at least 4 voxels in each dimensions, if this constraint is not met, no\n LoG derived images can be generated.\n\n Following settings are possible:\n\n - sigma: List of floats or integers, must be greater than 0. Filter width (mm) to use for the Gaussian kernel\n (determines coarseness).\n\n .. warning::\n Setting for sigma must be provided. 
If omitted, no LoG image features are calculated and the function\n will return an empty dictionary.\n\n Returned filter name reflects LoG settings:\n log-sigma-<sigmaValue>-3D.\n\n References:\n\n - `SimpleITK Doxygen documentation\n <https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LaplacianRecursiveGaussianImageFilter.html>`_\n - `ITK Doxygen documentation <https://itk.org/Doxygen/html/classitk_1_1LaplacianRecursiveGaussianImageFilter.html>`_\n - `<https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian>`_\n\n :return: Yields log filtered image for each specified sigma, corresponding image type name and ``kwargs`` (customized\n settings).\n \"\"\"\n global logger\n\n logger.debug('Generating LoG images')\n\n # Check if size of image is > 4 in all 3D directions (otherwise, LoG filter will fail)\n size = numpy.array(inputImage.GetSize())\n spacing = numpy.array(inputImage.GetSpacing())\n\n if numpy.min(size) < 4:\n logger.warning('Image too small to apply LoG filter, size: %s', size)\n return\n\n sigmaValues = kwargs.get('sigma', [])\n\n for sigma in sigmaValues:\n logger.info('Computing LoG with sigma %g', sigma)\n\n if sigma > 0.0:\n if numpy.all(size >= numpy.ceil(sigma / spacing) + 1):\n lrgif = sitk.LaplacianRecursiveGaussianImageFilter()\n lrgif.SetNormalizeAcrossScale(True)\n lrgif.SetSigma(sigma)\n inputImageName = 'log-sigma-%s-mm-3D' % (str(sigma).replace('.', '-'))\n logger.debug('Yielding %s image', inputImageName)\n yield lrgif.Execute(inputImage), inputImageName, kwargs\n else:\n logger.warning('applyLoG: sigma(%g)/spacing(%s) + 1 must be greater than the size(%s) of the inputImage',\n sigma,\n spacing,\n size)\n else:\n logger.warning('applyLoG: sigma must be greater than 0.0: %g', sigma)\n\n\ndef getWaveletImage(inputImage, inputMask, **kwargs):\n \"\"\"\n Applies wavelet filter to the input image and yields the decompositions and the approximation.\n\n Following settings are possible:\n\n - start_level [0]: integer, 0 based level of wavelet which should be used as first set of decompositions\n from which a signature is calculated\n - level [1]: integer, number of levels of wavelet decompositions from which a signature is calculated.\n - wavelet [\"coif1\"]: string, type of wavelet decomposition. Enumerated value, validated against possible values\n present in the ``pyWavelet.wavelist()``. Current possible values (pywavelet version 0.4.0) (where an\n aditional number is needed, range of values is indicated in []):\n\n - haar\n - dmey\n - sym[2-20]\n - db[1-20]\n - coif[1-5]\n - bior[1.1, 1.3, 1.5, 2.2, 2.4, 2.6, 2.8, 3.1, 3.3, 3.5, 3.7, 3.9, 4.4, 5.5, 6.8]\n - rbio[1.1, 1.3, 1.5, 2.2, 2.4, 2.6, 2.8, 3.1, 3.3, 3.5, 3.7, 3.9, 4.4, 5.5, 6.8]\n\n Returned filter name reflects wavelet type:\n wavelet[level]-<decompositionName>\n\n N.B. 
only levels greater than the first level are entered into the name.\n\n :return: Yields each wavelet decomposition and final approximation, corresponding imaget type name and ``kwargs``\n (customized settings).\n \"\"\"\n global logger\n\n logger.debug('Generating Wavelet images')\n\n Nd = inputImage.GetDimension()\n axes = list(range(Nd - 1, -1, -1))\n if kwargs.get('force2D', False):\n axes.remove(kwargs.get('force2Ddimension', 0))\n\n approx, ret = _swt3(inputImage, tuple(axes), **kwargs)\n\n for idx, wl in enumerate(ret, start=1):\n for decompositionName, decompositionImage in wl.items():\n logger.info('Computing Wavelet %s', decompositionName)\n\n if idx == 1:\n inputImageName = 'wavelet-%s' % (decompositionName)\n else:\n inputImageName = 'wavelet%s-%s' % (idx, decompositionName)\n logger.debug('Yielding %s image', inputImageName)\n yield decompositionImage, inputImageName, kwargs\n\n if len(ret) == 1:\n inputImageName = 'wavelet-%s' % ('L' * len(axes))\n else:\n inputImageName = 'wavelet%s-%s' % (len(ret), ('L' * len(axes)))\n logger.debug('Yielding approximation (%s) image', inputImageName)\n yield approx, inputImageName, kwargs\n\n\ndef _swt3(inputImage, axes, **kwargs): # Stationary Wavelet Transform 3D\n wavelet = kwargs.get('wavelet', 'coif1')\n level = kwargs.get('level', 1)\n start_level = kwargs.get('start_level', 0)\n\n matrix = sitk.GetArrayFromImage(inputImage) # This function gets a numpy array from the SimpleITK Image \"inputImage\"\n matrix = numpy.asarray(matrix) # The function np.asarray converts \"matrix\" (which could be also a tuple) into an array.\n\n original_shape = matrix.shape\n # original_shape becomes a tuple (?,?,?) containing the number of rows, columns, and slices of the image\n # this is of course dependent on the number of dimensions, but the same principle holds\n padding = tuple([(0, 1 if dim % 2 != 0 else 0) for dim in original_shape])\n # padding is necessary because of pywt.swtn (see function Notes)\n data = matrix.copy() # creates a modifiable copy of \"matrix\" and we call it \"data\"\n data = numpy.pad(data, padding, 'wrap') # padding the tuple \"padding\" previously computed\n\n if not isinstance(wavelet, pywt.Wavelet):\n wavelet = pywt.Wavelet(wavelet)\n\n for i in range(0, start_level): # if start_level = 0 (default) this for loop never gets executed\n # compute all decompositions and saves them in \"dec\" dict\n dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]\n # copies in \"data\" just the \"aaa\" decomposition (i.e. 
approximation; No of consecutive 'a's = len(axes))\n data = dec['a' * len(axes)].copy()\n\n ret = [] # initialize empty list\n for i in range(start_level, start_level + level):\n # compute the n-dimensional stationary wavelet transform\n dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]\n # Copy the approximation into data (approximation in output / input for next levels)\n data = dec['a' * len(axes)].copy()\n\n dec_im = {} # initialize empty dict\n for decName, decImage in six.iteritems(dec):\n # Returning the approximiation is done only for the last loop,\n # and is handled separately below (by building it from `data`)\n # There for, skip it here\n if decName == 'a' * len(axes):\n continue\n decTemp = decImage.copy()\n decTemp = decTemp[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]\n sitkImage = sitk.GetImageFromArray(decTemp)\n sitkImage.CopyInformation(inputImage)\n dec_im[str(decName).replace('a', 'L').replace('d', 'H')] = sitkImage\n # modifies 'a' with 'L' (Low-pass filter) and 'd' with 'H' (High-pass filter)\n\n ret.append(dec_im) # appending all the filtered sitk images (stored in \"dec_im\") to the \"ret\" list\n\n data = data[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]\n approximation = sitk.GetImageFromArray(data)\n approximation.CopyInformation(inputImage)\n\n return approximation, ret # returns the approximation and the detail (ret) coefficients of the stationary wavelet decomposition\n\n\ndef getSquareImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Computes the square of the image intensities.\n\n Resulting values are rescaled on the range of the initial original image and negative intensities are made\n negative in resultant filtered image.\n\n :math:`f(x) = (cx)^2,\\text{ where } c=\\displaystyle\\frac{1}{\\sqrt{\\max(|x|)}}`\n\n Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.\n\n :return: Yields square filtered image, 'square' and ``kwargs`` (customized settings).\n \"\"\"\n global logger\n\n im = sitk.GetArrayFromImage(inputImage)\n im = im.astype('float64')\n coeff = 1 / numpy.sqrt(numpy.max(numpy.abs(im)))\n im = (coeff * im) ** 2\n im = sitk.GetImageFromArray(im)\n im.CopyInformation(inputImage)\n\n logger.debug('Yielding square image')\n yield im, 'square', kwargs\n\n\ndef getSquareRootImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Computes the square root of the absolute value of image intensities.\n\n Resulting values are rescaled on the range of the initial original image and negative intensities are made\n negative in resultant filtered image.\n\n :math:`f(x) = \\left\\{ {\\begin{array}{lcl}\n \\sqrt{cx} & \\mbox{for} & x \\ge 0 \\\\\n -\\sqrt{-cx} & \\mbox{for} & x < 0\\end{array}} \\right.,\\text{ where } c=\\max(|x|)`\n\n Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.\n\n :return: Yields square root filtered image, 'squareroot' and ``kwargs`` (customized settings).\n \"\"\"\n global logger\n\n im = sitk.GetArrayFromImage(inputImage)\n im = im.astype('float64')\n coeff = numpy.max(numpy.abs(im))\n im[im > 0] = numpy.sqrt(im[im > 0] * coeff)\n im[im < 0] = - numpy.sqrt(-im[im < 0] * coeff)\n im = sitk.GetImageFromArray(im)\n im.CopyInformation(inputImage)\n\n logger.debug('Yielding squareroot image')\n yield im, 'squareroot', kwargs\n\n\ndef getLogarithmImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Computes the logarithm of the absolute value of the original image + 1.\n\n Resulting 
values are rescaled on the range of the initial original image and negative intensities are made\n negative in resultant filtered image.\n\n :math:`f(x) = \\left\\{ {\\begin{array}{lcl}\n c\\log{(x + 1)} & \\mbox{for} & x \\ge 0 \\\\\n -c\\log{(-x + 1)} & \\mbox{for} & x < 0\\end{array}} \\right. \\text{, where } c=\\frac{\\max(|x|)}{\\log(\\max(|x|) + 1)}`\n\n Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.\n\n :return: Yields logarithm filtered image, 'logarithm' and ``kwargs`` (customized settings)\n \"\"\"\n global logger\n\n im = sitk.GetArrayFromImage(inputImage)\n im = im.astype('float64')\n im_max = numpy.max(numpy.abs(im))\n im[im > 0] = numpy.log(im[im > 0] + 1)\n im[im < 0] = - numpy.log(- (im[im < 0] - 1))\n im = im * (im_max / numpy.max(numpy.abs(im)))\n im = sitk.GetImageFromArray(im)\n im.CopyInformation(inputImage)\n\n logger.debug('Yielding logarithm image')\n yield im, 'logarithm', kwargs\n\n\ndef getExponentialImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Computes the exponential of the original image.\n\n Resulting values are rescaled on the range of the initial original image.\n\n :math:`f(x) = e^{cx},\\text{ where } c=\\displaystyle\\frac{\\log(\\max(|x|))}{\\max(|x|)}`\n\n Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.\n\n :return: Yields exponential filtered image, 'exponential' and ``kwargs`` (customized settings)\n \"\"\"\n global logger\n\n im = sitk.GetArrayFromImage(inputImage)\n im = im.astype('float64')\n im_max = numpy.max(numpy.abs(im))\n coeff = numpy.log(im_max) / im_max\n im = numpy.exp(coeff * im)\n im = sitk.GetImageFromArray(im)\n im.CopyInformation(inputImage)\n\n logger.debug('Yielding exponential image')\n yield im, 'exponential', kwargs\n\n\ndef getGradientImage(inputImage, inputMask, **kwargs):\n r\"\"\"\n Compute and return the Gradient Magnitude in the image.\n By default, takes into account the image spacing, this can be switched off by specifying\n ``gradientUseSpacing = False``.\n\n References:\n\n - `SimpleITK documentation\n <https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1GradientMagnitudeImageFilter.html>`_\n - `<https://en.wikipedia.org/wiki/Image_gradient>`_\n \"\"\"\n gmif = sitk.GradientMagnitudeImageFilter()\n gmif.SetUseImageSpacing(kwargs.get('gradientUseSpacing', True))\n im = gmif.Execute(inputImage)\n yield im, 'gradient', kwargs\n\n\ndef getLBP2DImage(inputImage, inputMask, **kwargs):\n \"\"\"\n Compute and return the Local Binary Pattern (LBP) in 2D. If ``force2D`` is set to false (= feature extraction in 3D) a\n warning is logged, as this filter processes the image in a by-slice operation. The plane in which the LBP is\n applied can be controlled by the ``force2Ddimension`` parameter (see also :py:func:`generateAngles`).\n\n Following settings are possible (in addition to ``force2Ddimension``):\n\n - ``lbp2DRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled\n - ``lbp2DSamples`` [9]: Integer, specifies the number of samples to use\n - ``lbp2DMethod`` ['uniform']: String, specifies the method for computing the LBP to use.\n\n For more information see `scikit documentation\n <http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.local_binary_pattern>`_\n\n :return: Yields LBP filtered image, 'lbp-2D' and ``kwargs`` (customized settings)\n\n .. note::\n LBP can often return only a very small number of different gray levels. A customized bin width is often needed.\n .. 
warning::\n Requires package ``scikit-image`` to function. If not available, this filter logs a warning and does not yield an image.\n\n References:\n\n - T. Ojala, M. Pietikainen, and D. Harwood (1994), \"Performance evaluation of texture measures with classification\n based on Kullback discrimination of distributions\", Proceedings of the 12th IAPR International Conference on Pattern\n Recognition (ICPR 1994), vol. 1, pp. 582 - 585.\n - T. Ojala, M. Pietikainen, and D. Harwood (1996), \"A Comparative Study of Texture Measures with Classification Based\n on Feature Distributions\", Pattern Recognition, vol. 29, pp. 51-59.\n \"\"\"\n global logger\n try:\n from skimage.feature import local_binary_pattern\n except ImportError:\n logger.warning('Could not load required package \"skimage\", cannot implement filter LBP 2D')\n return\n\n lbp_radius = kwargs.get('lbp2DRadius', 1)\n lbp_samples = kwargs.get('lbp2DSamples', 8)\n lbp_method = kwargs.get('lbp2DMethod', 'uniform')\n\n im_arr = sitk.GetArrayFromImage(inputImage)\n\n Nd = inputImage.GetDimension()\n if Nd == 3:\n # Warn the user if features are extracted in 3D, as this function calculates LBP in 2D\n if not kwargs.get('force2D', False):\n logger.warning('Calculating Local Binary Pattern in 2D, but extracting features in 3D. Use with caution!')\n lbp_axis = kwargs.get('force2Ddimension', 0)\n\n im_arr = im_arr.swapaxes(0, lbp_axis)\n for idx in range(im_arr.shape[0]):\n im_arr[idx, ...] = local_binary_pattern(im_arr[idx, ...], P=lbp_samples, R=lbp_radius, method=lbp_method)\n im_arr = im_arr.swapaxes(0, lbp_axis)\n elif Nd == 2:\n im_arr = local_binary_pattern(im_arr, P=lbp_samples, R=lbp_radius, method=lbp_method)\n else:\n logger.warning('LBP 2D is only available for 2D or 3D with forced 2D extraction')\n return\n\n im = sitk.GetImageFromArray(im_arr)\n im.CopyInformation(inputImage)\n\n yield im, 'lbp-2D', kwargs\n\n\ndef getLBP3DImage(inputImage, inputMask, **kwargs):\n \"\"\"\n Compute and return the Local Binary Pattern (LBP) in 3D using spherical harmonics.\n If ``force2D`` is set to true (= feature extraction in 2D) a warning is logged.\n\n LBP is only calculated for voxels segmented in the mask\n\n Following settings are possible:\n\n - ``lbp3DLevels`` [2]: integer, specifies the the number of levels in spherical harmonics to use.\n - ``lbp3DIcosphereRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled\n - ``lbp3DIcosphereSubdivision`` [1]: Integer, specifies the number of subdivisions to apply in the icosphere\n\n :return: Yields LBP filtered image for each level, 'lbp-3D-m<level>' and ``kwargs`` (customized settings).\n Additionally yields the kurtosis image, 'lbp-3D-k' and ``kwargs``.\n\n .. note::\n LBP can often return only a very small number of different gray levels. A customized bin width is often needed.\n .. warning::\n Requires package ``scipy`` and ``trimesh`` to function. If not available, this filter logs a warning and does not\n yield an image.\n\n References:\n\n - Banerjee, J, Moelker, A, Niessen, W.J, & van Walsum, T.W. (2013), \"3D LBP-based rotationally invariant region\n description.\" In: Park JI., Kim J. (eds) Computer Vision - ACCV 2012 Workshops. ACCV 2012. Lecture Notes in Computer\n Science, vol 7728. Springer, Berlin, Heidelberg. 
doi:10.1007/978-3-642-37410-4_3\n \"\"\"\n global logger\n Nd = inputImage.GetDimension()\n if Nd != 3:\n logger.warning('LBP 3D only available for 3 dimensional images, found %i dimensions', Nd)\n return\n\n try:\n from scipy.stats import kurtosis\n from scipy.ndimage.interpolation import map_coordinates\n from scipy.special import sph_harm\n from trimesh.creation import icosphere\n except ImportError:\n logger.warning('Could not load required package \"scipy\" or \"trimesh\", cannot implement filter LBP 3D')\n return\n\n # Warn the user if features are extracted in 2D, as this function calculates LBP in 3D\n if kwargs.get('force2D', False):\n logger.warning('Calculating Local Binary Pattern in 3D, but extracting features in 2D. Use with caution!')\n\n label = kwargs.get('label', 1)\n\n lbp_levels = kwargs.get('lbp3DLevels', 2)\n lbp_icosphereRadius = kwargs.get('lbp3DIcosphereRadius', 1)\n lbp_icosphereSubdivision = kwargs.get('lbp3DIcosphereSubdivision', 1)\n\n im_arr = sitk.GetArrayFromImage(inputImage)\n ma_arr = sitk.GetArrayFromImage(inputMask)\n\n # Variables used in the shape comments:\n # Np Number of voxels\n # Nv Number of vertices\n\n # Vertices icosahedron for spherical sampling\n coords_icosahedron = numpy.array(icosphere(lbp_icosphereSubdivision, lbp_icosphereRadius).vertices) # shape(Nv, 3)\n\n # Corresponding polar coordinates\n theta = numpy.arccos(numpy.true_divide(coords_icosahedron[:, 2], lbp_icosphereRadius))\n phi = numpy.arctan2(coords_icosahedron[:, 1], coords_icosahedron[:, 0])\n\n # Corresponding spherical harmonics coefficients Y_{m, n, theta, phi}\n Y = sph_harm(0, 0, theta, phi) # shape(Nv,)\n n_ix = numpy.array(0)\n\n for n in range(1, lbp_levels):\n for m in range(-n, n + 1):\n n_ix = numpy.append(n_ix, n)\n Y = numpy.column_stack((Y, sph_harm(m, n, theta, phi)))\n # shape (Nv, x) where x is the number of iterations in the above loops + 1\n\n # Get labelled coordinates\n ROI_coords = numpy.where(ma_arr == label) # shape(3, Np)\n\n # Interpolate f (samples on the spheres across the entire volume)\n coords = numpy.array(ROI_coords).T[None, :, :] + coords_icosahedron[:, None, :] # shape(Nv, Np, 3)\n f = map_coordinates(im_arr, coords.T, order=3) # Shape(Np, Nv) Note that 'Np' and 'Nv' are swapped due to .T\n\n # Compute spherical Kurtosis\n k = kurtosis(f, axis=1) # shape(Np,)\n\n # Apply sign function\n f_centroids = im_arr[ROI_coords] # Shape(Np,)\n f = numpy.greater_equal(f, f_centroids[:, None]).astype(int) # Shape(Np, Nv)\n\n # Compute c_{m,n} coefficients\n c = numpy.multiply(f[:, :, None], Y[None, :, :]) # Shape(Np, Nv, x)\n c = c.sum(axis=1) # Shape(Np, x)\n\n # Integrate over m\n f = numpy.multiply(c[:, None, n_ix == 0], Y[None, :, n_ix == 0]) # Shape (Np, Nv, 1)\n for n in range(1, lbp_levels):\n f = numpy.concatenate((f,\n numpy.sum(numpy.multiply(c[:, None, n_ix == n], Y[None, :, n_ix == n]),\n axis=2, keepdims=True)\n ),\n axis=2)\n # Shape f (Np, Nv, levels)\n\n # Compute L2-Norm\n f = numpy.sqrt(numpy.sum(f ** 2, axis=1)) # shape(Np, levels)\n\n # Keep only Real Part\n f = numpy.real(f) # shape(Np, levels)\n k = numpy.real(k) # shape(Np,)\n\n # Yield the derived images for each level\n result = numpy.ndarray(im_arr.shape)\n for l_idx in range(lbp_levels):\n result[ROI_coords] = f[:, l_idx]\n\n # Create a SimpleITK image\n im = sitk.GetImageFromArray(result)\n im.CopyInformation(inputImage)\n\n yield im, 'lbp-3D-m%d' % (l_idx + 1), kwargs\n\n # Yield Kurtosis\n result[ROI_coords] = k\n\n # Create a SimpleITK image\n im = 
sitk.GetImageFromArray(result)\n im.CopyInformation(inputImage)\n\n yield im, 'lbp-3D-k', kwargs\n" ]
[ [ "numpy.true_divide", "numpy.sqrt", "numpy.asarray", "numpy.ndarray", "scipy.ndimage.interpolation.map_coordinates", "numpy.arctan2", "numpy.max", "numpy.mean", "numpy.digitize", "numpy.exp", "numpy.where", "numpy.histogram", "numpy.allclose", "numpy.pad", "numpy.arange", "scipy.special.sph_harm", "numpy.greater_equal", "numpy.ceil", "numpy.real", "numpy.std", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.min", "numpy.append", "numpy.floor", "scipy.stats.kurtosis", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.abs", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
caseypw/m2g
[ "be29587322ab1fafb96f6afb726efbdb39b64b66" ]
[ "m2g/graph.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nm2g.graph\n~~~~~~~~~~\n\nContains the primary functionality for connectome estimation after tractography has completed.\nUsed in the final stage of the pipeline.\n\"\"\"\n\n\n# standard library imports\nimport os\nimport time\nimport csv\nfrom itertools import combinations\nfrom collections import defaultdict\nfrom pathlib import Path\n\n# package imports\nimport numpy as np\nimport networkx as nx\nimport nibabel as nib\nfrom dipy.tracking._utils import _mapping_to_voxel, _to_voxel_coordinates\nfrom m2g.utils.gen_utils import timer\nimport matplotlib\n\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nfrom graspy.utils import ptr\nfrom graspy.plot import heatmap\n\n\nclass GraphTools:\n \"\"\"Initializes the graph with nodes corresponding to the number of ROIS\n\n Parameters\n ----------\n rois : str\n Path to a set of ROIs as either an array or nifti file\n tracks : list\n Streamlines for analysis\n affine : ndarray\n a 2-D array with ones on the diagonal and zeros elsewhere (DOESN'T APPEAR TO BE Used)\n outdir : Path\n location of output directory\n connectome_path : str\n Path for the output connectome file (.csv file)\n attr : int, optional\n Path to atlas before registration. By default None\n sens : str, optional, DEPRACATED\n type of MRI scan being analyzed (can be 'dwi' or 'func'), by default \"dwi\"\n\n Raises\n ------\n ValueError\n graph saved with unsupported igraph modality\n ValueError\n graph saved not using edgelist, gpickle, or graphml\n \"\"\"\n\n def __init__(\n self, rois, tracks, affine, outdir, connectome_path, attr=None, sens=\"dwi\"\n ):\n\n self.edge_dict = defaultdict(int)\n self.roi_file = rois\n self.roi_img = nib.load(self.roi_file)\n self.rois = self.roi_img.get_data().astype(\"int\")\n self.n_ids = self.rois[self.rois > 0]\n self.N = len(self.n_ids)\n self.modal = sens\n self.tracks = tracks\n self.affine = affine\n self.outdir = outdir\n self.connectome_path = os.path.dirname(connectome_path)\n self.attr = attr\n\n @timer\n def make_graph_old(self):\n \"\"\"\n Takes streamlines and produces a graph\n **Positional Arguments:**\n streamlines:\n - Fiber streamlines either file or array in a dipy EuDX\n or compatible format.\n \"\"\"\n\n print(\"Building connectivity matrix...\")\n self.g = nx.Graph(\n name=\"Generated by NeuroData's MRI Graphs (m2g)\",\n date=time.asctime(time.localtime()),\n source=\"http://m2g.io\",\n region=\"brain\",\n sensor=self.modal,\n ecount=0,\n vcount=len(self.n_ids),\n )\n print(self.g.graph)\n\n [self.g.add_node(ids) for ids in self.n_ids]\n\n nlines = np.shape(self.tracks)[0]\n print(\"# of Streamlines: \" + str(nlines))\n\n for idx, streamline in enumerate(self.tracks):\n if (idx % int(nlines * 0.05)) == 0:\n print(idx)\n\n points = np.round(streamline).astype(int)\n p = set()\n for point in points:\n try:\n loc = self.rois[point[0], point[1], point[2]]\n except IndexError:\n loc = \"\"\n\n if loc:\n p.add(loc)\n\n edges = combinations(p, 2)\n for edge in edges:\n lst = tuple([int(node) for node in edge])\n self.edge_dict[tuple(sorted(lst))] += 1\n\n edge_list = [(k[0], k[1], v) for k, v in list(self.edge_dict.items())]\n self.g.add_weighted_edges_from(edge_list)\n return self.g, self.edge_dict\n\n @timer\n def make_graph(self, error_margin=2, overlap_thr=1, voxel_size=2):\n \"\"\"Takes streamlines and produces a graph using Numpy functions\n\n Parameters\n ----------\n error_margin : int, optional\n Number of mm around roi's to use (i.e. 
if 2, then any voxel within 2 mm of roi is considered part of roi), by default 2\n overlap_thr : int, optional\n The amount of overlap between an roi and streamline to be considered a connection, by default 1\n voxel_size : int, optional\n Voxel size for roi/streamlines, by default 2\n\n Returns\n -------\n Graph\n networkx Graph object containing the connectome matrix\n \"\"\"\n print(\"Building connectivity matrix...\")\n\n # Instantiate empty networkX graph object & dictionary\n # Create voxel-affine mapping\n lin_T, offset = _mapping_to_voxel(\n np.eye(4)\n ) # TODO : voxel_size was removed in dipy 1.0.0, make sure that didn't break anything when voxel size is not 2mm\n #mx = len(np.unique(self.rois.astype(np.int64))) #- 1\n\n self.attr = nib.load(self.attr)\n self.attr = self.attr.get_data().astype(\"int\")\n \n mx = len(np.unique(self.attr.astype(np.int64)))\n self.g = nx.Graph(ecount=0, vcount=mx)\n edge_dict = defaultdict(int)\n #node_dict = dict(\n # zip(np.unique(self.rois).astype(\"int16\") + 1, np.arange(mx) + 1)\n #)\n node_dict = dict(\n zip(np.unique(self.attr).astype(\"int16\"), np.arange(mx))\n )\n\n lost_rois=[]\n #Track lost rois\n for un in np.unique(self.attr.astype(np.int64)):\n if un not in self.rois:\n lost_rois.append(un)\n \n if len(lost_rois)>0:\n with open(f'{self.connectome_path}/lost_roi.csv', mode='w') as lost_file:\n lost_writer = csv.writer(lost_file, delimiter=',')\n lost_writer.writerow(lost_rois)\n\n\n # Add empty vertices\n for node in range(0,mx):#(1, mx + 1):\n self.g.add_node(node)\n\n nlines = np.shape(self.tracks)[0]\n print(\"# of Streamlines: \" + str(nlines))\n\n ix = 0\n for s in self.tracks:\n # Map the streamlines coordinates to voxel coordinates and get labels for label_volume\n # i, j, k = np.vstack(np.array([get_sphere(coord, error_margin,\n # (voxel_size, voxel_size, voxel_size),\n # self.roi_img.shape) for coord in\n # _to_voxel_coordinates(s, lin_T, offset)])).T\n\n # Map the streamlines coordinates to voxel coordinates\n points = _to_voxel_coordinates(s, lin_T, offset)\n\n # get labels for label_volume\n i, j, k = points.T\n\n lab_arr = self.rois[i, j, k]\n endlabels = []\n for lab in np.unique(lab_arr).astype(\"int16\"):\n if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):\n endlabels.append(node_dict[lab])\n\n edges = combinations(endlabels, 2)\n for edge in edges:\n lst = tuple([int(node) for node in edge])\n edge_dict[tuple(sorted(lst))] += 1\n\n edge_list = [(k[0], k[1], v) for k, v in edge_dict.items()]\n\n self.g.add_weighted_edges_from(edge_list)\n ix = ix + 1\n\n conn_matrix = np.array(nx.to_numpy_matrix(self.g))\n conn_matrix[np.isnan(conn_matrix)] = 0\n conn_matrix[np.isinf(conn_matrix)] = 0\n conn_matrix = np.asmatrix(np.maximum(conn_matrix, conn_matrix.transpose()))\n g = nx.from_numpy_matrix(conn_matrix)\n\n return g\n\n def save_graph(self, graphname, fmt=\"igraph\"):\n \"\"\"Saves the graph to disk\n\n Parameters\n ----------\n graphname : str\n Filename for the graph\n fmt : str, optional\n Format you want the graph saved as [edgelist, gpickle, graphml, txt, npy, igraph], by default \"igraph\"\n\n Raises\n ------\n ValueError\n Unsupported modality (not dwi or func) for saving the graph in igraph format\n ValueError\n Unsupported format\n \"\"\"\n\n self.g.graph[\"ecount\"] = nx.number_of_edges(self.g)\n self.g = nx.convert_node_labels_to_integers(self.g, first_label=1)\n print(self.g.graph)\n if fmt == \"edgelist\":\n nx.write_weighted_edgelist(self.g, graphname, encoding=\"utf-8\")\n elif fmt == \"gpickle\":\n 
nx.write_gpickle(self.g, graphname)\n elif fmt == \"graphml\":\n nx.write_graphml(self.g, graphname)\n elif fmt == \"txt\":\n np.savetxt(graphname, nx.to_numpy_matrix(self.g))\n elif fmt == \"npy\":\n np.save(graphname, nx.to_numpy_matrix(self.g))\n elif fmt == \"igraph\":\n nx.write_weighted_edgelist(\n self.g, graphname, delimiter=\" \", encoding=\"utf-8\"\n )\n else:\n raise ValueError(\n \"Only edgelist, gpickle, graphml, txt, and npy are currently supported\"\n )\n\n if not os.path.isfile(graphname):\n raise FileNotFoundError(f\"File {graphname} not created.\")\n\n print(f\"Graph saved. Output location here: {graphname}\")\n\n def save_graph_png(self, graphname):\n \"\"\"Saves adjacency graph, made using graspy's heatmap function, as a png. This will be saved in the qa/graphs_plotting/ directory\n\n Parameters\n ----------\n graphname : str\n name of the generated graph (do not include '.png')\n \"\"\"\n\n conn_matrix = np.array(nx.to_numpy_matrix(self.g))\n conn_matrix = ptr.pass_to_ranks(conn_matrix)\n heatmap(conn_matrix)\n outpath = str(self.outdir / f\"qa/graphs_plotting/{Path(graphname).stem}.png\")\n plt.savefig(outpath)\n plt.close()\n\n def summary(self):\n \"\"\"\n User friendly wrapping and display of graph properties\n \"\"\"\n print(\"\\nGraph Summary:\")\n print(nx.info(self.g))\n\n" ]
[ [ "numpy.sum", "numpy.unique", "numpy.isnan", "matplotlib.use", "numpy.eye", "numpy.arange", "matplotlib.pyplot.savefig", "numpy.round", "numpy.shape", "matplotlib.pyplot.close", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prosyslab-warehouse/tensorflow-2.6.2
[ "153df12f5343096713587e21d4143b9acfbb3513" ]
[ "tensorflow/python/ops/nn_fused_batchnorm_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for fused_batch_norm related functionality in tensorflow.ops.nn.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass BatchNormalizationTest(test.TestCase):\n\n def _batch_norm(self, x, mean, var, offset, scale, epsilon):\n # We compute the batch norm manually in this function because\n # nn_impl.batch_normalization does not support float16 yet.\n # TODO(reedwm): Add float16 support to nn_impl.batch_normalization.\n inv = math_ops.rsqrt(var + epsilon) * scale\n y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)\n return math_ops.cast(y, x.dtype)\n\n def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):\n if data_format not in ['NHWC', 'NCHW', 'NDHWC', 'NCDHW']:\n raise ValueError('data_format must be NCHW or NHWC for 4D tensors or'\n 'NCDHW or NDHWC for 5D tensors, got %s.' 
% data_format)\n if data_format == 'NCHW':\n x = array_ops.transpose(x, [0, 2, 3, 1])\n elif data_format == 'NCDHW':\n x = array_ops.transpose(x, [0, 2, 3, 4, 1])\n y = self._batch_norm(x, mean, var, offset, scale, epsilon)\n if data_format == 'NCHW':\n y = array_ops.transpose(y, [0, 3, 1, 2])\n elif data_format == 'NCDHW':\n y = array_ops.transpose(y, [0, 4, 1, 2, 3])\n return self.evaluate(y)\n\n def _test_inference(self,\n x_shape,\n x_dtype,\n scale_shape,\n scale_dtype,\n use_gpu=True,\n exponential_avg_factor=1.0,\n data_format='NHWC'):\n np.random.seed(1)\n x_val = np.random.random_sample(x_shape).astype(x_dtype)\n scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n var_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n\n with self.cached_session(use_gpu=use_gpu) as sess:\n x = constant_op.constant(x_val, name='x')\n scale = constant_op.constant(scale_val, name='scale')\n offset = constant_op.constant(offset_val, name='offset')\n mean = constant_op.constant(mean_val, name='mean')\n var = constant_op.constant(var_val, name='variance')\n epsilon = 0.001\n y, _, _ = nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=mean,\n variance=var,\n epsilon=epsilon,\n exponential_avg_factor=exponential_avg_factor,\n data_format=data_format,\n is_training=False)\n y_val = self.evaluate(y)\n y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,\n data_format)\n # An atol value of 1e-3 is too small for float16's, because some adjacent\n # float16 values that y_val can take are greater than 1e-3 apart, e.g.\n # 2.16602 and 2.16797.\n atol = 2e-3 if x_dtype == np.float16 else 1e-3\n self.assertAllClose(y_ref, y_val, atol=atol)\n\n def _running_mean(self, old_mean, new_val, factor):\n if factor == 1.0:\n return new_val\n else:\n return (1.0 - factor) * old_mean + factor * new_val\n\n def _training_ref(self, x, scale, offset, old_mean, old_var,\n exponential_avg_factor, epsilon, data_format):\n if data_format not in ['NHWC', 'NCHW', 'NDHWC', 'NCDHW']:\n raise ValueError('data_format must be NCHW or NHWC for 4D tensors or'\n 'NCDHW or NDHWC for 5D tensors, got %s.' % data_format)\n use_4d_tensor = (x.shape.ndims == 4)\n if data_format == 'NCHW':\n x = array_ops.transpose(x, [0, 2, 3, 1])\n elif data_format == 'NCDHW':\n x = array_ops.transpose(x, [0, 2, 3, 4, 1])\n\n mean_axis = [0, 1, 2] if use_4d_tensor else [0, 1, 2, 3]\n batch_mean, batch_var = nn_impl.moments(\n math_ops.cast(x, scale.dtype), mean_axis, keep_dims=False)\n\n y = self._batch_norm(x, batch_mean, batch_var, offset, scale, epsilon)\n if data_format == 'NCHW':\n y = array_ops.transpose(y, [0, 3, 1, 2])\n elif data_format == 'NCDHW':\n y = array_ops.transpose(y, [0, 4, 1, 2, 3])\n\n # This is for Bessel's correction. 
tf.nn.moments uses n, instead of n-1, as\n # the denominator in the formula to calculate variance, while\n # tf.compat.v1.nn.fused_batch_norm has Bessel's correction built in.\n sample_size = math_ops.cast(\n array_ops.size(x) / array_ops.size(scale), scale.dtype)\n batch_var_corrected = batch_var * sample_size / (\n math_ops.maximum(sample_size - 1.0, 1.0))\n\n mean = self._running_mean(old_mean, batch_mean, exponential_avg_factor)\n var = self._running_mean(old_var, batch_var_corrected,\n exponential_avg_factor)\n return self.evaluate(y), self.evaluate(mean), self.evaluate(var)\n\n def _test_training(self,\n x_shape,\n x_dtype,\n scale_shape,\n scale_dtype,\n use_gpu=True,\n exponential_avg_factor=1.0,\n data_format='NHWC'):\n np.random.seed(1)\n x_val = np.random.random_sample(x_shape).astype(x_dtype)\n scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n if exponential_avg_factor == 1.0:\n old_mean_val = None\n old_var_val = None\n else:\n old_mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n old_var_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n\n with self.cached_session(use_gpu=use_gpu) as sess:\n x = constant_op.constant(x_val, name='x')\n scale = constant_op.constant(scale_val, name='scale')\n offset = constant_op.constant(offset_val, name='offset')\n epsilon = 0.001\n y, mean, var = nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=old_mean_val,\n variance=old_var_val,\n epsilon=epsilon,\n exponential_avg_factor=exponential_avg_factor,\n data_format=data_format,\n is_training=True)\n y_val, mean_val, var_val = self.evaluate([y, mean, var])\n y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset,\n old_mean_val, old_var_val,\n exponential_avg_factor,\n epsilon, data_format)\n y_atol = 2e-3 if x_dtype == np.float16 else 1e-3\n self.assertAllClose(y_ref, y_val, atol=y_atol)\n self.assertAllClose(mean_ref, mean_val, atol=1e-3)\n self.assertAllClose(var_ref, var_val, atol=1e-3)\n\n def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):\n \"\"\"Computes the gradient error for float16 inputs and/or outputs.\n\n This returns the same value as gradient_checker.compute_gradient_error. The\n difference is that gradient_checker.compute_gradient_error does not\n numerically compute the gradients in a numerically stable way for float16\n tensors. To fix this, this function requires float32 versions of x and y to\n numerically compute the gradients, to compare with the float16 symbolically\n computed gradients.\n\n Args:\n x: The input tensor.\n x32: A float32 version of x.\n x_shape: The shape of x.\n y: The output tensor.\n y32: A float32 version of y. 
Must be calculated based on x32, not x.\n y_shape: The shape of y.\n\n Returns:\n The maximum error in between the two Jacobians, as in\n gradient_checker.compute_gradient_error.\n \"\"\"\n x_init_val = np.random.random_sample(x_shape).astype(np.float16)\n x32_init_val = x_init_val.astype(np.float32)\n\n # TODO(reedwm): Do not perform the unnecessary computations in\n # compute_gradient, since they double the computation time of this function.\n theoretical_grad, _ = gradient_checker.compute_gradient(\n x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)\n _, numerical_grad = gradient_checker.compute_gradient(\n x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)\n\n # If grad is empty, no error.\n if theoretical_grad.size == 0 and numerical_grad.size == 0:\n return 0\n return np.fabs(theoretical_grad - numerical_grad).max()\n\n def _test_gradient(self,\n x_shape,\n x_dtype,\n scale_shape,\n scale_dtype,\n use_gpu=True,\n exponential_avg_factor=1.0,\n data_format='NHWC',\n is_training=True):\n np.random.seed(1)\n x_val = np.random.random_sample(x_shape).astype(x_dtype)\n scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n\n with self.cached_session(use_gpu=use_gpu):\n x = constant_op.constant(x_val, name='x')\n scale = constant_op.constant(scale_val, name='scale')\n offset = constant_op.constant(offset_val, name='offset')\n if is_training and exponential_avg_factor == 1.0:\n pop_mean = None\n pop_var = None\n else:\n pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)\n pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)\n y, _, _ = nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=pop_mean,\n variance=pop_var,\n exponential_avg_factor=exponential_avg_factor,\n data_format=data_format,\n is_training=is_training)\n if x_dtype != np.float16:\n err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)\n err_scale = gradient_checker.compute_gradient_error(\n scale, scale_shape, y, x_shape)\n err_offset = gradient_checker.compute_gradient_error(\n offset, scale_shape, y, x_shape)\n else:\n x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)\n y32, _, _ = nn_impl.fused_batch_norm(\n x32,\n scale,\n offset,\n mean=pop_mean,\n variance=pop_var,\n data_format=data_format,\n exponential_avg_factor=exponential_avg_factor,\n is_training=is_training)\n err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,\n x_shape)\n err_scale = self._compute_gradient_error_float16(\n scale, scale, scale_shape, y, y32, x_shape)\n err_offset = self._compute_gradient_error_float16(\n offset, offset, scale_shape, y, y32, x_shape)\n\n x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3\n scale_err_tolerance = 1e-3\n self.assertLess(err_x, x_err_tolerance)\n self.assertLess(err_scale, scale_err_tolerance)\n self.assertLess(err_offset, scale_err_tolerance)\n\n def _test_grad_grad(self,\n x_shape,\n x_dtype,\n scale_shape,\n scale_dtype,\n use_gpu=True,\n exponential_avg_factor=1.0,\n data_format='NHWC',\n is_training=True,\n err_tolerance=1e-3):\n np.random.seed(1)\n x_val = np.random.random_sample(x_shape).astype(x_dtype)\n grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)\n scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)\n\n with self.cached_session(use_gpu=use_gpu) as sess:\n x = constant_op.constant(x_val, 
name='x')\n grad_y = constant_op.constant(grad_y_val, name='grad_y')\n scale = constant_op.constant(scale_val, name='scale')\n offset = constant_op.constant(offset_val, name='offset')\n if is_training and exponential_avg_factor == 1.0:\n pop_mean = None\n pop_var = None\n else:\n pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)\n pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)\n y, _, _ = nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=pop_mean,\n variance=pop_var,\n exponential_avg_factor=exponential_avg_factor,\n data_format=data_format,\n is_training=is_training)\n grad_x, grad_scale, grad_offset = gradients_impl.gradients(\n y, [x, scale, offset], grad_y)\n\n if is_training:\n epsilon = y.op.get_attr('epsilon')\n data_format = y.op.get_attr('data_format')\n grad_vals = self.evaluate([grad_x, grad_scale, grad_offset])\n grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean,\n pop_var, epsilon, data_format)\n grad_internal_vals = self.evaluate(list(grad_internal))\n for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):\n self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)\n\n if x_dtype != np.float16:\n err_grad_grad_y_1 = gradient_checker.compute_gradient_error(\n grad_y, x_shape, grad_x, x_shape)\n err_grad_grad_y_2 = gradient_checker.compute_gradient_error(\n grad_y, x_shape, grad_scale, scale_shape)\n err_grad_grad_y_3 = gradient_checker.compute_gradient_error(\n grad_y, x_shape, grad_offset, scale_shape)\n # In freeze mode, grad_x is not a function of x.\n if is_training:\n err_grad_x_1 = gradient_checker.compute_gradient_error(\n x, x_shape, grad_x, x_shape)\n err_grad_x_2 = gradient_checker.compute_gradient_error(\n x, x_shape, grad_scale, scale_shape)\n\n err_grad_scale = gradient_checker.compute_gradient_error(\n scale, scale_shape, grad_x, x_shape)\n else:\n x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')\n grad_y32 = constant_op.constant(\n grad_y_val, dtype=dtypes.float32, name='grad_y32')\n y32, _, _ = nn_impl.fused_batch_norm(\n x32,\n scale,\n offset,\n mean=pop_mean,\n variance=pop_var,\n exponential_avg_factor=exponential_avg_factor,\n data_format=data_format,\n is_training=is_training)\n grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(\n y32, [x32, scale, offset], grad_y32)\n err_grad_grad_y_1 = self._compute_gradient_error_float16(\n grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape)\n err_grad_grad_y_2 = self._compute_gradient_error_float16(\n grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape)\n err_grad_grad_y_3 = self._compute_gradient_error_float16(\n grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape)\n # In freeze mode, grad_x is not a function of x.\n if is_training:\n err_grad_x_1 = self._compute_gradient_error_float16(\n x, x32, x_shape, grad_x, grad_x32, x_shape)\n err_grad_x_2 = self._compute_gradient_error_float16(\n x, x32, x_shape, grad_scale, grad_scale32, scale_shape)\n\n err_grad_scale = self._compute_gradient_error_float16(\n scale, scale, scale_shape, grad_x, grad_x32, x_shape)\n\n self.assertLess(err_grad_grad_y_1, err_tolerance)\n self.assertLess(err_grad_grad_y_2, err_tolerance)\n self.assertLess(err_grad_grad_y_3, err_tolerance)\n if is_training:\n self.assertLess(err_grad_x_1, err_tolerance)\n self.assertLess(err_grad_x_2, err_tolerance)\n self.assertLess(err_grad_scale, err_tolerance)\n\n def _runtests(self, x_shape, is_training, gradient_test=False,\n cpu_only=False):\n if 
len(x_shape) == 4:\n data_format_list = ['NHWC', 'NCHW']\n else:\n data_format_list = ['NCDHW', 'NDHWC']\n use_gpu_vals = [False]\n if test.is_gpu_available(cuda_only=True) and not cpu_only:\n use_gpu_vals += [True]\n factors = [1.0, 0.6]\n for dtype in [np.float16, np.float32]:\n for use_gpu in use_gpu_vals:\n for data_format in data_format_list:\n if data_format == 'NHWC' or data_format == 'NDHWC':\n scale_shape = x_shape[-1:]\n else:\n scale_shape = x_shape[1:2]\n for exponential_avg_factor in factors:\n if gradient_test:\n self._test_gradient(\n x_shape,\n dtype,\n scale_shape,\n np.float32,\n use_gpu=use_gpu,\n data_format=data_format,\n is_training=is_training,\n exponential_avg_factor=exponential_avg_factor)\n else:\n if is_training:\n self._test_training(\n x_shape,\n dtype,\n scale_shape,\n np.float32,\n use_gpu=use_gpu,\n data_format=data_format,\n exponential_avg_factor=exponential_avg_factor)\n else:\n self._test_inference(\n x_shape,\n dtype,\n scale_shape,\n np.float32,\n use_gpu=use_gpu,\n data_format=data_format,\n exponential_avg_factor=exponential_avg_factor)\n\n def testInferenceShape1(self):\n x_shape = [1, 1, 6, 1]\n self._runtests(x_shape, False)\n\n def testInferenceShape2(self):\n x_shape = [1, 1, 6, 2]\n self._runtests(x_shape, False)\n\n def testInferenceShape3(self):\n x_shape = [1, 2, 1, 6]\n self._runtests(x_shape, False)\n\n def testInferenceShape4(self):\n x_shape = [27, 131, 127, 6]\n self._runtests(x_shape, False)\n\n def testInferenceShape5(self):\n x_shape = [0, 131, 127, 6]\n self._runtests(x_shape, False)\n\n def testInferenceShape6(self):\n x_shape = [1, 1, 1, 1]\n # GPU kernel doesn't properly handle case where non-channel dimensions are 1\n self._runtests(x_shape, False, cpu_only=True)\n\n def testInferenceShape7(self):\n x_shape = [1, 2, 6, 1, 3]\n self._runtests(x_shape, False)\n\n def testTrainingShape1(self):\n x_shape = [1, 1, 6, 1]\n self._runtests(x_shape, True)\n\n def testTrainingShape2(self):\n x_shape = [1, 1, 6, 2]\n self._runtests(x_shape, True)\n\n def testTrainingShape3(self):\n x_shape = [1, 2, 1, 6]\n self._runtests(x_shape, True)\n\n def testTrainingShape4(self):\n x_shape = [27, 131, 127, 6]\n self._runtests(x_shape, True)\n\n @test_util.disable_xla('b/141236973: Empty inputs wrong on CPU.')\n def testTrainingShape5(self):\n x_shape = [0, 131, 127, 6]\n self._runtests(x_shape, True)\n\n @test_util.run_deprecated_v1\n def testTrainingShape6(self):\n x_shape = [1, 1, 1, 1]\n # GPU kernel doesn't properly handle case where non-channel dimensions are 1\n self._runtests(x_shape, True, cpu_only=True)\n\n def testTrainingShape7(self):\n x_shape = [1, 2, 6, 1, 3]\n self._runtests(x_shape, True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape1(self):\n x_shape = [1, 1, 6, 1]\n self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape2(self):\n x_shape = [1, 1, 6, 2]\n self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape3(self):\n x_shape = [1, 2, 1, 6]\n self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape4(self):\n x_shape = [5, 7, 11, 4]\n self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla('This test never passed for XLA')\n def testBatchNormGradInferenceShape5(self):\n x_shape = [0, 7, 11, 4]\n 
self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape6(self):\n x_shape = [1, 1, 1, 1]\n # GPU kernel doesn't properly handle case where non-channel dimensions are 1\n self._runtests(x_shape, is_training=False, gradient_test=True,\n cpu_only=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradInferenceShape7(self):\n x_shape = [1, 2, 6, 1, 3]\n self._runtests(x_shape, is_training=False, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape1(self):\n x_shape = [1, 1, 6, 1]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape2(self):\n x_shape = [1, 1, 6, 2]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape3(self):\n x_shape = [1, 2, 1, 6]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape4(self):\n x_shape = [5, 7, 11, 4]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n @test_util.run_deprecated_v1\n @test_util.disable_xla('This test never passed for XLA')\n def testBatchNormGradTrainingShape5(self):\n x_shape = [0, 7, 11, 4]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape6(self):\n x_shape = [1, 1, 1, 1]\n # GPU kernel doesn't properly handle case where non-channel dimensions are 1\n self._runtests(x_shape, is_training=True, gradient_test=True, cpu_only=True)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradTrainingShape7(self):\n x_shape = [1, 2, 6, 1, 3]\n self._runtests(x_shape, is_training=True, gradient_test=True)\n\n def _testBatchNormGradGrad(self, config):\n shape = config['shape']\n err_tolerance = config['err_tolerance']\n dtype = config['dtype']\n rank = len(shape)\n if rank == 4:\n data_format_nhwc, features_nhwc = 'NHWC', shape[3]\n data_format_nchw, features_nchw = 'NCHW', shape[1]\n else:\n data_format_nhwc, features_nhwc = 'NDHWC', shape[4]\n data_format_nchw, features_nchw = 'NCDHW', shape[1]\n for is_training in [True, False]:\n if test.is_gpu_available(cuda_only=True):\n self._test_grad_grad(\n shape,\n dtype, [features_nhwc],\n np.float32,\n use_gpu=True,\n data_format=data_format_nhwc,\n is_training=is_training,\n err_tolerance=err_tolerance)\n self._test_grad_grad(\n shape,\n dtype, [features_nchw],\n np.float32,\n use_gpu=True,\n data_format=data_format_nchw,\n is_training=is_training,\n err_tolerance=err_tolerance)\n self._test_grad_grad(\n shape,\n dtype, [features_nhwc],\n np.float32,\n use_gpu=False,\n data_format=data_format_nhwc,\n is_training=is_training,\n err_tolerance=err_tolerance)\n self._test_grad_grad(\n shape,\n dtype, [features_nchw],\n np.float32,\n use_gpu=False,\n data_format=data_format_nchw,\n is_training=is_training,\n err_tolerance=err_tolerance)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig1(self):\n config = {\n 'shape': [2, 3, 4, 5],\n 'err_tolerance': 1e-2,\n 'dtype': np.float32,\n }\n self._testBatchNormGradGrad(config)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig2(self):\n config = {\n 'shape': [2, 3, 2, 2],\n 'err_tolerance': 1e-3,\n 'dtype': np.float32,\n }\n self._testBatchNormGradGrad(config)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig3(self):\n config = {\n 'shape': [2, 3, 4, 5],\n 
'err_tolerance': 2e-2,\n 'dtype': np.float16,\n }\n self._testBatchNormGradGrad(config)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig4(self):\n config = {\n 'shape': [2, 3, 2, 2],\n 'err_tolerance': 2e-3,\n 'dtype': np.float16,\n }\n self._testBatchNormGradGrad(config)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig5(self):\n config = {\n 'shape': [2, 3, 2, 2, 2],\n 'err_tolerance': 2e-3,\n 'dtype': np.float32,\n }\n self._testBatchNormGradGrad(config)\n\n @test_util.run_deprecated_v1\n def testBatchNormGradGradConfig6(self):\n config = {\n 'shape': [2, 3, 2, 2, 2],\n 'err_tolerance': 3e-3,\n 'dtype': np.float16,\n }\n self._testBatchNormGradGrad(config)\n\n def test5dBatchNormFollowedByRelu(self):\n # The remapper grappler pass previously did not properly handle a 5D\n # inference FusedBatchNorm followed by Relu. This asserts that this case is\n # correctly handled.\n np.random.seed(1)\n x = np.random.random_sample((2, 3, 2, 2, 3)).astype(np.float32)\n scale = np.random.random_sample((3,)).astype(np.float32)\n offset = np.random.random_sample((3,)).astype(np.float32)\n mean = np.random.random_sample((3,)).astype(np.float32)\n var = np.random.random_sample((3,)).astype(np.float32)\n\n epsilon = 0.001\n y, _, _ = nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=mean,\n variance=var,\n epsilon=epsilon,\n data_format='NCDHW',\n is_training=False)\n y = nn_ops.relu(y)\n y_val = self.evaluate(y)\n y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,\n 'NCDHW')\n y_ref = np.maximum(y_ref, 0.)\n self.assertAllClose(y_ref, y_val, atol=1e-3)\n\n def testEagerShapeErrors(self):\n with context.eager_mode():\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((3,))\n offset = array_ops.ones((2,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'scale must have the same number of elements'):\n nn_impl.fused_batch_norm(x, scale, offset)\n\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n offset = array_ops.ones((3,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'offset must have the same number of elements'):\n nn_impl.fused_batch_norm(x, scale, offset)\n\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n offset = array_ops.ones((2,))\n mean = array_ops.ones((0,))\n variance = array_ops.ones((2,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'When is_training=false, mean must have the same number of elements'):\n nn_impl.fused_batch_norm(\n x, scale, offset, mean=mean, variance=variance, is_training=False)\n\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n offset = array_ops.ones((2,))\n mean = array_ops.ones((2,))\n variance = array_ops.ones((0,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'When is_training=false, variance must have the same number of '\n 'elements'):\n nn_impl.fused_batch_norm(\n x, scale, offset, mean=mean, variance=variance, is_training=False)\n\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n offset = array_ops.ones((2,))\n mean = array_ops.ones((0,))\n variance = array_ops.ones((2,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'When exponential_avg_factor != 1, mean must have the same number of '\n 'elements'):\n nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=mean,\n variance=variance,\n exponential_avg_factor=0.5)\n\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n offset = 
array_ops.ones((2,))\n mean = array_ops.ones((2,))\n variance = array_ops.ones((0,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'When exponential_avg_factor != 1, variance must have the same '\n 'number of elements'):\n nn_impl.fused_batch_norm(\n x,\n scale,\n offset,\n mean=mean,\n variance=variance,\n exponential_avg_factor=0.5)\n\n def testEagerShapeGradErrors(self):\n with context.eager_mode():\n y_backprop = array_ops.ones((2, 2, 2, 3))\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n reserve_space_1 = array_ops.ones((2,))\n reserve_space_2 = array_ops.ones((2,))\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n 'x and y_backprop must have same shape,'):\n gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,\n reserve_space_1, reserve_space_2)\n\n y_backprop = array_ops.ones((2, 2, 2, 2))\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((3,))\n reserve_space_1 = array_ops.ones((2,))\n reserve_space_2 = array_ops.ones((2,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'scale must have the same number of elements'):\n gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,\n reserve_space_1, reserve_space_2)\n\n y_backprop = array_ops.ones((2, 2, 2, 2))\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n reserve_space_1 = array_ops.ones((3,))\n reserve_space_2 = array_ops.ones((2,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'reserve_space_1 must have the same number of elements'):\n gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,\n reserve_space_1, reserve_space_2)\n\n y_backprop = array_ops.ones((2, 2, 2, 2))\n x = array_ops.ones((2, 2, 2, 2))\n scale = array_ops.ones((2,))\n reserve_space_1 = array_ops.ones((2,))\n reserve_space_2 = array_ops.ones((3,))\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n 'reserve_space_2 must have the same number of elements'):\n gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,\n reserve_space_1, reserve_space_2)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.ops.gradient_checker.compute_gradient", "tensorflow.python.ops.math_ops.rsqrt", "numpy.random.random_sample", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.framework.test_util.disable_xla", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.nn_impl.fused_batch_norm", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.ops.gradient_checker.compute_gradient_error", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.nn_grad._BatchNormGrad", "numpy.maximum", "numpy.random.seed", "tensorflow.python.ops.gen_nn_ops.fused_batch_norm_grad_v2", "tensorflow.python.ops.nn_ops.relu", "tensorflow.python.ops.math_ops.maximum", "numpy.fabs", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "1.4", "2.7", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.6", "2.10" ] } ]
GloriaGo/sparkRAW
[ "c00ff36070d8a03e3df2452747e59b42b9e2df1e" ]
[ "python/pyspark/sql/dataframe.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport random\n\nif sys.version >= '3':\n basestring = unicode = str\n long = int\n from functools import reduce\nelse:\n from itertools import imap as map\n\nimport warnings\n\nfrom pyspark import copy_func, since\nfrom pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix\nfrom pyspark.serializers import ArrowSerializer, BatchedSerializer, PickleSerializer, \\\n UTF8Deserializer\nfrom pyspark.storagelevel import StorageLevel\nfrom pyspark.traceback_utils import SCCallSiteSync\nfrom pyspark.sql.types import _parse_datatype_json_string\nfrom pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column\nfrom pyspark.sql.readwriter import DataFrameWriter\nfrom pyspark.sql.streaming import DataStreamWriter\nfrom pyspark.sql.types import *\n\n__all__ = [\"DataFrame\", \"DataFrameNaFunctions\", \"DataFrameStatFunctions\"]\n\n\nclass DataFrame(object):\n \"\"\"A distributed collection of data grouped into named columns.\n\n A :class:`DataFrame` is equivalent to a relational table in Spark SQL,\n and can be created using various functions in :class:`SQLContext`::\n\n people = sqlContext.read.parquet(\"...\")\n\n Once created, it can be manipulated using the various domain-specific-language\n (DSL) functions defined in: :class:`DataFrame`, :class:`Column`.\n\n To select a column from the data frame, use the apply method::\n\n ageCol = people.age\n\n A more concrete example::\n\n # To create DataFrame using SQLContext\n people = sqlContext.read.parquet(\"...\")\n department = sqlContext.read.parquet(\"...\")\n\n people.filter(people.age > 30).join(department, people.deptId == department.id) \\\\\n .groupBy(department.name, \"gender\").agg({\"salary\": \"avg\", \"age\": \"max\"})\n\n .. 
versionadded:: 1.3\n \"\"\"\n\n def __init__(self, jdf, sql_ctx):\n self._jdf = jdf\n self.sql_ctx = sql_ctx\n self._sc = sql_ctx and sql_ctx._sc\n self.is_cached = False\n self._schema = None # initialized lazily\n self._lazy_rdd = None\n\n @property\n @since(1.3)\n def rdd(self):\n \"\"\"Returns the content as an :class:`pyspark.RDD` of :class:`Row`.\n \"\"\"\n if self._lazy_rdd is None:\n jrdd = self._jdf.javaToPython()\n self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))\n return self._lazy_rdd\n\n @property\n @since(\"1.3.1\")\n def na(self):\n \"\"\"Returns a :class:`DataFrameNaFunctions` for handling missing values.\n \"\"\"\n return DataFrameNaFunctions(self)\n\n @property\n @since(1.4)\n def stat(self):\n \"\"\"Returns a :class:`DataFrameStatFunctions` for statistic functions.\n \"\"\"\n return DataFrameStatFunctions(self)\n\n @ignore_unicode_prefix\n @since(1.3)\n def toJSON(self, use_unicode=True):\n \"\"\"Converts a :class:`DataFrame` into a :class:`RDD` of string.\n\n Each row is turned into a JSON document as one element in the returned RDD.\n\n >>> df.toJSON().first()\n u'{\"age\":2,\"name\":\"Alice\"}'\n \"\"\"\n rdd = self._jdf.toJSON()\n return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))\n\n @since(1.3)\n def registerTempTable(self, name):\n \"\"\"Registers this RDD as a temporary table using the given name.\n\n The lifetime of this temporary table is tied to the :class:`SQLContext`\n that was used to create this :class:`DataFrame`.\n\n >>> df.registerTempTable(\"people\")\n >>> df2 = spark.sql(\"select * from people\")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n >>> spark.catalog.dropTempView(\"people\")\n\n .. note:: Deprecated in 2.0, use createOrReplaceTempView instead.\n \"\"\"\n self._jdf.createOrReplaceTempView(name)\n\n @since(2.0)\n def createTempView(self, name):\n \"\"\"Creates a local temporary view with this DataFrame.\n\n The lifetime of this temporary table is tied to the :class:`SparkSession`\n that was used to create this :class:`DataFrame`.\n throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the\n catalog.\n\n >>> df.createTempView(\"people\")\n >>> df2 = spark.sql(\"select * from people\")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n >>> df.createTempView(\"people\") # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n AnalysisException: u\"Temporary table 'people' already exists;\"\n >>> spark.catalog.dropTempView(\"people\")\n\n \"\"\"\n self._jdf.createTempView(name)\n\n @since(2.0)\n def createOrReplaceTempView(self, name):\n \"\"\"Creates or replaces a local temporary view with this DataFrame.\n\n The lifetime of this temporary table is tied to the :class:`SparkSession`\n that was used to create this :class:`DataFrame`.\n\n >>> df.createOrReplaceTempView(\"people\")\n >>> df2 = df.filter(df.age > 3)\n >>> df2.createOrReplaceTempView(\"people\")\n >>> df3 = spark.sql(\"select * from people\")\n >>> sorted(df3.collect()) == sorted(df2.collect())\n True\n >>> spark.catalog.dropTempView(\"people\")\n\n \"\"\"\n self._jdf.createOrReplaceTempView(name)\n\n @since(2.1)\n def createGlobalTempView(self, name):\n \"\"\"Creates a global temporary view with this DataFrame.\n\n The lifetime of this temporary view is tied to this Spark application.\n throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the\n catalog.\n\n >>> df.createGlobalTempView(\"people\")\n >>> df2 = 
spark.sql(\"select * from global_temp.people\")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n >>> df.createGlobalTempView(\"people\") # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n AnalysisException: u\"Temporary table 'people' already exists;\"\n >>> spark.catalog.dropGlobalTempView(\"people\")\n\n \"\"\"\n self._jdf.createGlobalTempView(name)\n\n @since(2.2)\n def createOrReplaceGlobalTempView(self, name):\n \"\"\"Creates or replaces a global temporary view using the given name.\n\n The lifetime of this temporary view is tied to this Spark application.\n\n >>> df.createOrReplaceGlobalTempView(\"people\")\n >>> df2 = df.filter(df.age > 3)\n >>> df2.createOrReplaceGlobalTempView(\"people\")\n >>> df3 = spark.sql(\"select * from global_temp.people\")\n >>> sorted(df3.collect()) == sorted(df2.collect())\n True\n >>> spark.catalog.dropGlobalTempView(\"people\")\n\n \"\"\"\n self._jdf.createOrReplaceGlobalTempView(name)\n\n @property\n @since(1.4)\n def write(self):\n \"\"\"\n Interface for saving the content of the non-streaming :class:`DataFrame` out into external\n storage.\n\n :return: :class:`DataFrameWriter`\n \"\"\"\n return DataFrameWriter(self)\n\n @property\n @since(2.0)\n def writeStream(self):\n \"\"\"\n Interface for saving the content of the streaming :class:`DataFrame` out into external\n storage.\n\n .. note:: Evolving.\n\n :return: :class:`DataStreamWriter`\n \"\"\"\n return DataStreamWriter(self)\n\n @property\n @since(1.3)\n def schema(self):\n \"\"\"Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.\n\n >>> df.schema\n StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))\n \"\"\"\n if self._schema is None:\n try:\n self._schema = _parse_datatype_json_string(self._jdf.schema().json())\n except AttributeError as e:\n raise Exception(\n \"Unable to parse datatype from schema. %s\" % e)\n return self._schema\n\n @since(1.3)\n def printSchema(self):\n \"\"\"Prints out the schema in the tree format.\n\n >>> df.printSchema()\n root\n |-- age: integer (nullable = true)\n |-- name: string (nullable = true)\n <BLANKLINE>\n \"\"\"\n print(self._jdf.schema().treeString())\n\n @since(1.3)\n def explain(self, extended=False):\n \"\"\"Prints the (logical and physical) plans to the console for debugging purpose.\n\n :param extended: boolean, default ``False``. If ``False``, prints only the physical plan.\n\n >>> df.explain()\n == Physical Plan ==\n Scan ExistingRDD[age#0,name#1]\n\n >>> df.explain(True)\n == Parsed Logical Plan ==\n ...\n == Analyzed Logical Plan ==\n ...\n == Optimized Logical Plan ==\n ...\n == Physical Plan ==\n ...\n \"\"\"\n if extended:\n print(self._jdf.queryExecution().toString())\n else:\n print(self._jdf.queryExecution().simpleString())\n\n @since(1.3)\n def isLocal(self):\n \"\"\"Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally\n (without any Spark executors).\n \"\"\"\n return self._jdf.isLocal()\n\n @property\n @since(2.0)\n def isStreaming(self):\n \"\"\"Returns true if this :class:`Dataset` contains one or more sources that continuously\n return data as it arrives. A :class:`Dataset` that reads data from a streaming source\n must be executed as a :class:`StreamingQuery` using the :func:`start` method in\n :class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or\n :func:`collect`) will throw an :class:`AnalysisException` when there is a streaming\n source present.\n\n .. 
note:: Evolving\n \"\"\"\n return self._jdf.isStreaming()\n\n @since(1.3)\n def show(self, n=20, truncate=True, vertical=False):\n \"\"\"Prints the first ``n`` rows to the console.\n\n :param n: Number of rows to show.\n :param truncate: If set to True, truncate strings longer than 20 chars by default.\n If set to a number greater than one, truncates long strings to length ``truncate``\n and align cells right.\n :param vertical: If set to True, print output rows vertically (one line\n per column value).\n\n >>> df\n DataFrame[age: int, name: string]\n >>> df.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n >>> df.show(truncate=3)\n +---+----+\n |age|name|\n +---+----+\n | 2| Ali|\n | 5| Bob|\n +---+----+\n >>> df.show(vertical=True)\n -RECORD 0-----\n age | 2\n name | Alice\n -RECORD 1-----\n age | 5\n name | Bob\n \"\"\"\n if isinstance(truncate, bool) and truncate:\n print(self._jdf.showString(n, 20, vertical))\n else:\n print(self._jdf.showString(n, int(truncate), vertical))\n\n def __repr__(self):\n return \"DataFrame[%s]\" % (\", \".join(\"%s: %s\" % c for c in self.dtypes))\n\n @since(2.1)\n def checkpoint(self, eager=True):\n \"\"\"Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the\n logical plan of this DataFrame, which is especially useful in iterative algorithms where the\n plan may grow exponentially. It will be saved to files inside the checkpoint\n directory set with L{SparkContext.setCheckpointDir()}.\n\n :param eager: Whether to checkpoint this DataFrame immediately\n\n .. note:: Experimental\n \"\"\"\n jdf = self._jdf.checkpoint(eager)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(2.1)\n def withWatermark(self, eventTime, delayThreshold):\n \"\"\"Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point\n in time before which we assume no more late data is going to arrive.\n\n Spark will use this watermark for several purposes:\n - To know when a given time window aggregation can be finalized and thus can be emitted\n when using output modes that do not allow updates.\n\n - To minimize the amount of state that we need to keep for on-going aggregations.\n\n The current watermark is computed by looking at the `MAX(eventTime)` seen across\n all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost\n of coordinating this value across partitions, the actual watermark used is only guaranteed\n to be at least `delayThreshold` behind the actual event time. In some cases we may still\n process records that arrive more than `delayThreshold` late.\n\n :param eventTime: the name of the column that contains the event time of the row.\n :param delayThreshold: the minimum delay to wait to data to arrive late, relative to the\n latest record that has been processed in the form of an interval\n (e.g. \"1 minute\" or \"5 hours\").\n\n .. 
note:: Evolving\n\n >>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')\n DataFrame[name: string, time: timestamp]\n \"\"\"\n if not eventTime or type(eventTime) is not str:\n raise TypeError(\"eventTime should be provided as a string\")\n if not delayThreshold or type(delayThreshold) is not str:\n raise TypeError(\"delayThreshold should be provided as a string interval\")\n jdf = self._jdf.withWatermark(eventTime, delayThreshold)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(2.2)\n def hint(self, name, *parameters):\n \"\"\"Specifies some hint on the current DataFrame.\n\n :param name: A name of the hint.\n :param parameters: Optional parameters.\n :return: :class:`DataFrame`\n\n >>> df.join(df2.hint(\"broadcast\"), \"name\").show()\n +----+---+------+\n |name|age|height|\n +----+---+------+\n | Bob| 5| 85|\n +----+---+------+\n \"\"\"\n if len(parameters) == 1 and isinstance(parameters[0], list):\n parameters = parameters[0]\n\n if not isinstance(name, str):\n raise TypeError(\"name should be provided as str, got {0}\".format(type(name)))\n\n for p in parameters:\n if not isinstance(p, str):\n raise TypeError(\n \"all parameters should be str, got {0} of type {1}\".format(p, type(p)))\n\n jdf = self._jdf.hint(name, self._jseq(parameters))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.3)\n def count(self):\n \"\"\"Returns the number of rows in this :class:`DataFrame`.\n\n >>> df.count()\n 2\n \"\"\"\n return int(self._jdf.count())\n\n @ignore_unicode_prefix\n @since(1.3)\n def collect(self):\n \"\"\"Returns all the records as a list of :class:`Row`.\n\n >>> df.collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n port = self._jdf.collectToPython()\n return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))\n\n @ignore_unicode_prefix\n @since(2.0)\n def toLocalIterator(self):\n \"\"\"\n Returns an iterator that contains all of the rows in this :class:`DataFrame`.\n The iterator will consume as much memory as the largest partition in this DataFrame.\n\n >>> list(df.toLocalIterator())\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n port = self._jdf.toPythonIterator()\n return _load_from_socket(port, BatchedSerializer(PickleSerializer()))\n\n @ignore_unicode_prefix\n @since(1.3)\n def limit(self, num):\n \"\"\"Limits the result count to the number specified.\n\n >>> df.limit(1).collect()\n [Row(age=2, name=u'Alice')]\n >>> df.limit(0).collect()\n []\n \"\"\"\n jdf = self._jdf.limit(num)\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def take(self, num):\n \"\"\"Returns the first ``num`` rows as a :class:`list` of :class:`Row`.\n\n >>> df.take(2)\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n return self.limit(num).collect()\n\n @since(1.3)\n def foreach(self, f):\n \"\"\"Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.\n\n This is a shorthand for ``df.rdd.foreach()``.\n\n >>> def f(person):\n ... print(person.name)\n >>> df.foreach(f)\n \"\"\"\n self.rdd.foreach(f)\n\n @since(1.3)\n def foreachPartition(self, f):\n \"\"\"Applies the ``f`` function to each partition of this :class:`DataFrame`.\n\n This a shorthand for ``df.rdd.foreachPartition()``.\n\n >>> def f(people):\n ... for person in people:\n ... 
print(person.name)\n >>> df.foreachPartition(f)\n \"\"\"\n self.rdd.foreachPartition(f)\n\n @since(1.3)\n def cache(self):\n \"\"\"Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).\n\n .. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.\n \"\"\"\n self.is_cached = True\n self._jdf.cache()\n return self\n\n @since(1.3)\n def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):\n \"\"\"Sets the storage level to persist the contents of the :class:`DataFrame` across\n operations after the first time it is computed. This can only be used to assign\n a new storage level if the :class:`DataFrame` does not have a storage level set yet.\n If no storage level is specified defaults to (C{MEMORY_AND_DISK}).\n\n .. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.\n \"\"\"\n self.is_cached = True\n javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)\n self._jdf.persist(javaStorageLevel)\n return self\n\n @property\n @since(2.1)\n def storageLevel(self):\n \"\"\"Get the :class:`DataFrame`'s current storage level.\n\n >>> df.storageLevel\n StorageLevel(False, False, False, False, 1)\n >>> df.cache().storageLevel\n StorageLevel(True, True, False, True, 1)\n >>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel\n StorageLevel(True, False, False, False, 2)\n \"\"\"\n java_storage_level = self._jdf.storageLevel()\n storage_level = StorageLevel(java_storage_level.useDisk(),\n java_storage_level.useMemory(),\n java_storage_level.useOffHeap(),\n java_storage_level.deserialized(),\n java_storage_level.replication())\n return storage_level\n\n @since(1.3)\n def unpersist(self, blocking=False):\n \"\"\"Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from\n memory and disk.\n\n .. note:: `blocking` default has changed to False to match Scala in 2.0.\n \"\"\"\n self.is_cached = False\n self._jdf.unpersist(blocking)\n return self\n\n @since(1.4)\n def coalesce(self, numPartitions):\n \"\"\"\n Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.\n\n Similar to coalesce defined on an :class:`RDD`, this operation results in a\n narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,\n there will not be a shuffle, instead each of the 100 new partitions will\n claim 10 of the current partitions. If a larger number of partitions is requested,\n it will stay at the current number of partitions.\n\n However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,\n this may result in your computation taking place on fewer nodes than\n you like (e.g. one node in the case of numPartitions = 1). To avoid this,\n you can call repartition(). This will add a shuffle step, but means the\n current upstream partitions will be executed in parallel (per whatever\n the current partitioning is).\n\n >>> df.coalesce(1).rdd.getNumPartitions()\n 1\n \"\"\"\n return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)\n\n @since(1.3)\n def repartition(self, numPartitions, *cols):\n \"\"\"\n Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The\n resulting DataFrame is hash partitioned.\n\n ``numPartitions`` can be an int to specify the target number of partitions or a Column.\n If it is a Column, it will be used as the first partitioning column. If not specified,\n the default number of partitions is used.\n\n .. 
versionchanged:: 1.6\n Added optional arguments to specify the partitioning columns. Also made numPartitions\n optional if partitioning columns are specified.\n\n >>> df.repartition(10).rdd.getNumPartitions()\n 10\n >>> data = df.union(df).repartition(\"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 5| Bob|\n | 5| Bob|\n | 2|Alice|\n | 2|Alice|\n +---+-----+\n >>> data = data.repartition(7, \"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n >>> data.rdd.getNumPartitions()\n 7\n >>> data = data.repartition(\"name\", \"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 5| Bob|\n | 5| Bob|\n | 2|Alice|\n | 2|Alice|\n +---+-----+\n \"\"\"\n if isinstance(numPartitions, int):\n if len(cols) == 0:\n return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)\n else:\n return DataFrame(\n self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)\n elif isinstance(numPartitions, (basestring, Column)):\n cols = (numPartitions, ) + cols\n return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)\n else:\n raise TypeError(\"numPartitions should be an int or Column\")\n\n @since(1.3)\n def distinct(self):\n \"\"\"Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.\n\n >>> df.distinct().count()\n 2\n \"\"\"\n return DataFrame(self._jdf.distinct(), self.sql_ctx)\n\n @since(1.3)\n def sample(self, withReplacement=None, fraction=None, seed=None):\n \"\"\"Returns a sampled subset of this :class:`DataFrame`.\n\n :param withReplacement: Sample with replacement or not (default False).\n :param fraction: Fraction of rows to generate, range [0.0, 1.0].\n :param seed: Seed for sampling (default a random seed).\n\n .. note:: This is not guaranteed to provide exactly the fraction specified of the total\n count of the given :class:`DataFrame`.\n\n .. 
note:: `fraction` is required and, `withReplacement` and `seed` are optional.\n\n >>> df = spark.range(10)\n >>> df.sample(0.5, 3).count()\n 4\n >>> df.sample(fraction=0.5, seed=3).count()\n 4\n >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()\n 1\n >>> df.sample(1.0).count()\n 10\n >>> df.sample(fraction=1.0).count()\n 10\n >>> df.sample(False, fraction=1.0).count()\n 10\n \"\"\"\n\n # For the cases below:\n # sample(True, 0.5 [, seed])\n # sample(True, fraction=0.5 [, seed])\n # sample(withReplacement=False, fraction=0.5 [, seed])\n is_withReplacement_set = \\\n type(withReplacement) == bool and isinstance(fraction, float)\n\n # For the case below:\n # sample(faction=0.5 [, seed])\n is_withReplacement_omitted_kwargs = \\\n withReplacement is None and isinstance(fraction, float)\n\n # For the case below:\n # sample(0.5 [, seed])\n is_withReplacement_omitted_args = isinstance(withReplacement, float)\n\n if not (is_withReplacement_set\n or is_withReplacement_omitted_kwargs\n or is_withReplacement_omitted_args):\n argtypes = [\n str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]\n raise TypeError(\n \"withReplacement (optional), fraction (required) and seed (optional)\"\n \" should be a bool, float and number; however, \"\n \"got [%s].\" % \", \".join(argtypes))\n\n if is_withReplacement_omitted_args:\n if fraction is not None:\n seed = fraction\n fraction = withReplacement\n withReplacement = None\n\n seed = long(seed) if seed is not None else None\n args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]\n jdf = self._jdf.sample(*args)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.5)\n def sampleBy(self, col, fractions, seed=None):\n \"\"\"\n Returns a stratified sample without replacement based on the\n fraction given on each stratum.\n\n :param col: column that defines strata\n :param fractions:\n sampling fraction for each stratum. If a stratum is not\n specified, we treat its fraction as zero.\n :param seed: random seed\n :return: a new DataFrame that represents the stratified sample\n\n >>> from pyspark.sql.functions import col\n >>> dataset = sqlContext.range(0, 100).select((col(\"id\") % 3).alias(\"key\"))\n >>> sampled = dataset.sampleBy(\"key\", fractions={0: 0.1, 1: 0.2}, seed=0)\n >>> sampled.groupBy(\"key\").count().orderBy(\"key\").show()\n +---+-----+\n |key|count|\n +---+-----+\n | 0| 5|\n | 1| 9|\n +---+-----+\n\n \"\"\"\n if not isinstance(col, str):\n raise ValueError(\"col must be a string, but got %r\" % type(col))\n if not isinstance(fractions, dict):\n raise ValueError(\"fractions must be a dict but got %r\" % type(fractions))\n for k, v in fractions.items():\n if not isinstance(k, (float, int, long, basestring)):\n raise ValueError(\"key must be float, int, long, or string, but got %r\" % type(k))\n fractions[k] = float(v)\n seed = seed if seed is not None else random.randint(0, sys.maxsize)\n return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)\n\n @since(1.4)\n def randomSplit(self, weights, seed=None):\n \"\"\"Randomly splits this :class:`DataFrame` with the provided weights.\n\n :param weights: list of doubles as weights with which to split the DataFrame. Weights will\n be normalized if they don't sum up to 1.0.\n :param seed: The seed for sampling.\n\n >>> splits = df4.randomSplit([1.0, 2.0], 24)\n >>> splits[0].count()\n 1\n\n >>> splits[1].count()\n 3\n \"\"\"\n for w in weights:\n if w < 0.0:\n raise ValueError(\"Weights must be positive. 
Found weight value: %s\" % w)\n seed = seed if seed is not None else random.randint(0, sys.maxsize)\n rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))\n return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]\n\n @property\n @since(1.3)\n def dtypes(self):\n \"\"\"Returns all column names and their data types as a list.\n\n >>> df.dtypes\n [('age', 'int'), ('name', 'string')]\n \"\"\"\n return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]\n\n @property\n @since(1.3)\n def columns(self):\n \"\"\"Returns all column names as a list.\n\n >>> df.columns\n ['age', 'name']\n \"\"\"\n return [f.name for f in self.schema.fields]\n\n @ignore_unicode_prefix\n @since(1.3)\n def alias(self, alias):\n \"\"\"Returns a new :class:`DataFrame` with an alias set.\n\n >>> from pyspark.sql.functions import *\n >>> df_as1 = df.alias(\"df_as1\")\n >>> df_as2 = df.alias(\"df_as2\")\n >>> joined_df = df_as1.join(df_as2, col(\"df_as1.name\") == col(\"df_as2.name\"), 'inner')\n >>> joined_df.select(\"df_as1.name\", \"df_as2.name\", \"df_as2.age\").collect()\n [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]\n \"\"\"\n assert isinstance(alias, basestring), \"alias should be a string\"\n return DataFrame(getattr(self._jdf, \"as\")(alias), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(2.1)\n def crossJoin(self, other):\n \"\"\"Returns the cartesian product with another :class:`DataFrame`.\n\n :param other: Right side of the cartesian product.\n\n >>> df.select(\"age\", \"name\").collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df2.select(\"name\", \"height\").collect()\n [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]\n >>> df.crossJoin(df2.select(\"height\")).select(\"age\", \"name\", \"height\").collect()\n [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),\n Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]\n \"\"\"\n\n jdf = self._jdf.crossJoin(other._jdf)\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def join(self, other, on=None, how=None):\n \"\"\"Joins with another :class:`DataFrame`, using the given join expression.\n\n :param other: Right side of the join\n :param on: a string for the join column name, a list of column names,\n a join expression (Column), or a list of Columns.\n If `on` is a string or a list of strings indicating the name of the join column(s),\n the column(s) must exist on both sides, and this performs an equi-join.\n :param how: str, default ``inner``. 
Must be one of: ``inner``, ``cross``, ``outer``,\n ``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,\n ``left_semi``, and ``left_anti``.\n\n The following performs a full outer join between ``df1`` and ``df2``.\n\n >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()\n [Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]\n\n >>> df.join(df2, 'name', 'outer').select('name', 'height').collect()\n [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]\n\n >>> cond = [df.name == df3.name, df.age == df3.age]\n >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n\n >>> df.join(df2, 'name').select(df.name, df2.height).collect()\n [Row(name=u'Bob', height=85)]\n\n >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()\n [Row(name=u'Bob', age=5)]\n \"\"\"\n\n if on is not None and not isinstance(on, list):\n on = [on]\n\n if on is not None:\n if isinstance(on[0], basestring):\n on = self._jseq(on)\n else:\n assert isinstance(on[0], Column), \"on should be Column or list of Column\"\n on = reduce(lambda x, y: x.__and__(y), on)\n on = on._jc\n\n if on is None and how is None:\n jdf = self._jdf.join(other._jdf)\n else:\n if how is None:\n how = \"inner\"\n if on is None:\n on = self._jseq([])\n assert isinstance(how, basestring), \"how should be basestring\"\n jdf = self._jdf.join(other._jdf, on, how)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.6)\n def sortWithinPartitions(self, *cols, **kwargs):\n \"\"\"Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).\n\n :param cols: list of :class:`Column` or column names to sort by.\n :param ascending: boolean or list of boolean (default True).\n Sort ascending vs. descending. Specify list for multiple sort orders.\n If a list is specified, length of the list must equal length of the `cols`.\n\n >>> df.sortWithinPartitions(\"age\", ascending=False).show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n \"\"\"\n jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def sort(self, *cols, **kwargs):\n \"\"\"Returns a new :class:`DataFrame` sorted by the specified column(s).\n\n :param cols: list of :class:`Column` or column names to sort by.\n :param ascending: boolean or list of boolean (default True).\n Sort ascending vs. descending. 
Specify list for multiple sort orders.\n If a list is specified, length of the list must equal length of the `cols`.\n\n >>> df.sort(df.age.desc()).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.sort(\"age\", ascending=False).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.orderBy(df.age.desc()).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> from pyspark.sql.functions import *\n >>> df.sort(asc(\"age\")).collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df.orderBy(desc(\"age\"), \"name\").collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.orderBy([\"age\", \"name\"], ascending=[0, 1]).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n \"\"\"\n jdf = self._jdf.sort(self._sort_cols(cols, kwargs))\n return DataFrame(jdf, self.sql_ctx)\n\n orderBy = sort\n\n def _jseq(self, cols, converter=None):\n \"\"\"Return a JVM Seq of Columns from a list of Column or names\"\"\"\n return _to_seq(self.sql_ctx._sc, cols, converter)\n\n def _jmap(self, jm):\n \"\"\"Return a JVM Scala Map from a dict\"\"\"\n return _to_scala_map(self.sql_ctx._sc, jm)\n\n def _jcols(self, *cols):\n \"\"\"Return a JVM Seq of Columns from a list of Column or column names\n\n If `cols` has only one list in it, cols[0] will be used as the list.\n \"\"\"\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n return self._jseq(cols, _to_java_column)\n\n def _sort_cols(self, cols, kwargs):\n \"\"\" Return a JVM Seq of Columns that describes the sort order\n \"\"\"\n if not cols:\n raise ValueError(\"should sort by at least one column\")\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n jcols = [_to_java_column(c) for c in cols]\n ascending = kwargs.get('ascending', True)\n if isinstance(ascending, (bool, int)):\n if not ascending:\n jcols = [jc.desc() for jc in jcols]\n elif isinstance(ascending, list):\n jcols = [jc if asc else jc.desc()\n for asc, jc in zip(ascending, jcols)]\n else:\n raise TypeError(\"ascending can only be boolean or list, but got %s\" % type(ascending))\n return self._jseq(jcols)\n\n @since(\"1.3.1\")\n def describe(self, *cols):\n \"\"\"Computes basic statistics for numeric and string columns.\n\n This include count, mean, stddev, min, and max. If no columns are\n given, this function computes statistics for all numerical or string columns.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n >>> df.describe(['age']).show()\n +-------+------------------+\n |summary| age|\n +-------+------------------+\n | count| 2|\n | mean| 3.5|\n | stddev|2.1213203435596424|\n | min| 2|\n | max| 5|\n +-------+------------------+\n >>> df.describe().show()\n +-------+------------------+-----+\n |summary| age| name|\n +-------+------------------+-----+\n | count| 2| 2|\n | mean| 3.5| null|\n | stddev|2.1213203435596424| null|\n | min| 2|Alice|\n | max| 5| Bob|\n +-------+------------------+-----+\n\n Use summary for expanded statistics and control over which statistics to compute.\n \"\"\"\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n jdf = self._jdf.describe(self._jseq(cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(\"2.3.0\")\n def summary(self, *statistics):\n \"\"\"Computes specified statistics for numeric and string columns. 
Available statistics are:\n - count\n - mean\n - stddev\n - min\n - max\n - arbitrary approximate percentiles specified as a percentage (eg, 75%)\n\n If no statistics are given, this function computes count, mean, stddev, min,\n approximate quartiles (percentiles at 25%, 50%, and 75%), and max.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n >>> df.summary().show()\n +-------+------------------+-----+\n |summary| age| name|\n +-------+------------------+-----+\n | count| 2| 2|\n | mean| 3.5| null|\n | stddev|2.1213203435596424| null|\n | min| 2|Alice|\n | 25%| 5.0| null|\n | 50%| 5.0| null|\n | 75%| 5.0| null|\n | max| 5| Bob|\n +-------+------------------+-----+\n\n >>> df.summary(\"count\", \"min\", \"25%\", \"75%\", \"max\").show()\n +-------+---+-----+\n |summary|age| name|\n +-------+---+-----+\n | count| 2| 2|\n | min| 2|Alice|\n | 25%|5.0| null|\n | 75%|5.0| null|\n | max| 5| Bob|\n +-------+---+-----+\n\n To do a summary for specific columns first select them:\n\n >>> df.select(\"age\", \"name\").summary(\"count\").show()\n +-------+---+----+\n |summary|age|name|\n +-------+---+----+\n | count| 2| 2|\n +-------+---+----+\n\n See also describe for basic statistics.\n \"\"\"\n if len(statistics) == 1 and isinstance(statistics[0], list):\n statistics = statistics[0]\n jdf = self._jdf.summary(self._jseq(statistics))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def head(self, n=None):\n \"\"\"Returns the first ``n`` rows.\n\n .. note:: This method should only be used if the resulting array is expected\n to be small, as all the data is loaded into the driver's memory.\n\n :param n: int, default 1. 
Number of rows to return.\n :return: If n is greater than 1, return a list of :class:`Row`.\n If n is 1, return a single Row.\n\n >>> df.head()\n Row(age=2, name=u'Alice')\n >>> df.head(1)\n [Row(age=2, name=u'Alice')]\n \"\"\"\n if n is None:\n rs = self.head(1)\n return rs[0] if rs else None\n return self.take(n)\n\n @ignore_unicode_prefix\n @since(1.3)\n def first(self):\n \"\"\"Returns the first row as a :class:`Row`.\n\n >>> df.first()\n Row(age=2, name=u'Alice')\n \"\"\"\n return self.head()\n\n @ignore_unicode_prefix\n @since(1.3)\n def __getitem__(self, item):\n \"\"\"Returns the column as a :class:`Column`.\n\n >>> df.select(df['age']).collect()\n [Row(age=2), Row(age=5)]\n >>> df[ [\"name\", \"age\"]].collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n >>> df[ df.age > 3 ].collect()\n [Row(age=5, name=u'Bob')]\n >>> df[df[0] > 3].collect()\n [Row(age=5, name=u'Bob')]\n \"\"\"\n if isinstance(item, basestring):\n jc = self._jdf.apply(item)\n return Column(jc)\n elif isinstance(item, Column):\n return self.filter(item)\n elif isinstance(item, (list, tuple)):\n return self.select(*item)\n elif isinstance(item, int):\n jc = self._jdf.apply(self.columns[item])\n return Column(jc)\n else:\n raise TypeError(\"unexpected item type: %s\" % type(item))\n\n @since(1.3)\n def __getattr__(self, name):\n \"\"\"Returns the :class:`Column` denoted by ``name``.\n\n >>> df.select(df.age).collect()\n [Row(age=2), Row(age=5)]\n \"\"\"\n if name not in self.columns:\n raise AttributeError(\n \"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n jc = self._jdf.apply(name)\n return Column(jc)\n\n @ignore_unicode_prefix\n @since(1.3)\n def select(self, *cols):\n \"\"\"Projects a set of expressions and returns a new :class:`DataFrame`.\n\n :param cols: list of column names (string) or expressions (:class:`Column`).\n If one of the column names is '*', that column is expanded to include all columns\n in the current DataFrame.\n\n >>> df.select('*').collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df.select('name', 'age').collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n >>> df.select(df.name, (df.age + 10).alias('age')).collect()\n [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]\n \"\"\"\n jdf = self._jdf.select(self._jcols(*cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.3)\n def selectExpr(self, *expr):\n \"\"\"Projects a set of SQL expressions and returns a new :class:`DataFrame`.\n\n This is a variant of :func:`select` that accepts SQL expressions.\n\n >>> df.selectExpr(\"age * 2\", \"abs(age)\").collect()\n [Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]\n \"\"\"\n if len(expr) == 1 and isinstance(expr[0], list):\n expr = expr[0]\n jdf = self._jdf.selectExpr(self._jseq(expr))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def filter(self, condition):\n \"\"\"Filters rows using the given condition.\n\n :func:`where` is an alias for :func:`filter`.\n\n :param condition: a :class:`Column` of :class:`types.BooleanType`\n or a string of SQL expression.\n\n >>> df.filter(df.age > 3).collect()\n [Row(age=5, name=u'Bob')]\n >>> df.where(df.age == 2).collect()\n [Row(age=2, name=u'Alice')]\n\n >>> df.filter(\"age > 3\").collect()\n [Row(age=5, name=u'Bob')]\n >>> df.where(\"age = 2\").collect()\n [Row(age=2, name=u'Alice')]\n \"\"\"\n if isinstance(condition, basestring):\n jdf = self._jdf.filter(condition)\n elif isinstance(condition, Column):\n jdf = 
self._jdf.filter(condition._jc)\n else:\n raise TypeError(\"condition should be string or Column\")\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def groupBy(self, *cols):\n \"\"\"Groups the :class:`DataFrame` using the specified columns,\n so we can run aggregation on them. See :class:`GroupedData`\n for all the available aggregate functions.\n\n :func:`groupby` is an alias for :func:`groupBy`.\n\n :param cols: list of columns to group by.\n Each element should be a column name (string) or an expression (:class:`Column`).\n\n >>> df.groupBy().avg().collect()\n [Row(avg(age)=3.5)]\n >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())\n [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]\n >>> sorted(df.groupBy(df.name).avg().collect())\n [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]\n >>> sorted(df.groupBy(['name', df.age]).count().collect())\n [Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]\n \"\"\"\n jgd = self._jdf.groupBy(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self.sql_ctx)\n\n @since(1.4)\n def rollup(self, *cols):\n \"\"\"\n Create a multi-dimensional rollup for the current :class:`DataFrame` using\n the specified columns, so we can run aggregation on them.\n\n >>> df.rollup(\"name\", df.age).count().orderBy(\"name\", \"age\").show()\n +-----+----+-----+\n | name| age|count|\n +-----+----+-----+\n | null|null| 2|\n |Alice|null| 1|\n |Alice| 2| 1|\n | Bob|null| 1|\n | Bob| 5| 1|\n +-----+----+-----+\n \"\"\"\n jgd = self._jdf.rollup(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self.sql_ctx)\n\n @since(1.4)\n def cube(self, *cols):\n \"\"\"\n Create a multi-dimensional cube for the current :class:`DataFrame` using\n the specified columns, so we can run aggregation on them.\n\n >>> df.cube(\"name\", df.age).count().orderBy(\"name\", \"age\").show()\n +-----+----+-----+\n | name| age|count|\n +-----+----+-----+\n | null|null| 2|\n | null| 2| 1|\n | null| 5| 1|\n |Alice|null| 1|\n |Alice| 2| 1|\n | Bob|null| 1|\n | Bob| 5| 1|\n +-----+----+-----+\n \"\"\"\n jgd = self._jdf.cube(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self.sql_ctx)\n\n @since(1.3)\n def agg(self, *exprs):\n \"\"\" Aggregate on the entire :class:`DataFrame` without groups\n (shorthand for ``df.groupBy.agg()``).\n\n >>> df.agg({\"age\": \"max\"}).collect()\n [Row(max(age)=5)]\n >>> from pyspark.sql import functions as F\n >>> df.agg(F.min(df.age)).collect()\n [Row(min(age)=2)]\n \"\"\"\n return self.groupBy().agg(*exprs)\n\n @since(2.0)\n def union(self, other):\n \"\"\" Return a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union\n (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n \"\"\"\n return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def unionAll(self, other):\n \"\"\" Return a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union\n (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n\n .. 
note:: Deprecated in 2.0, use :func:`union` instead.\n \"\"\"\n return self.union(other)\n\n @since(2.3)\n def unionByName(self, other):\n \"\"\" Returns a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set\n union (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n The difference between this function and :func:`union` is that this function\n resolves columns by name (not by position):\n\n >>> df1 = spark.createDataFrame([[1, 2, 3]], [\"col0\", \"col1\", \"col2\"])\n >>> df2 = spark.createDataFrame([[4, 5, 6]], [\"col1\", \"col2\", \"col0\"])\n >>> df1.unionByName(df2).show()\n +----+----+----+\n |col0|col1|col2|\n +----+----+----+\n | 1| 2| 3|\n | 6| 4| 5|\n +----+----+----+\n \"\"\"\n return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def intersect(self, other):\n \"\"\" Return a new :class:`DataFrame` containing rows only in\n both this frame and another frame.\n\n This is equivalent to `INTERSECT` in SQL.\n \"\"\"\n return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def subtract(self, other):\n \"\"\" Return a new :class:`DataFrame` containing rows in this frame\n but not in another frame.\n\n This is equivalent to `EXCEPT` in SQL.\n \"\"\"\n return DataFrame(getattr(self._jdf, \"except\")(other._jdf), self.sql_ctx)\n\n @since(1.4)\n def dropDuplicates(self, subset=None):\n \"\"\"Return a new :class:`DataFrame` with duplicate rows removed,\n optionally only considering certain columns.\n\n For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming\n :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop\n duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can\n be and system will accordingly limit the state. In addition, too late data older than\n watermark will be dropped to avoid any possibility of duplicates.\n\n :func:`drop_duplicates` is an alias for :func:`dropDuplicates`.\n\n >>> from pyspark.sql import Row\n >>> df = sc.parallelize([ \\\\\n ... Row(name='Alice', age=5, height=80), \\\\\n ... Row(name='Alice', age=5, height=80), \\\\\n ... 
Row(name='Alice', age=10, height=80)]).toDF()\n >>> df.dropDuplicates().show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 5| 80|Alice|\n | 10| 80|Alice|\n +---+------+-----+\n\n >>> df.dropDuplicates(['name', 'height']).show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 5| 80|Alice|\n +---+------+-----+\n \"\"\"\n if subset is None:\n jdf = self._jdf.dropDuplicates()\n else:\n jdf = self._jdf.dropDuplicates(self._jseq(subset))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(\"1.3.1\")\n def dropna(self, how='any', thresh=None, subset=None):\n \"\"\"Returns a new :class:`DataFrame` omitting rows with null values.\n :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.\n\n :param how: 'any' or 'all'.\n If 'any', drop a row if it contains any nulls.\n If 'all', drop a row only if all its values are null.\n :param thresh: int, default None\n If specified, drop rows that have less than `thresh` non-null values.\n This overwrites the `how` parameter.\n :param subset: optional list of column names to consider.\n\n >>> df4.na.drop().show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 10| 80|Alice|\n +---+------+-----+\n \"\"\"\n if how is not None and how not in ['any', 'all']:\n raise ValueError(\"how ('\" + how + \"') should be 'any' or 'all'\")\n\n if subset is None:\n subset = self.columns\n elif isinstance(subset, basestring):\n subset = [subset]\n elif not isinstance(subset, (list, tuple)):\n raise ValueError(\"subset should be a list or tuple of column names\")\n\n if thresh is None:\n thresh = len(subset) if how == 'any' else 1\n\n return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)\n\n @since(\"1.3.1\")\n def fillna(self, value, subset=None):\n \"\"\"Replace null values, alias for ``na.fill()``.\n :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.\n\n :param value: int, long, float, string, bool or dict.\n Value to replace null values with.\n If the value is a dict, then `subset` is ignored and `value` must be a mapping\n from column name (string) to replacement value. 
The replacement value must be\n an int, long, float, boolean, or string.\n :param subset: optional list of column names to consider.\n Columns specified in subset that do not have matching data type are ignored.\n For example, if `value` is a string, and subset contains a non-string column,\n then the non-string column is simply ignored.\n\n >>> df4.na.fill(50).show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 10| 80|Alice|\n | 5| 50| Bob|\n | 50| 50| Tom|\n | 50| 50| null|\n +---+------+-----+\n\n >>> df5.na.fill(False).show()\n +----+-------+-----+\n | age| name| spy|\n +----+-------+-----+\n | 10| Alice|false|\n | 5| Bob|false|\n |null|Mallory| true|\n +----+-------+-----+\n\n >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()\n +---+------+-------+\n |age|height| name|\n +---+------+-------+\n | 10| 80| Alice|\n | 5| null| Bob|\n | 50| null| Tom|\n | 50| null|unknown|\n +---+------+-------+\n \"\"\"\n if not isinstance(value, (float, int, long, basestring, bool, dict)):\n raise ValueError(\"value should be a float, int, long, string, bool or dict\")\n\n # Note that bool validates isinstance(int), but we don't want to\n # convert bools to floats\n\n if not isinstance(value, bool) and isinstance(value, (int, long)):\n value = float(value)\n\n if isinstance(value, dict):\n return DataFrame(self._jdf.na().fill(value), self.sql_ctx)\n elif subset is None:\n return DataFrame(self._jdf.na().fill(value), self.sql_ctx)\n else:\n if isinstance(subset, basestring):\n subset = [subset]\n elif not isinstance(subset, (list, tuple)):\n raise ValueError(\"subset should be a list or tuple of column names\")\n\n return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)\n\n @since(1.4)\n def replace(self, to_replace, value=None, subset=None):\n \"\"\"Returns a new :class:`DataFrame` replacing a value with another value.\n :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are\n aliases of each other.\n Values to_replace and value must have the same type and can only be numerics, booleans,\n or strings. Value can have None. When replacing, the new value will be cast\n to the type of the existing column.\n For numeric replacements all values to be replaced should have unique\n floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)\n and arbitrary replacement will be used.\n\n :param to_replace: bool, int, long, float, string, list or dict.\n Value to be replaced.\n If the value is a dict, then `value` is ignored and `to_replace` must be a\n mapping between a value and a replacement.\n :param value: bool, int, long, float, string, list or None.\n The replacement value must be a bool, int, long, float, string or None. 
If `value` is a\n list, `value` should be of the same length and type as `to_replace`.\n If `value` is a scalar and `to_replace` is a sequence, then `value` is\n used as a replacement for each item in `to_replace`.\n :param subset: optional list of column names to consider.\n Columns specified in subset that do not have matching data type are ignored.\n For example, if `value` is a string, and subset contains a non-string column,\n then the non-string column is simply ignored.\n\n >>> df4.na.replace(10, 20).show()\n +----+------+-----+\n | age|height| name|\n +----+------+-----+\n | 20| 80|Alice|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null| null|\n +----+------+-----+\n\n >>> df4.na.replace('Alice', None).show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80|null|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n\n >>> df4.na.replace('Alice').show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80|null|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n\n >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80| A|\n | 5| null| B|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n \"\"\"\n # Helper functions\n def all_of(types):\n \"\"\"Given a type or tuple of types and a sequence of xs\n check if each x is instance of type(s)\n\n >>> all_of(bool)([True, False])\n True\n >>> all_of(basestring)([\"a\", 1])\n False\n \"\"\"\n def all_of_(xs):\n return all(isinstance(x, types) for x in xs)\n return all_of_\n\n all_of_bool = all_of(bool)\n all_of_str = all_of(basestring)\n all_of_numeric = all_of((float, int, long))\n\n # Validate input types\n valid_types = (bool, float, int, long, basestring, list, tuple)\n if not isinstance(to_replace, valid_types + (dict, )):\n raise ValueError(\n \"to_replace should be a bool, float, int, long, string, list, tuple, or dict. \"\n \"Got {0}\".format(type(to_replace)))\n\n if not isinstance(value, valid_types) and value is not None \\\n and not isinstance(to_replace, dict):\n raise ValueError(\"If to_replace is not a dict, value should be \"\n \"a bool, float, int, long, string, list, tuple or None. \"\n \"Got {0}\".format(type(value)))\n\n if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):\n if len(to_replace) != len(value):\n raise ValueError(\"to_replace and value lists should be of the same length. \"\n \"Got {0} and {1}\".format(len(to_replace), len(value)))\n\n if not (subset is None or isinstance(subset, (list, tuple, basestring))):\n raise ValueError(\"subset should be a list or tuple of column names, \"\n \"column name or None. Got {0}\".format(type(subset)))\n\n # Reshape input arguments if necessary\n if isinstance(to_replace, (float, int, long, basestring)):\n to_replace = [to_replace]\n\n if isinstance(to_replace, dict):\n rep_dict = to_replace\n if value is not None:\n warnings.warn(\"to_replace is a dict and value is not None. 
value will be ignored.\")\n else:\n if isinstance(value, (float, int, long, basestring)) or value is None:\n value = [value for _ in range(len(to_replace))]\n rep_dict = dict(zip(to_replace, value))\n\n if isinstance(subset, basestring):\n subset = [subset]\n\n # Verify we were not passed in mixed type generics.\n if not any(all_of_type(rep_dict.keys())\n and all_of_type(x for x in rep_dict.values() if x is not None)\n for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):\n raise ValueError(\"Mixed type replacements are not supported\")\n\n if subset is None:\n return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)\n else:\n return DataFrame(\n self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)\n\n @since(2.0)\n def approxQuantile(self, col, probabilities, relativeError):\n \"\"\"\n Calculates the approximate quantiles of numerical columns of a\n DataFrame.\n\n The result of this algorithm has the following deterministic bound:\n If the DataFrame has N elements and if we request the quantile at\n probability `p` up to error `err`, then the algorithm will return\n a sample `x` from the DataFrame so that the *exact* rank of `x` is\n close to (p * N). More precisely,\n\n floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).\n\n This method implements a variation of the Greenwald-Khanna\n algorithm (with some speed optimizations). The algorithm was first\n present in [[http://dx.doi.org/10.1145/375663.375670\n Space-efficient Online Computation of Quantile Summaries]]\n by Greenwald and Khanna.\n\n Note that null values will be ignored in numerical columns before calculation.\n For columns only containing null values, an empty list is returned.\n\n :param col: str, list.\n Can be a single column name, or a list of names for multiple columns.\n :param probabilities: a list of quantile probabilities\n Each number must belong to [0, 1].\n For example 0 is the minimum, 0.5 is the median, 1 is the maximum.\n :param relativeError: The relative target precision to achieve\n (>= 0). If set to zero, the exact quantiles are computed, which\n could be very expensive. Note that values greater than 1 are\n accepted but give the same result as 1.\n :return: the approximate quantiles at the given probabilities. If\n the input `col` is a string, the output is a list of floats. If the\n input `col` is a list or tuple of strings, the output is also a\n list, but each element in it is a list of floats, i.e., the output\n is a list of list of floats.\n\n .. 
versionchanged:: 2.2\n Added support for multiple columns.\n \"\"\"\n\n if not isinstance(col, (str, list, tuple)):\n raise ValueError(\"col should be a string, list or tuple, but got %r\" % type(col))\n\n isStr = isinstance(col, str)\n\n if isinstance(col, tuple):\n col = list(col)\n elif isinstance(col, str):\n col = [col]\n\n for c in col:\n if not isinstance(c, str):\n raise ValueError(\"columns should be strings, but got %r\" % type(c))\n col = _to_list(self._sc, col)\n\n if not isinstance(probabilities, (list, tuple)):\n raise ValueError(\"probabilities should be a list or tuple\")\n if isinstance(probabilities, tuple):\n probabilities = list(probabilities)\n for p in probabilities:\n if not isinstance(p, (float, int, long)) or p < 0 or p > 1:\n raise ValueError(\"probabilities should be numerical (float, int, long) in [0,1].\")\n probabilities = _to_list(self._sc, probabilities)\n\n if not isinstance(relativeError, (float, int, long)) or relativeError < 0:\n raise ValueError(\"relativeError should be numerical (float, int, long) >= 0.\")\n relativeError = float(relativeError)\n\n jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)\n jaq_list = [list(j) for j in jaq]\n return jaq_list[0] if isStr else jaq_list\n\n @since(1.4)\n def corr(self, col1, col2, method=None):\n \"\"\"\n Calculates the correlation of two columns of a DataFrame as a double value.\n Currently only supports the Pearson Correlation Coefficient.\n :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.\n\n :param col1: The name of the first column\n :param col2: The name of the second column\n :param method: The correlation method. Currently only supports \"pearson\"\n \"\"\"\n if not isinstance(col1, str):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, str):\n raise ValueError(\"col2 should be a string.\")\n if not method:\n method = \"pearson\"\n if not method == \"pearson\":\n raise ValueError(\"Currently only the calculation of the Pearson Correlation \" +\n \"coefficient is supported.\")\n return self._jdf.stat().corr(col1, col2, method)\n\n @since(1.4)\n def cov(self, col1, col2):\n \"\"\"\n Calculate the sample covariance for the given columns, specified by their names, as a\n double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.\n\n :param col1: The name of the first column\n :param col2: The name of the second column\n \"\"\"\n if not isinstance(col1, str):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, str):\n raise ValueError(\"col2 should be a string.\")\n return self._jdf.stat().cov(col1, col2)\n\n @since(1.4)\n def crosstab(self, col1, col2):\n \"\"\"\n Computes a pair-wise frequency table of the given columns. Also known as a contingency\n table. The number of distinct values for each column should be less than 1e4. At most 1e6\n non-zero pair frequencies will be returned.\n The first column of each row will be the distinct values of `col1` and the column names\n will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.\n Pairs that have no occurrences will have zero as their counts.\n :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.\n\n :param col1: The name of the first column. Distinct items will make the first item of\n each row.\n :param col2: The name of the second column. 
Distinct items will make the column names\n of the DataFrame.\n \"\"\"\n if not isinstance(col1, str):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, str):\n raise ValueError(\"col2 should be a string.\")\n return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)\n\n @since(1.4)\n def freqItems(self, cols, support=None):\n \"\"\"\n Finding frequent items for columns, possibly with false positives. Using the\n frequent element count algorithm described in\n \"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou\".\n :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n :param cols: Names of the columns to calculate frequent items for as a list or tuple of\n strings.\n :param support: The frequency with which to consider an item 'frequent'. Default is 1%.\n The support must be greater than 1e-4.\n \"\"\"\n if isinstance(cols, tuple):\n cols = list(cols)\n if not isinstance(cols, list):\n raise ValueError(\"cols must be a list or tuple of column names as strings.\")\n if not support:\n support = 0.01\n return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def withColumn(self, colName, col):\n \"\"\"\n Returns a new :class:`DataFrame` by adding a column or replacing the\n existing column that has the same name.\n\n :param colName: string, name of the new column.\n :param col: a :class:`Column` expression for the new column.\n\n >>> df.withColumn('age2', df.age + 2).collect()\n [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]\n \"\"\"\n assert isinstance(col, Column), \"col should be Column\"\n return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def withColumnRenamed(self, existing, new):\n \"\"\"Returns a new :class:`DataFrame` by renaming an existing column.\n This is a no-op if schema doesn't contain the given column name.\n\n :param existing: string, name of the existing column to rename.\n :param col: string, new name of the column.\n\n >>> df.withColumnRenamed('age', 'age2').collect()\n [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]\n \"\"\"\n return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)\n\n @since(1.4)\n @ignore_unicode_prefix\n def drop(self, *cols):\n \"\"\"Returns a new :class:`DataFrame` that drops the specified column.\n This is a no-op if schema doesn't contain the given column name(s).\n\n :param cols: a string name of the column to drop, or a\n :class:`Column` to drop, or a list of string name of the columns to drop.\n\n >>> df.drop('age').collect()\n [Row(name=u'Alice'), Row(name=u'Bob')]\n\n >>> df.drop(df.age).collect()\n [Row(name=u'Alice'), Row(name=u'Bob')]\n\n >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()\n [Row(age=5, height=85, name=u'Bob')]\n\n >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()\n [Row(age=5, name=u'Bob', height=85)]\n\n >>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()\n [Row(name=u'Bob')]\n \"\"\"\n if len(cols) == 1:\n col = cols[0]\n if isinstance(col, basestring):\n jdf = self._jdf.drop(col)\n elif isinstance(col, Column):\n jdf = self._jdf.drop(col._jc)\n else:\n raise TypeError(\"col should be a string or a 
Column\")\n else:\n for col in cols:\n if not isinstance(col, basestring):\n raise TypeError(\"each col in the param list should be a string\")\n jdf = self._jdf.drop(self._jseq(cols))\n\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n def toDF(self, *cols):\n \"\"\"Returns a new class:`DataFrame` that with new specified column names\n\n :param cols: list of new column names (string)\n\n >>> df.toDF('f1', 'f2').collect()\n [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]\n \"\"\"\n jdf = self._jdf.toDF(self._jseq(cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.3)\n def toPandas(self):\n \"\"\"\n Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.\n\n This is only available if Pandas is installed and available.\n\n .. note:: This method should only be used if the resulting Pandas's DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n >>> df.toPandas() # doctest: +SKIP\n age name\n 0 2 Alice\n 1 5 Bob\n \"\"\"\n import pandas as pd\n if self.sql_ctx.getConf(\"spark.sql.execution.arrow.enable\", \"false\").lower() == \"true\":\n try:\n import pyarrow\n tables = self._collectAsArrow()\n if tables:\n table = pyarrow.concat_tables(tables)\n return table.to_pandas()\n else:\n return pd.DataFrame.from_records([], columns=self.columns)\n except ImportError as e:\n msg = \"note: pyarrow must be installed and available on calling Python process \" \\\n \"if using spark.sql.execution.arrow.enable=true\"\n raise ImportError(\"%s\\n%s\" % (e.message, msg))\n else:\n dtype = {}\n for field in self.schema:\n pandas_type = _to_corrected_pandas_type(field.dataType)\n if pandas_type is not None:\n dtype[field.name] = pandas_type\n\n pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)\n\n for f, t in dtype.items():\n pdf[f] = pdf[f].astype(t, copy=False)\n return pdf\n\n def _collectAsArrow(self):\n \"\"\"\n Returns all records as list of deserialized ArrowPayloads, pyarrow must be installed\n and available.\n\n .. note:: Experimental.\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n port = self._jdf.collectAsArrowToPython()\n return list(_load_from_socket(port, ArrowSerializer()))\n\n ##########################################################################################\n # Pandas compatibility\n ##########################################################################################\n\n groupby = copy_func(\n groupBy,\n sinceversion=1.4,\n doc=\":func:`groupby` is an alias for :func:`groupBy`.\")\n\n drop_duplicates = copy_func(\n dropDuplicates,\n sinceversion=1.4,\n doc=\":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.\")\n\n where = copy_func(\n filter,\n sinceversion=1.3,\n doc=\":func:`where` is an alias for :func:`filter`.\")\n\n\ndef _to_scala_map(sc, jm):\n \"\"\"\n Convert a dict into a JVM Map.\n \"\"\"\n return sc._jvm.PythonUtils.toScalaMap(jm)\n\n\ndef _to_corrected_pandas_type(dt):\n \"\"\"\n When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.\n This method gets the corrected data type for Pandas if that type may be inferred uncorrectly.\n \"\"\"\n import numpy as np\n if type(dt) == ByteType:\n return np.int8\n elif type(dt) == ShortType:\n return np.int16\n elif type(dt) == IntegerType:\n return np.int32\n elif type(dt) == FloatType:\n return np.float32\n else:\n return None\n\n\nclass DataFrameNaFunctions(object):\n \"\"\"Functionality for working with missing data in :class:`DataFrame`.\n\n .. 
versionadded:: 1.4\n \"\"\"\n\n def __init__(self, df):\n self.df = df\n\n def drop(self, how='any', thresh=None, subset=None):\n return self.df.dropna(how=how, thresh=thresh, subset=subset)\n\n drop.__doc__ = DataFrame.dropna.__doc__\n\n def fill(self, value, subset=None):\n return self.df.fillna(value=value, subset=subset)\n\n fill.__doc__ = DataFrame.fillna.__doc__\n\n def replace(self, to_replace, value=None, subset=None):\n return self.df.replace(to_replace, value, subset)\n\n replace.__doc__ = DataFrame.replace.__doc__\n\n\nclass DataFrameStatFunctions(object):\n \"\"\"Functionality for statistic functions with :class:`DataFrame`.\n\n .. versionadded:: 1.4\n \"\"\"\n\n def __init__(self, df):\n self.df = df\n\n def approxQuantile(self, col, probabilities, relativeError):\n return self.df.approxQuantile(col, probabilities, relativeError)\n\n approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__\n\n def corr(self, col1, col2, method=None):\n return self.df.corr(col1, col2, method)\n\n corr.__doc__ = DataFrame.corr.__doc__\n\n def cov(self, col1, col2):\n return self.df.cov(col1, col2)\n\n cov.__doc__ = DataFrame.cov.__doc__\n\n def crosstab(self, col1, col2):\n return self.df.crosstab(col1, col2)\n\n crosstab.__doc__ = DataFrame.crosstab.__doc__\n\n def freqItems(self, cols, support=None):\n return self.df.freqItems(cols, support)\n\n freqItems.__doc__ = DataFrame.freqItems.__doc__\n\n def sampleBy(self, col, fractions, seed=None):\n return self.df.sampleBy(col, fractions, seed)\n\n sampleBy.__doc__ = DataFrame.sampleBy.__doc__\n\n\ndef _test():\n import doctest\n from pyspark.context import SparkContext\n from pyspark.sql import Row, SQLContext, SparkSession\n import pyspark.sql.dataframe\n from pyspark.sql.functions import from_unixtime\n globs = pyspark.sql.dataframe.__dict__.copy()\n sc = SparkContext('local[4]', 'PythonTest')\n globs['sc'] = sc\n globs['sqlContext'] = SQLContext(sc)\n globs['spark'] = SparkSession(sc)\n globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\\\n .toDF(StructType([StructField('age', IntegerType()),\n StructField('name', StringType())]))\n globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()\n globs['df3'] = sc.parallelize([Row(name='Alice', age=2),\n Row(name='Bob', age=5)]).toDF()\n globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),\n Row(name='Bob', age=5, height=None),\n Row(name='Tom', age=None, height=None),\n Row(name=None, age=None, height=None)]).toDF()\n globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),\n Row(name='Bob', spy=None, age=5),\n Row(name='Mallory', spy=True, age=None)]).toDF()\n globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),\n Row(name='Bob', time=1479442946)]).toDF()\n\n (failure_count, test_count) = doctest.testmod(\n pyspark.sql.dataframe, globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)\n globs['sc'].stop()\n if failure_count:\n exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
audio-is-fun/chord-detection-algorithm
[ "8cf41ccd72a035131bc744b440b4dd12671c1fac" ]
[ "detect_chords_microphone_old.py" ]
[ "'''\nAutomatic chords detection algorithm (real time).\n\nImplementation of the algorithm described in Described in the Bachelor Thesis: \nDesign and Evaluation of a Simple Chord Detection Algorithm by Christoph Hausner.\n\nImplemented by Haldo Sponton ([email protected]).\n\nNote: this functionality requires pyaudio. \nI had to install it using `conda install pyaudio`. Pip installation failed.\n'''\n\n# imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport librosa\nimport librosa.display\nimport scipy\nimport warnings\nwarnings.filterwarnings('ignore')\nimport argparse\nimport os\nimport sys\n\nimport pyaudio\nimport struct\n\nimport queue\nfrom matplotlib.animation import FuncAnimation\nimport sounddevice as sd\n\nimport config\nimport utils\n\n# initial screen\nprint(config.SPLASH)\n\n# show audio devices \nprint(' ## Audio devices')\nprint(sd.query_devices())\nprint('\\n')\n\n# constants\nchunk = 2 * 1024 # samples per frame\nFORMAT = pyaudio.paInt16 # audio format (bytes per sample?)\nchannels = 1 # single channel for microphone\nfs = 44100 # samples per second\n\n# pyaudio class instance\np = pyaudio.PyAudio()\n\n# stream object to get data from microphone\nstream = p.open(\n format=FORMAT,\n channels=channels,\n rate=fs,\n input=True,\n output=True,\n frames_per_buffer=chunk\n)\n\nsamples = []\n\nwhile True:\n \n try:\n # binary data\n data = stream.read(chunk) \n # convert data to integers, make np array, then offset it by 127\n data_int = struct.unpack(str(2 * chunk) + 'B', data)\n # create np array and normalize\n data_np = np.array(data_int, dtype='b')[::2] / 128\n\n if len(samples) < config.NFFT:\n samples = np.concatenate([samples, data_np])\n else:\n # compute spectrogram\n f, t, sp = utils.compute_spectrogram(samples, fs)\n\n # harmonic content extraction\n sp_filtered = utils.harmonic_content_extraction(sp, f)\n\n # compute chromagram\n chromagram = utils.compute_chromagram(sp, f)\n\n # compute chordgram\n chordgram, detected_chords, detected_weights = utils.compute_chordgram(chromagram)\n print(' ## Detected chords: ' + ' '.join(detected_chords))\n print(' ## Detected weights: ' + ' '.join(['%.2f' % w for w in detected_weights]) + '\\n')\n\n # save chordgram image\n utils.save_chordgram_image(chordgram, t, detected_chords, 'test.png')\n\n samples = []\n\n except Exception as e:\n print(type(e).__name__ + ': ' + str(e))" ]
[ [ "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
THGLab/sidechainnet
[ "e6f4f6a30ad4e202db3afb28e4d958937cb57135" ]
[ "sidechainnet/utils/organize.py" ]
[ "\"\"\"Contains methods for organizing SidechainNet data into a Python dictionary.\"\"\"\n\nimport copy\nimport datetime\nimport os\nimport pickle\nimport re\n\nimport numpy as np\n\nfrom sidechainnet.utils.download import determine_pnid_type\n\nEMPTY_SPLIT_DICT = {\n \"seq\": [],\n \"ang\": [],\n \"ids\": [],\n \"evo\": [],\n \"msk\": [],\n \"crd\": [],\n \"sec\": [],\n \"res\": [],\n \"ums\": [],\n \"mod\": []\n}\n\n\ndef validate_data_dict(data):\n \"\"\"Performs several sanity checks on the data dict before saving.\"\"\"\n from sidechainnet.utils.download import VALID_SPLITS\n # Assert size of each data subset matches\n train_len = len(data[\"train\"][\"seq\"])\n test_len = len(data[\"test\"][\"seq\"])\n items_recorded = [\"seq\", \"ang\", \"ids\", \"crd\", \"msk\", \"evo\"]\n for num_items, subset in zip([train_len, test_len], [\"train\", \"test\"]):\n assert all([\n l == num_items for l in map(len, [data[subset][k] for k in items_recorded])\n ]), f\"{subset} lengths don't match.\"\n\n for vsplit in VALID_SPLITS:\n valid_len = len(data[vsplit][\"seq\"])\n assert all([\n l == valid_len\n for l in map(len, [data[vsplit][k] for k in [\"ang\", \"ids\", \"crd\"]])\n ]), \"Valid lengths don't match.\"\n\n\ndef create_empty_dictionary():\n \"\"\"Create an empty SidechainNet dictionary ready to hold SidechainNet data.\"\"\"\n from sidechainnet.utils.download import VALID_SPLITS\n\n data = {\n \"train\": copy.deepcopy(EMPTY_SPLIT_DICT),\n \"test\": copy.deepcopy(EMPTY_SPLIT_DICT),\n # To parse date, use datetime.datetime.strptime(date, \"%I:%M%p on %B %d, %Y\")\n \"date\": datetime.datetime.now().strftime(\"%I:%M%p %b %d, %Y\"),\n \"settings\": dict()\n }\n\n validation_subdict = {\n vsplit: copy.deepcopy(EMPTY_SPLIT_DICT) for vsplit in VALID_SPLITS\n }\n data.update(validation_subdict)\n\n return data\n\n\ndef get_proteinnetIDs_by_split(casp_version, thinning, custom_ids=None):\n \"\"\"Returns a dict of ProteinNet IDs organized by data split (train/test/valid).\"\"\"\n from sidechainnet.create import get_proteinnet_ids\n if custom_ids is not None:\n ids_datasplit = [(_id, determine_pnid_type(_id)) for _id in custom_ids]\n ids = {\"train\": [], \"valid\": [], \"test\": []}\n for _id, split in ids_datasplit:\n ids[split].append(_id)\n return ids\n\n if thinning == \"debug\":\n thinning = 100\n\n ids = {\n \"train\": get_proteinnet_ids(casp_version, \"train\", thinning=thinning),\n \"valid\": get_proteinnet_ids(casp_version, \"valid\"),\n \"test\": get_proteinnet_ids(casp_version, \"test\")\n }\n\n return ids\n\n\ndef organize_data(scnet_data,\n casp_version,\n thinning,\n is_debug=False,\n description=None,\n custom_ids=None):\n \"\"\"Given an unsorted Sidechainnet data dict, organizes into ProteinNet data splits.\n\n Args:\n scnet_data: A dictionary mapping ProteinNet ids (pnids) to data recorded by\n SidechainNet ('seq', 'ang', 'crd', 'evo', 'msk').\n casp_version: A string describing the CASP version of this dataset.\n thinning: An integer representing the training set thinning.\n is_debug: A bool. 
If True, sample 200 training set IDs.\n description: A string describing the dataset.\n custom_ids: (optional) A list of custom ProteinNet IDs to use for this dataset.\n\n Returns:\n A Python dictionary containing SidechainNet data, but this time, organized\n and divided into the data splits specified by ProteinNet.\n \"\"\"\n from sidechainnet.utils.download import DATA_SPLITS\n # First, we need to determine which pnids belong to which data split.\n ids = get_proteinnetIDs_by_split(casp_version, thinning, custom_ids)\n\n # Next, we create the empty dictionary for storing the data, organized by data splits\n organized_data = create_empty_dictionary()\n\n # Now, we organize the data by its data splits\n n_proteins = 0\n for split in [\"train\", \"test\", \"valid\"]:\n if split == \"train\" and is_debug:\n thinning = 0\n np.random.seed(0)\n split_ids = np.random.choice(ids[split], 200, replace=False)\n else:\n split_ids = ids[split]\n for pnid in split_ids:\n if pnid not in scnet_data:\n continue\n if 'primary' in scnet_data[pnid]:\n print(f\"{pnid} had 'primary' key.\")\n del scnet_data[pnid]\n continue\n realsplit = f\"valid-{pnid.split('#')[0]}\" if split == \"valid\" else split\n organized_data[realsplit]['seq'].append(scnet_data[pnid]['seq'])\n organized_data[realsplit]['ang'].append(scnet_data[pnid]['ang'])\n organized_data[realsplit]['crd'].append(scnet_data[pnid]['crd'])\n organized_data[realsplit]['msk'].append(scnet_data[pnid]['msk'])\n organized_data[realsplit]['evo'].append(scnet_data[pnid]['evo'])\n organized_data[realsplit]['sec'].append(scnet_data[pnid]['sec'])\n organized_data[realsplit]['res'].append(scnet_data[pnid]['res'])\n organized_data[realsplit]['ums'].append(scnet_data[pnid]['ums'])\n organized_data[realsplit]['mod'].append(scnet_data[pnid]['mod'])\n organized_data[realsplit]['ids'].append(pnid)\n n_proteins += 1\n\n # Sort each split of data by length, ascending\n for split in DATA_SPLITS:\n organized_data[split] = sort_datasplit(organized_data[split])\n\n # Add settings\n organized_data[\"description\"] = description\n organized_data[\"settings\"][\"casp_version\"] = int(\n casp_version) if (isinstance(casp_version, int) or casp_version.isnumeric()) else casp_version\n organized_data[\"settings\"][\"thinning\"] = int(\n thinning) if (isinstance(thinning, int) or thinning.isnumeric()) else thinning\n organized_data[\"settings\"][\"n_proteins\"] = n_proteins\n organized_data[\"settings\"][\"angle_means\"] = compute_angle_means(\n organized_data['train']['ang'])\n organized_data[\"settings\"][\"lengths\"] = np.sort(\n np.asarray(list(map(len, (v['seq'] for k, v in scnet_data.items())))))\n organized_data['settings']['max_length'] = organized_data[\"settings\"][\"lengths\"].max()\n\n print(f\"{n_proteins} included in CASP {casp_version} ({thinning}% thinning).\")\n\n validate_data_dict(organized_data)\n\n return organized_data\n\n\ndef get_validation_split_identifiers_from_pnid_list(pnids):\n \"\"\"Return a sorted list of validation set identifiers given a list of ProteinNet IDs.\n\n Args:\n pnids (list): List of ProteinNet-formated IDs (90#1A9U_1_A)\n\n Returns:\n List: List of validation set identifiers present in the list of pnids.\n\n Example:\n >>> pnids = ['40#1XHN_1_A', '10#2MEM_1_A', '90#3EOI_1_A']\n >>> get_validation_split_identifiers_from_pnid_list(pnids)\n [10, 40, 90]\n \"\"\"\n matches = (re.match(r\"(\\d+)#\\S+\", s) for s in pnids)\n matches = set((m.group(1) for m in filter(lambda s: s is not None, matches)))\n return sorted(map(int, matches))\n\n\ndef 
compute_angle_means(angle_list):\n \"\"\"Computes mean of angle matrices in a Python list ignoring all-zero rows.\"\"\"\n angles = np.concatenate(angle_list)\n angles = angles[~(angles == 0).all(axis=1)]\n return angles.mean(axis=0)\n\n\ndef save_data(data, path):\n \"\"\"Saves an organized SidechainNet data dict to a given, local filepath.\"\"\"\n with open(path, \"wb\") as f:\n return pickle.dump(data, f)\n\n\ndef load_data(path):\n \"\"\"Loads SidechainNet data dict from a given, local filepath.\"\"\"\n with open(path, \"rb\") as f:\n return pickle.load(f)\n\n\ndef sort_datasplit(split):\n \"\"\"Sorts a single split of the SidechainNet data dict by ascending length.\"\"\"\n sorted_len_indices = [\n a[0]\n for a in sorted(enumerate(split['seq']), key=lambda x: len(x[1]), reverse=False)\n ]\n\n for datatype in split.keys():\n split[datatype] = [split[datatype][i] for i in sorted_len_indices]\n\n return split\n" ]
[ [ "numpy.concatenate", "numpy.random.seed", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anandj91/tensorboard
[ "92d8971eecdd4a185b899cae5e34bdeeafe94c76" ]
[ "tensorboard/plugins/projector/projector_plugin.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Embedding Projector plugin.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport imghdr\nimport math\nimport os\nimport threading\n\nimport numpy as np\nfrom werkzeug import wrappers\n\nfrom google.protobuf import json_format\nfrom google.protobuf import text_format\n\nfrom tensorboard.backend.http_util import Respond\nfrom tensorboard.compat import tf\nfrom tensorboard.compat import _pywrap_tensorflow\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig\nfrom tensorboard.util import tb_logging\n\nlogger = tb_logging.get_logger()\n\n# The prefix of routes provided by this plugin.\n_PLUGIN_PREFIX_ROUTE = 'projector'\n\n# FYI - the PROJECTOR_FILENAME is hardcoded in the visualize_embeddings\n# method in tf.contrib.tensorboard.plugins.projector module.\n# TODO(@dandelionmane): Fix duplication when we find a permanent home for the\n# projector module.\nPROJECTOR_FILENAME = 'projector_config.pbtxt'\n_PLUGIN_NAME = 'org_tensorflow_tensorboard_projector'\n_PLUGINS_DIR = 'plugins'\n\n# Number of tensors in the LRU cache.\n_TENSOR_CACHE_CAPACITY = 1\n\n# HTTP routes.\nCONFIG_ROUTE = '/info'\nTENSOR_ROUTE = '/tensor'\nMETADATA_ROUTE = '/metadata'\nRUNS_ROUTE = '/runs'\nBOOKMARKS_ROUTE = '/bookmarks'\nSPRITE_IMAGE_ROUTE = '/sprite_image'\n\n_IMGHDR_TO_MIMETYPE = {\n 'bmp': 'image/bmp',\n 'gif': 'image/gif',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png'\n}\n_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'\n\n\nclass LRUCache(object):\n \"\"\"LRU cache. 
Used for storing the last used tensor.\"\"\"\n\n def __init__(self, size):\n if size < 1:\n raise ValueError('The cache size must be >=1')\n self._size = size\n self._dict = collections.OrderedDict()\n\n def get(self, key):\n try:\n value = self._dict.pop(key)\n self._dict[key] = value\n return value\n except KeyError:\n return None\n\n def set(self, key, value):\n if value is None:\n raise ValueError('value must be != None')\n try:\n self._dict.pop(key)\n except KeyError:\n if len(self._dict) >= self._size:\n self._dict.popitem(last=False)\n self._dict[key] = value\n\n\nclass EmbeddingMetadata(object):\n \"\"\"Metadata container for an embedding.\n\n The metadata holds different columns with values used for visualization\n (color by, label by) in the \"Embeddings\" tab in TensorBoard.\n \"\"\"\n\n def __init__(self, num_points):\n \"\"\"Constructs a metadata for an embedding of the specified size.\n\n Args:\n num_points: Number of points in the embedding.\n \"\"\"\n self.num_points = num_points\n self.column_names = []\n self.name_to_values = {}\n\n def add_column(self, column_name, column_values):\n \"\"\"Adds a named column of metadata values.\n\n Args:\n column_name: Name of the column.\n column_values: 1D array/list/iterable holding the column values. Must be\n of length `num_points`. The i-th value corresponds to the i-th point.\n\n Raises:\n ValueError: If `column_values` is not 1D array, or of length `num_points`,\n or the `name` is already used.\n \"\"\"\n # Sanity checks.\n if isinstance(column_values, list) and isinstance(column_values[0], list):\n raise ValueError('\"column_values\" must be a flat list, but we detected '\n 'that its first entry is a list')\n\n if isinstance(column_values, np.ndarray) and column_values.ndim != 1:\n raise ValueError('\"column_values\" should be of rank 1, '\n 'but is of rank %d' % column_values.ndim)\n if len(column_values) != self.num_points:\n raise ValueError('\"column_values\" should be of length %d, but is of '\n 'length %d' % (self.num_points, len(column_values)))\n if column_name in self.name_to_values:\n raise ValueError('The column name \"%s\" is already used' % column_name)\n\n self.column_names.append(column_name)\n self.name_to_values[column_name] = column_values\n\n\ndef _read_tensor_tsv_file(fpath):\n with tf.io.gfile.GFile(fpath, 'r') as f:\n tensor = []\n for line in f:\n line = line.rstrip('\\n')\n if line:\n tensor.append(list(map(float, line.split('\\t'))))\n return np.array(tensor, dtype='float32')\n\n\ndef _assets_dir_to_logdir(assets_dir):\n sub_path = os.path.sep + _PLUGINS_DIR + os.path.sep\n if sub_path in assets_dir:\n two_parents_up = os.pardir + os.path.sep + os.pardir\n return os.path.abspath(os.path.join(assets_dir, two_parents_up))\n return assets_dir\n\n\ndef _latest_checkpoints_changed(configs, run_path_pairs):\n \"\"\"Returns true if the latest checkpoint has changed in any of the runs.\"\"\"\n for run_name, assets_dir in run_path_pairs:\n if run_name not in configs:\n config = ProjectorConfig()\n config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)\n if tf.io.gfile.exists(config_fpath):\n with tf.io.gfile.GFile(config_fpath, 'r') as f:\n file_content = f.read()\n text_format.Merge(file_content, config)\n else:\n config = configs[run_name]\n\n # See if you can find a checkpoint file in the logdir.\n logdir = _assets_dir_to_logdir(assets_dir)\n ckpt_path = _find_latest_checkpoint(logdir)\n if not ckpt_path:\n continue\n if config.model_checkpoint_path != ckpt_path:\n return True\n return False\n\n\ndef 
_parse_positive_int_param(request, param_name):\n \"\"\"Parses and asserts a positive (>0) integer query parameter.\n\n Args:\n request: The Werkzeug Request object\n param_name: Name of the parameter.\n\n Returns:\n Param, or None, or -1 if parameter is not a positive integer.\n \"\"\"\n param = request.args.get(param_name)\n if not param:\n return None\n try:\n param = int(param)\n if param <= 0:\n raise ValueError()\n return param\n except ValueError:\n return -1\n\n\ndef _rel_to_abs_asset_path(fpath, config_fpath):\n fpath = os.path.expanduser(fpath)\n if not os.path.isabs(fpath):\n return os.path.join(os.path.dirname(config_fpath), fpath)\n return fpath\n\n\ndef _using_tf():\n \"\"\"Return true if we're not using the fake TF API stub implementation.\"\"\"\n return tf.__version__ != 'stub'\n\n\nclass ProjectorPlugin(base_plugin.TBPlugin):\n \"\"\"Embedding projector.\"\"\"\n\n plugin_name = _PLUGIN_PREFIX_ROUTE\n\n def __init__(self, context):\n \"\"\"Instantiates ProjectorPlugin via TensorBoard core.\n\n Args:\n context: A base_plugin.TBContext instance.\n \"\"\"\n self.multiplexer = context.multiplexer\n self.logdir = context.logdir\n self._handlers = None\n self.readers = {}\n self.run_paths = None\n self._configs = {}\n self.old_num_run_paths = None\n self.config_fpaths = None\n self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)\n\n # Whether the plugin is active (has meaningful data to process and serve).\n # Once the plugin is deemed active, we no longer re-compute the value\n # because doing so is potentially expensive.\n self._is_active = False\n\n # The running thread that is currently determining whether the plugin is\n # active. If such a thread exists, do not start a duplicate thread.\n self._thread_for_determining_is_active = None\n\n if self.multiplexer:\n self.run_paths = self.multiplexer.RunPaths()\n\n def get_plugin_apps(self):\n self._handlers = {\n RUNS_ROUTE: self._serve_runs,\n CONFIG_ROUTE: self._serve_config,\n TENSOR_ROUTE: self._serve_tensor,\n METADATA_ROUTE: self._serve_metadata,\n BOOKMARKS_ROUTE: self._serve_bookmarks,\n SPRITE_IMAGE_ROUTE: self._serve_sprite_image\n }\n return self._handlers\n\n def is_active(self):\n \"\"\"Determines whether this plugin is active.\n\n This plugin is only active if any run has an embedding.\n\n Returns:\n Whether any run has embedding data to show in the projector.\n \"\"\"\n if not self.multiplexer:\n return False\n\n if self._is_active:\n # We have already determined that the projector plugin should be active.\n # Do not re-compute that. We have no reason to later set this plugin to be\n # inactive.\n return True\n\n if self._thread_for_determining_is_active:\n # We are currently determining whether the plugin is active. Do not start\n # a separate thread.\n return self._is_active\n\n # The plugin is currently not active. 
The frontend might check again later.\n # For now, spin off a separate thread to determine whether the plugin is\n # active.\n new_thread = threading.Thread(\n target=self._determine_is_active,\n name='ProjectorPluginIsActiveThread')\n self._thread_for_determining_is_active = new_thread\n new_thread.start()\n return False\n\n def _determine_is_active(self):\n \"\"\"Determines whether the plugin is active.\n\n This method is run in a separate thread so that the plugin can offer an\n immediate response to whether it is active and determine whether it should\n be active in a separate thread.\n \"\"\"\n if self.configs:\n self._is_active = True\n self._thread_for_determining_is_active = None\n\n @property\n def configs(self):\n \"\"\"Returns a map of run paths to `ProjectorConfig` protos.\"\"\"\n run_path_pairs = list(self.run_paths.items())\n self._append_plugin_asset_directories(run_path_pairs)\n # If there are no summary event files, the projector should still work,\n # treating the `logdir` as the model checkpoint directory.\n if not run_path_pairs:\n run_path_pairs.append(('.', self.logdir))\n if (self._run_paths_changed() or\n _latest_checkpoints_changed(self._configs, run_path_pairs)):\n self.readers = {}\n self._configs, self.config_fpaths = self._read_latest_config_files(\n run_path_pairs)\n self._augment_configs_with_checkpoint_info()\n return self._configs\n\n def _run_paths_changed(self):\n num_run_paths = len(list(self.run_paths.keys()))\n if num_run_paths != self.old_num_run_paths:\n self.old_num_run_paths = num_run_paths\n return True\n return False\n\n def _augment_configs_with_checkpoint_info(self):\n for run, config in self._configs.items():\n for embedding in config.embeddings:\n # Normalize the name of the embeddings.\n if embedding.tensor_name.endswith(':0'):\n embedding.tensor_name = embedding.tensor_name[:-2]\n # Find the size of embeddings associated with a tensors file.\n if embedding.tensor_path and not embedding.tensor_shape:\n fpath = _rel_to_abs_asset_path(embedding.tensor_path,\n self.config_fpaths[run])\n tensor = self.tensor_cache.get((run, embedding.tensor_name))\n if tensor is None:\n tensor = _read_tensor_tsv_file(fpath)\n self.tensor_cache.set((run, embedding.tensor_name), tensor)\n embedding.tensor_shape.extend([len(tensor), len(tensor[0])])\n\n reader = self._get_reader_for_run(run)\n if not reader:\n continue\n # Augment the configuration with the tensors in the checkpoint file.\n special_embedding = None\n if config.embeddings and not config.embeddings[0].tensor_name:\n special_embedding = config.embeddings[0]\n config.embeddings.remove(special_embedding)\n var_map = reader.get_variable_to_shape_map()\n for tensor_name, tensor_shape in var_map.items():\n if len(tensor_shape) != 2:\n continue\n embedding = self._get_embedding(tensor_name, config)\n if not embedding:\n embedding = config.embeddings.add()\n embedding.tensor_name = tensor_name\n if special_embedding:\n embedding.metadata_path = special_embedding.metadata_path\n embedding.bookmarks_path = special_embedding.bookmarks_path\n if not embedding.tensor_shape:\n embedding.tensor_shape.extend(tensor_shape)\n\n # Remove configs that do not have any valid (2D) tensors.\n runs_to_remove = []\n for run, config in self._configs.items():\n if not config.embeddings:\n runs_to_remove.append(run)\n for run in runs_to_remove:\n del self._configs[run]\n del self.config_fpaths[run]\n\n def _read_latest_config_files(self, run_path_pairs):\n \"\"\"Reads and returns the projector config files in every run 
directory.\"\"\"\n configs = {}\n config_fpaths = {}\n for run_name, assets_dir in run_path_pairs:\n config = ProjectorConfig()\n config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)\n if tf.io.gfile.exists(config_fpath):\n with tf.io.gfile.GFile(config_fpath, 'r') as f:\n file_content = f.read()\n text_format.Merge(file_content, config)\n has_tensor_files = False\n for embedding in config.embeddings:\n if embedding.tensor_path:\n if not embedding.tensor_name:\n embedding.tensor_name = os.path.basename(embedding.tensor_path)\n has_tensor_files = True\n break\n\n if not config.model_checkpoint_path:\n # See if you can find a checkpoint file in the logdir.\n logdir = _assets_dir_to_logdir(assets_dir)\n ckpt_path = _find_latest_checkpoint(logdir)\n if not ckpt_path and not has_tensor_files:\n continue\n if ckpt_path:\n config.model_checkpoint_path = ckpt_path\n\n # Sanity check for the checkpoint file.\n if (config.model_checkpoint_path and _using_tf() and\n not tf.compat.v1.train.checkpoint_exists(config.model_checkpoint_path)):\n logger.warn('Checkpoint file \"%s\" not found',\n config.model_checkpoint_path)\n continue\n configs[run_name] = config\n config_fpaths[run_name] = config_fpath\n return configs, config_fpaths\n\n def _get_reader_for_run(self, run):\n if run in self.readers:\n return self.readers[run]\n\n config = self._configs[run]\n reader = None\n if config.model_checkpoint_path and _using_tf():\n try:\n reader = tf.train.load_checkpoint(config.model_checkpoint_path)\n except Exception: # pylint: disable=broad-except\n logger.warn('Failed reading \"%s\"', config.model_checkpoint_path)\n self.readers[run] = reader\n return reader\n\n def _get_metadata_file_for_tensor(self, tensor_name, config):\n embedding_info = self._get_embedding(tensor_name, config)\n if embedding_info:\n return embedding_info.metadata_path\n return None\n\n def _get_bookmarks_file_for_tensor(self, tensor_name, config):\n embedding_info = self._get_embedding(tensor_name, config)\n if embedding_info:\n return embedding_info.bookmarks_path\n return None\n\n def _canonical_tensor_name(self, tensor_name):\n if ':' not in tensor_name:\n return tensor_name + ':0'\n else:\n return tensor_name\n\n def _get_embedding(self, tensor_name, config):\n if not config.embeddings:\n return None\n for info in config.embeddings:\n if (self._canonical_tensor_name(info.tensor_name) ==\n self._canonical_tensor_name(tensor_name)):\n return info\n return None\n\n def _append_plugin_asset_directories(self, run_path_pairs):\n for run, assets in self.multiplexer.PluginAssets(_PLUGIN_NAME).items():\n if PROJECTOR_FILENAME not in assets:\n continue\n assets_dir = os.path.join(self.run_paths[run], _PLUGINS_DIR, _PLUGIN_NAME)\n assets_path_pair = (run, os.path.abspath(assets_dir))\n run_path_pairs.append(assets_path_pair)\n\n @wrappers.Request.application\n def _serve_runs(self, request):\n \"\"\"Returns a list of runs that have embeddings.\"\"\"\n return Respond(request, list(self.configs.keys()), 'application/json')\n\n @wrappers.Request.application\n def _serve_config(self, request):\n run = request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n return Respond(request,\n json_format.MessageToJson(config), 'application/json')\n\n @wrappers.Request.application\n def _serve_metadata(self, request):\n run = 
request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n num_rows = _parse_positive_int_param(request, 'num_rows')\n if num_rows == -1:\n return Respond(request, 'query parameter num_rows must be integer > 0',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n fpath = self._get_metadata_file_for_tensor(name, config)\n if not fpath:\n return Respond(\n request,\n 'No metadata file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" not found, or is not a file' % fpath,\n 'text/plain', 400)\n\n num_header_rows = 0\n with tf.io.gfile.GFile(fpath, 'r') as f:\n lines = []\n # Stream reading the file with early break in case the file doesn't fit in\n # memory.\n for line in f:\n lines.append(line)\n if len(lines) == 1 and '\\t' in lines[0]:\n num_header_rows = 1\n if num_rows and len(lines) >= num_rows + num_header_rows:\n break\n return Respond(request, ''.join(lines), 'text/plain')\n\n @wrappers.Request.application\n def _serve_tensor(self, request):\n run = request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n num_rows = _parse_positive_int_param(request, 'num_rows')\n if num_rows == -1:\n return Respond(request, 'query parameter num_rows must be integer > 0',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n\n tensor = self.tensor_cache.get((run, name))\n if tensor is None:\n # See if there is a tensor file in the config.\n embedding = self._get_embedding(name, config)\n\n if embedding and embedding.tensor_path:\n fpath = _rel_to_abs_asset_path(embedding.tensor_path,\n self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath):\n return Respond(request,\n 'Tensor file \"%s\" does not exist' % fpath,\n 'text/plain', 400)\n tensor = _read_tensor_tsv_file(fpath)\n else:\n reader = self._get_reader_for_run(run)\n if not reader or not reader.has_tensor(name):\n return Respond(request,\n 'Tensor \"%s\" not found in checkpoint dir \"%s\"' %\n (name, config.model_checkpoint_path), 'text/plain',\n 400)\n try:\n tensor = reader.get_tensor(name)\n except tf.errors.InvalidArgumentError as e:\n return Respond(request, str(e), 'text/plain', 400)\n\n self.tensor_cache.set((run, name), tensor)\n\n if num_rows:\n tensor = tensor[:num_rows]\n if tensor.dtype != 'float32':\n tensor = tensor.astype(dtype='float32', copy=False)\n data_bytes = tensor.tobytes()\n return Respond(request, data_bytes, 'application/octet-stream')\n\n @wrappers.Request.application\n def _serve_bookmarks(self, request):\n run = request.args.get('run')\n if not run:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 
'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n fpath = self._get_bookmarks_file_for_tensor(name, config)\n if not fpath:\n return Respond(\n request,\n 'No bookmarks file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" not found, or is not a file' % fpath,\n 'text/plain', 400)\n\n bookmarks_json = None\n with tf.io.gfile.GFile(fpath, 'rb') as f:\n bookmarks_json = f.read()\n return Respond(request, bookmarks_json, 'application/json')\n\n @wrappers.Request.application\n def _serve_sprite_image(self, request):\n run = request.args.get('run')\n if not run:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n embedding_info = self._get_embedding(name, config)\n\n if not embedding_info or not embedding_info.sprite.image_path:\n return Respond(\n request,\n 'No sprite image file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n\n fpath = os.path.expanduser(embedding_info.sprite.image_path)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" does not exist or is directory' % fpath,\n 'text/plain', 400)\n f = tf.io.gfile.GFile(fpath, 'rb')\n encoded_image_string = f.read()\n f.close()\n image_type = imghdr.what(None, encoded_image_string)\n mime_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)\n return Respond(request, encoded_image_string, mime_type)\n\n\ndef _find_latest_checkpoint(dir_path):\n if not _using_tf():\n return None\n try:\n ckpt_path = tf.train.latest_checkpoint(dir_path)\n if not ckpt_path:\n # Check the parent directory.\n ckpt_path = tf.train.latest_checkpoint(os.path.join(dir_path, os.pardir))\n return ckpt_path\n except tf.errors.NotFoundError:\n return None\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fox-ds/river
[ "9ce947ebfc012ec7059de0a09c765b2da7fc1d25" ]
[ "river/misc/test_cov_matrix.py" ]
[ "import math\n\nimport numpy as np\nimport pandas as pd\n\nfrom river import misc\n\n\ndef test_cov_matrix():\n\n # NOTE: this test only works with ddof=1 because pandas ignores it if there are missing values\n ddof = 1\n\n cov = misc.CovMatrix(ddof=ddof)\n p = 5\n X_all = pd.DataFrame(columns=range(p))\n\n for _ in range(5):\n n = np.random.randint(1, 31)\n X = pd.DataFrame(np.random.random((n, p))).sample(3, axis=\"columns\")\n cov.update_many(X)\n X_all = pd.concat((X_all, X)).astype(float)\n pd_cov = X_all.cov(ddof=ddof)\n\n for i, j in cov:\n assert math.isclose(cov[i, j].get(), pd_cov.loc[i, j])\n" ]
[ [ "pandas.concat", "numpy.random.random", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
altescy/logexp
[ "19389c884c686ca42f691500e82e8963bd039f0c" ]
[ "examples/scikit-learn/iris.py" ]
[ "import typing as tp\nimport copy\nimport importlib\n\nimport colt\nimport logexp\nfrom sklearn.base import BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\nfrom logger import create_logger\n\nlogger = create_logger(__name__)\nex = logexp.Experiment(\"sklearn-iris\")\n\n\[email protected](\"sklearn-trainer\")\nclass TrainSklearnModel(logexp.BaseWorker):\n def config(self):\n self.model = {\n \"@type\": \"sklearn.svm.SVC\",\n \"C\": 1.0,\n \"kernel\": \"rbf\",\n }\n self.test_size = 0.3\n\n def run(self):\n logger.info(\"load iris dataset\")\n\n model = colt.build(self.model)\n\n iris = load_iris()\n X, y = iris.data, iris.target\n\n X_train, X_valid, y_train, y_valid = \\\n train_test_split(X, y, test_size=self.test_size)\n\n logger.info(\n f\"dataset size: train={len(X_train)}, valid={len(X_valid)}\")\n\n logger.info(\"start training\")\n\n model.fit(X_train, y_train)\n\n logger.info(\"end training\")\n\n train_accuracy = model.score(X_train, y_train)\n valid_accuracy = model.score(X_valid, y_valid)\n\n report = logexp.Report()\n report[\"train_size\"] = len(X_train)\n report[\"valid_size\"] = len(X_valid)\n report[\"train_accuracy\"] = train_accuracy\n report[\"valid_accuracy\"] = valid_accuracy\n\n return report\n" ]
[ [ "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dtak/adversarial_robustness
[ "eb1e3af5301789cc6e7562750145ab91f80dcca7" ]
[ "adversarial_robustness/datasets/notmnist.py" ]
[ "from __future__ import print_function\nimport glob\nimport os\nimport numpy as np\nfrom six.moves import cPickle as pickle\nfrom six.moves.urllib.request import urlretrieve\nimport tarfile\nimport sys\nfrom adversarial_robustness.dataset import *\n\nclass notMNIST(Dataset):\n def __init__(self, data_dir=default_data_dir):\n self.X, self.y, self.Xv, self.yv, self.Xt, self.yt = load_notmnist(data_dir)\n self.feature_names = [str(i) for i in range(28*28)]\n self.label_names = ['A','B','C','D','E','F','G','H','I','J']\n self.image_shape = (28,28)\n\ndef load_notmnist(data_dir):\n filename = data_dir + '/notMNIST.pickle'\n if not os.path.exists(filename):\n print('Dataset not found, downloading and preprocessing...')\n download_and_extract_notmnist(data_dir)\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n def resh(x):\n return x.reshape(len(x), 28*28)\n X = resh(data['train_dataset'])\n Xv = resh(data['valid_dataset'])\n Xt = resh(data['test_dataset'])\n y = data['train_labels']\n yv = data['valid_labels']\n yt = data['test_labels']\n return X, y, Xv, yv, Xt, yt\n\ndef download_and_extract_notmnist(data_root):\n \"\"\"\n Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/1_notmnist.ipynb\n \"\"\"\n url = 'https://commondatastorage.googleapis.com/books1000/'\n image_size = 28 # Pixel width and height.\n pixel_depth = 255.0 # Number of levels per pixel.\n num_classes = 10\n\n def maybe_download(filename, expected_bytes, force=False):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n def maybe_extract(filename, force=False):\n root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz\n if os.path.isdir(root) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping extraction of %s.' % (root, filename))\n else:\n print('Extracting data for %s. This may take a while. Please wait.' % root)\n tar = tarfile.open(filename)\n sys.stdout.flush()\n tar.extractall(data_root)\n tar.close()\n data_folders = [\n os.path.join(root, d) for d in sorted(os.listdir(root))\n if os.path.isdir(os.path.join(root, d))]\n if len(data_folders) != num_classes:\n raise Exception(\n 'Expected %d folders, one per class. Found %d instead.' 
% (\n num_classes, len(data_folders)))\n print(data_folders)\n return data_folders\n \n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n\n def load_letter(folder, min_num_images):\n \"\"\"Load the data for a single letter label.\"\"\"\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n print(folder)\n num_images = 0\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n from scipy import ndimage\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n \n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n \n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset\n \n def maybe_pickle(data_folders, min_num_images_per_class, force=False):\n dataset_names = []\n for folder in data_folders:\n set_filename = folder + '.pickle'\n dataset_names.append(set_filename)\n if os.path.exists(set_filename) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n print('Pickling %s.' % set_filename)\n dataset = load_letter(folder, min_num_images_per_class)\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n \n return dataset_names\n\n train_datasets = maybe_pickle(train_folders, 45000)\n test_datasets = maybe_pickle(test_folders, 1800)\n\n def make_arrays(nb_rows, img_size):\n if nb_rows:\n dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n dataset, labels = None, None\n return dataset, labels\n\n def merge_datasets(pickle_files, train_size, valid_size=0):\n num_classes = len(pickle_files)\n valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n train_dataset, train_labels = make_arrays(train_size, image_size)\n vsize_per_class = valid_size // num_classes\n tsize_per_class = train_size // num_classes\n \n start_v, start_t = 0, 0\n end_v, end_t = vsize_per_class, tsize_per_class\n end_l = vsize_per_class+tsize_per_class\n for label, pickle_file in enumerate(pickle_files): \n try:\n with open(pickle_file, 'rb') as f:\n letter_set = pickle.load(f)\n # let's shuffle the letters to have random validation and training set\n np.random.shuffle(letter_set)\n if valid_dataset is not None:\n valid_letter = letter_set[:vsize_per_class, :, :]\n valid_dataset[start_v:end_v, :, :] = valid_letter\n valid_labels[start_v:end_v] = label\n start_v += vsize_per_class\n end_v += vsize_per_class\n \n train_letter = letter_set[vsize_per_class:end_l, :, :]\n train_dataset[start_t:end_t, :, :] = train_letter\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n except Exception as e:\n print('Unable to process data from', pickle_file, ':', e)\n raise\n \n return valid_dataset, valid_labels, train_dataset, train_labels\n \n 
train_size = 200000\n valid_size = 10000\n test_size = 10000\n\n valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(\n train_datasets, train_size, valid_size)\n _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\n\n print('Training:', train_dataset.shape, train_labels.shape)\n print('Validation:', valid_dataset.shape, valid_labels.shape)\n print('Testing:', test_dataset.shape, test_labels.shape)\n\n def randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation,:,:]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n train_dataset, train_labels = randomize(train_dataset, train_labels)\n test_dataset, test_labels = randomize(test_dataset, test_labels)\n valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)\n\n pickle_file = data_root + '/notMNIST.pickle'\n\n try:\n f = open(pickle_file, 'wb')\n save = {\n 'train_dataset': train_dataset,\n 'train_labels': train_labels,\n 'valid_dataset': valid_dataset,\n 'valid_labels': valid_labels,\n 'test_dataset': test_dataset,\n 'test_labels': test_labels,\n }\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nif __name__ == '__main__':\n import pdb\n dataset = notMNIST()\n pdb.set_trace()\n pass\n" ]
[ [ "scipy.ndimage.imread", "numpy.ndarray", "numpy.random.shuffle", "numpy.std", "numpy.random.permutation", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] } ]
isaac-gw/astropy
[ "efea853d7e127379370ed83fc89c66fa3b662324" ]
[ "astropy/modeling/tests/test_units_mapping.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.units import Quantity, UnitsError, equivalencies\nfrom astropy.modeling.models import UnitsMapping\n\n\ndef test_properties():\n model = UnitsMapping(((u.dimensionless_unscaled, u.m), (u.dimensionless_unscaled, u.s)))\n assert model.n_inputs == 2\n assert model.n_outputs == 2\n assert model.inputs == (\"x0\", \"x1\")\n assert model.outputs == (\"x0\", \"x1\")\n assert model.input_units == {\"x0\": u.dimensionless_unscaled, \"x1\": u.dimensionless_unscaled}\n assert model.mapping == ((u.dimensionless_unscaled, u.m), (u.dimensionless_unscaled, u.s))\n\n\ndef test_add_units():\n model = UnitsMapping(((u.dimensionless_unscaled, u.m),))\n\n for value in [10, Quantity(10), np.arange(10), Quantity(np.arange(10))]:\n result = model(value)\n assert isinstance(result, Quantity)\n assert np.all(result.value == value)\n assert result.unit == u.m\n\n with pytest.raises(UnitsError):\n model(Quantity(10, u.s))\n\n\ndef test_remove_units():\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),))\n\n result = model(Quantity(10, u.m))\n assert isinstance(result, Quantity)\n assert result.value == 10\n assert result.unit == u.dimensionless_unscaled\n\n result = model(Quantity(1000, u.cm))\n assert isinstance(result, Quantity)\n assert result.value == 10\n assert result.unit == u.dimensionless_unscaled\n\n with pytest.raises(UnitsError):\n model(10)\n\n with pytest.raises(UnitsError):\n model(Quantity(10))\n\n\ndef test_remove_quantity():\n model = UnitsMapping(((u.m, None),))\n\n result = model(Quantity(10, u.m))\n assert result == 10\n\n result = model(Quantity(1000, u.cm))\n assert result == 10\n\n with pytest.raises(UnitsError):\n model(10)\n\n with pytest.raises(UnitsError):\n model(Quantity(10))\n\n # The model shouldn't allow a mixture of None and non-None\n # output units.\n with pytest.raises(ValueError, match=r\"If one return unit is None, then all must be None\"):\n UnitsMapping(((u.m, None), (u.s, u.dimensionless_unscaled)))\n\n\ndef test_equivalencies():\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),))\n\n with pytest.raises(UnitsError):\n model(Quantity(100, u.Hz))\n\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),), input_units_equivalencies={\"x\": equivalencies.spectral()})\n\n result = model(Quantity(100, u.Hz))\n assert result.unit == u.dimensionless_unscaled\n\n\ndef test_allow_dimensionless():\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),))\n\n with pytest.raises(UnitsError):\n model(10)\n\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),), input_units_allow_dimensionless=True)\n result = model(10)\n assert isinstance(result, Quantity)\n assert result.value == 10\n assert result.unit == u.dimensionless_unscaled\n\n\ndef test_custom_inputs_and_outputs():\n model = UnitsMapping(((u.m, u.dimensionless_unscaled),))\n\n model.inputs = (\"foo\",)\n model.outputs = (\"bar\",)\n\n assert model.inputs == (\"foo\",)\n assert model.input_units == {\"foo\": u.m}\n assert model.outputs == (\"bar\",)\n\n\ndef test_repr():\n model = UnitsMapping(((u.m, None),))\n assert repr(model) == \"\"\"<UnitsMapping(((Unit(\"m\"), None),))>\"\"\"\n\n model = UnitsMapping(((u.m, None),), name=\"foo\")\n assert repr(model) == \"\"\"<UnitsMapping(((Unit(\"m\"), None),), name='foo')>\"\"\"\n" ]
[ [ "numpy.all", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
binh234/facial-liveness-detection
[ "841aca9e4a1adc63fd72dbd578e5f74c8b93f29a" ]
[ "prepare_dataset.py" ]
[ "import os\nimport pandas as pd\nimport itertools\nimport random\nimport requests\nfrom sklearn.model_selection import train_test_split\nfrom io import BytesIO\nfrom zipfile import ZipFile\n\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination) \n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n\ndef unzip(zip_file, extract_to='.'):\n zipfile = ZipFile(zip_file)\n zipfile.extractall(path=extract_to)\n\n\ndef pos_pairing(lst):\n ret = []\n for i in range(len(lst)):\n for j in range(i, len(lst)):\n if lst[j] == lst[i]:\n pass\n else:\n ret.append((lst[i], lst[j]))\n return ret\n\n\ndef pos_pairing2(lst, lst2):\n return [(x, y) for x in lst for y in lst2 if not x == y]\n\n\ndef neg_pairing(lst, lst2):\n return [(x, y) for x in lst for y in lst2]\n\n\ndef generate_samples(real_path, fake_path, real_sub_dir, df, num_pairs=2500):\n for directory in real_sub_dir:\n pos_images = [name for name in os.listdir(\n f'{real_path}/{directory}') if not (name.startswith(\".\") or name.startswith(\"Thumb\"))]\n neg_images = [name for name in os.listdir(\n f'{fake_path}/{directory}') if not (name.startswith(\".\") or name.startswith(\"Thumb\"))]\n pos_pairs = list(itertools.combinations(pos_images, 2))\n\n if len(pos_pairs) > num_pairs:\n pos_pairs = random.choices(pos_pairs, k=num_pairs)\n\n for i in pos_pairs:\n new_row = {'sample': directory,\n 'image1': f'{real_path}/{directory}/{i[0]}', 'image2': f'{real_path}/{directory}/{i[1]}', 'label': 1}\n df = df.append(new_row, ignore_index=True)\n\n neg_pairs = list(itertools.product(pos_images, neg_images))\n if len(neg_pairs) > num_pairs:\n neg_pairs = random.choices(neg_pairs, k=num_pairs)\n for i in neg_pairs:\n new_neg_row = {\n 'sample': directory, 'image1': f'{real_path}/{directory}/{i[0]}', 'image2': f'{fake_path}/{directory}/{i[1]}', 'label': 0}\n df = df.append(new_neg_row, ignore_index=True)\n\n return df\n\n\nDATASET_ID = \"1oE6yv-RYV5_4HDjUo6F8mJofzrW_tdTo\"\n\n\ndef prepare_dataset(data_folder=\"ds\"):\n \"\"\"\n Prepare training dataset\n \"\"\"\n if not os.path.exists(data_folder):\n os.makedirs(data_folder)\n if not os.path.exists(\"dataset\"):\n os.makedirs(\"dataset\")\n\n # Download dataset\n zip_file = \"./Detectedface.zip\"\n\n download_file_from_google_drive(DATASET_ID, zip_file)\n unzip(zip_file, data_folder)\n\n if not os.path.exists(os.path.join(data_folder, \"Detectedface\")):\n print(\"Can't download dataset\")\n return\n\n real_path = os.path.join(data_folder, \"Detectedface\", \"ClientFace\")\n fake_path = os.path.join(data_folder, \"Detectedface\", \"ImposterFace\")\n\n os.rename(data_folder + \"/Detectedface/ClientFace/0013\", data_folder + \"/Detectedface/ClientFace/0016\")\n\n real_sub_dir = [name for name in os.listdir(\n real_path) if not name.startswith(\".\")]\n real_sub_dir.sort()\n fake_sub_dir = [name for name in os.listdir(\n 
fake_path) if not name.startswith(\".\")]\n fake_sub_dir.sort()\n\n column_names = [\"sample\", \"image1\", \"image2\", \"label\"]\n\n df = pd.DataFrame(columns=column_names)\n test_df = pd.DataFrame(columns=column_names)\n\n # Take last 4 clients for testing, others for training and validation\n print(\"Generating training samples..\")\n df = generate_samples(real_path, fake_path, real_sub_dir[:-4], df)\n print(\"Generating testing samples..\")\n test_df = generate_samples(\n real_path, fake_path, real_sub_dir[-4:], test_df)\n\n # UNCOMMENT TO SAVE CSV\n test_df.to_csv('./dataset/test_data.csv', encoding='utf-8', index=False)\n y = df.label\n X = df.drop('label', axis=1)\n df.reset_index(drop=True, inplace=True)\n\n X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8)\n\n X_train.reset_index(drop=True, inplace=True)\n X_val.reset_index(drop=True, inplace=True)\n y_train.reset_index(drop=True, inplace=True)\n y_val.reset_index(drop=True, inplace=True)\n\n X = pd.concat([X_train, y_train], axis=1)\n X_val = pd.concat([X_val, y_val], axis=1)\n\n X.to_csv('./dataset/train_data.csv', encoding='utf-8', index=False)\n X_val.to_csv('./dataset/val_data.csv', encoding='utf-8', index=False)\n\n\nif __name__ == '__main__':\n prepare_dataset()\n" ]
[ [ "pandas.concat", "sklearn.model_selection.train_test_split", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ankitshah009/MMdnn
[ "a03d800eb4016765e97f82eb5d2e69f98de3a9cf" ]
[ "mmdnn/conversion/tensorflow/saver.py" ]
[ "import tensorflow as tf\n\n\ndef save_model(MainModel, network_filepath, weight_filepath, dump_filepath, dump_tag = 'SERVING'):\n if dump_tag == 'SERVING':\n tag_list = [tf.saved_model.tag_constants.SERVING]\n else:\n tag_list = [tf.saved_model.tag_constants.TRAINING]\n res = MainModel.KitModel(weight_filepath)\n input = res[0]\n model = res[1:]\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n builder = tf.saved_model.builder.SavedModelBuilder(dump_filepath)\n\n tensor_info_input = tf.saved_model.utils.build_tensor_info(input)\n outputs = {'output{}'.format(i): tf.saved_model.utils.build_tensor_info(model[i]) for i in range(len(model))}\n prediction_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'input': tensor_info_input},\n outputs=outputs,\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\n )\n )\n\n builder.add_meta_graph_and_variables(\n sess,\n tag_list,\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature\n }\n )\n\n save_path = builder.save()\n\n print('Tensorflow file is saved as [{}], generated by [{}.py] and [{}].'.format(\n save_path, network_filepath, weight_filepath))\n" ]
[ [ "tensorflow.saved_model.signature_def_utils.build_signature_def", "tensorflow.saved_model.builder.SavedModelBuilder", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.saved_model.utils.build_tensor_info" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
kbj2060/pytrader
[ "ad32ed16917ccf7baa6affefac2229be5affa1ef" ]
[ "database.py" ]
[ "# TODO : Not Yet Implemented\nimport glob\nimport sqlite3\n\nimport h5py\nimport pandas as pd\n\n\ndef convert_sql_to_csv():\n con = sqlite3.connect(\"../data/stock.db\")\n code_list = con.execute(\"SELECT name from sqlite_master WHERE type='table'\").fetchall()\n code = code_list[0][0]\n for code in code_list:\n code = code[0]\n data = pd.read_sql(\"SELECT * from '%s'\" % code, con, index_col='일자').sort_index()\n data.to_csv('../data/stocks/%s.csv' % code)\n\ndef convert_sql_to_h5():\n con = sqlite3.connect(\"../data/stock.db\")\n code_list = con.execute(\"SELECT name from sqlite_master WHERE type='table'\").fetchall()\n code = code_list[0][0]\n for code in code_list:\n code = code[0]\n data = pd.read_sql(\"SELECT * from '%s'\" % code, con, index_col='일자').sort_index()\n data.to_hdf('../data/h5/%s.h5'%code,'df',mode='w',data_columns=True)\n\ndef read_h5():\n code_list = glob.glob('../data/h5/*')\n for code in code_list[:10]:\n h = h5py.File(code)\n print(h)\n\nif __name__ == '__main__':\n read_h5()\n" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
WenqinSHAO/rtt_changedetect_rnn_lstm
[ "9a8b0f3a500c0f2fa825c17ca053ebbf85f94e61" ]
[ "model.py" ]
[ "import numpy as np\nimport time\nfrom keras.layers import Input, Concatenate, Dot, Flatten\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.wrappers import Bidirectional, TimeDistributed\nfrom keras.models import Model, model_from_json\nfrom keras.utils import plot_model\nimport matplotlib.pyplot as plt\n\ndef save_trained_model(model,fn=\"model\"):\n \"\"\"save to file the trained model\n \"\"\"\n # serialize model to JSON\n model_json = model.to_json()\n with open(\"%s.json\"%fn, \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(\"%s.h5\"%fn)\n\ndef plot_leanring_curv(rec, fn='model'):\n \"\"\"plot learning curve during training\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for k in rec.history.keys():\n ax.plot(rec.history[k])\n ax.set_xlabel('epoch')\n ax.legend(rec.history.keys(), loc='upper left')\n fig.set_size_inches(10,8)\n plt.savefig(\"%s_learning_curve.pdf\"%fn, format='pdf')\n plt.close()\n\ndef load_model(fn):\n \"\"\"load model from file\n \"\"\"\n model_json = open(\"%s.json\"%fn, 'r')\n model = model_json.read()\n model_json.close()\n model = model_from_json(model)\n model.load_weights(\"%s.h5\"%fn)\n return model\n\ndef model_1():\n \"\"\"model for the detection of change points\n \"\"\"\n # inputs; works only for sequences of 100 datapoint\n input_seq = Input(shape=(100,1), name='input_seq')\n input_array = Input(shape=(100,), name='input_array')\n\n lstm = LSTM(100, return_sequences=True)(input_seq)\n\n # auxiliary out only tells where there is change or not\n # the main out tell where the change happens\n\n deep = TimeDistributed(Dense(20, activation='relu'))(lstm)\n deep = Flatten()(deep)\n aux_out = Dense(1, activation='sigmoid', name='aux_out')(deep)\n\n deep = Concatenate()([deep, input_array])\n deep = Dense(50, activation='relu')(deep)\n deep = Dense(50, activation='relu')(deep) \n main_out = Dense(100, activation='sigmoid', name='main_out')(deep)\n \n model = Model(inputs=[input_seq, input_array], outputs=[aux_out, main_out])\n model.compile(loss={'aux_out':'binary_crossentropy', 'main_out':'binary_crossentropy'},\n loss_weights = {'aux_out': 0.1, 'main_out': 1.0},\n optimizer='adam')\n\n print(model.summary())\n plot_model(model, show_shapes=True, to_file='cpt_model_1.png')\n\n return model\n\ndef model_2():\n \"\"\"mode for the detection whether there is change in given timesereis\n \"\"\"\n inputs = Input(shape=(None,1)) # should work for seq of arbitrary length\n lstm = LSTM(100)(inputs)\n out = Dense(1, activation='sigmoid')(lstm)\n model = Model(inputs=inputs, outputs=out)\n model.compile(loss='binary_crossentropy', optimizer='adam')\n print(model.summary())\n plot_model(model, show_shapes=True, to_file='cpt_model_2.png')\n return model\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
robertfasano/argent
[ "49a779e54063ad4f6432b78d1f8070d2f0a932a7" ]
[ "argent/generator/channel_parsing.py" ]
[ "import numpy as np\n\ndef get_ttl_channels(playlist):\n ''' Crawls through the playlist to assemble a list of all TTL channels\n whose state is specified at some point. Returns a list of the format\n ['ttlA0', 'ttlA1', ...]\n '''\n ttls = []\n for stage in playlist:\n sequence = stage['sequence']['steps']\n for step in sequence:\n ttls.extend(step.get('ttl', {}).keys())\n return list(np.unique(ttls))\n\ndef get_dac_channels(playlist):\n ''' Crawls through the playlist to assemble a list of all DAC boards\n whose state is specified at some point. Returns a list of the format\n ['zotinoA'].\n '''\n dacs = []\n for stage in playlist:\n sequence = stage['sequence']['steps']\n for step in sequence:\n dacs.extend(step.get('dac', {}).keys())\n return list(np.unique(dacs))\n\ndef get_adc_boards(playlist):\n ''' Crawls through the playlist to assemble a list of all ADC boards\n which are enabled at some state. Returns a list of the format\n ['samplerA'].\n '''\n boards = []\n for stage in playlist:\n sequence = stage['sequence']['steps']\n for step in sequence:\n for board in step['adc']:\n if step['adc'][board]['enable']:\n boards.append(board)\n return list(np.unique(boards))\n\ndef get_grabber_boards(playlist):\n ''' Crawls through the playlist to assemble a list of all grabber boards\n which are enabled at some state. Returns a list of the format\n ['grabberA'].\n '''\n boards = []\n for stage in playlist:\n sequence = stage['sequence']['steps']\n for step in sequence:\n if 'cam' in step:\n for board in step['cam']:\n if step['cam'][board]['enable']:\n boards.append(board)\n return list(np.unique(boards))\n\ndef get_data_arrays(playlist):\n arrays = {}\n for stage in playlist:\n sequence = stage['sequence']['steps']\n for i, step in enumerate(sequence):\n for board in step['adc']:\n if step['adc'][board]['enable']:\n name = stage['name'].replace(' ', '_') + '_' + str(i)\n arrays[name] = f'[[0]*8]*{int(step[\"adc\"][board][\"samples\"])}'\n\n return arrays\n\ndef get_dds_boards(playlist):\n ''' Crawls through the playlist to assemble a list of all DDS boards\n whose state is specified at some point. Returns a list of the format\n ['urukulA', 'urukulB']. Assumes that the device names in the device_db\n follow the syntax {board}_ch{i} for channels (e.g. urukulA_ch0) and\n {board}_cpld for CPLDs (e.g. urukulA_cpld).\n '''\n boards = []\n for stage in playlist:\n for step in stage['sequence']['steps']:\n boards.extend([ch.split('_')[0] for ch in step.get('dds', {}).keys()])\n return list(np.unique(boards))\n\ndef get_dds_channels(playlist):\n channels = []\n for stage in playlist:\n for step in stage['sequence']['steps']:\n channels.extend(step.get('dds', {}).keys())\n return list(np.unique(channels))\n" ]
[ [ "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chihina/Scratch_autoencoder
[ "d55ae810c9fb4589b8d3ba3ed6bf3462d97e7a14" ]
[ "AutoEncoder_pytorch_gpu_ver2.py" ]
[ "\r\n# coding: utf-8\r\n\r\n# In[ ]:\r\n\r\nimport time\r\nfrom torchvision import transforms\r\nfrom torchvision.datasets import MNIST\r\nfrom torchvision.utils import save_image\r\nimport torch.nn.functional as F\r\nimport sys, os\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport torch\r\nimport torchvision\r\nfrom torch import nn\r\nfrom torchvision import datasets\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\n\r\nstart_time = time.time()\r\n\r\n# 点描用関数\r\ndef img_show(img):\r\n pil_img = Image.fromarray(np.uint8(img))\r\n pil_img.show()\r\n\r\n\r\n# データ成形用関数\r\ndef to_img(x):\r\n# x = 0.5 * (x + 1) # [-1,1] => [0, 1]\r\n x = x.clamp(0, 1)\r\n x = x.view(x.size(0), 1, 28, 28)\r\n return x\r\n\r\n\r\n# ネットワーク定義\r\nclass Autoencoder(nn.Module):\r\n\r\n def __init__(self):\r\n super(Autoencoder, self).__init__()\r\n self.encoder = nn.Sequential(\r\n nn.Linear(28 * 28, 392),\r\n nn.ReLU(True),\r\n nn.Linear(392, 196),\r\n nn.ReLU(True),\r\n nn.Linear(196, 98),\r\n nn.ReLU(True),\r\n nn.Linear(98, 49),\r\n nn.ReLU(True),\r\n nn.Linear(49, 2)\r\n )\r\n\r\n self.decoder = nn.Sequential(\r\n nn.Linear(2, 49),\r\n nn.ReLU(True),\r\n nn.Linear(49, 98),\r\n nn.ReLU(True),\r\n nn.Linear(98, 196),\r\n nn.ReLU(True),\r\n nn.Linear(196, 392),\r\n nn.ReLU(True),\r\n nn.Linear(392, 784),\r\n nn.Tanh()\r\n )\r\n\r\n def forward(self, x):\r\n x = self.encoder(x)\r\n x = self.decoder(x)\r\n return x\r\n\r\n\r\ncuda = torch.cuda.is_available()\r\nif cuda:\r\n print('cuda is available!')\r\n\r\nelse:\r\n print('cuda is not available')\r\n\r\n\r\n# 保存用ディレクトリ作成\r\nout_dir = './autoencoder_0402_gpu_ver2'\r\nif not os.path.exists(out_dir):\r\n os.mkdir(out_dir)\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# epoch数の定義\r\nnum_epochs = 50\r\n\r\n# crossvalidation の回数\r\nvalidation_number = 5\r\n\r\nfor val_num in range(validation_number):\r\n val_start_time = time.time()\r\n print('validation : {}'.format(val_num + 1))\r\n\r\n img_transform = transforms.Compose([\r\n # torchvision.transforms.Grayscale(num_output_channels=3),\r\n transforms.ToTensor(),\r\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # [0,1] => [-1,1]\r\n ])\r\n\r\n # meke a dataset train and validation 7 :3\r\n trainval_dataset = datasets.MNIST('./', train=True, download=True, transform=img_transform)\r\n\r\n n_samples = len(trainval_dataset) # n_samples is 60000\r\n train_size = int(len(trainval_dataset) * 0.7) # train_size is 42000\r\n val_size = n_samples - train_size # val_size is 42000\r\n\r\n # shuffleしてから分割\r\n train_dataset, val_dataset = torch.utils.data.random_split(trainval_dataset, [train_size, val_size])\r\n\r\n train_loader = DataLoader(train_dataset, batch_size=100, shuffle=False)\r\n\r\n validation_loader = DataLoader(val_dataset, batch_size=100, shuffle=False)\r\n\r\n # lossを保存するリストを初期化\r\n train_loss_list = []\r\n validation_loss_list = []\r\n val_sum = 0\r\n for epoch in range(num_epochs):\r\n # ネットワークのインスタンスを生成\r\n if (epoch + 1) % 10 == 0:\r\n print(\"now is {} epoch\".format(epoch + 1))\r\n model = Autoencoder().to(device)\r\n\r\n # 損失関数を定義\r\n loss_function = nn.MSELoss()\r\n\r\n # 最適化関数の定義\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)\r\n\r\n for img, labels in train_loader:\r\n x = img.view(img.size(0), -1)\r\n\r\n # 今回はGPUを使用\r\n if cuda:\r\n x = Variable(x).cuda()\r\n else:\r\n x = Variable(x)\r\n\r\n\r\n x_modeled_train = model(x)\r\n\r\n loss = loss_function(x_modeled_train, x)\r\n 
optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n train_loss_list.append(loss.data)\r\n\r\n print(\"epoch[{}/{}], train_loss: {}\".format(epoch + 1, num_epochs, loss.data))\r\n for img, labels in validation_loader:\r\n x = img.view(img.size(0), -1)\r\n # 今回はGPUを使用\r\n if cuda:\r\n x = Variable(x).cuda()\r\n else:\r\n x = Variable(x)\r\n\r\n x_modeled_val = model(x)\r\n loss = loss_function(x_modeled_val, x)\r\n validation_loss_list.append(loss.data)\r\n\r\n for val_loss in validation_loss_list:\r\n val_sum += val_loss\r\n val_score = val_sum / len(validation_loss_list)\r\n print(\"validation_loss_{}: {}\".format(val_num + 1, val_score))\r\n\r\n pic_origin = to_img(x.cpu())\r\n pic_changed = to_img(x_modeled_val.cpu().data)\r\n save_image(pic_changed, './{}/cha_image_validation_{}.png'.format(out_dir, val_num + 1))\r\n save_image(pic_origin, './{}/ori_image_validation_{}.png'.format(out_dir, val_num + 1))\r\n torch.save(model.state_dict(), './{}/validation_{}_state_dict.pth'.format(out_dir, val_num + 1))\r\n val_finish_time = time.time()\r\n print(\"validation_cost_time: {}s\".format(val_finish_time - val_start_time))\r\n\r\nfinish_time = time.time()\r\nprint(\"total_costtime: {}s\".format(finish_time - start_time))\r\n" ]
[ [ "numpy.uint8", "torch.utils.data.DataLoader", "torch.nn.Tanh", "torch.nn.Linear", "torch.utils.data.random_split", "torch.cuda.is_available", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mattwilkin/StrainRecon
[ "5c8e0cf6792e1d58db764f6734b5028adc133b8b" ]
[ "optimizers_mjw.py" ]
[ "# import pycuda.driver as cuda\nimport numpy as np\nfrom cuda import cuda, nvrtc\nimport time\nimport gpuarray\nfrom config import Config\nfrom run_cuda import run_cuda_function\n\n\ndef CrossEntropyMethod(recon, x, y,\n XD, YD, OffsetD, MaskD, TrueMaskD, scoreD, S_gpu,\n NumD=10000, numCut=100, cov=1e-6 * np.eye(9), MaxIter=50, mean=np.eye(3), BlockSize=256,\n debug=False):\n if not recon.ImLoaded:\n recon.loadIm()\n if not recon.GsLoaded:\n recon.loadGs()\n x = np.array(x).astype(np.float32)\n y = np.array(y).astype(np.float32)\n \n for ii in range(MaxIter):\n #np.random.seed(ii)\n S = np.random.multivariate_normal(\n np.zeros(9), cov, size=(NumD)).reshape((NumD, 3, 3), order='C') + np.tile(mean, (NumD, 1, 1))\n \n Sr = S.ravel().astype(np.float32)\n err, = cuda.cuMemcpyHtoD( S_gpu,Sr.ctypes.data, Sr.nbytes)\n\n\n # Sim Strain ####################################################################################### \n\n \n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n x, y, recon.afDetInfoD, np.array([int(S_gpu)], dtype=np.uint64),\n recon.whichOmegaD, np.array(NumD).astype(np.int32), np.array(recon.NumG).astype(np.int32),\n np.array(recon.Cfg.energy).astype(np.float32), np.array(recon.Cfg.window[2]).astype(np.int32), recon.LimD, \n np.array(5).astype(np.int32), np.array(recon.Cfg.omgInterval).astype(np.float32),\n recon.tG]\n \n \n err = run_cuda_function(recon.sim_strain_func,args,(NumD,1,1),(recon.NumG,1,1))\n\n # Hit Fun ##################################################################################################\n\n args = [scoreD,XD, YD, OffsetD, MaskD, TrueMaskD,\n recon.MaxIntD, np.array(recon.NumG).astype(np.int32), np.array(NumD).astype(np.int32), recon.windowD,recon.tcExp]\n \n \n err = run_cuda_function(recon.hit_func,args,(int(NumD/BlockSize+1),1,1),(BlockSize,1,1))\n \n \n score = scoreD.get()\n\n \n args = np.argpartition(score, -numCut)[-numCut:]\n cov = np.cov(S[args].reshape((numCut, 9), order='C').T)\n mean = np.mean(S[args], axis=0)\n \n \n if debug:\n print(np.max(score))\n if np.trace(np.absolute(cov)) < 1e-8:\n \n break\n\n \n return cov, mean, np.max(score[args])\n\n\ndef ChangeOneVoxel_KL(recon, x, y, mean, realMapsLogD, falseMapsD,\n XD, YD, OffsetD, MaskD, TrueMaskD, diffD, S_gpu,\n NumD=10000, numCut=50, cov=1e-6 * np.eye(9), epsilon=1e-6, MaxIter=3, BlockSize=256, debug=False):\n if not recon.GsLoaded:\n recon.loadGs()\n # remove the original hit\n \n S = mean\n Sr = S.ravel().astype(np.float32)\n err, = cuda.cuMemcpyHtoD( S_gpu,Sr.ctypes.data, Sr.nbytes)\n \n \n \n x = np.array(x).astype(np.float32)\n y = np.array(y).astype(np.float32)\n\n \n # Sim Strain ####################################################################################\n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n x, y, recon.afDetInfoD, np.array([int(S_gpu)], dtype=np.uint64),\n recon.whichOmegaD, np.array(1).astype(np.int32), np.array(recon.NumG).astype(np.int32),\n np.array(recon.Cfg.energy).astype(np.float32), np.array(recon.Cfg.window[2]).astype(np.int32), recon.LimD, \n np.array(5).astype(np.int32), np.array(recon.Cfg.omgInterval).astype(np.float32),\n recon.tG]\n\n\n \n \n \n err = run_cuda_function(recon.sim_strain_func,args,(1,1,1),(recon.NumG,1,1))\n\n\n # OneFun ###################################################################################\n \n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n falseMapsD, np.array(recon.NumG).astype(np.int32),\n np.array(epsilon).astype(np.float32), np.array(-1).astype(np.int32),recon.windowD]\n \n \n err = 
run_cuda_function(recon.One_func,args,(1,1,1),(recon.NumG,1,1))\n \n \n # find a better distortion matrix\n \n for ii in range(MaxIter):\n \n S = np.empty((NumD, 3, 3), dtype=np.float32)\n S[0, :, :] = mean\n #np.random.seed(42)\n S[1:, :, :] = np.random.multivariate_normal(\n mean.ravel(), cov, size=(NumD - 1)).reshape((NumD - 1, 3, 3), order='C')\n Sr = S.ravel().astype(np.float32)\n err, = cuda.cuMemcpyHtoD( S_gpu,Sr.ctypes.data, Sr.nbytes)\n \n # Sim Strain #####################################################################\n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n x, y, recon.afDetInfoD, np.array([int(S_gpu)], dtype=np.uint64),\n recon.whichOmegaD, np.array(NumD).astype(np.int32), np.array(recon.NumG).astype(np.int32),\n np.array(recon.Cfg.energy).astype(np.float32), np.array(recon.Cfg.window[2]).astype(np.int32), recon.LimD, \n np.array(5).astype(np.int32), np.array(recon.Cfg.omgInterval).astype(np.float32),\n recon.tG]\n\n \n err = run_cuda_function(recon.sim_strain_func,args,(NumD,1,1),(recon.NumG,1,1))\n # print(OffsetD.get())\n # KL ###################################################################################\n args = [diffD,XD, YD, OffsetD, MaskD, TrueMaskD,\n realMapsLogD, falseMapsD,\n np.array(recon.NumG).astype(np.int32), np.array(NumD).astype(np.int32),recon.windowD]\n \n err = run_cuda_function(recon.KL_diff_func,args,(int(NumD / BlockSize + 1),1,1),(BlockSize,1,1)) \n diffH = diffD.get()\n \n args = np.argpartition(diffH, numCut)[:numCut]\n \n cov = np.cov(S[args].reshape((numCut, 9), order='C').T)\n mean = np.mean(S[args], axis=0)\n \n \n if ii == 0:\n diff_init = diffH[0]\n if debug:\n print(np.min(diffH), diffH[0])\n # add the new hit\n S = mean\n Sr = S.ravel().astype(np.float32)\n err, = cuda.cuMemcpyHtoD( S_gpu,Sr.ctypes.data, Sr.nbytes)\n \n\n \n #Sim Strain #######################################################################\n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n x, y, recon.afDetInfoD, np.array([int(S_gpu)], dtype=np.uint64),\n recon.whichOmegaD, np.array(1).astype(np.int32), np.array(recon.NumG).astype(np.int32),\n np.array(recon.Cfg.energy).astype(np.float32), np.array(recon.Cfg.window[2]).astype(np.int32), recon.LimD, \n np.array(5).astype(np.int32), np.array(recon.Cfg.omgInterval).astype(np.float32),\n recon.tG]\n \n err = run_cuda_function(recon.sim_strain_func,args,(1,1,1),(recon.NumG,1,1))\n \n # KL ####################################################################################\n args = [diffD,XD, YD, OffsetD, MaskD, TrueMaskD,\n realMapsLogD, falseMapsD,\n np.array(recon.NumG).astype(np.int32), np.array(1).astype(np.int32),recon.windowD]\n\n err = run_cuda_function(recon.KL_diff_func,args,(int(NumD / BlockSize + 1),1,1),(BlockSize,1,1)) \n diffH = diffD.get()\n \n #One Fun ######################################################################################\n args = [XD, YD, OffsetD, MaskD, TrueMaskD,\n falseMapsD, np.array(recon.NumG).astype(np.int32), \n np.array(epsilon).astype(np.float32), np.array(+1).astype(np.int32),recon.windowD]\n err = run_cuda_function(recon.One_func,args,(1,1,1),(recon.NumG,1,1))\n \n return cov, mean, diffH[0] - diff_init\n\n\n" ]
[ [ "numpy.absolute", "numpy.min", "numpy.eye", "numpy.tile", "numpy.max", "numpy.mean", "numpy.argpartition", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cle1109/scot
[ "48598b79d4400dad893b134cd2194715511facda" ]
[ "scot/parallel.py" ]
[ "# Released under The MIT License (MIT)\n# http://opensource.org/licenses/MIT\n# Copyright (c) 2014 SCoT Development Team\n\nfrom __future__ import print_function\n\n\ndef parallel_loop(func, n_jobs=1, verbose=1):\n \"\"\"run loops in parallel, if joblib is available.\n\n Parameters\n ----------\n func : function\n function to be executed in parallel\n n_jobs : int | None\n Number of jobs. If set to None, do not attempt to use joblib.\n verbose : int\n verbosity level\n\n Notes\n -----\n Execution of the main script must be guarded with `if __name__ == '__main__':` when using parallelization.\n \"\"\"\n if n_jobs:\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n n_jobs = None\n\n if not n_jobs:\n if verbose:\n print('running ', func, ' serially')\n par = lambda x: list(x)\n else:\n if verbose:\n print('running ', func, ' in parallel')\n func = delayed(func)\n par = Parallel(n_jobs=n_jobs, verbose=verbose)\n\n return par, func\n" ]
[ [ "sklearn.externals.joblib.delayed", "sklearn.externals.joblib.Parallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
daniel-furman/pyimpute
[ "58706f44b0d29a9cfe98b97378e8cdb5490de5cd" ]
[ "src/pyimpute/_main.py" ]
[ "from __future__ import print_function\nimport rasterio\nimport numpy as np\nimport os\nimport math\nimport logging\nfrom sklearn import metrics\nfrom sklearn import cross_validation\nlogger = logging.getLogger('pyimpute')\n\n\ndef load_training_vector(response_shapes, explanatory_rasters, response_field, metric='mean'):\n \"\"\"\n Parameters\n ----------\n response_shapes : Source of vector features for raster_stats;\n can be OGR file path or iterable of geojson-like features\n response_field : Field name containing the known response category (must be numeric)\n explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables\n metric : Statistic to aggregate explanatory data across line and polygon vector features\n Defaults to 'mean' (optional)\n\n Returns\n -------\n train_xs : Array of explanatory variables\n train_y : 1xN array of known responses\n \"\"\"\n from rasterstats import zonal_stats\n all_means = []\n all_zones = None\n\n for i, raster in enumerate(explanatory_rasters):\n logger.debug(\"Rasters stats on %s\" % raster)\n\n stats = zonal_stats(response_shapes, raster, stats=metric, prefix=\"pyimpute_\", geojson_out=True)\n\n zones = [x['properties'][response_field] for x in stats]\n if all_zones:\n assert zones == all_zones\n else:\n all_zones = zones\n\n means = [x['properties']['pyimpute_' + metric] for x in stats]\n all_means.append(means)\n\n train_y = np.array(all_zones)\n train_xs = np.array(all_means).T\n\n return train_xs, train_y\n\n\ndef load_training_rasters(response_raster, explanatory_rasters, selected=None):\n \"\"\"\n Parameters\n ----------\n response_raster : Path to GDAL raster containing responses\n explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables\n\n Returns\n -------\n train_xs : Array of explanatory variables\n train_ys : 1xN array of known responses\n \"\"\"\n\n with rasterio.open(response_raster) as src:\n response_data = src.read().flatten()\n\n if selected is None:\n train_y = response_data\n else:\n train_y = response_data[selected]\n\n selected_data = []\n for rast in explanatory_rasters:\n with rasterio.open(rast) as src:\n explanatory_data = src.read().flatten()\n assert explanatory_data.size == response_data.size\n if selected is None:\n selected_data.append(explanatory_data)\n else:\n selected_data.append(explanatory_data[selected])\n\n train_xs = np.asarray(selected_data).T\n return train_xs, train_y\n\n\ndef load_targets(explanatory_rasters):\n \"\"\"\n Parameters\n ----------\n explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables\n\n Returns\n -------\n expl : Array of explanatory variables\n raster_info : dict of raster info\n \"\"\"\n\n explanatory_raster_arrays = []\n aff = None\n shape = None\n crs = None\n\n for raster in explanatory_rasters:\n logger.debug(raster)\n with rasterio.open(raster) as src:\n ar = src.read(1) # TODO band num? 
\n\n # Save or check the geotransform\n if not aff:\n aff = src.affine\n else:\n assert aff == src.affine\n\n # Save or check the shape\n if not shape:\n shape = ar.shape\n else:\n assert shape == ar.shape\n\n # Save or check the geotransform\n if not crs:\n crs = src.crs\n else:\n assert crs == src.crs\n\n # Flatten in one dimension\n arf = ar.flatten()\n explanatory_raster_arrays.append(arf)\n\n expl = np.array(explanatory_raster_arrays).T\n\n raster_info = {\n 'affine': aff,\n 'shape': shape,\n 'crs': crs\n }\n return expl, raster_info\n\n\ndef impute(target_xs, clf, raster_info, outdir=\"output\", linechunk=1000, class_prob=True, certainty=True):\n \"\"\"\n Parameters\n ----------\n target_xs: Array of explanatory variables for which to predict responses\n clf: instance of a scikit-learn Classifier\n raster_info: dictionary of raster attributes with key 'gt', 'shape' and 'srs'\n\n Options\n -------\n outdir : output directory\n linechunk : number of lines to process per pass; reduce only if memory is constrained\n class_prob : Boolean. Should we create a probability raster for each class?\n certainty : Boolean. Should we produce a raster of overall classification certainty?\n \"\"\"\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n shape = raster_info['shape']\n\n profile = {\n 'affine': raster_info['affine'],\n 'blockxsize': shape[1],\n 'height': shape[0],\n 'blockysize': 1,\n 'count': 1,\n 'crs': raster_info['crs'],\n 'driver': u'GTiff',\n 'dtype': 'int16',\n 'nodata': -32768,\n 'tiled': False,\n 'transform': raster_info['affine'].to_gdal(),\n 'width': shape[1]}\n\n try:\n response_path = os.path.join(outdir, \"responses.tif\")\n response_ds = rasterio.open(response_path, 'w', **profile)\n\n profile['dtype'] = 'float32'\n if certainty:\n certainty_path = os.path.join(outdir, \"certainty.tif\")\n certainty_ds = rasterio.open(certainty_path, 'w', **profile)\n\n class_dss = []\n if class_prob:\n classes = list(clf.classes_)\n class_paths = []\n for i, c in enumerate(classes):\n ods = os.path.join(outdir, \"probability_%s.tif\" % c)\n class_paths.append(ods)\n for p in class_paths:\n class_dss.append(rasterio.open(p, 'w', **profile))\n\n # Chunky logic\n if not linechunk:\n linechunk = shape[0]\n chunks = int(math.ceil(shape[0] / float(linechunk)))\n\n for chunk in range(chunks):\n logger.debug(\"Writing chunk %d of %d\" % (chunk+1, chunks))\n row = chunk * linechunk\n if row + linechunk > shape[0]:\n linechunk = shape[0] - row\n # in 1D space\n start = shape[1] * row\n end = start + shape[1] * linechunk\n line = target_xs[start:end, :]\n\n window = ((row, row + linechunk), (0, shape[1]))\n\n # Predict\n responses = clf.predict(line)\n responses2D = responses.reshape((linechunk, shape[1])).astype('int16')\n response_ds.write_band(1, responses2D, window=window)\n\n if certainty or class_prob:\n proba = clf.predict_proba(line)\n\n # Certainty\n if certainty:\n certaintymax = proba.max(axis=1)\n certainty2D = certaintymax.reshape((linechunk, shape[1])).astype('float32')\n certainty_ds.write_band(1, certainty2D, window=window)\n\n # write out probabilities for each class as a separate raster\n for i, class_ds in enumerate(class_dss):\n proba_class = proba[:, i]\n classcert2D = proba_class.reshape((linechunk, shape[1])).astype('float32')\n class_ds.write_band(1, classcert2D, window=window)\n\n finally:\n response_ds.close()\n if certainty:\n certainty_ds.close()\n for class_ds in class_dss:\n class_ds.close()\n\n\ndef stratified_sample_raster(strata_data, target_sample_size=30, 
min_sample_proportion=0.1):\n \"\"\"\n Parameters\n ----------\n strata_data: Path to raster dataset containing strata to sample from (e.g. zones)\n\n Returns\n -------\n selected: array of selected indices\n \"\"\"\n with rasterio.open(strata_data) as src:\n strata = src.read().flatten()\n index_array = np.arange(strata.size)\n\n # construct a dictionary of lists,\n # keys are stratum ids\n # values are list of indices\n sample = dict([(int(s),[]) for s in np.unique(strata)])\n satisfied = []\n\n # counts for proportion-based constraints\n bins = np.bincount(strata)\n ii = np.nonzero(bins)[0]\n stratum_count = dict(zip(ii,bins[ii]))\n\n # shuffle the indices and loop until the sample satisfied our constraints\n np.random.shuffle(index_array)\n for idx in index_array:\n stratum = strata[index_array[idx]]\n if stratum in satisfied:\n continue\n sample[stratum].append(idx)\n nsamples = len(sample[stratum])\n # constraints -> hit the target sample size OR proportion of total\n # (whichever is highest)\n target = stratum_count[stratum] * min_sample_proportion\n if target < target_sample_size:\n target = target_sample_size\n if nsamples >= target:\n satisfied.append(stratum)\n if len(satisfied) == len(sample.keys()):\n break\n\n # convert sampled indicies into a list of indicies\n selected = []\n for k, v in sample.items():\n # check for stratum with < target sample size\n if len(v) < target_sample_size:\n # if we have too few samples, drop them\n #warnings.warn(\"Stratum %s has only %d samples, dropped\" % (k, len(v)))\n pass\n else:\n selected.extend(v)\n\n return np.array(selected)\n\n\ndef evaluate_clf(clf, X, y, k=None, test_size=0.5, scoring=\"f1_weighted\", feature_names=None):\n \"\"\"\n Evalate the classifier on the FULL training dataset\n This takes care of fitting on train/test splits\n \"\"\"\n X_train, X_test, y_train, y_true = cross_validation.train_test_split(\n X, y, test_size=test_size)\n\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n print(\"Accuracy Score: %f\" % metrics.accuracy_score(y_true, y_pred))\n print()\n\n print(\"Classification report\")\n print(metrics.classification_report(y_true, y_pred))\n print()\n\n print(\"Confussion matrix\")\n print(metrics.confusion_matrix(y_true, y_pred))\n print()\n\n print(\"Feature importances\")\n if not feature_names:\n feature_names = [\"%d\" % i for i in xrange(X.shape[1])]\n for f, imp in zip(feature_names, clf.feature_importances_):\n print(\"%20s: %s\" % (f, round(imp * 100, 1)))\n print()\n\n if k:\n print(\"Cross validation\")\n kf = cross_validation.KFold(len(y), n_folds=k)\n scores = cross_validation.cross_val_score(clf, X, y, cv=kf, scoring=scoring)\n print(scores)\n print(\"%d-fold Cross Validation Accuracy: %0.2f (+/- %0.2f)\" % (k, scores.mean() * 100, scores.std() * 200))\n" ]
[ [ "sklearn.cross_validation.cross_val_score", "sklearn.cross_validation.train_test_split", "numpy.nonzero", "numpy.unique", "numpy.asarray", "numpy.arange", "sklearn.metrics.confusion_matrix", "numpy.random.shuffle", "numpy.bincount", "numpy.array", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tohtsky/pymc3
[ "68d5201292b45feecbfaf88a10aa8e392d5ab9f2" ]
[ "pymc3/tests/test_model.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport theano\nimport theano.tensor as tt\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport numpy.testing as npt\nimport unittest\n\nimport pymc3 as pm\nfrom pymc3.distributions import HalfCauchy, Normal, transforms\nfrom pymc3 import Potential, Deterministic\nfrom pymc3.model import ValueGradFunction\nfrom .helpers import select_by_precision\n\n\nclass NewModel(pm.Model):\n def __init__(self, name=\"\", model=None):\n super().__init__(name, model)\n assert pm.modelcontext(None) is self\n # 1) init variables with Var method\n self.Var(\"v1\", pm.Normal.dist())\n self.v2 = pm.Normal(\"v2\", mu=0, sigma=1)\n # 2) Potentials and Deterministic variables with method too\n # be sure that names will not overlap with other same models\n pm.Deterministic(\"d\", tt.constant(1))\n pm.Potential(\"p\", tt.constant(1))\n\n\nclass DocstringModel(pm.Model):\n def __init__(self, mean=0, sigma=1, name=\"\", model=None):\n super().__init__(name, model)\n self.Var(\"v1\", Normal.dist(mu=mean, sigma=sigma))\n Normal(\"v2\", mu=mean, sigma=sigma)\n Normal(\"v3\", mu=mean, sigma=HalfCauchy(\"sd\", beta=10, testval=1.0))\n Deterministic(\"v3_sq\", self.v3 ** 2)\n Potential(\"p1\", tt.constant(1))\n\n\nclass TestBaseModel:\n def test_setattr_properly_works(self):\n with pm.Model() as model:\n pm.Normal(\"v1\")\n assert len(model.vars) == 1\n with pm.Model(\"sub\") as submodel:\n submodel.Var(\"v1\", pm.Normal.dist())\n assert hasattr(submodel, \"v1\")\n assert len(submodel.vars) == 1\n assert len(model.vars) == 2\n with submodel:\n submodel.Var(\"v2\", pm.Normal.dist())\n assert hasattr(submodel, \"v2\")\n assert len(submodel.vars) == 2\n assert len(model.vars) == 3\n\n def test_context_passes_vars_to_parent_model(self):\n with pm.Model() as model:\n assert pm.model.modelcontext(None) == model\n assert pm.Model.get_context() == model\n # a set of variables is created\n nm = NewModel()\n assert pm.Model.get_context() == model\n # another set of variables are created but with prefix 'another'\n usermodel2 = NewModel(name=\"another\")\n assert pm.Model.get_context() == model\n assert usermodel2._parent == model\n # you can enter in a context with submodel\n with usermodel2:\n usermodel2.Var(\"v3\", pm.Normal.dist())\n pm.Normal(\"v4\")\n # this variable is created in parent model too\n assert \"another_v2\" in model.named_vars\n assert \"another_v3\" in model.named_vars\n assert \"another_v3\" in usermodel2.named_vars\n assert \"another_v4\" in model.named_vars\n assert \"another_v4\" in usermodel2.named_vars\n assert hasattr(usermodel2, \"v3\")\n assert hasattr(usermodel2, \"v2\")\n assert hasattr(usermodel2, \"v4\")\n # When you create a class based model you should follow some rules\n with model:\n m = NewModel(\"one_more\")\n assert m.d is model[\"one_more_d\"]\n assert m[\"d\"] is model[\"one_more_d\"]\n assert m[\"one_more_d\"] is model[\"one_more_d\"]\n\n\nclass TestNested:\n def 
test_nest_context_works(self):\n with pm.Model() as m:\n new = NewModel()\n with new:\n assert pm.modelcontext(None) is new\n assert pm.modelcontext(None) is m\n assert \"v1\" in m.named_vars\n assert \"v2\" in m.named_vars\n\n def test_named_context(self):\n with pm.Model() as m:\n NewModel(name=\"new\")\n assert \"new_v1\" in m.named_vars\n assert \"new_v2\" in m.named_vars\n\n def test_docstring_example1(self):\n usage1 = DocstringModel()\n assert \"v1\" in usage1.named_vars\n assert \"v2\" in usage1.named_vars\n assert \"v3\" in usage1.named_vars\n assert \"v3_sq\" in usage1.named_vars\n assert len(usage1.potentials), 1\n\n def test_docstring_example2(self):\n with pm.Model() as model:\n DocstringModel(name=\"prefix\")\n assert \"prefix_v1\" in model.named_vars\n assert \"prefix_v2\" in model.named_vars\n assert \"prefix_v3\" in model.named_vars\n assert \"prefix_v3_sq\" in model.named_vars\n assert len(model.potentials), 1\n\n def test_duplicates_detection(self):\n with pm.Model():\n DocstringModel(name=\"prefix\")\n with pytest.raises(ValueError):\n DocstringModel(name=\"prefix\")\n\n def test_model_root(self):\n with pm.Model() as model:\n assert model is model.root\n with pm.Model() as sub:\n assert model is sub.root\n\n\nclass TestObserved:\n def test_observed_rv_fail(self):\n with pytest.raises(TypeError):\n with pm.Model():\n x = Normal(\"x\")\n Normal(\"n\", observed=x)\n\n def test_observed_type(self):\n X_ = np.random.randn(100, 5)\n X = pm.floatX(theano.shared(X_))\n with pm.Model():\n x1 = pm.Normal(\"x1\", observed=X_)\n x2 = pm.Normal(\"x2\", observed=X)\n\n assert x1.type == X.type\n assert x2.type == X.type\n\n\nclass TestTheanoConfig:\n def test_set_testval_raise(self):\n with theano.configparser.change_flags(compute_test_value=\"off\"):\n with pm.Model():\n assert theano.config.compute_test_value == \"raise\"\n assert theano.config.compute_test_value == \"off\"\n\n def test_nested(self):\n with theano.configparser.change_flags(compute_test_value=\"off\"):\n with pm.Model(theano_config={\"compute_test_value\": \"ignore\"}):\n assert theano.config.compute_test_value == \"ignore\"\n with pm.Model(theano_config={\"compute_test_value\": \"warn\"}):\n assert theano.config.compute_test_value == \"warn\"\n assert theano.config.compute_test_value == \"ignore\"\n assert theano.config.compute_test_value == \"off\"\n\n\ndef test_matrix_multiplication():\n # Check matrix multiplication works between RVs, transformed RVs,\n # Deterministics, and numpy arrays\n with pm.Model() as linear_model:\n matrix = pm.Normal(\"matrix\", shape=(2, 2))\n transformed = pm.Gamma(\"transformed\", alpha=2, beta=1, shape=2)\n rv_rv = pm.Deterministic(\"rv_rv\", matrix @ transformed)\n np_rv = pm.Deterministic(\"np_rv\", np.ones((2, 2)) @ transformed)\n rv_np = pm.Deterministic(\"rv_np\", matrix @ np.ones(2))\n rv_det = pm.Deterministic(\"rv_det\", matrix @ rv_rv)\n det_rv = pm.Deterministic(\"det_rv\", rv_rv @ transformed)\n\n posterior = pm.sample(10, tune=0, compute_convergence_checks=False, progressbar=False)\n decimal = select_by_precision(7, 5)\n for point in posterior.points():\n npt.assert_almost_equal(\n point[\"matrix\"] @ point[\"transformed\"],\n point[\"rv_rv\"],\n decimal=decimal,\n )\n npt.assert_almost_equal(\n np.ones((2, 2)) @ point[\"transformed\"],\n point[\"np_rv\"],\n decimal=decimal,\n )\n npt.assert_almost_equal(\n point[\"matrix\"] @ np.ones(2),\n point[\"rv_np\"],\n decimal=decimal,\n )\n npt.assert_almost_equal(\n point[\"matrix\"] @ point[\"rv_rv\"],\n point[\"rv_det\"],\n 
decimal=decimal,\n )\n npt.assert_almost_equal(\n point[\"rv_rv\"] @ point[\"transformed\"],\n point[\"det_rv\"],\n decimal=decimal,\n )\n\n\ndef test_duplicate_vars():\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Normal(\"a\")\n pm.Normal(\"a\")\n err.match(\"already exists\")\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Normal(\"a\")\n pm.Normal(\"a\", transform=transforms.log)\n err.match(\"already exists\")\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n a = pm.Normal(\"a\")\n pm.Potential(\"a\", a ** 2)\n err.match(\"already exists\")\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Binomial(\"a\", 10, 0.5)\n pm.Normal(\"a\", transform=transforms.log)\n err.match(\"already exists\")\n\n\ndef test_empty_observed():\n data = pd.DataFrame(np.ones((2, 3)) / 3)\n data.values[:] = np.nan\n with pm.Model():\n a = pm.Normal(\"a\", observed=data)\n npt.assert_allclose(a.tag.test_value, np.zeros((2, 3)))\n b = pm.Beta(\"b\", alpha=1, beta=1, observed=data)\n npt.assert_allclose(b.tag.test_value, np.ones((2, 3)) / 2)\n\n\nclass TestValueGradFunction(unittest.TestCase):\n def test_no_extra(self):\n a = tt.vector(\"a\")\n a.tag.test_value = np.zeros(3, dtype=a.dtype)\n a.dshape = (3,)\n a.dsize = 3\n f_grad = ValueGradFunction([a.sum()], [a], [], mode=\"FAST_COMPILE\")\n assert f_grad.size == 3\n\n def test_invalid_type(self):\n a = tt.ivector(\"a\")\n a.tag.test_value = np.zeros(3, dtype=a.dtype)\n a.dshape = (3,)\n a.dsize = 3\n with pytest.raises(TypeError) as err:\n ValueGradFunction([a.sum()], [a], [], mode=\"FAST_COMPILE\")\n err.match(\"Invalid dtype\")\n\n def setUp(self):\n extra1 = tt.iscalar(\"extra1\")\n extra1_ = np.array(0, dtype=extra1.dtype)\n extra1.tag.test_value = extra1_\n extra1.dshape = tuple()\n extra1.dsize = 1\n\n val1 = tt.vector(\"val1\")\n val1_ = np.zeros(3, dtype=val1.dtype)\n val1.tag.test_value = val1_\n val1.dshape = (3,)\n val1.dsize = 3\n\n val2 = tt.matrix(\"val2\")\n val2_ = np.zeros((2, 3), dtype=val2.dtype)\n val2.tag.test_value = val2_\n val2.dshape = (2, 3)\n val2.dsize = 6\n\n self.val1, self.val1_ = val1, val1_\n self.val2, self.val2_ = val2, val2_\n self.extra1, self.extra1_ = extra1, extra1_\n\n self.cost = extra1 * val1.sum() + val2.sum()\n\n self.f_grad = ValueGradFunction([self.cost], [val1, val2], [extra1], mode=\"FAST_COMPILE\")\n\n def test_extra_not_set(self):\n with pytest.raises(ValueError) as err:\n self.f_grad.get_extra_values()\n err.match(\"Extra values are not set\")\n\n with pytest.raises(ValueError) as err:\n self.f_grad(np.zeros(self.f_grad.size, dtype=self.f_grad.dtype))\n err.match(\"Extra values are not set\")\n\n def test_grad(self):\n self.f_grad.set_extra_values({\"extra1\": 5})\n array = np.ones(self.f_grad.size, dtype=self.f_grad.dtype)\n val, grad = self.f_grad(array)\n assert val == 21\n npt.assert_allclose(grad, [5, 5, 5, 1, 1, 1, 1, 1, 1])\n\n def test_bij(self):\n self.f_grad.set_extra_values({\"extra1\": 5})\n array = np.ones(self.f_grad.size, dtype=self.f_grad.dtype)\n point = self.f_grad.array_to_dict(array)\n assert len(point) == 2\n npt.assert_allclose(point[\"val1\"], 1)\n npt.assert_allclose(point[\"val2\"], 1)\n\n array2 = self.f_grad.dict_to_array(point)\n npt.assert_allclose(array2, array)\n point_ = self.f_grad.array_to_full_dict(array)\n assert len(point_) == 3\n assert point_[\"extra1\"] == 5\n\n def test_edge_case(self):\n # Edge case discovered in #2948\n ndim = 3\n with pm.Model() as m:\n pm.Lognormal(\n \"sigma\", mu=np.zeros(ndim), 
tau=np.ones(ndim), shape=ndim\n ) # variance for the correlation matrix\n pm.HalfCauchy(\"nu\", beta=10)\n step = pm.NUTS()\n\n func = step._logp_dlogp_func\n func.set_extra_values(m.test_point)\n q = func.dict_to_array(m.test_point)\n logp, dlogp = func(q)\n assert logp.size == 1\n assert dlogp.size == 4\n npt.assert_allclose(dlogp, 0.0, atol=1e-5)\n\n def test_tensor_type_conversion(self):\n # case described in #3122\n X = np.random.binomial(1, 0.5, 10)\n X[0] = -1 # masked a single value\n X = np.ma.masked_values(X, value=-1)\n with pm.Model() as m:\n x1 = pm.Uniform(\"x1\", 0.0, 1.0)\n x2 = pm.Bernoulli(\"x2\", x1, observed=X)\n\n gf = m.logp_dlogp_function()\n\n assert m[\"x2_missing\"].type == gf._extra_vars_shared[\"x2_missing\"].type\n\n\ndef test_multiple_observed_rv():\n \"Test previously buggy MultiObservedRV comparison code.\"\n y1_data = np.random.randn(10)\n y2_data = np.random.randn(100)\n with pm.Model() as model:\n mu = pm.Normal(\"mu\")\n x = pm.DensityDist( # pylint: disable=unused-variable\n \"x\", pm.Normal.dist(mu, 1.0).logp, observed={\"value\": 0.1}\n )\n assert not model[\"x\"] == model[\"mu\"]\n assert model[\"x\"] == model[\"x\"]\n assert model[\"x\"] in model.observed_RVs\n assert not model[\"x\"] in model.vars\n\n\ndef test_tempered_logp_dlogp():\n with pm.Model() as model:\n pm.Normal(\"x\")\n pm.Normal(\"y\", observed=1)\n\n func = model.logp_dlogp_function()\n func.set_extra_values({})\n\n func_temp = model.logp_dlogp_function(tempered=True)\n func_temp.set_extra_values({})\n\n func_nograd = model.logp_dlogp_function(compute_grads=False)\n func_nograd.set_extra_values({})\n\n func_temp_nograd = model.logp_dlogp_function(tempered=True, compute_grads=False)\n func_temp_nograd.set_extra_values({})\n\n x = np.ones(func.size, dtype=func.dtype)\n assert func(x) == func_temp(x)\n assert func_nograd(x) == func(x)[0]\n assert func_temp_nograd(x) == func(x)[0]\n\n func_temp.set_weights(np.array([0.0], dtype=func.dtype))\n func_temp_nograd.set_weights(np.array([0.0], dtype=func.dtype))\n npt.assert_allclose(func(x)[0], 2 * func_temp(x)[0])\n npt.assert_allclose(func(x)[1], func_temp(x)[1])\n\n npt.assert_allclose(func_nograd(x), func(x)[0])\n npt.assert_allclose(func_temp_nograd(x), func_temp(x)[0])\n\n func_temp.set_weights(np.array([0.5], dtype=func.dtype))\n func_temp_nograd.set_weights(np.array([0.5], dtype=func.dtype))\n npt.assert_allclose(func(x)[0], 4 / 3 * func_temp(x)[0])\n npt.assert_allclose(func(x)[1], func_temp(x)[1])\n\n npt.assert_allclose(func_nograd(x), func(x)[0])\n npt.assert_allclose(func_temp_nograd(x), func_temp(x)[0])\n\n\ndef test_model_pickle(tmpdir):\n \"\"\"Tests that PyMC3 models are pickleable\"\"\"\n with pm.Model() as model:\n x = pm.Normal(\"x\")\n pm.Normal(\"y\", observed=1)\n\n file_path = tmpdir.join(\"model.p\")\n with open(file_path, \"wb\") as buff:\n pickle.dump(model, buff)\n\n\ndef test_model_pickle_deterministic(tmpdir):\n \"\"\"Tests that PyMC3 models are pickleable\"\"\"\n with pm.Model() as model:\n x = pm.Normal(\"x\")\n z = pm.Normal(\"z\")\n pm.Deterministic(\"w\", x / z)\n pm.Normal(\"y\", observed=1)\n\n file_path = tmpdir.join(\"model.p\")\n with open(file_path, \"wb\") as buff:\n pickle.dump(model, buff)\n" ]
[ [ "numpy.ones", "numpy.testing.assert_almost_equal", "numpy.random.randn", "numpy.ma.masked_values", "numpy.testing.assert_allclose", "numpy.random.binomial", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sailordiary/m3f.pytorch
[ "f06a8eaff1f00563cb40e2dc5183de4324b08bff" ]
[ "models/vox2_dataset.py" ]
[ "import os\n\nimport math\nimport random\n\nimport numpy as np\nimport cv2\nfrom .cv_augment import adjust_brightness, adjust_contrast\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndef load_video(path, start, length,\n is_training=False,\n mirror_augment=False,\n crop_augment=False,\n input_size=128):\n frames = []\n # use identical crop windows for every frame\n if crop_augment:\n if is_training:\n crop_x = random.randint(0, input_size // 8)\n crop_y = random.randint(0, input_size // 8)\n else:\n crop_x, crop_y = input_size // 16, input_size // 16\n crop_size = input_size * 7 // 8\n \n # color jitter\n if is_training:\n brightness_factor = random.uniform(0.9, 1.1)\n contrast_factor = random.uniform(0.9, 1.1)\n\n cap = cv2.VideoCapture(path)\n assert cap.isOpened(), 'read error: {}'.format(path)\n cap.set(1, start)\n for i in range(length):\n ret, img = cap.read()\n if not ret: img = frames[-1] # TODO(yuanhang): this shouldn't happen\n if crop_augment:\n img = img[crop_y: crop_y + crop_size, crop_x: crop_x + crop_size]\n if mirror_augment and is_training: img = cv2.flip(img, 1)\n if is_training:\n img = adjust_brightness(img, brightness_factor)\n img = adjust_contrast(img, contrast_factor)\n # TODO: add temporal augmentation (repeat, deletion)\n frames.append(img)\n cap.release()\n seq = np.stack(frames).transpose(3, 0, 1, 2).astype(np.float32) # THWC->CTHW\n return seq\n\n\nclass VoxCeleb2Dataset(Dataset):\n '''\n 112 by 112 VoxCeleb2 face tracks with 1,000 classes.\n \n Params:\n split: partition (train, val)\n path: base path for data\n window_len: length of temporal crop window\n '''\n def __init__(self, split, path, window_len=16):\n self.split = split\n self.path = path\n self.window_len = window_len\n\n self.label_map = {l: i for i, l in enumerate(open(os.path.join(self.path, 'vox2_top1000_dev500utt_identity.csv'), 'r').read().splitlines())}\n self.files = []\n for l in open(os.path.join(self.path, 'vox2_top1000_dev500utt_{}.csv'.format(self.split)), 'r').read().splitlines():\n l = l.split('/')\n identity = self.label_map[l[0]]\n path = os.path.join(self.path, 'top1000_64f_128', '{}/{}/{}_{}.mp4'.format(l[0], self.split, l[1], l[2]))\n if os.path.exists(path):\n self.files.append((path, identity))\n \n print ('Loaded partition {}: {} files'.format(self.split, len(self.files)))\n\n def __getitem__(self, i):\n if self.split == 'train':\n vid_name = self.files[i][0]\n track_len = self.window_len\n start_frame = random.randint(0, 64 - self.window_len)\n else:\n vid_name = self.files[i][0]\n track_len = 16\n start_frame = 24\n \n is_training = self.split == 'train'\n inputs = load_video(vid_name, start_frame, track_len,\n is_training,\n random.random() > 0.5,\n True)\n \n return {\n 'video': torch.from_numpy(inputs),\n 'label': self.files[i][1]\n }\n\n def __len__(self):\n return len(self.files)\n" ]
[ [ "torch.from_numpy", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
This-50m/vega
[ "52b53582fe7df95d7aacc8425013fd18645d079f" ]
[ "vega/modules/cells/dag_cell.py" ]
[ "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\"\"\"This is DAG Cell for network.\"\"\"\nfrom vega.modules.module import Module\nfrom dag import DAG\nimport numpy as np\nfrom vega.modules.operators import ops\nfrom vega.modules.connections import Sequential\nfrom vega.common.class_factory import ClassFactory, ClassType\n\n\[email protected](ClassType.NETWORK)\nclass DagGraphCell(Module):\n \"\"\"Merge and process inputs to the middle-level graph.\"\"\"\n\n def __init__(self, adj_matrix, nodes, in_channels=64, out_channels=64):\n super(DagGraphCell, self).__init__()\n self.adj_matrix = adj_matrix\n self.nodes = nodes\n self.c_in = in_channels\n self.c_out = out_channels\n self._add_nodes()\n\n def _add_nodes(self):\n for node_id, node_name in enumerate(self.nodes):\n module = ClassFactory.get_instance(ClassType.NETWORK, node_name, in_channels=self.c_in,\n out_channels=self.c_out)\n self.add_module(str(node_id), module)\n\n def _create_dag(self):\n dag = DAG()\n for name, modules in self.named_children():\n dag.add_node_if_not_exists(int(name))\n frontier = [0]\n num_vertices = np.shape(self.adj_matrix)[0]\n while frontier:\n node_id = frontier.pop()\n for v in range(num_vertices):\n if self.adj_matrix[node_id][v]:\n dag.add_edge(node_id, v)\n frontier.append(v)\n self.out_tensors = {}\n return dag\n\n def forward(self, x, *args, **kwargs):\n \"\"\"Forward x.\"\"\"\n dag = self._create_dag()\n node = dag.ind_nodes()[0]\n out = self._forward_module(x, node, dag)\n return out\n\n def _forward_module(self, x, parent, dag):\n parent_nodes = dag.predecessors(parent)\n if len(parent_nodes) <= 1:\n next_input = self._modules.get(str(parent))(x)\n elif self.out_tensors.get(parent) and len(self.out_tensors.get(parent)) == len(parent_nodes) - 1:\n out = self.out_tensors.pop(parent)\n out.append(x)\n next_input = self._modules.get(str(parent))(out)\n else:\n if parent not in self.out_tensors:\n self.out_tensors[parent] = []\n self.out_tensors[parent].append(x)\n return None\n children = dag.downstream(parent)\n for child in children:\n out = self._forward_module(next_input, child, dag)\n if out is not None:\n next_input = out\n return next_input\n\n\nclass ConvBnRelu(Module):\n \"\"\"Conv bn Relu class.\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0):\n super(ConvBnRelu, self).__init__()\n self.conv_bn_relu = Sequential(\n ops.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),\n ops.BatchNorm2d(out_channels),\n ops.Relu(inplace=True)\n )\n\n def call(self, x):\n \"\"\"Call forward function.\"\"\"\n return self.conv_bn_relu(x)\n\n\[email protected](ClassType.NETWORK)\nclass Conv3x3BnRelu(Module):\n \"\"\"The Class of 3x3 convolution with batch norm and ReLU activation.\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super(Conv3x3BnRelu, self).__init__()\n self.conv3x3 = ConvBnRelu(in_channels, out_channels, 3, 1, 1)\n\n def call(self, x):\n \"\"\"Call forward function.\"\"\"\n return self.conv3x3(x)\n\n\[email protected](ClassType.NETWORK)\nclass Conv1x1BnRelu(Module):\n \"\"\"The Class of 1x1 convolution with batch norm and ReLU 
activation.\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super(Conv1x1BnRelu, self).__init__()\n self.conv1x1 = ConvBnRelu(in_channels, out_channels, 1, 1, 0)\n\n def call(self, x):\n \"\"\"Call forward function.\"\"\"\n return self.conv1x1(x)\n\n\[email protected](ClassType.NETWORK)\nclass MaxPool3x3(Module):\n \"\"\"The class of 3x3 max pool with no subsampling.\"\"\"\n\n def __init__(self, kernel_size=3, stride=1, padding=1):\n super(MaxPool3x3, self).__init__()\n self.maxpool = ops.MaxPool2d(kernel_size, stride, padding)\n\n def call(self, x):\n \"\"\"Call forward function.\"\"\"\n return self.maxpool(x)\n\n\[email protected](ClassType.NETWORK)\nclass Input(Module):\n \"\"\"Input Class.\"\"\"\n\n def __init__(self, size=None):\n super(Input, self).__init__()\n self.size = size\n\n def call(self, x):\n \"\"\"Call forward function.\"\"\"\n return x\n\n\[email protected](ClassType.NETWORK)\nclass Output(Module):\n \"\"\"Output Class.\"\"\"\n\n def __init__(self, size=None):\n super(Output, self).__init__()\n self.size = size\n\n def call(self, x, **kwargs):\n \"\"\"Call forward function.\"\"\"\n return ops.concat(x, 1)\n" ]
[ [ "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yinchimaoliang/K-Net
[ "5e50ee58957dce972f51096804ff69171c2f072e" ]
[ "knet/det/mask_hungarian_assigner.py" ]
[ "import numpy as np\nimport torch\n\nfrom mmdet.core import AssignResult, BaseAssigner\nfrom mmdet.core.bbox.builder import BBOX_ASSIGNERS\nfrom mmdet.core.bbox.match_costs.builder import MATCH_COST, build_match_cost\n\ntry:\n from scipy.optimize import linear_sum_assignment\nexcept ImportError:\n linear_sum_assignment = None\n\n\n@MATCH_COST.register_module()\nclass DiceCost(object):\n \"\"\"DiceCost.\n\n Args:\n weight (int | float, optional): loss_weight\n pred_act (bool): Whether to activate the prediction\n before calculating cost\n\n Examples:\n >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost\n >>> import torch\n >>> self = BBoxL1Cost()\n >>> bbox_pred = torch.rand(1, 4)\n >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])\n >>> factor = torch.tensor([10, 8, 10, 8])\n >>> self(bbox_pred, gt_bboxes, factor)\n tensor([[1.6172, 1.6422]])\n \"\"\"\n\n def __init__(self,\n weight=1.,\n pred_act=False,\n act_mode='sigmoid',\n eps=1e-3):\n self.weight = weight\n self.pred_act = pred_act\n self.act_mode = act_mode\n self.eps = eps\n\n def dice_loss(cls, input, target, eps=1e-3):\n input = input.reshape(input.size()[0], -1)\n target = target.reshape(target.size()[0], -1).float()\n # einsum saves 10x memory\n # a = torch.sum(input[:, None] * target[None, ...], -1)\n a = torch.einsum('nh,mh->nm', input, target)\n b = torch.sum(input * input, 1) + eps\n c = torch.sum(target * target, 1) + eps\n d = (2 * a) / (b[:, None] + c[None, ...])\n # 1 is a constance that will not affect the matching, so ommitted\n return -d\n\n def __call__(self, mask_preds, gt_masks):\n \"\"\"\n Args:\n bbox_pred (Tensor): Predicted boxes with normalized coordinates\n (cx, cy, w, h), which are all in range [0, 1]. Shape\n [num_query, 4].\n gt_bboxes (Tensor): Ground truth boxes with normalized\n coordinates (x1, y1, x2, y2). Shape [num_gt, 4].\n\n Returns:\n torch.Tensor: bbox_cost value with weight\n \"\"\"\n if self.pred_act and self.act_mode == 'sigmoid':\n mask_preds = mask_preds.sigmoid()\n elif self.pred_act:\n mask_preds = mask_preds.softmax(dim=0)\n dice_cost = self.dice_loss(mask_preds, gt_masks, self.eps)\n return dice_cost * self.weight\n\n\n@MATCH_COST.register_module()\nclass MaskCost(object):\n \"\"\"MaskCost.\n\n Args:\n weight (int | float, optional): loss_weight\n \"\"\"\n\n def __init__(self, weight=1., pred_act=False, act_mode='sigmoid'):\n self.weight = weight\n self.pred_act = pred_act\n self.act_mode = act_mode\n\n def __call__(self, cls_pred, target):\n \"\"\"\n Args:\n cls_pred (Tensor): Predicted classification logits, shape\n [num_query, num_class].\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n Returns:\n torch.Tensor: cls_cost value with weight\n \"\"\"\n if self.pred_act and self.act_mode == 'sigmoid':\n cls_pred = cls_pred.sigmoid()\n elif self.pred_act:\n cls_pred = cls_pred.softmax(dim=0)\n\n _, H, W = target.shape\n # flatten_cls_pred = cls_pred.view(num_proposals, -1)\n # eingum is ~10 times faster than matmul\n pos_cost = torch.einsum('nhw,mhw->nm', cls_pred, target)\n neg_cost = torch.einsum('nhw,mhw->nm', 1 - cls_pred, 1 - target)\n cls_cost = -(pos_cost + neg_cost) / (H * W)\n return cls_cost * self.weight\n\n\n@BBOX_ASSIGNERS.register_module()\nclass MaskHungarianAssigner(BaseAssigner):\n \"\"\"Computes one-to-one matching between predictions and ground truth.\n\n This class computes an assignment between the targets and the predictions\n based on the costs. 
The costs are weighted sum of three components:\n classfication cost, regression L1 cost and regression iou cost. The\n targets don't include the no_object, so generally there are more\n predictions than targets. After the one-to-one matching, the un-matched\n are treated as backgrounds. Thus each query prediction will be assigned\n with `0` or a positive integer indicating the ground truth index:\n\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n cls_weight (int | float, optional): The scale factor for classification\n cost. Default 1.0.\n bbox_weight (int | float, optional): The scale factor for regression\n L1 cost. Default 1.0.\n iou_weight (int | float, optional): The scale factor for regression\n iou cost. Default 1.0.\n iou_calculator (dict | optional): The config for the iou calculation.\n Default type `BboxOverlaps2D`.\n iou_mode (str | optional): \"iou\" (intersection over union), \"iof\"\n (intersection over foreground), or \"giou\" (generalized\n intersection over union). Default \"giou\".\n \"\"\"\n\n def __init__(self,\n cls_cost=dict(type='ClassificationCost', weight=1.),\n mask_cost=dict(type='SigmoidCost', weight=1.0),\n dice_cost=dict(),\n boundary_cost=None,\n topk=1):\n self.cls_cost = build_match_cost(cls_cost)\n self.mask_cost = build_match_cost(mask_cost)\n self.dice_cost = build_match_cost(dice_cost)\n if boundary_cost is not None:\n self.boundary_cost = build_match_cost(boundary_cost)\n else:\n self.boundary_cost = None\n self.topk = topk\n\n def assign(self,\n bbox_pred,\n cls_pred,\n gt_bboxes,\n gt_labels,\n img_meta=None,\n gt_bboxes_ignore=None,\n eps=1e-7):\n \"\"\"Computes one-to-one matching based on the weighted costs.\n\n This method assign each query prediction to a ground truth or\n background. The `assigned_gt_inds` with -1 means don't care,\n 0 means negative sample, and positive number is the index (1-based)\n of assigned gt.\n The assignment is done in the following steps, the order matters.\n\n 1. assign every prediction to -1\n 2. compute the weighted costs\n 3. do Hungarian matching on CPU based on the costs\n 4. assign all to 0 (background) first, then for each matched pair\n between predictions and gts, treat this prediction as foreground\n and assign the corresponding gt index (plus 1) to it.\n\n Args:\n bbox_pred (Tensor): Predicted boxes with normalized coordinates\n (cx, cy, w, h), which are all in range [0, 1]. Shape\n [num_query, 4].\n cls_pred (Tensor): Predicted classification logits, shape\n [num_query, num_class].\n gt_bboxes (Tensor): Ground truth boxes with unnormalized\n coordinates (x1, y1, x2, y2). Shape [num_gt, 4].\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n img_meta (dict): Meta information for current image.\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`. Default None.\n eps (int | float, optional): A value added to the denominator for\n numerical stability. Default 1e-7.\n\n Returns:\n :obj:`AssignResult`: The assigned result.\n \"\"\"\n assert gt_bboxes_ignore is None, \\\n 'Only case when gt_bboxes_ignore is None is supported.'\n num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n # 1. 
assign -1 by default\n assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n assigned_labels = bbox_pred.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n if num_gts == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n if num_gts == 0:\n # No ground truth, assign all to background\n assigned_gt_inds[:] = 0\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n\n # 2. compute the weighted costs\n # classification and bboxcost.\n if self.cls_cost.weight != 0 and cls_pred is not None:\n cls_cost = self.cls_cost(cls_pred, gt_labels)\n else:\n cls_cost = 0\n if self.mask_cost.weight != 0:\n reg_cost = self.mask_cost(bbox_pred, gt_bboxes)\n else:\n reg_cost = 0\n if self.dice_cost.weight != 0:\n dice_cost = self.dice_cost(bbox_pred, gt_bboxes)\n else:\n dice_cost = 0\n if self.boundary_cost is not None and self.boundary_cost.weight != 0:\n b_cost = self.boundary_cost(bbox_pred, gt_bboxes)\n else:\n b_cost = 0\n cost = cls_cost + reg_cost + dice_cost + b_cost\n\n # 3. do Hungarian matching on CPU using linear_sum_assignment\n cost = cost.detach().cpu()\n if linear_sum_assignment is None:\n raise ImportError('Please run \"pip install scipy\" '\n 'to install scipy first.')\n if self.topk == 1:\n matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n else:\n topk_matched_row_inds = []\n topk_matched_col_inds = []\n for i in range(self.topk):\n matched_row_inds, matched_col_inds = linear_sum_assignment(\n cost)\n topk_matched_row_inds.append(matched_row_inds)\n topk_matched_col_inds.append(matched_col_inds)\n cost[matched_row_inds] = 1e10\n matched_row_inds = np.concatenate(topk_matched_row_inds)\n matched_col_inds = np.concatenate(topk_matched_col_inds)\n\n matched_row_inds = torch.from_numpy(matched_row_inds).to(\n bbox_pred.device)\n matched_col_inds = torch.from_numpy(matched_col_inds).to(\n bbox_pred.device)\n\n # 4. assign backgrounds and foregrounds\n # assign all indices to backgrounds first\n assigned_gt_inds[:] = 0\n # assign foregrounds based on matching results\n assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n" ]
[ [ "torch.einsum", "torch.sum", "torch.from_numpy", "numpy.concatenate", "scipy.optimize.linear_sum_assignment" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
iancze/hierarchical-mutual-inclinations
[ "a9d40df941d6cc0ce32e0cd9efc1596f011be223" ]
[ "make_real_data.py" ]
[ "import numpy as np\nfrom astropy.table import Table\nfrom astropy.io import ascii\n\ndeg = np.pi/180.0\n\n# table of stars with inclinations and uncertainties\n# name, #i_disk, i_disk_err, i_star, i_star_err (deg)\nrows = [(\"V4046 Sgr\", 33.5, 1.4, 33.42, 0.58),\n(\"AK Sco\", 109.4, 0.5, 108.76, 2.4),\n(\"DQ Tau\", 160.0, 3.0, 158.24, 2.77),\n(\"UZ Tau E\", 56.15, 1.5, 56.1, 5.7)]\n\nsample = Table(rows=rows, names=[\"name\", \"i_disk\", \"i_disk_err\", \"i_star\", \"i_star_err\"])\n\n# calculate these errors in cos(i)\ncos_i_disk = np.cos(sample[\"i_disk\"] * deg)\ncos_i_disk_err = np.sin(sample[\"i_disk\"] * deg) * sample[\"i_disk_err\"] * deg\n\ncos_i_star = np.cos(sample[\"i_star\"] * deg)\ncos_i_star_err = np.sin(sample[\"i_star\"] * deg) * sample[\"i_star_err\"] * deg\n\ncos_sample = Table([sample[\"name\"], cos_i_disk, cos_i_disk_err, cos_i_star, cos_i_star_err], names=[\"name\", \"cos_i_disk\", \"cos_i_disk_err\", \"cos_i_star\", \"cos_i_star_err\"])\n\nprint(cos_sample)\n\nascii.write(cos_sample, \"data/real_sample.ecsv\", format=\"ecsv\", overwrite=True)\n" ]
[ [ "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ewinston/qiskit-sdk-py
[ "4d64125aba4ff31f15d0054b90437bcef352782e" ]
[ "qiskit/transpiler/passes/scheduling/calibration_creators.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Calibration creators.\"\"\"\n\nimport math\nfrom typing import List\nfrom abc import abstractmethod\nimport numpy as np\n\nfrom qiskit.pulse import Play, ShiftPhase, Schedule, ControlChannel, DriveChannel, GaussianSquare\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.providers import basebackend\nfrom qiskit.dagcircuit import DAGNode\nfrom qiskit.circuit.library.standard_gates import RZXGate\nfrom qiskit.transpiler.basepasses import TransformationPass\n\n\nclass CalibrationCreator(TransformationPass):\n \"\"\"Abstract base class to inject calibrations into circuits.\"\"\"\n\n @abstractmethod\n def supported(self, node_op: DAGNode) -> bool:\n \"\"\"Determine if a given name supports the calibration.\"\"\"\n\n @abstractmethod\n def get_calibration(self, params: List, qubits: List) -> Schedule:\n \"\"\"Gets the calibrated schedule for the given qubits and parameters.\"\"\"\n\n def run(self, dag):\n \"\"\"Run the calibration adder pass on `dag`.\n\n Args:\n dag (DAGCircuit): DAG to schedule.\n\n Returns:\n DAGCircuit: A DAG with calibrations added to it.\n \"\"\"\n for node in dag.nodes():\n if node.type == 'op':\n if self.supported(node.op):\n params = node.op.params\n qubits = [_.index for _ in node.qargs]\n\n schedule = self.get_calibration(params, qubits)\n\n dag.add_calibration(node.op, qubits, schedule, params=params)\n\n return dag\n\n\nclass RZXCalibrationBuilder(CalibrationCreator):\n \"\"\"\n Creates calibrations for RZXGate(theta) by stretching and compressing\n Gaussian square pulses in the CX gate. This is done by retrieving (for a given pair of\n qubits) the CX schedule in the instruction schedule map of the backend defaults.\n The CX schedule must be an echoed cross-resonance gate optionally with rotary tones.\n The cross-resonance drive tones and rotary pulses must be Gaussian square pulses.\n The width of the Gaussian square pulse is adjusted so as to match the desired rotation angle.\n If the rotation angle is small such that the width disappears then the amplitude of the\n zero width Gaussian square pulse (i.e. a Gaussian) is reduced to reach the target rotation\n angle. 
Additional details can be found in https://arxiv.org/abs/2012.11660.\n \"\"\"\n\n def __init__(self, backend: basebackend):\n \"\"\"\n Initializes a RZXGate calibration builder.\n\n Args:\n backend: Backend for which to construct the gates.\n\n Raises:\n QiskitError: if open pulse is not supported by the backend.\n \"\"\"\n super().__init__()\n if not backend.configuration().open_pulse:\n raise QiskitError('Calibrations can only be added to Pulse-enabled backends, '\n 'but {0} is not enabled with Pulse.'.format(backend.name()))\n\n self._inst_map = backend.defaults().instruction_schedule_map\n self._config = backend.configuration()\n self._channel_map = backend.configuration().qubit_channel_mapping\n\n def supported(self, node_op: DAGNode) -> bool:\n \"\"\"\n Args:\n node_op: The node from the dag dep.\n\n Returns:\n match: True if the node is a RZXGate.\n \"\"\"\n return isinstance(node_op, RZXGate)\n\n @staticmethod\n def rescale_cr_inst(instruction: Play, theta: float, sample_mult: int = 16) -> Play:\n \"\"\"\n Args:\n instruction: The instruction from which to create a new shortened or lengthened pulse.\n theta: desired angle, pi/2 is assumed to be the angle that the pulse in the given\n play instruction implements.\n sample_mult: All pulses must be a multiple of sample_mult.\n\n Returns:\n Play: The play instruction with the stretched compressed GaussianSquare pulse.\n\n Raises:\n QiskitError: if the pulses are not GaussianSquare.\n \"\"\"\n pulse_ = instruction.pulse\n if isinstance(pulse_, GaussianSquare):\n amp = pulse_.amp\n width = pulse_.width\n sigma = pulse_.sigma\n n_sigmas = (pulse_.duration - width) / sigma\n\n # The error function is used because the Gaussian may have chopped tails.\n gaussian_area = abs(amp) * sigma * np.sqrt(2 * np.pi) * math.erf(n_sigmas)\n area = gaussian_area + abs(amp) * width\n\n target_area = abs(theta) / (np.pi / 2.) * area\n sign = theta / abs(theta)\n\n if target_area > gaussian_area:\n width = (target_area - gaussian_area) / abs(amp)\n duration = math.ceil((width + n_sigmas * sigma) / sample_mult) * sample_mult\n return Play(GaussianSquare(amp=sign*amp, width=width, sigma=sigma,\n duration=duration), channel=instruction.channel)\n else:\n amp_scale = sign * target_area / gaussian_area\n duration = math.ceil(n_sigmas * sigma / sample_mult) * sample_mult\n return Play(\n GaussianSquare(amp=amp * amp_scale, width=0, sigma=sigma, duration=duration),\n channel=instruction.channel)\n else:\n raise QiskitError('RZXCalibrationBuilder only stretches/compresses GaussianSquare.')\n\n def get_calibration(self, params: List, qubits: List) -> Schedule:\n \"\"\"\n Args:\n params: Parameters of the RZXGate(theta). I.e. params[0] is theta.\n qubits: List of qubits for which to get the schedules. The first qubit is\n the control and the second is the target.\n\n Returns:\n schedule: The calibration schedule for the RZXGate(theta).\n\n Raises:\n QiskitError: if the control and target qubits cannot be identified or the backend\n does not support cx between the qubits.\n \"\"\"\n theta = params[0]\n q1, q2 = qubits[0], qubits[1]\n\n if not self._inst_map.has('cx', qubits):\n raise QiskitError('This transpilation pass requires the backend to support cx '\n 'between qubits %i and %i.' 
% (q1, q2))\n\n cx_sched = self._inst_map.get('cx', qubits=(q1, q2))\n rzx_theta = Schedule(name='rzx(%.3f)' % theta)\n\n if theta == 0.0:\n return rzx_theta\n\n crs, comp_tones, shift_phases = [], [], []\n control, target = None, None\n\n for time, inst in cx_sched.instructions:\n\n if isinstance(inst, ShiftPhase) and time == 0:\n shift_phases.append(ShiftPhase(-theta, inst.channel))\n\n # Identify the CR pulses.\n if isinstance(inst, Play) and not isinstance(inst, ShiftPhase):\n if isinstance(inst.channel, ControlChannel):\n crs.append((time, inst))\n\n # Identify the compensation tones.\n if isinstance(inst.channel, DriveChannel) and not isinstance(inst, ShiftPhase):\n if isinstance(inst.pulse, GaussianSquare):\n comp_tones.append((time, inst))\n target = inst.channel.index\n control = q1 if target == q2 else q2\n\n if control is None:\n raise QiskitError('Control qubit is None.')\n if target is None:\n raise QiskitError('Target qubit is None.')\n\n echo_x = self._inst_map.get('x', qubits=control)\n\n # Build the schedule\n for inst in shift_phases:\n rzx_theta = rzx_theta.insert(0, inst)\n\n # Stretch/compress the CR gates and compensation tones\n cr1 = self.rescale_cr_inst(crs[0][1], theta)\n cr2 = self.rescale_cr_inst(crs[1][1], theta)\n\n if len(comp_tones) == 0:\n comp1, comp2 = None, None\n elif len(comp_tones) == 2:\n comp1 = self.rescale_cr_inst(comp_tones[0][1], theta)\n comp2 = self.rescale_cr_inst(comp_tones[1][1], theta)\n else:\n raise QiskitError('CX must have either 0 or 2 rotary tones between qubits %i and %i '\n 'but %i were found.' % (control, target, len(comp_tones)))\n\n # Build the schedule for the RZXGate\n rzx_theta = rzx_theta.insert(0, cr1)\n\n if comp1 is not None:\n rzx_theta = rzx_theta.insert(0, comp1)\n\n rzx_theta = rzx_theta.insert(comp1.duration, echo_x)\n time = comp1.duration + echo_x.duration\n rzx_theta = rzx_theta.insert(time, cr2)\n\n if comp2 is not None:\n rzx_theta = rzx_theta.insert(time, comp2)\n\n time = 2*comp1.duration + echo_x.duration\n rzx_theta = rzx_theta.insert(time, echo_x)\n\n # Reverse direction of the ZX with Hadamard gates\n if control == qubits[0]:\n return rzx_theta\n else:\n rzc = self._inst_map.get('rz', [control], np.pi / 2)\n sxc = self._inst_map.get('sx', [control])\n rzt = self._inst_map.get('rz', [target], np.pi / 2)\n sxt = self._inst_map.get('sx', [target])\n h_sched = Schedule(name='hadamards')\n h_sched = h_sched.insert(0, rzc)\n h_sched = h_sched.insert(0, sxc)\n h_sched = h_sched.insert(sxc.duration, rzc)\n h_sched = h_sched.insert(0, rzt)\n h_sched = h_sched.insert(0, sxt)\n h_sched = h_sched.insert(sxc.duration, rzt)\n rzx_theta = h_sched.append(rzx_theta)\n return rzx_theta.append(h_sched)\n" ]
[ [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
University-of-Reading-Space-Science/ExtremeEvents
[ "1a14aaf48984f72d24ce2d99fba97c04a31d7621" ]
[ "code/helio_time.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 15 18:30:50 2020\n\n@author: mathewjowens\n\nA collect of time conversion routes. Mostly ported from Matlab\n\n\"\"\"\n\nimport numpy as np\nimport datetime as datetime\nimport pandas as pd\n\n\n\ndef date2jd(*args):\n \n \"\"\"\n date2mjd(year,month,day, *hour, *miunute, *second)\n *optional\n \n Based on Matlab file central code: date2jd\n Mathew Owens, 15/10/20\n \n % JD = DATE2JD(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MICROSECOND) returns the Julian\n % day number of the given date (Gregorian calendar) plus a fractional part\n % depending on the time of day.\n %\n % Start of the JD (Julian day) count is from 0 at 12 noon 1 January -4712\n % (4713 BC), Julian proleptic calendar. Note that this day count conforms\n % with the astronomical convention starting the day at noon, in contrast\n % with the civil practice where the day starts with midnight.\n %\n % Astronomers have used the Julian period to assign a unique number to\n % every day since 1 January 4713 BC. This is the so-called Julian Day\n % (JD). JD 0 designates the 24 hours from noon UTC on 1 January 4713 BC\n % (Julian proleptic calendar) to noon UTC on 2 January 4713 BC.\n \n % Sources: - http://tycho.usno.navy.mil/mjd.html\n % - The Calendar FAQ (http://www.faqs.org)\n \n % Author: Peter J. Acklam\n % Time-stamp: 2002-05-24 13:30:06 +0200\n % E-mail: [email protected]\n % URL: http://home.online.no/~pjacklam\n \n \n \"\"\"\n \n assert(len(args)>=3)\n year=args[0]\n month=args[1]\n day=args[2]\n \n if isinstance(year,int):\n L=1\n else:\n L=len(year)\n \n #use 00:00:00.0 as default time\n if len(args)>3:\n hour=args[3]\n else:\n hour=np.zeros(L,dtype=int)\n \n if len(args)>4:\n minute=args[4]\n else:\n minute=np.zeros(L,dtype=int)\n \n if len(args)>5:\n second=args[5]\n else:\n second=np.zeros(L,dtype=int)\n \n if len(args)>6:\n microsecond=args[6]\n else:\n microsecond=np.zeros(L,dtype=int)\n \n \n \n #check inputs are integers (except seconds)\n # assert(isinstance(year,int) or isinstance(year[0],np.int32))\n # assert(isinstance(month,int) or isinstance(month[0],np.int32))\n # assert(isinstance(day,int) or isinstance(day[0],np.int32))\n # assert(isinstance(hour,int) or isinstance(hour[0],np.int32))\n # assert(isinstance(minute,int) or isinstance(minute[0],np.int32))\n # assert(isinstance(second,int) or isinstance(second[0],np.int32))\n # assert(isinstance(microsecond,int) or isinstance(microsecond[0],np.int32))\n \n #check the input value ranges\n assert(np.all(month>=0)) \n assert(np.all(month<=12))\n assert(np.all(day>=1)) \n assert(np.all(day<=31))\n assert(np.all(hour>=0)) \n assert(np.all(hour<=23))\n assert(np.all(minute>=0)) \n assert(np.all(minute<=59))\n assert(np.all(second>=0)) \n assert(np.all(second<60))\n \n \n a = np.floor((14-month)/12)\n y = year + 4800 - a\n m = month + 12*a -3\n\n jd = day + np.floor((153*m + 2)/5) + y*365 + np.floor(y/4) - np.floor(y/100) + np.floor(y/400) - 32045 \n \n #add the fractional day part\n jd = jd + (microsecond/1000000 + second + 60*minute + 3600*(hour - 12) )/86400\n \n return jd\n \n\ndef date2mjd(*args):\n \"\"\"\n date2mjd(year,month,day, *hour, *miunute, *second)\n *optional\n \n Convert a date to MJD. Just a wrapper for date2jd\n Mathew Owens, 15/10/20\n\n \"\"\"\n \n mjd = date2jd(*args) - 2400000.5\n \n return mjd\n\ndef datetime2jd(dt):\n \"\"\"\n Convert a datetime to JD. 
Just a wrapper for date2jd\n datetime2mjd(datetime)\n Mathew Owens, 15/10/20\n\n \"\"\" \n \n #check whether the input is an array of daetimes or a single instance\n if isinstance(dt,np.ndarray):\n year=np.vectorize(lambda x: x.year)(dt)\n month=np.vectorize(lambda x: x.month)(dt)\n day=np.vectorize(lambda x: x.day)(dt)\n hour=np.vectorize(lambda x: x.hour)(dt)\n minute=np.vectorize(lambda x: x.minute)(dt)\n second=np.vectorize(lambda x: x.second)(dt)\n microsecond=np.vectorize(lambda x: x.microsecond)(dt)\n jd=date2jd(year,month,day,hour,minute,second,microsecond)\n elif isinstance(dt,datetime.datetime) or isinstance(dt,pd.core.indexes.datetimes.DatetimeIndex):\n jd=date2jd(dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second,\n dt.microsecond)\n \n return jd\n \n\ndef datetime2mjd(dt):\n \"\"\"\n Convert a datetime to MJD. Just a wrapper for date2jd\n \n datetime2mjd(datetime)\n \n Mathew Owens, 15/10/20\n\n \"\"\" \n jd = datetime2jd(dt)\n \n return jd - 2400000.5\n \n \n \ndef jd2datetime(jd):\n \"\"\"\n\n Convert from Julian Day to a datetime object or array of datetimes\n \n Adapted from Matlab code, presumably the companion to date2jd, but can't \n find original source.\n \n BUG? Seems to gain 16 microseconds, possibly due to numerical roundoff.\n \n (Mathew Owens, 16/10/2020)\n\n \"\"\"\n \n #get the integer part of jd\n #Adding 0.5 to JD and taking FLOOR ensures that the date is correct.\n ijd = np.floor(jd + 0.5)\n #get the fractional part\n fjd = jd - ijd + 0.5\n \n a = ijd + 32044\n b = np.floor((4 * a + 3)/146097)\n c = a - np.floor((b*146097) / 4)\n \n \n d = np.floor((4* c + 3)/1461)\n e = c - np.floor((1461*d) /4)\n m = np.floor((5 * e + 2) / 153)\n \n day = e - np.floor((153 * m + 2) / 5) + 1\n month = m + 3 - 12 * np.floor(m/10)\n year = b * 100 + d - 4800 + np.floor(m/10)\n \n hour = np.floor(fjd*24)\n fjd = fjd - hour/24\n minute = np.floor(fjd*60*24)\n fjd = fjd - minute/24/60\n second = np.floor(fjd*24*60*60)\n fjd = fjd - second/24/60/60\n microsecond = np.floor(fjd*24*60*60*1000000) \n \n #datetime requires integer input\n #check whether the input is an array of daetimes or a single instance\n if isinstance(hour,np.ndarray):\n year=year.astype(int)\n month=month.astype(int)\n day=day.astype(int)\n hour=hour.astype(int)\n minute=minute.astype(int)\n second=second.astype(int)\n microsecond=microsecond.astype(int)\n else:\n year=int(year)\n month=int(month)\n day=int(day)\n hour=int(hour)\n minute=int(minute)\n second=int(second)\n microsecond=int(microsecond)\n \n \n #can't create a datetime array from an array of year, month, etc values\n #make a function and vectorize it\n def date_to_datetime(year,month,day,hour,minute,second,microsecond):\n return datetime.datetime(year,month,day,hour,minute,second,microsecond)\n converttime = np.vectorize(date_to_datetime)\n \n return converttime(year,month,day,hour,minute,second,microsecond)\n \ndef mjd2datetime(mjd):\n \"\"\"\n Convert MJD to datetime object. 
Just a wrapper for jd2datetime\n \n mjd2dateim(mjd)\n \n Mathew Owens, 15/10/20\n\n \"\"\" \n jd = mjd + 2400000.5\n \n return jd2datetime(jd)\n\ndef crnum2mjd(crnum):\n \"\"\" \n Converts a Carrington Rotation number to MJD\n Mathew Owens, 16/10/20\n\n \"\"\"\n return (crnum - 1750)*27.2753 + 45871.41\n\ndef mjd2crnum(mjd):\n \"\"\"\n Converts MJD to Carrington Rotation number\n Mathew Owens, 16/10/20\n \"\"\" \n return 1750 + ((mjd-45871.41)/27.2753)\n\ndef isleapyear(year):\n \"\"\"\n Tests if \"year\" is a leap year, returns boolean\n Mathew Owens, 16/10/20\n \"\"\"\n yearf = np.floor(year/4) - year/4\n return yearf == 0\n\ndef mjd2doyyr(mjd):\n \"\"\"\n Convert mjd to (decimal) day-of-year and year\n Mathew Owens, 16/10/20\n \"\"\"\n \n #convert to datetime and extract the necessary info\n dt=mjd2datetime(mjd)\n year=np.vectorize(lambda x: x.year)(dt)\n year=year.astype(int)\n doy=np.vectorize(lambda x: x.timetuple().tm_yday)(dt)\n \n #include the fractional part of the day\n doyfrac=mjd-np.floor(mjd)\n doy = doy + doyfrac\n \n return doy, year\n \n\ndef doyyr2mjd(doy,yr):\n \"\"\"\n Converts (decimal) day-of-year and (integer) year to MJD\n Mathew Owens, 16/10/20\n \"\"\" \n \n #create a datetime object at the start of the year, add doy as a delta\n def create_dt(doy,yr):\n dt = datetime.datetime(yr,yr*0+1,yr*0+1) + datetime.timedelta(days = np.floor(doy)-yr*0+1)\n return dt\n vec_create_dt = np.vectorize(create_dt)\n dt = vec_create_dt(doy,yr) \n \n #convert to mjd\n mjd = datetime2mjd(dt)\n \n #add the fractional part of doy\n mjd = mjd + (doy-np.floor(doy))\n \n return mjd\n\n \n \n \n# <codecell> Testing \n \n# dt = datetime.datetime(2020,5,17,2,40,23,999999)\n# jd=datetime2jd(dt)\n# print(dt,jd,jd2datetime(jd))\n\n# dt = datetime.datetime(2020,5,17,23,40,23)\n# jd=datetime2jd(dt)\n# print(dt,jd,jd2datetime(jd))\n\n# dt = datetime.datetime(2020,5,17,0,0,0)\n# jd=datetime2jd(dt)\n# print(dt,jd,jd2datetime(jd))\n\n\n# dt = datetime.datetime(2020,5,17,2,40,23,999999)\n# mjd=datetime2mjd(dt)\n# print(dt,mjd,mjd2datetime(mjd))\n\n\n\n# t = np.arange(datetime.datetime(1985,7,1,4,5,6), datetime.datetime(2085,7,4,4,5,6), \n# datetime.timedelta(days=1)).astype(datetime.datetime)\n# jd=datetime2jd(t)\n# mjd=datetime2mjd(t)\n# print(t,mjd,mjd2datetime(mjd))\n\n \n# jd=date2jd(year,month,day,hour,minute,second)\n# print(jd)\n\n# jd=date2jd(year,month,day)\n# print(jd)\n\n# mjd=date2mjd(year,month,day,hour,minute,second)\n# print(mjd)\n" ]
[ [ "numpy.all", "numpy.vectorize", "numpy.zeros", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hejj16/Machine-Learning-Algorithms
[ "d832c0a59f60eeab30502f8b28fc0f3de08bb479" ]
[ "PPCA.py" ]
[ "import numpy as np\r\n\r\n\r\nclass PPCA:\r\n\r\n def __init__(self):\r\n self.dim = 0\r\n self.latent_dim = 0\r\n self.W = None\r\n self.mu = None\r\n self.sigma2 = None\r\n self.alpha = None\r\n\r\n def fit(self, data, alpha_threshold=1e3, alpha_max_iter=10, alpha_iter_threshold=1e-6, EM_max_iter=200, EM_threshold=1e-6, random_state=None):\r\n\r\n np.random.seed(random_state)\r\n self.dim = data.shape[1]\r\n self.latent_dim = self.dim - 1\r\n self.W = np.random.randn(self.dim, self.latent_dim)\r\n self.mu = np.mean(data, axis=0)\r\n self.sigma2 = np.random.rand(1)\r\n self.alpha = np.random.rand(self.latent_dim)\r\n\r\n for iter_alpha in range(alpha_max_iter):\r\n\r\n for iter_EM in range(EM_max_iter):\r\n\r\n old_W = self.W\r\n\r\n # E-step\r\n M = self.W.T @ self.W + self.sigma2 * np.eye(self.latent_dim)\r\n E_Z = np.linalg.inv(M) @ self.W.T @ (data - self.mu).T\r\n E_ZZ = data.shape[0] * self.sigma2 * np.linalg.inv(M) + E_Z @ E_Z.T\r\n\r\n # M-step\r\n self.W = (data - self.mu).T @ E_Z.T @ np.linalg.inv(np.diag(self.alpha) * self.sigma2 + E_ZZ)\r\n self.sigma2 = 1 / self.dim / data.shape[0] * (np.sum((data - self.mu) ** 2) -\r\n 2 * np.trace(E_Z.T @ self.W.T @ (data - self.mu).T) +\r\n np.trace(E_ZZ @ self.W.T @ self.W))\r\n\r\n # check the threshold\r\n if np.sqrt(np.sum((self.W - old_W) ** 2)) <= EM_threshold:\r\n break\r\n\r\n # Calculate alpha\r\n old_alpha = self.alpha\r\n self.alpha = self.dim / (np.diag(self.W.T @ self.W) + 1e-10)\r\n self.latent_dim = np.sum(self.alpha <= alpha_threshold)\r\n self.W = self.W[:, self.alpha <= alpha_threshold]\r\n self.alpha = self.alpha[self.alpha <= alpha_threshold]\r\n if self.alpha.shape[0] == old_alpha.shape[0] and np.linalg.norm(self.alpha - old_alpha) <= alpha_iter_threshold:\r\n break\r\n\r\n return self\r\n\r\n def transform(self, data):\r\n\r\n M = self.W.T @ self.W + self.sigma2 * np.eye(self.latent_dim)\r\n Z = (data - self.mu) @ self.W @ np.linalg.inv(M).T\r\n\r\n return Z\r\n\r\n\r\n# z = np.random.randn(1500, 5)\r\n# W = np.random.randn(100, 5) * 20\r\n# mu = np.random.rand(100) * 10\r\n# data = z @ W.T + mu + np.random.randn(100)\r\n\r\n# ppca = PPCA().fit(data, random_state=42)\r\n# print(ppca.latent_dim)\r\n# print(ppca.alpha)\r\n" ]
[ [ "numpy.diag", "numpy.random.seed", "numpy.linalg.inv", "numpy.eye", "numpy.trace", "numpy.linalg.norm", "numpy.random.randn", "numpy.random.rand", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shanliang1992/Paddle-Lite
[ "a499e9988482b97da0736629be602828e8cf46b7" ]
[ "lite/tests/unittest_py/op/test_elementwise_sub_op.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume, reproduce_failure\nimport hypothesis.strategies as st\nimport numpy as np\nfrom functools import partial\nfrom test_elementwise_add_op import check_broadcast\n\n\nclass TestElementwiseSubOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.X86,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 4])\n self.enable_testing_on_place(\n TargetType.ARM, [PrecisionType.FP32, PrecisionType.INT32],\n DataLayoutType.NCHW,\n thread=[1, 4])\n opencl_valid_places = [\n Place(TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),\n Place(TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=opencl_valid_places)\n metal_places = [\n Place(TargetType.Metal, PrecisionType.FP32,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.Metal, PrecisionType.FP16,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.ARM, PrecisionType.FP32),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=metal_places)\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n target_type = predictor_config.target()\n in_x_shape = list(program_config.inputs[\"input_data_x\"].shape)\n in_y_shape = list(program_config.inputs[\"input_data_y\"].shape)\n input_data_type = program_config.inputs[\"input_data_x\"].dtype\n # Check config\n if target_type in [TargetType.ARM]:\n if predictor_config.precision(\n ) == PrecisionType.INT64 and input_data_type != np.int64:\n return False\n if predictor_config.precision(\n ) == PrecisionType.FP32 and input_data_type != np.float32:\n return False\n if predictor_config.precision(\n ) == PrecisionType.FP16 and input_data_type != np.float16:\n return False\n if predictor_config.precision(\n ) == PrecisionType.INT32 and input_data_type != np.int32:\n return False\n if target_type == TargetType.Metal:\n if input_data_type != np.float32 \\\n or in_x_shape != in_y_shape \\\n or len(in_x_shape) == 3 \\\n or in_x_shape[0] != 1:\n return False\n\n return True\n\n def sample_program_configs(self, draw):\n input_data_x_shape = draw(\n st.lists(\n st.integers(\n min_value=1, 
max_value=20), min_size=1, max_size=4))\n input_data_y_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=20), min_size=1, max_size=4))\n axis = draw(st.integers(min_value=-1, max_value=4))\n assume(\n check_broadcast(input_data_x_shape, input_data_y_shape, axis) ==\n True)\n if axis < 0:\n axis = abs(len(input_data_x_shape) - len(\n input_data_y_shape)) + axis + 1\n\n if self.get_target().upper() == 'X86':\n input_data_type = draw(\n st.sampled_from([np.float32, np.int32, np.int64]))\n elif self.get_target().upper() == 'ARM':\n input_data_type = draw(st.sampled_from([np.float32, np.int32]))\n elif self.get_target().upper() == 'OPENCL':\n input_data_type = draw(st.sampled_from([np.float32]))\n elif self.get_target().upper() == 'METAL':\n input_data_type = draw(st.sampled_from([np.float32]))\n\n def gen_input_data(*args, **kwargs):\n return np.random.randint(\n 1, 20, size=(kwargs['shape'])).astype(kwargs['dtype'])\n\n elementwise_sub_op = OpConfig(\n type=\"elementwise_sub\",\n inputs={\"X\": [\"input_data_x\"],\n \"Y\": [\"input_data_y\"]},\n outputs={\"Out\": [\"output_data\"]},\n attrs={\"axis\": axis})\n program_config = ProgramConfig(\n ops=[elementwise_sub_op],\n weights={},\n inputs={\n \"input_data_x\": TensorConfig(data_gen=partial(\n gen_input_data,\n shape=input_data_x_shape,\n dtype=input_data_type)),\n \"input_data_y\": TensorConfig(data_gen=partial(\n gen_input_data,\n shape=input_data_y_shape,\n dtype=input_data_type))\n },\n outputs=[\"output_data\"])\n return program_config\n\n def sample_predictor_configs(self):\n return self.get_predictor_configs(), [\"elementwise_sub\"], (1e-5, 1e-5)\n\n def add_ignore_pass_case(self):\n def teller1(program_config, predictor_config):\n target_type = predictor_config.target()\n if target_type in [TargetType.ARM]:\n return True\n return False\n\n self.add_ignore_check_case(\n teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"The elementwise_min op's result is different from paddle in some case, we should fix it as soon as possible!\"\n )\n\n def test(self, *args, **kwargs):\n target_str = self.get_target()\n max_examples = 150\n if target_str == \"OpenCL\":\n # Make sure to generate enough valid cases for OpenCL\n max_examples = 300\n if target_str == \"Metal\":\n # Make sure to generate enough valid cases for Metal\n max_examples = 2000\n self.run_and_statis(quant=False, max_examples=max_examples)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SahayDivyanshu/Model-Predictive-Control
[ "9243c4fc182416841ba14ffb427f2d812a798b1f" ]
[ "Obstacle_Avoidance.py" ]
[ "import numpy as np\nfrom sim.sim2d import sim_run\n\n# Simulator options.\noptions = {}\noptions['FIG_SIZE'] = [8,8]\noptions['OBSTACLES'] = True\n\nclass ModelPredictiveControl:\n def __init__(self):\n self.horizon = 15\n self.dt = 0.2\n\n # Reference or set point the controller will achieve.\n self.reference1 = [10, 0, 0]\n self.reference2 = None\n\n self.x_obs = 5\n self.y_obs = 0.1\n\n def plant_model(self,prev_state, dt, pedal, steering):\n x_t = prev_state[0]\n y_t = prev_state[1]\n psi_t = prev_state[2]\n v_t = prev_state[3]\n x_t_1 = x_t + v_t*np.cos(psi_t)*dt\n y_t_1 = y_t + v_t*np.sin(psi_t)*dt\n a_t = pedal\n v_t_1 = v_t + a_t*dt - v_t/25\n psi_t_1 = psi_t + v_t*np.tan(steering)/2.5*dt\n return [x_t_1, y_t_1, psi_t_1, v_t_1]\n\n\n def cost_function(self,u,*args):\n state = args[0]\n ref = args[1]\n cost = 0\n # for k in range(self.horizon):\n # state = self.plant_model(state,self.dt,u[2*k],u[k+1])\n # cost += abs(ref[0]-state[0])**2\n # cost += abs(ref[1]-state[1] - 0.3)**2\n # cost += abs(ref[2]-state[2])**2\n # if(state[0]== 4.0 and state[1] == 0.1):\n # cost = state[3]*100\n # return cost\n\n\n #cost function from the solution\n for k in range(self.horizon):\n state = self.plant_model(state,self.dt,u[2*k],u[2*k+1])\n cost += abs(ref[0]-state[0])**2\n cost += abs(ref[1]-state[1])**2\n cost += abs(ref[2]-state[2])**2\n cost+=self.obstacle_cost(state[0],state[1])\n return cost\n\n def obstacle_cost(self,x,y):\n dist = (x-self.x_obs)**2 + (y-self.y_obs)**2\n dist = np.sqrt(dist)\n if(dist>2):\n return 15\n else:\n return 1/dist*30 \n\nsim_run(options, ModelPredictiveControl)\n" ]
[ [ "numpy.tan", "numpy.cos", "numpy.sqrt", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
src-d/tm-experiments
[ "031595582d66b0dababaf0b0963925aa7032b18b" ]
[ "tmexp/label.py" ]
[ "from argparse import ArgumentParser\nfrom collections import defaultdict\nfrom enum import Enum\nfrom functools import partial\nimport itertools\nimport os\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\n\nfrom .cli import CLIBuilder, register_command\nfrom .constants import DIFF_MODEL\nfrom .data import FileReducer, RepoMapping\nfrom .io_constants import (\n BOW_DIR,\n DOC_FILENAME,\n DOCWORD_FILENAME,\n LABELS_FILENAME,\n REF_FILENAME,\n TOPICS_DIR,\n VOCAB_FILENAME,\n WORDTOPIC_FILENAME,\n)\nfrom .reduce import (\n concat_reducer,\n diff_to_hall_reducer,\n last_ref_reducer,\n max_reducer,\n mean_reducer,\n median_reducer,\n)\nfrom .utils import (\n check_file_exists,\n check_range,\n check_remove,\n create_logger,\n load_refs_dict,\n)\n\n\ndef _define_parser(parser: ArgumentParser) -> None:\n cli_builder = CLIBuilder(parser)\n cli_builder.add_bow_arg(required=True)\n cli_builder.add_experiment_arg(required=True)\n cli_builder.add_force_arg()\n parser.add_argument(\n \"--mu\",\n help=\"Weights how discriminative we want the label to be relative to other\"\n \" topics , defaults to %(default)s.\",\n default=1.0,\n type=float,\n )\n parser.add_argument(\n \"--label-size\",\n help=\"Number of words in a label, defaults to %(default)s.\",\n default=2,\n type=int,\n )\n parser.add_argument(\n \"--min-prob\",\n help=\"Admissible words for a topic label must have a topic probability over \"\n \"this value, defaults to %(default)s.\",\n default=0.001,\n type=float,\n )\n parser.add_argument(\n \"--max-topics\",\n help=\"Admissible words for a topic label must be admissible for less then this\"\n \" amount of topics, defaults to %(default)s.\",\n default=10,\n type=int,\n )\n parser.add_argument(\n \"--no-smoothing\",\n help=\"To ignore words that don't cooccur with a given label rather then use \"\n \"Laplace smoothing on the joint word/label probabilty.\",\n dest=\"smoothing\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--context\",\n help=\"Context creation method.\",\n choices=list(Context),\n type=Context.from_string,\n required=True,\n )\n\n\nclass Context(Enum):\n last = partial(last_ref_reducer)\n max = partial(max_reducer)\n mean = partial(mean_reducer)\n median = partial(median_reducer)\n concat = partial(concat_reducer)\n hall = None\n\n def __str__(self) -> str:\n return self.name\n\n @staticmethod\n def from_string(s: str) -> \"Context\":\n try:\n return Context[s]\n except KeyError:\n raise ValueError()\n\n @property\n def reducer(self) -> Optional[FileReducer]:\n return self.value\n\n\n@register_command(parser_definer=_define_parser)\ndef label(\n bow_name: str,\n exp_name: str,\n force: bool,\n log_level: str,\n mu: float,\n label_size: int,\n min_prob: float,\n max_topics: int,\n smoothing: bool,\n context: Context,\n) -> None:\n \"\"\"Infer a label for each topic automatically given a topic model.\"\"\"\n logger = create_logger(log_level, __name__)\n input_dir_bow = os.path.join(BOW_DIR, bow_name)\n doc_input_path = os.path.join(input_dir_bow, DOC_FILENAME)\n check_file_exists(doc_input_path)\n docword_input_path = os.path.join(input_dir_bow, DOCWORD_FILENAME)\n check_file_exists(docword_input_path)\n refs_input_path = os.path.join(input_dir_bow, REF_FILENAME)\n check_file_exists(refs_input_path)\n vocab_input_path = os.path.join(input_dir_bow, VOCAB_FILENAME)\n check_file_exists(vocab_input_path)\n\n dir_exp = os.path.join(TOPICS_DIR, bow_name, exp_name)\n wordtopic_input_path = os.path.join(dir_exp, WORDTOPIC_FILENAME)\n 
check_file_exists(wordtopic_input_path)\n\n labels_output_path = os.path.join(dir_exp, LABELS_FILENAME)\n check_remove(labels_output_path, logger, force)\n\n check_range(min_prob, \"min-prob\")\n\n refs_dict = load_refs_dict(logger, refs_input_path)\n\n logger.info(\"Loading word index ...\")\n with open(vocab_input_path, \"r\", encoding=\"utf-8\") as fin:\n word_index: Dict[int, str] = {\n i: word.replace(\"\\n\", \"\") for i, word in enumerate(fin)\n }\n num_words = len(word_index)\n logger.info(\"Loaded word index, found %d words.\", num_words)\n\n repo_mapping = RepoMapping()\n repo_mapping.build(logger, doc_input_path)\n corpus = repo_mapping.create_corpus(logger, docword_input_path)\n if repo_mapping.topic_model == DIFF_MODEL:\n logger.info(\"Recreating hall model corpus (we can't use delta-documents) ...\")\n corpus = repo_mapping.reduce_corpus(\n corpus, logger, refs_dict, diff_to_hall_reducer\n )\n num_docs = corpus.shape[0]\n logger.info(\"Recreated hall model corpus, found %d documents ...\", num_docs)\n\n if context.reducer is not None:\n logger.info(\"Creating %s context ...\", str(context))\n corpus = repo_mapping.reduce_corpus(corpus, logger, refs_dict, context.reducer)\n num_docs = corpus.shape[0]\n logger.info(\"Created context, found %d documents ...\", num_docs)\n\n logger.info(\"Loading word topic distributions ...\")\n topic_words = np.load(wordtopic_input_path)\n num_topics = topic_words.shape[0]\n logger.info(\"Loaded distributions, found %d topics.\", num_topics)\n\n logger.info(\"Finding common words for each topic ...\")\n common_words = np.argwhere(np.sum(topic_words > min_prob, axis=0) > max_topics)\n mask = np.ones(num_words, dtype=bool)\n mask[common_words] = False\n logger.info(\n \"Found %d words with probability over %.4f for more then %d topics, \"\n \"they will not be considered for labels.\",\n len(common_words),\n min_prob,\n max_topics,\n )\n if len(common_words) == num_words:\n logger.info(\"All words were excluded, cannot infer label.\")\n return\n coeff = mu / (num_topics - 1)\n words_counts = np.sum(corpus, axis=0)\n logger.info(\"Inferring labels for each topic ...\")\n best_labels_per_topic: Dict[int, Dict[str, float]] = {}\n best_scores: Dict[str, float] = defaultdict(lambda: -np.inf)\n for cur_topic in range(num_topics):\n logger.info(\"Topic %d:\", cur_topic + 1)\n num_admissible = len(np.argwhere(topic_words[cur_topic] > min_prob).flatten())\n admissible_words = np.argwhere(\n topic_words[cur_topic, mask] > min_prob\n ).flatten()\n if not len(admissible_words):\n logger.info(\"No admissible words where found, cannot infer label.\")\n return\n logger.info(\n \"\\tFound %d words with probability over %.4f, %d remained after removing \"\n \"common words.\",\n num_admissible,\n min_prob,\n len(admissible_words),\n )\n candidates = []\n candidates_names = []\n candidates_counts: Union[List, np.array] = []\n candidates_sizes = []\n for candidate in itertools.combinations(admissible_words, label_size):\n if np.min(corpus[:, candidate], axis=1).any():\n candidates.append(candidate)\n candidates_names.append(\" \".join(word_index[w] for w in candidate))\n candidates_counts.append(np.prod(corpus[:, list(candidate)], axis=1))\n candidates_sizes.append(len(candidate))\n num_cand = len(candidates_names)\n if not num_cand:\n logger.info(\"No candidates where found, cannot infer label.\")\n return\n logger.info(\"\\tFound %d candidate labels, computing their scores ...\", num_cand)\n candidates_counts = np.array(candidates_counts)\n joint_counts = 
candidates_counts @ corpus\n candidates_counts = np.sum(candidates_counts, axis=1)\n if smoothing:\n joint_counts += 1\n else:\n inds = np.argwhere(joint_counts == 0)\n joint_counts[joint_counts == 0] = (\n candidates_counts[inds[:, 0]] * words_counts[inds[:, 1]]\n )\n for cand_ind, candidate in enumerate(candidates):\n joint_counts[cand_ind, list(candidate)] = candidates_counts[cand_ind]\n\n # denominator = constant term > so we use counts instead of probs to compute PMI\n\n pmi = np.log(\n joint_counts / (candidates_counts[:, None] @ words_counts[:, None].T)\n )\n topic_probs = np.copy(topic_words).T\n topic_probs[:, cur_topic] *= coeff + 1\n topic_probs[:, [t for t in range(num_topics) if t != cur_topic]] *= -coeff\n scores = {\n name: score\n for name, score in zip(candidates_names, np.sum(pmi @ topic_probs, axis=1))\n }\n logger.info(\"\\tTop 5 candidates:\")\n best_labels = sorted(scores, key=scores.get, reverse=True)[:num_topics]\n best_labels_per_topic[cur_topic] = {}\n for label in best_labels:\n if scores[label] > best_scores[label]:\n for topic in best_labels_per_topic:\n if label in best_labels_per_topic[topic]:\n best_labels_per_topic[topic].pop(label)\n best_labels_per_topic[cur_topic][label] = scores[label]\n best_scores[label] = scores[label]\n for i, label_name in enumerate(best_labels[:5]):\n logger.info(\"\\t\\t %d. %s : %.4f\", i + 1, label_name, scores[label_name])\n\n topic_labels: List[str] = []\n for cur_topic in range(num_topics):\n scores = best_labels_per_topic[cur_topic]\n topic_labels.append(sorted(scores, key=scores.get, reverse=True)[0])\n\n logger.info(\"Selected the following labels:\")\n for ind_label, label in enumerate(topic_labels):\n logger.info(\n \"\\tTopic %d : %s (score: %.4f)\", ind_label + 1, label, best_scores[label]\n )\n\n logger.info(\"Saving topic labels ...\")\n with open(labels_output_path, \"w\", encoding=\"utf-8\") as fout:\n fout.write(\"\\n\".join(label for label in topic_labels))\n logger.info(\"Saved topic labels in '%s'.\", labels_output_path)\n" ]
[ [ "numpy.log", "numpy.min", "numpy.ones", "numpy.argwhere", "numpy.copy", "numpy.load", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sinonchen13/Video-Person-ReID
[ "4b09ab300ea93a21cfd03fbf61a27f3d901336b9" ]
[ "test-all-ensemble.py" ]
[ "from __future__ import print_function, absolute_import\nimport os\nimport gc\nimport sys\nimport time\nimport math\nimport argparse\nimport os.path as osp\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\n\nimport models\nimport transforms.spatial_transforms as ST\nimport transforms.temporal_transforms as TT\nimport tools.data_manager as data_manager\nfrom tools.video_loader import VideoDataset\nfrom tools.utils import Logger\nfrom tools.eval_metrics import evaluate\n\nparser = argparse.ArgumentParser(description='Test using all frames')\n# Datasets\nparser.add_argument('--root', type=str, default='/media/sdb1/zzj/datasets')\nparser.add_argument('-d', '--dataset', type=str, default='mars',\n choices=data_manager.get_names())\nparser.add_argument('-j', '--workers', default=4, type=int)\nparser.add_argument('--height', type=int, default=256)\nparser.add_argument('--width', type=int, default=128)\nparser.add_argument('--test_sample_mode', type=str, default='test_all_sampled',#可视化时选择 rrs0\n help=\"test_all_sampled, rrs0\")\n# Augment\nparser.add_argument('--test_frames', default=4, type=int,\n help='frames per clip for test')\n# Architecture\nparser.add_argument('-a', '--arch', type=str, default='baseline',\n help=\"baseline, ame\")\n# Miscs\nparser.add_argument('--resume', type=str, default='./', metavar='PATH')\nparser.add_argument('--test_epochs', default=[240], nargs='+', type=int)\nparser.add_argument('--distance', type=str, default='cosine',\n help=\"euclidean or cosine\")\nparser.add_argument('--gpu', default='0, 1', type=str,\n help='gpu device ids for CUDA_VISIBLE_DEVICES')\n#Vis\nparser.add_argument('--vis', default=1, type=int,\n help='0 False, 1 True')\nargs = parser.parse_args()\n\n\ndef cam_add_distmat(distmat, q_camids, g_camids):\n max_dist = np.max(np.max(distmat))\n q_camids_list = q_camids.tolist()\n g_camids_list = g_camids.tolist()\n cams_equals = np.zeros((len(q_camids_list), len(g_camids_list)))\n for i in range(len(q_camids_list)):\n for j in range(len(g_camids_list)):\n if q_camids_list[i] == g_camids_list[j]:\n cams_equals[i][j] = max_dist\n distmat = distmat + cams_equals\n return distmat\n\n\ndef main():\n #vis\n if args.vis==1:\n args.test_sample_mode=\"rrs0\"\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n use_gpu = torch.cuda.is_available()\n #log\n sys.stdout = Logger(osp.join(args.resume, 'log_test_{}.txt'.format(args.test_sample_mode)))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n print(\"Initializing dataset {}\".format(args.dataset))\n dataset = data_manager.init_dataset(name=args.dataset, root=args.root)\n\n # Data augmentation\n spatial_transform_test = ST.Compose([\n ST.Scale((args.height, args.width), interpolation=3),\n ST.ToTensor(),\n ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n \n temporal_transform_test = TT.TemporalSample(mode=args.test_sample_mode, seq_len=args.test_frames)\n\n pin_memory = True if use_gpu else False\n\n queryloader = DataLoader(\n VideoDataset(dataset.query, spatial_transform=spatial_transform_test,\n temporal_transform=temporal_transform_test),\n batch_size=1, shuffle=False, num_workers=0,\n pin_memory=pin_memory, drop_last=False)\n\n galleryloader = DataLoader(\n VideoDataset(dataset.gallery, spatial_transform=spatial_transform_test,\n temporal_transform=temporal_transform_test),\n batch_size=1, shuffle=False, num_workers=0,\n pin_memory=pin_memory, drop_last=False)\n\n test(queryloader, 
galleryloader, use_gpu)\n\n\ndef test(queryloader, galleryloader, use_gpu):\n since = time.time()\n\n q_pids, q_camids = [], []\n for batch_idx, (vids, pids, camids) in enumerate(queryloader):\n if (batch_idx + 1) % 1000 == 0:\n print(\"{}/{}\".format(batch_idx+1, len(queryloader)))\n\n q_pids.extend(pids)\n q_camids.extend(camids)\n\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n g_pids, g_camids = [], []\n for batch_idx, (vids, pids, camids) in enumerate(galleryloader):\n if (batch_idx + 1) % 1000 == 0:\n print(\"{}/{}\".format(batch_idx+1, len(galleryloader)))\n\n g_pids.extend(pids)\n g_camids.extend(camids)\n\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n if args.dataset == 'mars':\n # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.\n g_pids = np.append(q_pids, g_pids)\n g_camids = np.append(q_camids, g_camids)\n\n time_elapsed = time.time() - since\n print('Extracting features complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n\n print(\"Computing distance matrix\")\n\n dist_list = [\"distmat_me.npy\",\"distmat_multiloss.npy\",\"distmat_coordatt.npy\",\"distmat_coordatt_me_multiloss.npy\"]\n \n for i in range(len(dist_list)):\n if i==0:\n distmat=np.load(dist_list[i]) \n else:\n distmat+=np.load(dist_list[i])\n \n re_distmat = cam_add_distmat(distmat, q_camids, g_camids)\n print(\"Computing CMC and mAP\")\n cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)\n print(\"ori Results ----------\")\n print('top1:{:.1%} top5:{:.1%} top10:{:.1%} mAP:{:.1%}'.format(\n cmc[0], cmc[4], cmc[9], mAP))\n print(\"------------------\")\n cmc, mAP = evaluate(re_distmat, q_pids, g_pids, q_camids, g_camids)\n print(\"re Results ----------\")\n print('top1:{:.1%} top5:{:.1%} top10:{:.1%} mAP:{:.1%}'.format(cmc[0], cmc[4], cmc[9], mAP))\n\n\n return cmc[0]\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.asarray", "numpy.max", "numpy.append", "torch.cuda.is_available", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toxa81/aiida_scripts
[ "756e04e0576726a3677de95d9186ce7159fedf6b" ]
[ "eos_v2/mod_eos.py" ]
[ "import numpy as np\n\n#=======================\n# Birch-Murnaghan EOS\n#=======================\ndef birch_murnaghan(V,E0,V0,B0,B01):\n r = (V0/V)**(2./3.)\n return (E0 + 9./16. * B0 * V0 * (r-1.)**2 * ( 2.+ (B01-4.)*(r-1.)))\n\n#=======================\n# function to fit the EOS\n#=======================\ndef fit_birch_murnaghan_params(volumes, energies):\n x = np.array(volumes)\n y = np.array(energies)\n from scipy.optimize import curve_fit\n\n b01 = 0.1\n b01min = 0\n perrmin = 100\n\n #while True:\n # params, covariance = curve_fit(birch_murnaghan,\n # xdata=x,\n # ydata=y,\n # p0=(y.min(), #E0\n # x.mean(), #V0\n # 0.1, #B0\n # b01, #B01\n # ),\n # sigma=None)\n # \n # if isinstance(covariance, np.ndarray):\n # perr = np.sqrt(np.diag(covariance))\n # if np.sum(perr) < perrmin: \n # perrmin = np.sum(perr)\n # b01min = b01\n # #print(\"b01, perr: \", b01, np.sum(perr))\n # #if perr[1] < 0.1: break\n\n\n # b01 += 0.1\n # if b01 > 10: break\n \n try:\n params, covariance = curve_fit(birch_murnaghan, xdata=x, ydata=y,\n p0=(y.min(), x.mean(), 0.0, 0.0),\n sigma=None)\n return params\n except:\n print(\"fit_birch_murnaghan_params: failed\")\n return None\n\ndef deltaE2(V, params1, params2):\n return (birch_murnaghan(V, *params1) - birch_murnaghan(V, *params2))**2\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hanklu2020/mabessa_F3DAS
[ "57b1bd1cb85d96567ad1044c216535ab3df88db3" ]
[ "sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point69/script_DoE69_meshing.py" ]
[ "# Abaqus/CAE script\n# Created by M.A. Bessa ([email protected]) on 12-Nov-2019 05:18:47\n#\nfrom abaqus import *\nfrom abaqusConstants import *\nsession.viewports['Viewport: 1'].makeCurrent()\n#session.viewports['Viewport: 1'].maximize()\nfrom caeModules import *\nfrom driverUtils import executeOnCaeStartup\nexecuteOnCaeStartup()\nMdb()\n#\nimport numpy\n\n#------------------------------------------------------------\nos.chdir(r'/home/gkus/F3DAS-master/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point69')\n#\n#-------------------------------------------------------------\n# Parameters:\nVertexPolygon = 3 # Number of vertices (sides) of the polygon base\npower = 1.00000e+00 # Power law exponent establishing the evolution of the spacing between battens\nMastDiameter = 1.00000e+02 # Radius of the circumscribing circle of the polygon\nnStories = 1 # Number of stories in HALF of the strut (i.e. in a single AstroMast!)\nMastPitch = 1.27144e+02 # Pitch length of the strut (i.e. a single AstroMast!)\npinned_joints = 1 # (1 = batten are pinned to longerons, 0 = battens and longerons are a solid piece)\nLongeron_CS = 1.00003e+01 # (Cross Section of the longeron)\nIx = 3.28753e+01 # (Second moment of area around X axis )\nIy = 7.50000e+01 # (Second moment of area around Y axis )\nJ = 2.50000e+02 # (Second moment of area around X axis )\nEmodulus = 1.82600e+03 # (Youngus Modulus)\nGmodulus = 6.57377e+02 # (Shear Modulus)\nnu = 3.88853e-01 # (Poisson Ratio)\nConeSlope = 5.00000e-01 # Slope of the longerons (0 = straight, <0 larger at the top, >0 larger at the bottom)\nTwist_angle = 0.00000e+00 # Do you want to twist the longerons?\ntransition_length_ratio = 1.00000e+00 # Transition zone for the longerons\n#------------------------------------------------------------\n\nMastRadius = MastDiameter/2.0\nMastHeight = nStories*MastPitch\n\nMesh_size = min(MastRadius,MastPitch)/300.0\n\nsession.viewports['Viewport: 1'].setValues(displayedObject=None)\n\n\n# Create all the joints of the a single Deployable Mast:\njoints = numpy.zeros((nStories+1,VertexPolygon,3))\njoints_outter = numpy.zeros((nStories+1,VertexPolygon,3))\nfor iStorey in range(0,nStories+1,1):\n for iVertex in range(0,VertexPolygon,1):\n # Constant spacing between each storey (linear evolution):\n Zcoord = MastHeight/nStories*iStorey\n # Power-law spacing between each storey (more frequent at the fixed end):\n # Zcoord = MastHeight*(float(iStorey)/float(nStories))**power\n # Power-law spacing between each storey (more frequent at the rotating end):\n # Zcoord = -MastHeight/(float(nStories)**power)*(float(nStories-iStorey)**power)+MastHeight\n # Exponential spacing between each storey\n # Zcoord =(MastHeight+1.0)/exp(float(nStories))*exp(float(iStorey))\n #\n Xcoord = MastRadius*cos(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))\n Ycoord = MastRadius*sin(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))\n # Save point defining this joint:\n joints[iStorey,iVertex,:] = (Xcoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Ycoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Zcoord)\n #\n center = (0.0,0.0)\n vec = joints[iStorey,iVertex,0:2]-center\n norm_vec = numpy.linalg.norm(vec)\n joints_outter[iStorey,iVertex,2] = joints[iStorey,iVertex,2]\n joints_outter[iStorey,iVertex,0:2] = joints[iStorey,iVertex,0:2]\n # end iSide loop\n \n#end iStorey loop\n\n# Create the 
longerons:\n\np_longerons = mdb.models['Model-1'].Part(name='longerons', dimensionality=THREE_D, \n type=DEFORMABLE_BODY)\n\np_longerons = mdb.models['Model-1'].parts['longerons']\nsession.viewports['Viewport: 1'].setValues(displayedObject=p_longerons)\n\nd_longerons, r_longerons = p_longerons.datums, p_longerons.referencePoints\n\nLocalDatum_list = [] # List with local coordinate system for each longeron\nlong_midpoints = [] # List with midpoints of longerons (just to determine a set containing the longerons)\ne_long = p_longerons.edges\n\n\nfor iVertex in range(0,VertexPolygon,1):\n # First create local coordinate system (useful for future constraints, etc.):\n iStorey=0\n origin = joints[iStorey,iVertex,:]\n point2 = joints[iStorey,iVertex-1,:]\n name = 'Local_Datum_'+str(iVertex)\n LocalDatum_list.append(p_longerons.DatumCsysByThreePoints(origin=origin, point2=point2, name=name, \n coordSysType=CARTESIAN, point1=(0.0, 0.0, 0.0)))\n #\n # Then, create the longerons\n templist = [] # List that will contain the points used to make each longeron\n for iStorey in range(0,nStories+1,1):\n templist.append(joints[iStorey,iVertex,:])\n if iStorey != 0: # Save midpoints of bars\n long_midpoints.append( [(joints[iStorey-1,iVertex,:]+joints[iStorey,iVertex,:])/2 , ])\n # end if\n # end iStorey loop\n p_longerons.WirePolyLine(points=templist,\n mergeType=IMPRINT, meshable=ON)\n # Create set for each longeron (to assign local beam directions)\n for i in range(0,len(templist)): # loop over longerons edges\n if i == 0:\n select_edges = e_long.findAt([templist[0], ]) # Find the first edge\n else:\n # Now find remaining edges in longerons\n temp = e_long.findAt([templist[i], ])\n select_edges = select_edges + temp\n #end if\n #end i loop\n longeron_name = 'longeron-'+str(iVertex)+'_set'\n p_longerons.Set(edges=select_edges, name=longeron_name)\n\n#end for iVertex loop\n\n# Longerons set:\ne_long = p_longerons.edges\nselect_edges = []\nfor i in range(0,len(long_midpoints)): # loop over longerons edges\n if i == 0:\n select_edges = e_long.findAt(long_midpoints[0]) # Find the first edge\n else:\n # Now find remaining edges in longerons\n temp = e_long.findAt(long_midpoints[i])\n select_edges = select_edges + temp\n #end if\n\n#end i loop\n\np_longerons.Set(edges=select_edges, name='all_longerons_set')\nall_longerons_set_edges = select_edges\n\np_longerons.Surface(circumEdges=all_longerons_set_edges, name='all_longerons_surface')\n\n\n# Create a set with all the joints:\nv_long = p_longerons.vertices\nselect_vertices = []\nselect_top_vertices = []\nselect_bot_vertices = []\nfor iStorey in range(0,nStories+1,1):\n for iVertex in range(0,VertexPolygon,1):\n # Select all the joints in the longerons:\n current_joint = v_long.findAt( [joints[iStorey,iVertex,:] , ] ) # Find the first vertex\n current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)\n # Create a set for each joint:\n p_longerons.Set(vertices=current_joint, name=current_joint_name)\n #\n if iStorey == 0 and iVertex == 0:\n select_vertices = current_joint # Instantiate the first point in set\n else:\n select_vertices = select_vertices + current_joint # Instantiate the first point in set\n # endif iStorey == 0 and iVertex == 0\n #\n if iStorey == 0: # Also save the bottom nodes separately\n if iVertex == 0:\n # Start selecting the bottom joints for implementing the boundary conditions\n select_bot_vertices = current_joint\n else:\n select_bot_vertices = select_bot_vertices + current_joint\n # endif iStorey == 0:\n elif iStorey == nStories: # Also 
save the top nodes separately\n if iVertex == 0:\n # Start selecting the top joints for implementing the boundary conditions\n select_top_vertices = current_joint\n else: # remaining vertices:\n select_top_vertices = select_top_vertices + current_joint\n #end if\n #end iVertex loop\n\n#end iStorey loop\n\np_longerons.Set(vertices=select_vertices, name='all_joints_set')\np_longerons.Set(vertices=select_bot_vertices, name='bot_joints_set')\np_longerons.Set(vertices=select_top_vertices, name='top_joints_set')\n\n#\n# Create materials:\nmdb.models['Model-1'].Material(name='NiTi_alloy')\nmdb.models['Model-1'].materials['NiTi_alloy'].Elastic(table=((83.0E3, 0.31), \n ))\nmdb.models['Model-1'].materials['NiTi_alloy'].Density(table=((1.0E-3, ), ))\n\nmdb.models['Model-1'].Material(name='PC')\nmdb.models['Model-1'].materials['PC'].Elastic(table=((2134, 0.27), \n ))\nmdb.models['Model-1'].materials['PC'].Density(table=((1.19E-3, ), ))\n\nmdb.models['Model-1'].Material(name='PLA')\nmdb.models['Model-1'].materials['PLA'].Elastic(table=((Emodulus, nu), \n ))\nmdb.models['Model-1'].materials['PLA'].Density(table=((1.24E-3, ), ))\n\nmdb.models['Model-1'].Material(name='CNT')\nmdb.models['Model-1'].materials['CNT'].Elastic(table=((1000.0E3, 0.3), \n ))\nmdb.models['Model-1'].materials['CNT'].Density(table=((1.0E-3, ), ))\n\n# Create beam profiles and beam sections:\nmdb.models['Model-1'].GeneralizedProfile(name='LongeronsProfile', area=Longeron_CS, i11=Ix, i12=0.0, i22=Iy, j=J, gammaO=0.0, gammaW=0.0)\n\nmdb.models['Model-1'].BeamSection(name='LongeronsSection', integration=\n BEFORE_ANALYSIS, poissonRatio=0.31, beamShape=CONSTANT, \n profile='LongeronsProfile', density=0.00124, thermalExpansion=OFF, \n temperatureDependency=OFF, dependencies=0, table=((Emodulus, Gmodulus), ), \n alphaDamping=0.0, betaDamping=0.0, compositeDamping=0.0, centroid=(0.0, \n 0.0), shearCenter=(0.0, 0.0), consistentMassMatrix=False)\n\n# Assign respective sections:\np_longerons.SectionAssignment(offset=0.0, \n offsetField='', offsetType=MIDDLE_SURFACE, region=\n p_longerons.sets['all_longerons_set'], \n sectionName='LongeronsSection', thicknessAssignment=FROM_SECTION)\n\n# Assing beam orientation:\nfor iVertex in range(0,VertexPolygon,1):\n iStorey=0\n dir_vec_n1 = joints[iStorey,iVertex,:]-(0.,0.,0.) 
# Vector n1 perpendicular to the longeron tangent\n longeron_name = 'longeron-'+str(iVertex)+'_set'\n region=p_longerons.sets[longeron_name]\n p_longerons.assignBeamSectionOrientation(region=region, method=N1_COSINES, n1=dir_vec_n1)\n\n#end for iVertex\n#\n\ndelta = Mesh_size/100.0\n########################################################################\n#Mesh the structure\n\n#refPlane = p_longerons.DatumPlaneByPrincipalPlane(principalPlane=XYPLANE, offset=L/2)\n#d = p.datums\n#All_faces = facesLeafs+facesDoubleThickBoom\n#p.PartitionFaceByDatumPlane(datumPlane=d[refPlane.id], faces=All_faces)\n##\n#session.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF\n# engineeringFeatures=OFF, mesh=ON)\n#session.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(\n# meshTechnique=ON)\n#p = mdb.models['Model-1'].parts['reducedCF_TRAC_boom']\n\np_longerons.seedPart(size=Mesh_size, deviationFactor=0.04, minSizeFactor=0.001,\n constraint=FINER)\np_longerons.seedEdgeBySize(edges=all_longerons_set_edges, size=Mesh_size, deviationFactor=0.04,\n constraint=FINER)\nelemType_longerons = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD) # Element type\np_longerons.setElementType(regions=(all_longerons_set_edges, ), elemTypes=(elemType_longerons, ))\np_longerons.generateMesh()\n\n#######################################################################\n\n# Make Analytical surfaces for contact purposes\ns1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__', \n sheetSize=MastRadius*3.0)\ng, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints\ns1.setPrimaryObject(option=STANDALONE)\ns1.Line(point1=(0.0, -MastRadius*1.1), point2=(0.0, MastRadius*1.1))\ns1.VerticalConstraint(entity=g[2], addUndoState=False)\np_surf = mdb.models['Model-1'].Part(name='AnalyticSurf', dimensionality=THREE_D, \n type=ANALYTIC_RIGID_SURFACE)\np_surf = mdb.models['Model-1'].parts['AnalyticSurf']\np_surf.AnalyticRigidSurfExtrude(sketch=s1, depth=MastRadius*2.2)\ns1.unsetPrimaryObject()\n\nrigid_face = p_surf.faces\n#surf_select = f.findAt((0.0,MastRadius*1.05,0.0))\n#surf_select = f[0]\np_surf.Surface(side1Faces=rigid_face, name='rigid_support')\n#p_surf.Set(faces=surf_select, name='support_surface_set')\n#p_surf.sets['all_diagonals_set']\n\n#\n# Make assembly:\na = mdb.models['Model-1'].rootAssembly\na.DatumCsysByDefault(CARTESIAN)\n# Create reference points to assign boundary conditions\nRP_ZmYmXm = a.ReferencePoint(point=(0.0, 0.0, -1.1*MastRadius))\nrefpoint_ZmYmXm = (a.referencePoints[RP_ZmYmXm.id],)\na.Set(referencePoints=refpoint_ZmYmXm, name='RP_ZmYmXm')\n#\nRP_ZpYmXm = a.ReferencePoint(point=(0.0, 0.0, MastHeight+1.1*MastRadius))\nrefpoint_ZpYmXm = (a.referencePoints[RP_ZpYmXm.id],)\na.Set(referencePoints=refpoint_ZpYmXm, name='RP_ZpYmXm')\n#\n# Create longerons\na_long = a.Instance(name='longerons-1-1', part=p_longerons, dependent=ON)\n# Create bottom surface\na_surf_bot = a.Instance(name='AnalyticSurf-1-1', part=p_surf, dependent=ON)\n# Now rotate the plane to have the proper direction\na.rotate(instanceList=('AnalyticSurf-1-1', ), axisPoint=(0.0, 0.0, 0.0), \n axisDirection=(0.0, 1.0, 0.0), angle=90.0)\n#\n# Create set with surface\nselect_bot_surf=a_surf_bot.surfaces['rigid_support']\n# Perhaps we need to define a set instead of a face\n#AnalyticSurf_surface=a_surf_bot.Surface(side1Faces=select_bot_surf, name='support_surf_bot-1')\nmdb.models['Model-1'].RigidBody(name='Constraint-RigidBody_surf_bot-1', refPointRegion=refpoint_ZmYmXm, \n surfaceRegion=select_bot_surf)\nfor 
iVertex in range(0,VertexPolygon,1):\n #\n # Select appropriate coordinate system:\n DatumID = LocalDatum_list[iVertex].id\n datum = a_long.datums[DatumID]\n for iStorey in range(0,nStories+1,1):\n # Current joint:\n current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)\n # Define COUPLING constraints for all the joints:\n if iStorey == 0: # Bottom base:\n #\n master_region=a.sets['RP_ZmYmXm'] # Note that the master is the Reference Point\n #\n slave_region=a_long.sets[current_joint_name]\n # Make constraint for this joint:\n Constraint_name = 'RP_ZmYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)\n mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region, \n surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, \n localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)\n #\n #Constraint_name = 'RP_ZmYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)\n #mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region, \n # surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, \n # localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)\n # Make constraint for this joint:\n elif iStorey == nStories: # Top base:\n #\n master_region=a.sets['RP_ZpYmXm'] # Note that the master is the Reference Point\n #\n slave_region=a_long.sets[current_joint_name]\n # Make constraint for this joint:\n Constraint_name = 'RP_ZpYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)\n mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region, \n surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, \n localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)\n #\n #Constraint_name = 'RP_ZpYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)\n #mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region, \n # surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC, \n # localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)\n # Make constraint for this joint:\n else: # Middle stories:\n master_region=a_long.sets[current_joint_name]\n #\n slave_region=a_bat.sets[current_joint_name]\n # Make constraint for this joint:\n #endif iStorey\n #\n #end for iStorey\n\n#end for iVertex\n\n\n#\n\n# Create hinges:\n#select_joints=a.instances['deployable_mast-1'].sets['all_joints_set']\n#select_RefPoint=a.sets['RP_joints']\n#mdb.models['Model-1'].RigidBody(name='JointsContraint', refPointRegion=select_RefPoint, \n# pinRegion=select_joints)\n\n#\n# Export mesh to .inp file\n#\nmdb.Job(name='include_mesh_DoE69', model='Model-1', type=ANALYSIS, explicitPrecision=SINGLE,\n nodalOutputPrecision=SINGLE, description='',\n parallelizationMethodExplicit=DOMAIN, multiprocessingMode=DEFAULT,\n numDomains=1, userSubroutine='', numCpus=1, memory=90,\n memoryUnits=PERCENTAGE, scratch='', echoPrint=OFF, modelPrint=OFF,\n contactPrint=OFF, historyPrint=OFF)\nimport os\nmdb.jobs['include_mesh_DoE69'].writeInput(consistencyChecking=OFF)\n# End of python script\n\n" ]
[ [ "numpy.zeros", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
vanstrn/RL_public
[ "0e971e40e063b17918460e19728f95d7924af8db" ]
[ "SampleSelection.py" ]
[ "\nimport itertools\nimport numpy as np\nfrom scipy.spatial import ConvexHull\nfrom random import randint\nimport matplotlib.pyplot as plt\n\ndef SampleSelection_v1(setOfPoints,nSamples,returnIndicies=False,nTrials=200):\n \"\"\"Randomly selecting samples to use for SF analysis. Checks for repetition in the sample space. \"\"\"\n nPoints = len(setOfPoints)\n maxDist=0\n for trial in range(nTrials):\n indicies = []\n sampleSet = []\n while len(indicies) < nSamples:\n x = randint(0,nPoints-1)\n if x in indicies:\n continue\n if x in setOfPoints:\n continue\n indicies.append(x)\n sampleSet.append(setOfPoints[x])\n dist = TotalAverageDistance(sampleSet)\n if dist >= maxDist:\n maxDist=dist\n bestPoints = sampleSet.copy()\n bestIndicies = indicies.copy()\n\n\n if returnIndicies:\n return bestIndicies\n return bestPoints\n\ndef TotalAverageDistance(setOfPoints):\n dist = 0\n for i in range(len(setOfPoints)):\n for j in range(i+1,len(setOfPoints)):\n dist+=np.linalg.norm(setOfPoints[i]-setOfPoints[j])\n return dist\ndef arreqclose_in_list(myarr, list_arrays):\n return next((True for elem in list_arrays if elem.size == myarr.size and np.allclose(elem, myarr,atol=1E-6)), False)\n\ndef SampleSelection_v2(setOfPoints,nSamples,returnIndicies=False, nTrials=10, debug=False):\n \"\"\"Using Convex Hull to select boundary points. Filling the rest by performing random selections \"\"\"\n nPoints = setOfPoints.shape[0]\n hull = ConvexHull(setOfPoints)\n indicies = hull.vertices.tolist()\n boundaryPoints = [];removeIndicies=[]\n for idx in indicies:\n if not arreqclose_in_list(setOfPoints[idx],boundaryPoints):\n boundaryPoints.append(setOfPoints[idx])\n else:\n removeIndicies.append(idx)\n for idx in removeIndicies:\n indicies.remove(idx)\n if debug:print(\"Finished Calculating Convex Hull of the set. Number of boundary points \" + str(len(boundaryPoints)))\n\n if len(indicies) >= nSamples: #Perform prunning operation\n #Removing the entry that lowers the entropy the least\n while len(indicies) != nSamples:\n worstDist=0\n for i in range(len(boundaryPoints)):\n dist = TotalAverageDistance(boundaryPoints.copy().pop(i))\n if dist > worstDist:\n worstDist=dist\n idx = i\n boundaryPoints.pop(idx)\n indicies.pop(idx)\n if returnIndicies:\n return indicies\n return boundaryPoints\n else:\n\n maxDist = 0\n for trial in range(nTrials):\n if debug:print(\"Begining sampling trial \" + str(trial))\n\n points = boundaryPoints.copy()\n idx = indicies.copy()\n while len(points) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx:\n continue\n if arreqclose_in_list(setOfPoints[x],points):\n continue\n idx = np.append(idx,x)\n points.append(setOfPoints[x])\n dist = TotalAverageDistance(points)\n if dist >= maxDist:\n maxDist=dist\n bestPoints = points.copy()\n bestIndicies = idx.copy()\n if debug: print(maxDist,len(bestPoints),len(bestIndicies))\n if returnIndicies:\n return bestIndicies\n return bestPoints\n\ndef SampleSelection_v3(setOfPoints,nSamples,returnIndicies=False, nTrials=10, debug=False):\n \"\"\"Separating into clusters. Using Convex Hull to select boundary points. 
Filling the rest by performing random selections \"\"\"\n # from sklearn.mixture import GaussianMixture\n # model = GaussianMixture(n_components=4)\n # model.fit(setOfPoints)\n # yhat =model.predict(setOfPoints)\n nPoints = setOfPoints.shape[0]\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n data = scaler.fit_transform(setOfPoints)\n from sklearn.cluster import DBSCAN\n model = DBSCAN(eps=0.1, min_samples=10)\n yhat = model.fit_predict(data)\n clusters=np.unique(yhat)\n\n\n Gindicies = [];GboundaryPoints=[]\n for cluster in clusters:\n row_ix = np.where(yhat==cluster)\n clusterPoints = np.squeeze(setOfPoints[row_ix,:])\n if np.unique(clusterPoints,axis=0).shape[0] < 3:\n GboundaryPoints.append(setOfPoints[row_ix[0][0]])\n Gindicies.append(row_ix[0][0])\n continue\n hull = ConvexHull(clusterPoints)\n indicies = hull.vertices.tolist()\n boundaryPoints = [];removeIndicies=[]\n for idx in indicies:\n if not arreqclose_in_list(setOfPoints[row_ix[0][idx]],GboundaryPoints):\n GboundaryPoints.append(setOfPoints[row_ix[0][idx]])\n else:\n removeIndicies.append(idx)\n for idx in removeIndicies:\n indicies.remove(idx)\n for idx in indicies:\n Gindicies.append(row_ix[0][idx])\n if debug:print(\"Finished Calculating Convex Hull of the set. Number of boundary points \" + str(len(GboundaryPoints)))\n\n if len(Gindicies) >= nSamples: #Perform prunning operation\n #Removing the entry that lowers the entropy the least\n while len(Gindicies) != nSamples:\n worstDist=0\n for i in range(len(GboundaryPoints)):\n dist = TotalAverageDistance(GboundaryPoints.copy().pop(i))\n if dist > worstDist:\n worstDist=dist\n idx = i\n GboundaryPoints.pop(idx)\n Gindicies.pop(idx)\n if returnIndicies:\n return Gindicies\n return GboundaryPoints\n else:\n\n maxDist = 0\n for trial in range(nTrials):\n if debug:print(\"Begining sampling trial \" + str(trial))\n\n points = GboundaryPoints.copy()\n idx = Gindicies.copy()\n while len(points) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx:\n continue\n if arreqclose_in_list(setOfPoints[x],points):\n continue\n idx = np.append(idx,x)\n points.append(setOfPoints[x])\n dist = TotalAverageDistance(points)\n if dist >= maxDist:\n maxDist=dist\n bestPoints = points.copy()\n bestIndicies = idx.copy()\n if debug: print(maxDist,len(bestPoints),len(bestIndicies))\n if returnIndicies:\n return bestIndicies\n return bestPoints\n\ndef SimulatedAnnealingSampling(setOfPoints,nSamples,returnIndicies=False, nIterations=500):\n import random\n \"\"\"Simulated annealing method to select the set of point with the highest entropy in the syste,\n \"\"\"\n nPoints = setOfPoints.shape[0]\n\n points=[]\n idx=[]\n while len(points) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx:\n continue\n if arreqclose_in_list(setOfPoints[x],points):\n continue\n idx.append(x)\n points.append(setOfPoints[x])\n dist = TotalAverageDistance(points)\n\n for i in range(nIterations):\n points_ = points.copy()\n idx_ = idx.copy()\n T = nIterations/(i+1)\n # moving points based on entropy metric\n for i in range(int(nSamples/8)):\n x = randint(0,len(idx_)-1)\n points_.pop(x)\n idx_.pop(x)\n while len(points_) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx_:\n continue\n if arreqclose_in_list(setOfPoints[x],points_):\n continue\n idx_.append(x)\n points_.append(setOfPoints[x])\n\n #Compute the Entrpy Metric.\n dist_ = TotalAverageDistance(points_)\n #Compare if the entropy is within acceptable limits to switch\n prob = np.exp(-(dist-dist_)/T)\n # print(prob)\n if random.uniform(0, 1) 
< prob:\n points = points_.copy()\n idx = idx_.copy()\n dist = dist_\n if returnIndicies:\n return idx\n return points\ndef SimulatedAnnealingSampling2(setOfPoints,nSamples,returnIndicies=False, nIterations=500):\n import random\n \"\"\"Simulated annealing method to select the set of point with the highest entropy in the syste,\n \"\"\"\n nPoints = setOfPoints.shape[0]\n\n points=[]\n idx=[]\n while len(points) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx:\n continue\n if arreqclose_in_list(setOfPoints[x],points):\n continue\n idx.append(x)\n points.append(setOfPoints[x])\n dist = TotalAverageDistance(points)\n\n for i in range(nIterations):\n points_ = points.copy()\n idx_ = idx.copy()\n T = nIterations/(i+1)\n # moving points based on entropy metric\n for i in range(int(nSamples/8)):\n x = randint(0,len(idx_)-1)\n points_.pop(x)\n idx_.pop(x)\n while len(points_) < nSamples:\n x = randint(0,nPoints-1)\n if x in idx_:\n continue\n if arreqclose_in_list(setOfPoints[x],points_):\n continue\n idx_.append(x)\n points_.append(setOfPoints[x])\n\n #Compute the Entrpy Metric.\n dist_ = TotalAverageDistance(points_)\n #Compare if the entropy is within acceptable limits to switch\n prob = np.exp(-(dist-dist_)/T)\n # print(prob)\n if 1 < prob:\n points = points_.copy()\n idx = idx_.copy()\n dist = dist_\n if returnIndicies:\n return idx\n return points\n\n\ndef CreateTestSet(nPoints,nDim):\n set = np.random.rand(nPoints,nDim)\n return set\n\nif __name__ == \"__main__\":\n set = CreateTestSet(400,3)\n # points1 = SampleSelection_v1(set,50)\n # print(TotalAverageDistance(points1))\n # points2 = SampleSelection_v2(set,50)\n # print(TotalAverageDistance(points2))\n # points3 = SampleSelection_v3(set,50)\n # print(TotalAverageDistance(points3))\n points4 = SimulatedAnnealingSampling(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling2(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling2(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling2(set,50)\n print(TotalAverageDistance(points4))\n points4 = SimulatedAnnealingSampling2(set,50)\n print(TotalAverageDistance(points4))\n #\n # plt.scatter(np.stack(set)[:,0],np.stack(set)[:,1],color='b')\n # plt.scatter(np.stack(points1)[:,0],np.stack(points1)[:,1],color='r')\n # plt.show()\n # plt.scatter(np.stack(set)[:,0],np.stack(set)[:,1],color='b')\n # plt.scatter(np.stack(points2)[:,0],np.stack(points2)[:,1],color='r')\n # plt.show()\n # plt.scatter(np.stack(set)[:,0],np.stack(set)[:,1],color='b')\n # plt.scatter(np.stack(points4)[:,0],np.stack(points4)[:,1],color='r')\n # plt.show()\n" ]
[ [ "numpy.allclose", "numpy.unique", "numpy.squeeze", "numpy.linalg.norm", "sklearn.cluster.DBSCAN", "numpy.append", "numpy.random.rand", "scipy.spatial.ConvexHull", "numpy.exp", "numpy.where", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
STU-IT/trafik_cv
[ "f3d4285e9e57decc3bda1f6f1964a16759f8b5d0" ]
[ "trafik_Projeckt/ass.py" ]
[ "import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('biler/rød-bil-fra-venstre.mp4')\n\n# take first frame of the video\nfor i in range(10):\n ret,frame = cap.read()\n\n# setup initial location of window\nr,h,c,w = 300,90,0,125 # simply hardcoded the values\ntrack_window = (c,r,w,h)\n\n# set up the ROI for tracking\nroi = frame[r:r+h, c:c+w]\nhsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)# 180 255 255\nmask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((150.,100.,150.)))\nroi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])\ncv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)\n\n# Setup the termination criteria, either 10 iteration or move by atleast 1 pt\nterm_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )\n\nwhile(1):\n ret ,frame = cap.read()\n #frame = cv2.cvtColor(frame, cv2.COLOR_BGRGRAY)\n if ret == True:\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)\n\n # apply meanshift to get the new location\n ret, track_window = cv2.CamShift(dst, track_window, term_crit)\n\n # Draw it on image\n pts = cv2.boxPoints(ret)\n pts = np.int0(pts)\n \n img2 = cv2.polylines(dst,[pts],True, 255,2)\n #img2 = cv2.polylines(frame,[pts],True, 255,2)\n cv2.imshow('img2',img2)\n \n k = cv2.waitKey(60) & 0xff\n if k == 27:\n break\n else:\n cv2.imwrite(chr(k)+\".jpg\",img2)\n\n else:\n break\n\ncv2.destroyAllWindows()\ncap.release()\n" ]
[ [ "numpy.int0", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amjames/optrotvib
[ "a2bce7d4a69ce2cdc57078ab0f1104787bd2e12b" ]
[ "optrotvib/engine/g09_engine.py" ]
[ "import sys\nimport re\nimport os\nfrom pathlib import Path\nimport subprocess as sp\nimport numpy as np\nimport traceback\n\nfrom . import cpu_info\n\n\nfrom victor.constants import physconst\nfrom victor import util\n\n# Collection helpers\ndef _array_from_fchk(fchk_text, array_name):\n matcher = re.compile(r'\\A(?P<title>{})\\s+R\\s+N=\\s+(?P<nele>\\d+)\\Z'.format(array_name))\n fchk_lines = fchk_text.split('\\n')\n start_line = 0\n nline = 0\n found_match = False\n for i, line in enumerate(fchk_lines):\n match = matcher.match(line)\n if match is not None:\n found_match = True\n start_line = i +1\n nline = int(match.group('nele'))//5 + (1 * bool(int(match.group('nele'))%5))\n if found_match:\n data = np.array([float(x) for x in \" \".join(fchk_lines[start_line:start_line+nline]).split()])\n return data\n else:\n return None\n\ndef compute_rotation(w_au, rot_tensor, mw):\n hbar = physconst['h'] / (2*np.pi)\n prefactor = -72e6 * (hbar**2) * physconst['na'] / physconst['c']**2 / physconst['me']**2\n return prefactor * (w_au**2) * np.trace(rot_tensor) / mw / 3.0\n\ndef collect_rotations(fchk_text, prog_options):\n mw = np.sum(_array_from_fchk(fchk_text, array_name=\"Real atomic weights\"))\n rots = []\n if prog_options is not None:\n wls = prog_options.get('omega')\n if wls:\n # strip off the unit\n if isinstance(wls[-1], str):\n wls = wls[:-1]\n # better than doing one additional conversion just use the values in the fchk file\n au_freqs = _array_from_fchk(fchk_text, array_name=\"Frequencies for FD properties\")\n # sort wls in acend. order, then swap so they are in aced energy order (same as freqs from fchk)\n wls = list(sorted(wls))\n wls.reverse()\n if (au_freqs is not None) and len(au_freqs) == len(wls):\n all_rot_tensors = _array_from_fchk(fchk_text, array_name=\"FD Optical Rotation Tensor\").reshape(-1, 9)\n for w_au, w_nm, rot_tensor in zip(au_freqs, wls, all_rot_tensors):\n val = compute_rotation(w_au, rot_tensor.reshape(3,3), mw)\n rots.append({'value': val, 'wavelength': w_nm, 'gauge': 'GIAO'})\n return rots\n\ndef collect_hessian(fchk_text, natom):\n hessian_data = _array_from_fchk(fchk_text, array_name=\"Cartesian Force Constants\")\n full_data = np.zeros((3*natom, 3*natom))\n test_data = np.zeros((3*natom, 3*natom))\n ut_index = np.triu_indices_from(full_data)\n lt_index = np.tril_indices_from(full_data)\n full_data[ut_index] = hessian_data\n full_data[lt_index] = hessian_data\n if np.allclose(test_data, full_data):\n return None\n else:\n return full_data\n\ndef collect_gradient(fchk_text):\n grad_data = _array_from_fchk(fchk_text, array_name=\"Cartesian Gradient\")\n if np.allclose(grad_data, np.zeros_like(grad_data)):\n return None\n else:\n return grad_data\n\n# execution steps\n\ndef exe_g09():\n sp.run('g09 input.com output.log', check=True, shell=True, env=os.environ)\n\ndef exe_fchk():\n sp.run(['formchk','-3','vices.chk','vices.fchk'], env=os.environ)\n\n# input helpers\ndef _link_header(n):\n ret = []\n ret.append(\"--Link{}--\".format(n))\n ret.append(\"%chk=vices\")\n ret.append(\"%mem={}mb\".format(cpu_info.memory()))\n ret.append(\"%nproc={}\".format(cpu_info.ncore()))\n return ret\n\ndef _geometry_sec(input_json):\n mol_json = input_json.get('molecule')\n geom_array = mol_json.get('geometry')\n ret = []\n ret.append(\"{} {}\".format(int(mol_json.get('charge')), mol_json.get('multiplicity')))\n for i_atom, symb in enumerate(mol_json.get('symbols')):\n ret.append(\"{} {:20.12f} {:20.12f} {:20.12f}\".format(symb,\n geom_array[3*i_atom] * 
physconst['bohr2angstroms'],\n geom_array[3*i_atom+1] * physconst['bohr2angstroms'],\n geom_array[3*i_atom+2] * physconst['bohr2angstroms']))\n ret.append('')\n return ret\n\ndef _title(driver, mol_name, modelchem_name):\n # blank line after title\n if mol_name is None:\n mol_name = \"(no name)\"\n if modelchem_name is None:\n modelchem_name = '(no name)'\n return ['{} mol {} mc {}'.format(driver,mol_name, modelchem_name), '']\n\ndef _rotation_wls(omegas):\n return ['{}nm'.format(x) for x in omegas]\n\ndef _end_input():\n return [\"\\n\",\"\\n\", \"\\n\"]\n\ndef _route(driver,input_json):\n route_line = [\"#P\"]\n if driver == 'gradient':\n route_line.append(\"Force\")\n elif driver == 'hessian':\n route_line.append(\"Freq\")\n elif driver == 'rotation':\n route_line.append(\"\")\n route_line.append(\"{}/{}\".format(input_json['modelchem'].get('method'), input_json['modelchem'].get('basis')))\n route_line.append(\"scf(conver=11) int=ultrafine\")\n if driver == 'rotation':\n route_line.append('polar=OptRot')\n route_line.append('cphf(RdFreq,conver=11)')\n\n # Blank line follows route section\n return [\" \".join(route_line), '']\n\n\ndef write_input(driver, input_json):\n lines = []\n lines.extend(_link_header(0))\n lines.extend(_route(driver, input_json))\n lines.extend(_title(driver, input_json['molecule'].get('name'), input_json['modelchem'].get('name')))\n lines.extend(_geometry_sec(input_json))\n if driver == 'rotation':\n po = input_json.get('modelchem').get('program_options')\n if po:\n omegas = po.get('omega')\n if omegas:\n omegas = omegas[:-1]\n lines.extend(_rotation_wls(omegas))\n lines.extend(_end_input())\n infile_path = Path('input.com')\n infile_path.write_text(\"\\n\".join(lines))\n\n# main driver\ndef run(input_json):\n calc_type = input_json['modelchem'].get('driver')\n output_json = {}\n output_json['raw_output'] = {}\n output_json['success'] = False\n output_json['output'] = {}\n\n try:\n # write the input file\n write_input(calc_type, input_json)\n # exe g09\n exe_g09()\n # exe fchk\n exe_fchk()\n\n # gather up stuff\n fchk_path = Path('vices.fchk')\n raw_out_text = Path('output.log').read_text()\n hess = collect_hessian(fchk_path.read_text(), len(input_json['molecule']['symbols']))\n grad = collect_gradient(fchk_path.read_text())\n rotations = collect_rotations(fchk_path.read_text(), input_json['modelchem'].get('program_options'))\n\n # store the raw output\n output_json['raw_output']['log'] = raw_out_text\n # the fchk file can be too large, mongo documents have a max size of 16mb so if the file is more than 12 we wont\n # include it, just tell where the path is\n if (fchk_path.stat().st_size // 1024**2) <= 12:\n output_json['raw_output']['fchk'] = fchk_path.read_text()\n else:\n output_json['raw_output']['fchk_path'] = str(fchk_path.resolve())\n if hess is not None:\n output_json['output']['hessian'] = util.pack_json_field(hess)\n if grad is not None:\n output_json['output']['gradient'] = util.pack_json_field(grad)\n if rotations:\n output_json['output']['rotations'] = rotations\n output_json['success'] = True\n return output_json\n except Exception as e:\n output_json['success'] = False\n output_json['raw_output']['error'] = \"\\n\".join(traceback.format_exception(*sys.exc_info()))\n return output_json\n" ]
[ [ "numpy.allclose", "numpy.tril_indices_from", "numpy.triu_indices_from", "numpy.zeros_like", "numpy.zeros", "numpy.trace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
icemtel/carpet
[ "5905e02ab0e44822829a672955dccad3e09eea07" ]
[ "scripts/basin/worker_basin.py" ]
[ "'''\nIntegrates trajectory for many cycles\n- tries to load previously computed cycles; starts from the last point available\n- saved result in a separate file (e.g. phi_0_pt1.npy)\n- finishes early if a trajectory almost converged to a fixed point\n\n- IF integrated for many thousands of cycles - may want to uncomment `phi0 = phases_to_interval(phi0)`\n after finishing each cycle\n\nargs: irun, ncycle, tol, save_every, sim_name\n'''\n\n# import packages needed below\nimport carpet\nfrom carpet.various import phases_to_interval\nfrom sim_physics import N, solve_cycle\nimport sys, os\nimport numpy as np\nimport logging\n\ncarpet.setup_logging('worker.log')\n\n\n\ndef solve_cycles_many(phi0, tol, ncycle, save_every, conv_eps):\n '''\n Returns trajectory: phis: [phi0, phi1, phi2,..]\n Terminate when the distance travelled in one cycle less is than `termination_eps`, or when `ncycle` is reached.\n '''\n # save_every must be a positive integer; save_every = 1 => every cycle saved; save_every=2 => every 2nd saved\n if save_every == 0:\n raise NotImplementedError\n\n phis = [phi0]\n dphis_norm = []\n ts = [0]\n t = 0\n save_counter = 0\n\n for icycle in range(ncycle):\n sol = solve_cycle(phi0, tol, ncycle=1)\n phi1 = sol.y.T[-1] - 2 * np.pi\n t += sol.t[-1]\n\n # Save data once every `save_every` cycles\n save_counter += 1\n if save_counter == 1: # Add dphi for the first point & all points which got saved recently\n dphi = (phi1 - phi0)\n dphi_norm = np.sqrt(1 / N) * np.linalg.norm(dphi)\n dphis_norm.append(dphi_norm)\n # END if change in cycle is too small => (therefore close to a fixed point)\n if dphi_norm < conv_eps:\n return np.array(phis), np.array(ts), np.array(dphis_norm)\n # For small dphi; with zero mean phase; the norm above is equivalent to\n # `np.sqrt(1 - carpet.order_parameter(dphi) ** 2)`\n if save_counter == save_every:\n phis.append(phi1)\n ts.append(t)\n save_counter = 0 # reset save counter\n\n phi0 = phi1.copy() # set initial condition for the next cycle\n # phi0 = phases_to_interval(phi0)\n\n return np.array(phis), np.array(ts), np.array(dphis_norm)\n\n\n\ndef get_traj_filename(irun, ipart, path):\n if ipart == 0:\n return os.path.join(path, f'phi_{irun}.npy')\n else:\n return os.path.join(path, f'phi_{irun}_pt{ipart}.npy')\n\n\ndef get_ts_filename(irun, ipart, path):\n if ipart == 0:\n return os.path.join(path, f'ts_{irun}.npy')\n else:\n return os.path.join(path, f'ts_{irun}_pt{ipart}.npy')\n\n\ndef load_phis(irun, path):\n '''\n Read previous parts of the trajectory\n '''\n phis_list = []\n for ipart in range(64):\n filename = get_traj_filename(irun, ipart, path)\n if os.path.isfile(filename):\n phis_pt = np.load(filename)\n phis_list.append(phis_pt)\n else:\n break\n return np.concatenate(phis_list) # trajectory glued back from parts\n\n## Prepare input\nirun, ncycle_total, tol, save_every, sim_name = int(sys.argv[1]), int(sys.argv[2]), float(sys.argv[3]), \\\n int(sys.argv[4]), str(sys.argv[5])\n\n# Folder names\nobjfolder = f'obj/{sim_name}/'\noutfolder = f'out/{sim_name}/'\nconv_eps = 0.99e-4\n\n# Find how many parts the trajectory already has\nipart_last = None\n# Find the last existing part of the trajectory\nfor i in range(64): # maximum number of parts\n filename = get_traj_filename(irun, i, outfolder)\n if os.path.isfile(filename):\n ipart_last = i\n else:\n break\n\n# If trajectory exists -> load, get initial condition and number of cycles\nif ipart_last is not None:\n phis_old = load_phis(irun, outfolder)\n ncycle_old = (len(phis_old) - 1) * save_every # 
assume that input save_every is the same as used in prev. sims!\n phi0 = phis_old[-1]\n ipart = ipart_last + 1\n del phis_old # free up memory\nelse:\n ipart = 0\n ncycle_old = 0\n # Load input\n input_filename = objfolder + f'phi0_{irun}.npy'\n phi0 = np.load(input_filename)\n\n## Run simulation\nncycle_extra = ncycle_total - ncycle_old\nif ncycle_extra > 0:\n phis, ts = solve_cycles_many(phi0, tol, ncycle_extra, save_every, conv_eps)\n\n if ipart > 0: # remove the first point because it's the same as the last point in the first part\n phis = phis[1:]\n ts = ts[1:]\n\n ## Save output\n if len(phis) > 1: # length = 1 if imeediately finished simulation AND part > 0\n os.makedirs(outfolder, exist_ok=True)\n # Mean and std frequency\n # Time points\n filename = get_ts_filename(irun, ipart, outfolder)\n np.save(filename, ts)\n # Phases - saved the last to make sure that everything else is saved as well\n filename = get_traj_filename(irun, ipart, outfolder)\n np.save(filename, phis)\n\n" ]
[ [ "numpy.sqrt", "numpy.linalg.norm", "numpy.save", "numpy.concatenate", "numpy.load", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CxrImagePreProcessing/SemanticGenesis
[ "53a72e8df413b61279a6ce21532ac70dcf57fda5" ]
[ "self_discovery/vnet3d.py" ]
[ "import numpy as np\nfrom keras import backend as K\nfrom keras.engine import Input, Model\nfrom keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization, PReLU, Deconvolution3D,Concatenate\n\nK.set_image_data_format(\"channels_first\")\n\ntry:\n from keras.engine import merge\nexcept ImportError:\n from keras.layers.merge import concatenate\n\n\n\ndef vnet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, deconvolution=False,\n depth=4, n_base_filters=32, batch_normalization=False, activation_name=\"sigmoid\"):\n \"\"\"\n Builds the 3D VNet Keras model.f\n :param metrics: List metrics to be calculated during model training (default is dice coefficient).\n :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice\n coefficient for each label as metric.\n :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following\n layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required\n to train the model.\n :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling\n layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.\n :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be\n divisible by the pool size to the power of the depth of the VNet, that is pool_size^depth.\n :param pool_size: Pool size for the max pooling operations.\n :param n_labels: Number of binary labels that the model is learning.\n :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.\n :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. 
This\n increases the amount memory required during training.\n :return: Untrained 3D VNet Model\n \"\"\"\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n num_layer = 0\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),\n batch_normalization=batch_normalization, layer_depth=num_layer)\n num_layer += 1\n layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,\n batch_normalization=batch_normalization, layer_depth=num_layer)\n num_layer += 1\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth-2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,\n n_filters=current_layer._keras_shape[1])(current_layer)\n # concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1], layer_depth=num_layer,\n input_layer=up_convolution, batch_normalization=batch_normalization)\n num_layer += 1\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1], layer_depth=num_layer,\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n num_layer += 1\n\n final_convolution = Conv3D(n_labels, (1, 1, 1),name=\"final_conv\")(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n return model\n\n\ndef create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3, 3), activation=None,\n padding='same', strides=(1, 1, 1), instance_normalization=False, layer_depth=None):\n \"\"\"\n\n :param strides:\n :param input_layer:\n :param n_filters:\n :param batch_normalization:\n :param kernel:\n :param activation: Keras activation layer to use. 
(default is 'relu')\n :param padding:\n :return:\n \"\"\"\n layer = Conv3D(n_filters, kernel, padding=padding, strides=strides, name=\"depth_\"+str(layer_depth)+\"_conv\")(input_layer)\n if batch_normalization:\n layer = BatchNormalization(axis=1, name=\"depth_\"+str(layer_depth)+\"_bn\")(layer)\n elif instance_normalization:\n try:\n from keras_contrib.layers.normalization import InstanceNormalization\n except ImportError:\n raise ImportError(\"Install keras_contrib in order to use instance normalization.\"\n \"\\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git\")\n layer = InstanceNormalization(axis=1, name=\"depth_\"+str(layer_depth)+\"_in\")(layer)\n if activation is None:\n return Activation('relu', name=\"depth_\"+str(layer_depth)+\"_relu\")(layer)\n else:\n return activation()(layer)\n\n\ndef compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n \"\"\"\n Each level has a particular output shape based on the number of filters used in that level and the depth or number \n of max pooling operations that have been done on the data at that point.\n :param image_shape: shape of the 3d image.\n :param pool_size: the pool_size parameter used in the max pooling operation.\n :param n_filters: Number of filters used by the last node in a given level.\n :param depth: The number of levels down in the U-shaped model a given node is.\n :return: 5D vector of the shape of the output node \n \"\"\"\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)\n\n\ndef get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2), strides=(2, 2, 2),\n deconvolution=False):\n if deconvolution:\n return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,\n strides=strides)\n else:\n return UpSampling3D(size=pool_size)\n\nif __name__ == '__main__':\n model=vnet_model_3d((1, 64, 64, 32),batch_normalization=True)\n model.summary()" ]
[ [ "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pierthodo/rtrl
[ "eae3ad3dbfa3aefb2923eea1061ea909ec073abd" ]
[ "rtrl/sac.py" ]
[ "from collections import deque\nfrom copy import deepcopy, copy\nfrom dataclasses import dataclass, InitVar\nfrom functools import lru_cache, reduce\nfrom itertools import chain\nimport numpy as np\nimport torch\nfrom torch.nn.functional import mse_loss\n\nfrom rtrl.memory import Memory\nfrom rtrl.nn import PopArt, no_grad, copy_shared, exponential_moving_average, hd_conv\nfrom rtrl.util import cached_property, partial\nimport rtrl.sac_models\n\n\n@dataclass(eq=0)\nclass Agent:\n observation_space: InitVar\n action_space: InitVar\n\n Model: type = rtrl.sac_models.Mlp\n OutputNorm: type = PopArt\n batchsize: int = 256 # training batch size\n memory_size: int = 1000000 # replay memory size\n lr: float = 0.0003 # learning rate\n discount: float = 0.99 # reward discount factor\n target_update: float = 0.005 # parameter for exponential moving average\n reward_scale: float = 5.\n entropy_scale: float = 1.\n start_training: int = 10000\n device: str = None\n training_interval: int = 1\n\n model_nograd = cached_property(lambda self: no_grad(copy_shared(self.model)))\n\n num_updates = 0\n training_steps = 0\n\n def __post_init__(self, observation_space, action_space):\n device = self.device or (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = self.Model(observation_space, action_space)\n self.model = model.to(device)\n self.model_target = no_grad(deepcopy(self.model))\n\n self.actor_optimizer = torch.optim.Adam(self.model.actor.parameters(), lr=self.lr)\n self.critic_optimizer = torch.optim.Adam(self.model.critics.parameters(), lr=self.lr)\n self.memory = Memory(self.memory_size, self.batchsize, device)\n\n self.outputnorm = self.OutputNorm(self.model.critic_output_layers)\n self.outputnorm_target = self.OutputNorm(self.model_target.critic_output_layers)\n\n def act(self, obs, r, done, info, train=False):\n stats = []\n action, _ = self.model.act(obs, r, done, info, train)\n\n if train:\n self.memory.append(np.float32(r), np.float32(done), info, obs, action)\n if len(self.memory) >= self.start_training and self.training_steps % self.training_interval == 0:\n stats += self.train(),\n self.training_steps += 1\n return action, stats\n\n def train(self):\n obs, actions, rewards, next_obs, terminals = self.memory.sample()\n rewards, terminals = rewards[:, None], terminals[:, None] # expand for correct broadcasting below\n\n new_action_distribution = self.model.actor(obs)\n new_actions = new_action_distribution.rsample()\n\n # critic loss\n next_action_distribution = self.model_nograd.actor(next_obs)\n next_actions = next_action_distribution.sample()\n next_value = [c(next_obs, next_actions) for c in self.model_target.critics]\n next_value = reduce(torch.min, next_value)\n next_value = self.outputnorm_target.unnormalize(next_value)\n next_value = next_value - self.entropy_scale * next_action_distribution.log_prob(next_actions)[:, None]\n\n value_target = self.reward_scale * rewards + (1. 
- terminals) * self.discount * next_value\n value_target = self.outputnorm.update(value_target)\n\n values = [c(obs, actions) for c in self.model.critics]\n assert values[0].shape == value_target.shape and not value_target.requires_grad\n loss_critic = sum(mse_loss(v, value_target) for v in values)\n\n # actor loss\n new_value = [c(obs, new_actions) for c in self.model.critics]\n new_value = reduce(torch.min, new_value)\n new_value = self.outputnorm.unnormalize(new_value)\n\n loss_actor = self.entropy_scale * new_action_distribution.log_prob(new_actions)[:, None] - new_value\n assert loss_actor.shape == (self.batchsize, 1)\n loss_actor = self.outputnorm.normalize(loss_actor).mean()\n\n # update actor and critic\n self.critic_optimizer.zero_grad()\n loss_critic.backward()\n\n self.actor_optimizer.zero_grad()\n loss_actor.backward()\n \n self.critic_optimizer.step()\n self.actor_optimizer.step()\n\n # self.outputnorm.normalize(value_target, update=True) # This is not the right place to update PopArt\n\n # update target critics and normalizers\n exponential_moving_average(self.model_target.critics.parameters(), self.model.critics.parameters(), self.target_update)\n exponential_moving_average(self.outputnorm_target.parameters(), self.outputnorm.parameters(), self.target_update)\n\n return dict(\n loss_actor=loss_actor.detach(),\n loss_critic=loss_critic.detach(),\n outputnorm_mean=float(self.outputnorm.mean),\n outputnorm_std=float(self.outputnorm.std),\n memory_size=len(self.memory),\n )\n\n\nAvenueAgent = partial(\n Agent,\n entropy_scale=0.05,\n lr=0.0002,\n memory_size=500000,\n batchsize=100,\n training_interval=4,\n start_training=10000,\n Model=partial(rtrl.sac_models.ConvModel)\n)\n\n\n# === tests ============================================================================================================\ndef test_agent():\n from rtrl import Training, run\n Sac_Test = partial(\n Training,\n epochs=3,\n rounds=5,\n steps=100,\n Agent=partial(Agent, memory_size=1000000, start_training=256, batchsize=4),\n Env=partial(id=\"Pendulum-v0\", real_time=0),\n )\n run(Sac_Test)\n\n\ndef test_agent_avenue():\n from rtrl import Training, run\n from rtrl.envs import AvenueEnv\n Sac_Avenue_Test = partial(\n Training,\n epochs=3,\n rounds=5,\n steps=300,\n Agent=partial(AvenueAgent, device='cpu', training_interval=4, start_training=400),\n Env=partial(AvenueEnv, real_time=0),\n Test=partial(number=0), # laptop can't handle more than that\n )\n run(Sac_Avenue_Test)\n\n\ndef test_agent_avenue_hd():\n from rtrl import Training, run\n from rtrl.envs import AvenueEnv\n Sac_Avenue_Test = partial(\n Training,\n epochs=3,\n rounds=5,\n steps=300,\n Agent=partial(AvenueAgent, device='cpu', training_interval=4, start_training=400, Model=partial(Conv=hd_conv)),\n Env=partial(AvenueEnv, real_time=0, width=368, height=368),\n Test=partial(number=0), # laptop can't handle more than that\n )\n run(Sac_Avenue_Test)\n\n\nif __name__ == \"__main__\":\n test_agent()\n # test_agent_avenue()\n # test_agent_avenue_hd()\n" ]
[ [ "torch.nn.functional.mse_loss", "numpy.float32", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HsuJeremy/Spotify-Curator
[ "40c499f472566409fcd52e066f45267425452807" ]
[ "server/spotify_model/spotify_predict.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport joblib\nimport pandas as pd\nfrom schema import Schema\nfrom ml_model_abc import MLModel\n\n\nclass SpotifyModel(MLModel):\n # # Need to find a way to validate ints too or convert ints to floats\n # input_schema = Schema([{\n # 'acousticness': float,\n # 'danceability': float,\n # 'energy': float,\n # 'instrumentalness': float,\n # 'liveness': float,\n # 'loudness': float,\n # 'speechiness': float,\n # 'tempo': float,\n # 'valence': float\n # }])\n\n # output_schema = Schema({\n # 'prediction': float,\n # 'kmeans_prediction': float\n # })\n\n def __init__(self):\n dirpath = os.path.dirname(os.path.realpath(__file__))\n # Assumes that models are already loaded\n self.clf = joblib.load(os.path.join(dirpath, 'model_files', 'spotify.joblib'))\n self.kmeans_clf = joblib.load(os.path.join(dirpath, 'model_files', 'spotify_kmeans.joblib'))\n\n def predict(self, data):\n # # Call super method to validate data against input_schema\n # super().predict(data=data)\n\n # Assume data is in the form of audio_features array in original file\n df_song = pd.DataFrame(data)\n prediction = self.clf.predict_proba(df_song)\n kmeans_prediction = self.kmeans_clf.predict(df_song)\n result = {'prediction': float(prediction[0][1]), 'kmeans_prediction': float(kmeans_prediction[0])}\n # self.output_schema.validate(result)\n return result\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ariel-berger/Mask_Identification
[ "f22bc8975d34781e5db71ccc0c80fdc4d9608696" ]
[ "train_separately.py" ]
[ "import time\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom utils import train_utils\nfrom torch.utils.data import DataLoader\nfrom utils.types import Scores, Metrics\nfrom dataset import MaskDataset\nfrom models.separate_model import MaskModel, BBModel\nfrom utils.train_utils import get_metrics, accuracy, calc_iou, print_img_bb, unscale_bb\nimport numpy as np\n\nfrom ray import tune\n\n\ndef train(config):\n\tbb_model = BBModel(dropout=config['dropout'], hidden_bb_dim=config['hidden_bb_dim'])\n\tmask_model = MaskModel(dropout=config['dropout'], hidden_label_dim=config['hidden_label_dim'])\n\n\tif torch.cuda.is_available():\n\t\tbb_model = bb_model.cuda()\n\t\tmask_model = mask_model.cuda()\n\n\tbest_accuracy = 0\n\tbest_iou = 0\n\n\t# Create optimizer\n\toptimizer = torch.optim.Adam(list(bb_model.parameters()) + list(mask_model.parameters()), lr=config['lr_value'])\n\n\t# Create learning rate scheduler\n\tscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['step_size'])\n\t# Load dataset\n\ttrain_dataset = MaskDataset(path=config['train_path'])\n\n\ttrain_size = int(0.8 * len(train_dataset))\n\tvalidation_size = len(train_dataset) - train_size\n\ttrain_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, validation_size])\n\n\ttrain_loader = DataLoader(train_dataset, config['batch_size'], shuffle=True,\n\t\t\t\t\t\t\t num_workers=6)\n\teval_loader = DataLoader(val_dataset, config['batch_size'], shuffle=False,\n\t\t\t\t\t\t\t num_workers=6)\n\n\tfor epoch in tqdm(range(25), maxinterval=100):\n\t\tt = time.time()\n\t\tmetrics = train_utils.get_zeroed_metrics_dict()\n\n\t\tfor i, (image, bounding_box, label, shape, path) in enumerate(train_loader):\n\t\t\tif torch.cuda.is_available():\n\t\t\t\timage = image.cuda()\n\t\t\t\tbounding_box = bounding_box.cuda()\n\t\t\t\tlabel = label.cuda()\n\t\t\t\tshape = shape.cuda()\n\n\t\t\tbb_hat = bb_model(image)\n\t\t\tlabel_hat = mask_model(image)\n\t\t\t# compute losses\n\t\t\tbce_loss = nn.functional.binary_cross_entropy(label_hat.squeeze(-1), label)\n\t\t\tbb_loss = nn.functional.smooth_l1_loss(bb_hat, bounding_box)\n\n\t\t\t# Optimization step\n\t\t\toptimizer.zero_grad()\n\t\t\tbce_loss.backward()\n\t\t\tbb_loss.backward()\n\t\t\toptimizer.step()\n\n\t\t\t# Calculate metrics\n\t\t\tmetrics['train_accuracy'] += accuracy(label_hat, label)\n\t\t\tmetrics['train_iou'] += calc_iou(bb_hat, bounding_box, shape)\n\n\t\t\tmetrics['train_bb_loss'] += bb_loss.item() * image.size(0)\n\t\t\tmetrics['train_bce_loss'] += bce_loss.item() * image.size(0)\n\n\t\t# Learning rate scheduler step\n\t\tscheduler.step()\n\n\t\tmetrics['train_bb_loss'] /= len(train_loader.dataset)\n\t\tmetrics['train_bce_loss'] /= len(train_loader.dataset)\n\t\tmetrics['train_accuracy'] /= len(train_loader.dataset)\n\t\tmetrics['train_iou'] /= len(train_loader.dataset)\n\t\tmetrics['train_accuracy'] *= 100\n\t\tmetrics['train_iou'] *= 100\n\t\ttrain_iou, train_accuracy = metrics['train_iou'], metrics['train_accuracy']\n\t\tbce_loss, bb_loss = metrics['train_bce_loss'], metrics['train_bb_loss']\n\n\t\t# run evaluation on validation set\n\t\tbb_model.train(False)\n\t\tmask_model.train(False)\n\t\tmetrics['eval_accuracy'], metrics['eval_iou'], metrics['bce_loss'], metrics['bb_loss'] = evaluate(bb_model,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t mask_model,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t eval_loader)\n\t\tbb_model.train(True)\n\t\tmask_model.train(True)\n\n\t\tscore = 
(metrics['eval_accuracy'] + metrics['eval_iou']) / 2\n\t\ttune.report(score=score, accuracy=metrics['eval_accuracy'], iou=metrics['eval_iou'],\n\t\t\t\t\tbce_loss=metrics['bce_loss'], bb_loss=metrics['bb_loss'], train_bce_loss=metrics['train_bce_loss'],\n\t\t\t\t\ttrain_bb_loss=metrics['train_bb_loss'], train_accuracy=train_accuracy, train_iou=train_iou)\n\n\t\tif metrics['eval_accuracy'] > best_accuracy:\n\t\t\tbest_accuracy = metrics['eval_accuracy']\n\t\t\ttorch.save(mask_model.state_dict(), \"./mask_model.pth\")\n\t\tif metrics['eval_iou'] > best_iou:\n\t\t\tbest_iou = metrics['eval_iou']\n\t\t\ttorch.save(bb_model.state_dict(), \"./bb_model.pth\")\n\n\treturn\n\n\[email protected]_grad()\ndef evaluate(bb_model: nn.Module, mask_model: nn.Module, dataloader: DataLoader) -> Scores:\n\t\"\"\"\n\tEvaluate a model without gradient calculation\n\t:param model: instance of a model\n\t:param dataloader: dataloader to evaluate the model on\n\t:return: tuple of (accuracy, loss) values\n\t\"\"\"\n\taccuracy_score = 0\n\tiou_score = 0\n\tloss = 0\n\n\tfor i, (image, bounding_box, label, shape, path) in tqdm(enumerate(dataloader)):\n\t\tif torch.cuda.is_available():\n\t\t\timage = image.cuda()\n\t\t\tbounding_box = bounding_box.cuda()\n\t\t\tlabel = label.cuda()\n\t\t\tshape = shape.cuda()\n\n\t\tbb_hat = bb_model(image)\n\t\tlabel_hat = mask_model(image)\n\t\tif np.random.random() > 0.95:\n\t\t\tprint_img_bb(bb_hat, bounding_box, shape, path, 'eval_sep_')\n\t\tbce_loss = nn.functional.binary_cross_entropy(label_hat.squeeze(-1), label)\n\t\tbb_loss = nn.functional.smooth_l1_loss(bb_hat, bounding_box)\n\t\taccuracy_score += accuracy(label_hat, label)\n\t\tiou_score += calc_iou(bb_hat, bounding_box, shape)\n\n\tbce_loss /= len(dataloader.dataset)\n\tbb_loss /= len(dataloader.dataset)\n\taccuracy_score /= len(dataloader.dataset)\n\tiou_score /= len(dataloader.dataset)\n\taccuracy_score *= 100\n\tiou_score *= 100\n\n\treturn accuracy_score, iou_score, bce_loss.item(), bb_loss.item()\n\n\[email protected]_grad()\ndef predict(bb_model: nn.Module, mask_model: nn.Module, dataloader: DataLoader) -> Scores:\n\t\"\"\"\n\tpredict a model without gradient calculation\n\t:param model: instance of a model\n\t:param dataloader: dataloader to evaluate the model on\n\t:return: predictions\n\t\"\"\"\n\taccuracy_score = 0\n\tiou_score = 0\n\tloss = 0\n\tbb_list = []\n\tpred_list = []\n\tfiles_list = []\n\tfor i, (image, bounding_box, label, shape, path) in tqdm(enumerate(dataloader)):\n\t\tif torch.cuda.is_available():\n\t\t\timage = image.cuda()\n\t\t\tbounding_box = bounding_box.cuda()\n\t\t\tlabel = label.cuda()\n\t\t\tshape = shape.cuda()\n\n\t\tbb_hat = bb_model(image)\n\t\tlabel_hat = mask_model(image)\n\n\t\t# from relative bb to receiving format bb\n\t\tbb_hat = unscale_bb(bb_hat, shape)\n\t\tbb_hat = [[bb_hat[0][i].item(), bb_hat[1][i].item(), bb_hat[2][i].item(), bb_hat[3][i].item()] for i in\n\t\t\t\t range(len(bb_hat[0]))]\n\t\tbb_list.append(bb_hat)\n\n\t\tpred_list.append([int(x) for x in (label_hat.squeeze() < 0.5)])\n\t\tfiles_list.append([x[x.rfind('/', 1) + 1:] for x in path])\n\n\t# flatten lists\n\tpred_list = [item for sublist in pred_list for item in sublist]\n\tfiles_list = [item for sublist in files_list for item in sublist]\n\tbb_list = [item for sublist in bb_list for item in sublist]\n\treturn pred_list, files_list, bb_list\n" ]
[ [ "numpy.random.random", "torch.utils.data.DataLoader", "torch.utils.data.random_split", "torch.no_grad", "torch.cuda.is_available", "torch.nn.functional.smooth_l1_loss", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
self-supervisor/Escaping-Stochastic-Traps-With-Aleatoric-Mapping-Agents
[ "b55d8d938fe4c313936c9a15e32b738b5d033809" ]
[ "noisy_mnist/unit_test.py" ]
[ "import numpy as np\nimport pytest\nimport torch\n\nfrom noisy_mnist_aleatoric_uncertainty_for_poster import *\n\n\[email protected](scope=\"module\", params=[\"train\", \"test\"])\ndef noisy_mnist_env(request):\n\n mnist_env = NoisyMnistEnv(request.param, 0, 2)\n return mnist_env\n\n\[email protected](scope=\"module\", params=[\"mse\", \"aleatoric\"])\ndef noisy_mnist_experiment(request):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n mnist_env_train = NoisyMnistEnv(\"train\", 0, 2)\n mnist_env_test_zeros = NoisyMnistEnv(\"test\", 0, 2)\n mnist_env_test_ones = NoisyMnistEnv(\"test\", 0, 2)\n\n if request.param == \"mse\":\n model = Net()\n experiment = NoisyMNISTExperimentRun(\n repeats=1,\n training_steps=3000,\n checkpoint_loss=100,\n lr=0.001,\n model=model,\n mnist_env_train=mnist_env_train,\n mnist_env_test_zeros=mnist_env_test_zeros,\n mnist_env_test_ones=mnist_env_test_ones,\n device=device,\n )\n elif request.param == \"aleatoric\":\n model = AleatoricNet()\n experiment = NoisyMNISTExperimentRunAMA(\n repeats=1,\n training_steps=3000,\n checkpoint_loss=100,\n lr=0.001,\n model=model,\n mnist_env_train=mnist_env_train,\n mnist_env_test_zeros=mnist_env_test_zeros,\n mnist_env_test_ones=mnist_env_test_ones,\n device=device,\n )\n return experiment\n\n\ndef check_count_of_classes(x_arr, y_arr):\n same = 0\n not_same = 0\n for i, _ in enumerate(x_arr):\n if np.array_equal(x_arr[i], y_arr[i]):\n same += 1\n else:\n not_same += 1\n return same, not_same\n\n\ndef test_mnist_env_step(noisy_mnist_env):\n import math\n\n x_arr, y_arr = noisy_mnist_env.step()\n assert x_arr.shape == y_arr.shape # make sure batch shapes make sense\n for i, _ in enumerate(x_arr): # check batch is completely filled\n assert np.array_equal(x_arr[i], np.zeros((1, 28 * 28))) == False\n assert np.array_equal(y_arr[i], np.zeros((1, 28 * 28))) == False\n assert np.array_equal(np.zeros((1, 28 * 28)), np.zeros((1, 28 * 28))) == True\n same = 0\n not_same = 0\n for _ in range(\n 1000\n ): # check roughly half are deterministic transitions, half aren't\n x_arr, y_arr = noisy_mnist_env.step()\n same_sample, not_same_sample = check_count_of_classes(x_arr, y_arr)\n same += same_sample\n not_same += not_same_sample\n print(\"same\", same)\n print(\"not same\", not_same)\n assert math.isclose(same, not_same, rel_tol=0.2)\n\n\ndef test_mnist_env_random_sample_of_number(noisy_mnist_env):\n \"\"\"\n This test is a qualitative visual test, look in test images\n and make sure the number title is the same as the number\n \"\"\"\n import matplotlib.pyplot as plt\n import os\n import shutil\n\n if os.path.isdir(\"unit_test_images\"):\n shutil.rmtree(\"unit_test_images\")\n os.mkdir(\"unit_test_images\")\n\n for number in range(0, 10):\n digit = noisy_mnist_env.get_random_sample_of_number(number)\n plt.imshow(np.array(digit).reshape(28, 28))\n plt.title(str(number))\n plt.savefig(\"unit_test_images/\" + str(number) + \".png\")\n\n\ndef test_run_experiment(noisy_mnist_experiment):\n noisy_mnist_experiment.run_experiment()\n\n\ndef test_get_batch(noisy_mnist_experiment):\n envs = [\n noisy_mnist_experiment.env_train,\n noisy_mnist_experiment.env_test_zeros,\n noisy_mnist_experiment.env_test_ones,\n ]\n for an_env in envs:\n data, target = noisy_mnist_experiment.get_batch(an_env)\n assertions_for_generated_data(data)\n assertions_for_generated_data(target)\n\n\ndef assertions_for_generated_data(input_tensor):\n assert input_tensor.type() == \"torch.cuda.FloatTensor\"\n assert input_tensor.max() <= 1.0\n 
assert input_tensor.min() >= 0.0\n assert torch.all(torch.eq(input_tensor, torch.zeros_like(input_tensor))) == False\n assert (\n torch.all(\n torch.eq(torch.zeros_like(input_tensor), torch.zeros_like(input_tensor))\n )\n == True\n )\n assert batch_is_different(input_tensor) == True\n assert batch_is_different(torch.zeros_like(input_tensor)) == False\n\n\ndef batch_is_different(input_tensor):\n duplicate_tensors = 0\n for i, data_point_i in enumerate(input_tensor):\n for j, data_point_j in enumerate(input_tensor):\n if i != j:\n if torch.all(torch.eq(data_point_i, data_point_j)):\n duplicate_tensors += 1\n if duplicate_tensors != (len(input_tensor) - 1) * len(input_tensor):\n return True\n return False\n\n\ndef test_train_step(noisy_mnist_experiment):\n import copy\n\n model_copy = copy.deepcopy(noisy_mnist_experiment.model)\n loss_buffer_copy = copy.deepcopy(noisy_mnist_experiment.loss_buffer)\n noisy_mnist_experiment.train_step(1)\n\n assert_model_gets_updated(model_copy, noisy_mnist_experiment.model)\n\n assert len(loss_buffer_copy) == 0\n assert len(noisy_mnist_experiment.loss_buffer) > len(loss_buffer_copy)\n noisy_mnist_experiment.train_step(noisy_mnist_experiment.checkpoint_loss - 1)\n assert len(noisy_mnist_experiment.loss_buffer) == 0\n\n\ndef assert_model_gets_updated(old_model, updated_model):\n params = get_params_from_model(updated_model)\n copy_params = get_params_from_model(old_model)\n\n for i, _ in enumerate(params):\n assert torch.all(torch.eq(copy_params[i], params[i])) == False\n assert torch.all(torch.eq(copy_params[i], copy_params[i])) == True\n\n\ndef get_params_from_model(a_model):\n params = []\n for name, param in a_model.named_parameters():\n params.append(param)\n return param\n\n\ndef test_eval_step(noisy_mnist_experiment):\n import copy\n\n loss_buffer_1_copy = copy.deepcopy(noisy_mnist_experiment.loss_buffer_1)\n assert len(loss_buffer_1_copy) == 0\n\n noisy_mnist_experiment.eval_step(\"ones\", 0)\n assert len(noisy_mnist_experiment.loss_buffer_1) > len(loss_buffer_1_copy)\n noisy_mnist_experiment.eval_step(\"ones\", noisy_mnist_experiment.checkpoint_loss - 1)\n assert len(noisy_mnist_experiment.loss_buffer_1) == 0\n\n loss_buffer_0_copy = copy.deepcopy(noisy_mnist_experiment.loss_buffer_0)\n assert len(loss_buffer_0_copy) == 0\n\n noisy_mnist_experiment.eval_step(\"zeros\", 0)\n assert len(noisy_mnist_experiment.loss_buffer_0) > len(loss_buffer_0_copy)\n noisy_mnist_experiment.eval_step(\n \"zeros\", noisy_mnist_experiment.checkpoint_loss - 1\n )\n assert len(noisy_mnist_experiment.loss_buffer_0) == 0\n" ]
[ [ "numpy.array_equal", "torch.eq", "torch.zeros_like", "torch.cuda.is_available", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emonhossainraihan/Self-Supervised-Learning-for-Sketch
[ "953c13c7d0a3534091b22a0e0504da91be211155" ]
[ "Networks.py" ]
[ "import torch.nn as nn\nimport torchvision.models as backbone_\nimport torch.nn.functional as F\nimport torch\nfrom torchvision.ops import MultiScaleRoIAlign\nfrom collections import OrderedDict\nimport torch\nimport torchvision\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_sequence\n\nclass Resnet_Network(nn.Module):\n def __init__(self, hp, num_class = 81):\n super(Resnet_Network, self).__init__()\n\n self.hp = hp\n backbone = backbone_.resnet50(pretrained=False) #resnet50, resnet18, resnet34\n\n self.features = nn.Sequential()\n for name, module in backbone.named_children():\n if name not in ['avgpool', 'fc']:\n self.features.add_module(name, module)\n\n self.pool_method = nn.AdaptiveMaxPool2d(1) # as default\n\n if hp.fullysupervised:\n if hp.dataset_name == 'TUBerlin':\n num_class = 250\n elif hp.dataset_name == 'QuickDraw':\n num_class = 345\n self.classifier = nn.Linear(2048, num_class)\n\n\n def forward(self, x):\n x = self.features(x)\n x = self.pool_method(x).view(-1, 2048)\n if self.hp.fullysupervised:\n x = self.classifier(x)\n return x\n\n\n def extract_features(self, x, every_layer=True):\n feature_list = {}\n batch_size = x.shape[0]\n # https://stackoverflow.com/questions/47260715/\n # how-to-get-the-value-of-a-feature-in-a-layer-that-match-a-the-state-dict-in-pyto\n for name, module in self.features._modules.items():\n x = module(x)\n if every_layer and name in ['layer1', 'layer2', 'layer3', 'layer4']:\n feature_list[name] = self.pool_method(x).view(batch_size, -1)\n\n if not feature_list:\n feature_list['pre_logits'] = self.pool_method(x).view(batch_size, -1)\n\n return feature_list\n\n\n\nclass UNet_Decoder(nn.Module):\n def __init__(self, out_channels=3):\n super(UNet_Decoder, self).__init__()\n # self.linear_1 = nn.Linear(512, 8*8*256)\n # self.dropout = nn.Dropout(0.5)\n self.deconv_1 = Unet_UpBlock(512, 512)\n self.deconv_2 = Unet_UpBlock(512, 512)\n self.deconv_3 = Unet_UpBlock(512, 512)\n self.deconv_4 = Unet_UpBlock(512, 256)\n self.deconv_5= Unet_UpBlock(256, 128)\n self.deconv_6 = Unet_UpBlock(128, 64)\n self.deconv_7 = Unet_UpBlock(64, 32)\n self.final_image = nn.Sequential(*[nn.ConvTranspose2d(32, out_channels,\n kernel_size=4, stride=2,\n padding=1), nn.Tanh()])\n\n def forward(self, x):\n # x = self.linear_1(x)\n x = x.view(-1, 512, 1, 1)\n # x = self.dropout(x)\n x = self.deconv_1(x) #2\n x = self.deconv_2(x) #4\n x = self.deconv_3(x) #8\n x = self.deconv_4(x) #16\n x = self.deconv_5(x) #32\n x = self.deconv_6(x) #64\n x = self.deconv_7(x) #128\n x = self.final_image(x) #256\n return x\n\n\nclass Unet_UpBlock(nn.Module):\n def __init__(self, inner_nc, outer_nc):\n super(Unet_UpBlock, self).__init__()\n layers = [\n nn.ConvTranspose2d(inner_nc, outer_nc, 4, 2, 1, bias=True),\n nn.InstanceNorm2d(outer_nc),\n nn.ReLU(inplace=True),\n ]\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass Residual_UpBlock(nn.Module):\n def __init__(self, c_in, c_out, stride, output_padding, norm = 'InstanceNorm2d', c_hidden=None):\n super(Residual_UpBlock, self).__init__()\n c_hidden = c_out if c_hidden is None else c_hidden\n\n if norm == 'BatchNorm2d':\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = nn.InstanceNorm2d\n\n self.conv1 = nn.Sequential(\n norm_layer(c_in, affine=True),\n nn.LeakyReLU(),\n nn.Conv2d(c_in, c_hidden, kernel_size=3, stride=1, padding=1))\n\n self.conv2 = nn.Sequential(\n norm_layer(c_hidden, affine=True),\n 
nn.LeakyReLU(),\n nn.ConvTranspose2d(c_hidden, c_out, kernel_size=3,\n stride=stride, padding=1, output_padding=output_padding))\n\n self.residual = nn.ConvTranspose2d(c_in, c_out, kernel_size=3,\n stride=stride, padding=1, output_padding=output_padding)\n\n\n def forward(self, x):\n residual = self.residual(x)\n conv1 = self.conv1(x)\n conv2 = self.conv2(conv1)\n return residual + conv2\n\n\nclass ResNet_Decoder(nn.Module):\n def __init__(self):\n super(ResNet_Decoder, self).__init__()\n self.upblock5 = Residual_UpBlock(512, 256, (2,1), (1,0))\n self.upblock4 = Residual_UpBlock(256, 128, (2,1), (1,0))\n self.upblock3 = Residual_UpBlock(128, 64, (2,1), (1,0))\n self.upblock2 = Residual_UpBlock(64, 32, (2,2), (1,1))\n self.upblock1 = Residual_UpBlock(32, 32, (2,2), (1,1))\n self.upblock0 = Residual_UpBlock(32, 1, (1,1), (0,0))\n\n def forward(self, x):\n upblock5 = self.upblock5(x)\n upblock4 = self.upblock4(upblock5)\n upblock3 = self.upblock3(upblock4)\n upblock2 = self.upblock2(upblock3)\n upblock1 = self.upblock1(upblock2)\n upblock0 = self.upblock0(upblock1)\n return torch.tanh(upblock0)\n\n\nclass Sketch_LSTM(nn.Module):\n def __init__(self, inp_dim=5, hidden_size=512, LSTM_num_layers=2, dropout=0.5):\n super(Sketch_LSTM, self).__init__()\n self.inp_dim, self.hidden_size, self.LSTM_num_layers, self.bidirectional = inp_dim, hidden_size, LSTM_num_layers, 2\n self.LSTM_encoder = nn.LSTM(inp_dim, hidden_size,\n num_layers=LSTM_num_layers,\n dropout=dropout,\n batch_first=True, bidirectional=True)\n\n def forward(self, x, seq_len):\n # batch['stroke_wise_split'][:,:,:2] /= 800\n x = pack_padded_sequence(x.to(device), seq_len.to(device), batch_first=True, enforce_sorted=False)\n _ , (x_hidden, _) = self.LSTM_encoder(x.float())\n x_hidden = x_hidden.view(self.LSTM_num_layers, self.bidirectional, seq_len.shape[0], self.hidden_size)[-1].permute(1,0,2).reshape(seq_len.shape[0], -1)\n\n return x_hidden" ]
[ [ "torch.nn.Sequential", "torch.nn.AdaptiveMaxPool2d", "torch.nn.ConvTranspose2d", "torch.nn.LSTM", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.tanh", "torch.nn.Linear", "torch.nn.InstanceNorm2d", "torch.nn.LeakyReLU", "torch.cuda.is_available", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
photosynthesis-team/photosynthesis.metrics
[ "4dc9e42057798a4c9c00e121c647a97fe90ea493" ]
[ "piq/utils/common.py" ]
[ "import torch\nimport re\nimport warnings\n\nfrom typing import Tuple, List, Optional, Union, Dict, Any\n\nSEMVER_VERSION_PATTERN = re.compile(\n r\"\"\"\n ^\n (?P<major>0|[1-9]\\d*)\n \\.\n (?P<minor>0|[1-9]\\d*)\n \\.\n (?P<patch>0|[1-9]\\d*)\n (?:-(?P<prerelease>\n (?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)\n (?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*\n ))?\n (?:\\+(?P<build>\n [0-9a-zA-Z-]+\n (?:\\.[0-9a-zA-Z-]+)*\n ))?\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\nPEP_440_VERSION_PATTERN = r\"\"\"\n v?\n (?:\n (?:(?P<epoch>[0-9]+)!)? # epoch\n (?P<release>[0-9]+(?:\\.[0-9]+)*) # release segment\n (?P<pre> # pre-release\n [-_\\.]?\n (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))\n [-_\\.]?\n (?P<pre_n>[0-9]+)?\n )?\n (?P<post> # post release\n (?:-(?P<post_n1>[0-9]+))\n |\n (?:\n [-_\\.]?\n (?P<post_l>post|rev|r)\n [-_\\.]?\n (?P<post_n2>[0-9]+)?\n )\n )?\n (?P<dev> # dev release\n [-_\\.]?\n (?P<dev_l>dev)\n [-_\\.]?\n (?P<dev_n>[0-9]+)?\n )?\n )\n (?:\\+(?P<local>[a-z0-9]+(?:[-_\\.][a-z0-9]+)*))? # local version\n\"\"\"\n\n\ndef _validate_input(\n tensors: List[torch.Tensor],\n dim_range: Tuple[int, int] = (0, -1),\n data_range: Tuple[float, float] = (0., -1.),\n # size_dim_range: Tuple[float, float] = (0., -1.),\n size_range: Optional[Tuple[int, int]] = None,\n) -> None:\n r\"\"\"Check that input(-s) satisfies the requirements\n Args:\n tensors: Tensors to check\n dim_range: Allowed number of dimensions. (min, max)\n data_range: Allowed range of values in tensors. (min, max)\n size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1)\n \"\"\"\n\n if not __debug__:\n return\n\n x = tensors[0]\n\n for t in tensors:\n assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'\n assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}'\n\n if size_range is None:\n assert t.size() == x.size(), f'Expected tensors with same size, got {t.size()} and {x.size()}'\n else:\n assert t.size()[size_range[0]: size_range[1]] == x.size()[size_range[0]: size_range[1]], \\\n f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}'\n\n if dim_range[0] == dim_range[1]:\n assert t.dim() == dim_range[0], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}'\n elif dim_range[0] < dim_range[1]:\n assert dim_range[0] <= t.dim() <= dim_range[1], \\\n f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}'\n\n if data_range[0] < data_range[1]:\n assert data_range[0] <= t.min(), \\\n f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}'\n assert t.max() <= data_range[1], \\\n f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}'\n\n\ndef _reduce(x: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:\n r\"\"\"Reduce input in batch dimension if needed.\n\n Args:\n x: Tensor with shape (N, *).\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``\n \"\"\"\n if reduction == 'none':\n return x\n elif reduction == 'mean':\n return x.mean(dim=0)\n elif reduction == 'sum':\n return x.sum(dim=0)\n else:\n raise ValueError(\"Unknown reduction. 
Expected one of {'none', 'mean', 'sum'}\")\n\n\ndef _parse_version(version: Union[str, bytes]) -> Tuple[int, ...]:\n \"\"\" Parses valid Python versions according to Semver and PEP 440 specifications.\n For more on Semver check: https://semver.org/\n For more on PEP 440 check: https://www.python.org/dev/peps/pep-0440/.\n\n Implementation is inspired by:\n - https://github.com/python-semver\n - https://github.com/pypa/packaging\n\n Args:\n version: unparsed information about the library of interest.\n\n Returns:\n parsed information about the library of interest.\n \"\"\"\n if isinstance(version, bytes):\n version = version.decode(\"UTF-8\")\n elif not isinstance(version, str) and not isinstance(version, bytes):\n raise TypeError(f\"not expecting type {type(version)}\")\n\n # Semver processing\n match = SEMVER_VERSION_PATTERN.match(version)\n if match:\n matched_version_parts: Dict[str, Any] = match.groupdict()\n release = tuple([int(matched_version_parts[k]) for k in ['major', 'minor', 'patch']])\n return release\n\n # PEP 440 processing\n regex = re.compile(r\"^\\s*\" + PEP_440_VERSION_PATTERN + r\"\\s*$\", re.VERBOSE | re.IGNORECASE)\n match = regex.search(version)\n\n if match is None:\n warnings.warn(f\"{version} is not a valid SemVer or PEP 440 string\")\n return tuple()\n\n release = tuple(int(i) for i in match.group(\"release\").split(\".\"))\n return release\n" ]
[ [ "torch.is_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
logic-and-learning-lab/aaai22-dcc
[ "e2176545f222fd0fe4788297f5448f278c46e440" ]
[ "benchmark.py" ]
[ " #!/usr/bin/env python3\nimport os\nimport sys\nimport psutil\nimport pathlib\nimport subprocess\nimport numpy as np\nimport scipy.stats as stats\nfrom popper.split import runner, prog_to_code\nfrom popper.utils import Settings\nfrom pyswip import Prolog\nfrom multiprocessing.pool import Pool, ThreadPool\nfrom multiprocessing import Process\nimport multiprocessing\nimport gen_data\nimport time\nimport pathlib\nimport logging\nimport random\nimport math\nfrom datetime import datetime\n\nNUM_TRAIN_EXAMPLES = 10\nNUM_TEST_EXAMPLES = 1000\n\nNUM_CPUS = 1\nTIMEOUT = 300\nTRIALS = list(range(1,21))\nTASKS = []\nTASKS += ['trains1']\nTASKS += ['trains2']\nTASKS += ['trains3']\nTASKS += ['trains4']\nTASKS += ['iggp-minimal-decay']\nTASKS += ['iggp-buttons']\nTASKS += ['iggp-rps']\nTASKS += ['iggp-coins']\nTASKS += ['dropk']\nTASKS += ['droplast']\nTASKS += ['evens']\nTASKS += ['finddup']\nTASKS += ['last']\nTASKS += ['len']\nTASKS += ['sorted']\nTASKS += ['sumlist']\n\npath = pathlib.Path().resolve()\n\ndef partmap(func, jobs, num_cups = NUM_CPUS):\n if num_cups == 1:\n return list(map(func, jobs))\n with ThreadPool(num_cups) as p:\n return list(p.map(func, jobs))\n\ndef get_time():\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n return current_time\n\n\ndef parpmap(func, jobs, num_cups = NUM_CPUS):\n\n # p = ctx.Process(target=run_experiment, args=(problem, config, experiment, sema, results), name=f'{problem}::{config}')\n # p.start()\n\n # if num_cups == 1:\n # return list(map(func, jobs))\n with Pool(num_cups, maxtasksperchild=1) as p:\n return list(p.map(func, jobs))\n\n# # old and horrible code\ndef call_(cmd, action=None, timeout=None):\n cmd = cmd.split(' ')\n p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n if action != None:\n p.stdin.write(action.encode('utf-8'))\n try:\n output, _err = p.communicate(timeout = timeout)\n return output.decode()\n except subprocess.TimeoutExpired:\n pass\n finally:\n try:\n parent = psutil.Process(p.pid)\n for child in parent.children(recursive=True):\n child.kill()\n except psutil.NoSuchProcess:\n pass\n p.kill()\n\ndef call_prolog(files, action, timeout):\n files = ','.join((f\"'{x}'\" for x in files))\n cmd = f\"load_files([{files}],[silent(true)]). {action}.\"\n # print(cmd)\n return call_('swipl -q', cmd, timeout)\n\ndef train_metagol(settings):\n task = settings.task\n trial = settings.trial\n metarules = 'metarules'\n if 'iggp' in task:\n ex_file = f'data/{task}/data/train/exs.pl'\n prim_file = f'data/{task}/metagol.pl'\n bk_file = f'data/{task}/data/train/bk.pl'\n elif 'train' in task:\n ex_file = f'data/{task}/data/train/{trial}.pl'\n prim_file = f'data/prims-trains.pl'\n bk_file = f'data/bk-trains.pl'\n else:\n ex_file = f'data/{task}/data/train/{trial}.pl'\n prim_file = f'data/prims-lists.pl'\n bk_file = f'data/bk-lists.pl'\n metarules = 'metarules-rec'\n\n load_files = ['metagol', prim_file, metarules, ex_file, bk_file]\n t1 = time.time()\n prog = call_prolog(load_files, 'run', TIMEOUT)\n t2 = time.time()\n if prog != None:\n prog = [x for x in prog.split('\\n') if ':-' in x]\n save_prog(settings, prog, t2-t1)\n\ndef train_aleph(settings):\n task = settings.task\n trial = settings.trial\n if 'iggp' in task:\n train_file = f'{path}/data/{task}/aleph.pl'\n else:\n train_file = f'{path}/data/{task}/data/aleph/{trial}.pl'\n cmd = \"induce(P),writeln('<PROG>'),numbervars(P,0,_),foreach(member(C,P),(write(C),write('. 
'))),writeln('</PROG>'),halt\"\n load_files = [train_file]\n t1 = time.time()\n try:\n prog = call_prolog(load_files, cmd, TIMEOUT)\n except:\n prog = None\n t2 = time.time()\n if prog != None:\n xs = prog.split('<PROG>')\n if len(xs) > 1:\n prog = xs[1].split('</PROG>')[0]\n # print('PROG1',prog)\n prog = prog.replace('\\n', ' ')\n prog = [x.strip() for x in prog.split('.') if len(x.strip()) > 0]\n # print('PROG2',prog)\n else:\n prog = None\n save_prog(settings, prog, t2-t1)\n\ndef gen_aleph_input(pos, neg, bk_file, bias_file, out_file):\n with open(out_file, 'w') as f:\n # read general aleph settings\n with open(bias_file) as tmp:\n f.write(tmp.read() + '\\n')\n f.write(':-begin_bg.\\n')\n with open(bk_file) as tmp:\n f.write(tmp.read() + '\\n')\n f.write(':-end_bg.\\n')\n f.write(':-begin_in_pos.\\n')\n for x in pos:\n x = x[4:].replace('))',')')\n f.write(x + '\\n')\n f.write(':-end_in_pos.\\n')\n f.write(':-begin_in_neg.\\n')\n for x in neg:\n x = x[4:].replace('))',')')\n f.write(x + '\\n')\n f.write(':-end_in_neg.\\n')\n\ndef gen_list_data():\n probs = []\n probs += [('dropk', gen_data.DropK)]\n probs += [('droplast', gen_data.DropLast)]\n probs += [('evens', gen_data.Evens)]\n probs += [('finddup', gen_data.FindDupl)]\n probs += [('last', gen_data.Last)]\n probs += [('len', gen_data.Len)]\n probs += [('member', gen_data.Member)]\n probs += [('sorted', gen_data.Sorted)]\n probs += [('sumlist', gen_data.SumList)]\n\n for (task, _) in probs:\n with open(f'data/{task}/all-bias.pl', 'w') as f:\n with open(f'data/bias-list.pl') as tmp:\n for line in tmp:\n f.write(line)\n f.write('\\n')\n with open(f'data/{task}/bias.pl') as tmp:\n for line in tmp:\n f.write(line)\n\n for (task, x) in probs:\n pathlib.Path(f'{path}/data/{task}/data/train/').mkdir(parents=True, exist_ok=True)\n pathlib.Path(f'{path}/data/{task}/data/test/').mkdir(parents=True, exist_ok=True)\n pathlib.Path(f'{path}/data/{task}/data/programs').mkdir(parents=True, exist_ok=True)\n pathlib.Path(f'{path}/data/{task}/data/results').mkdir(parents=True, exist_ok=True)\n for trial in TRIALS:\n print(get_time(),f'GEN DATA: task:{task}\\t trial:{trial}')\n # TRAIN DATA\n train_ex_file = f'{path}/data/{task}/data/train/{trial}.pl'\n train_pos = [x.gen_pos() for i in range(NUM_TRAIN_EXAMPLES)]\n train_neg = [x.gen_neg() for i in range(NUM_TRAIN_EXAMPLES)]\n with open(train_ex_file, 'w') as f:\n for ex in train_pos:\n f.write(f'pos({ex}).\\n')\n for ex in train_neg:\n f.write(f'neg({ex}).\\n')\n\n # TEST DATA\n train_ex_file = f'{path}/data/{task}/data/test/{trial}.pl'\n test_pos = [x.gen_pos() for i in range(NUM_TEST_EXAMPLES)]\n test_neg = [x.gen_neg() for i in range(NUM_TEST_EXAMPLES)]\n with open(train_ex_file, 'w') as f:\n for ex in test_pos:\n f.write(f'pos({ex}).\\n')\n for ex in test_neg:\n f.write(f'neg({ex}).\\n')\n\n # WRITE ALEPH INPUT\n pathlib.Path(f'{path}/data/{task}/data/aleph/').mkdir(parents=True, exist_ok=True)\n train_ex_file = f'{path}/data/{task}/data/aleph/{trial}.pl'\n with open(train_ex_file, 'w') as f:\n # read general aleph settings\n with open('aleph-lists.pl') as tmp:\n f.write(tmp.read() + '\\n')\n # read task-specific aleph settings\n with open(f'{path}/data/{task}/aleph.pl') as tmp:\n f.write(tmp.read() + '\\n')\n f.write(':-begin_bg.\\n')\n with open('data/bk-lists.pl') as tmp:\n f.write(tmp.read() + '\\n')\n f.write(':-end_bg.\\n')\n f.write(':-begin_in_pos.\\n')\n for ex in train_pos:\n f.write(ex + '.\\n')\n f.write(':-end_in_pos.\\n')\n f.write(':-begin_in_neg.\\n')\n for ex in train_neg:\n 
f.write(ex + '.\\n')\n f.write(':-end_in_neg.\\n')\n\n\n\n\ndef partition(xs, rate=80):\n k = int((len(xs) / 100)*rate)\n return xs[:k], xs[k:]\n\ndef gen_train_data():\n probs = []\n probs += ['trains1']\n probs += ['trains2']\n probs += ['trains3']\n probs += ['trains4']\n for task in probs:\n pos = []\n neg = []\n with open(f'data/{task}/exs.pl') as f:\n for line in f:\n line = line.strip()\n if line.startswith('pos'):\n pos.append(line)\n elif line.startswith('neg'):\n neg.append(line)\n\n for trial in TRIALS:\n random.shuffle(pos)\n random.shuffle(neg)\n\n train_pos, test_pos = partition(pos)\n train_neg, test_neg = partition(neg)\n\n path = f'data/{task}/data/train/'\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n with open(f'{path}/{trial}.pl', 'w') as f:\n for x in train_pos + train_neg:\n f.write(x + '\\n')\n path = f'data/{task}/data/test/'\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n with open(f'{path}/{trial}.pl', 'w') as f:\n for x in test_pos + test_neg:\n f.write(x + '\\n')\n\n path = f'data/{task}/data/aleph/'\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n # ALEPH\n gen_aleph_input(pos, neg, 'data/bk-trains.pl', f'data/aleph-trains.pl', f'{path}/{trial}.pl',)\n\n\n\ndef get_prog_file(settings):\n path = f'data/{settings.task}/programs'\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n fname = f'{path}/{settings.name}-{settings.trial}.csv'\n return fname\n\ndef save_prog(settings, prog, duration):\n fname = get_prog_file(settings)\n with open(fname, 'w') as f:\n if prog != None:\n for rule in prog:\n if rule[-1] != '.':\n rule += '.'\n f.write(rule + '\\n')\n f.write(f'%time,{duration}\\n')\n\ndef save_res(settings, tp, fn, tn, fp):\n path = f'data/{settings.task}/results/'\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n fname = f'{path}/{settings.name}-{settings.trial}.csv'\n with open(fname, 'w') as f:\n f.write(f'{tp}, {fn}, {tn}, {fp}')\n\n# def test_(settings):\n# prolog = Prolog()\n# prolog.consult(settings.ex_file)\n# prolog.consult(settings.bk_file)\n# prolog.consult('test.pl')\n# prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')\n# res = list(prolog.query('do_test(TP,FN,TN,FP)'))[0]\n# print(settings.name, settings.trial, res)\n# save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])\n\ndef test_(settings):\n from multiprocessing import Process\n p = Process(target=test__, args=(settings,))\n p.start()\n p.join()\n\ndef test__(settings):\n prolog = Prolog()\n prolog.consult(settings.ex_file)\n prolog.consult(settings.bk_file)\n prolog.consult('test.pl')\n prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')\n res = list(prolog.query('do_test(TP,FN,TN,FP)'))[0]\n save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])\n\n\n# def test_(settings):\n# # prolog = Prolog()\n# load_files = [settings.ex_file, settings.bk_file, 'test.pl', f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv']\n# cmd = 'do_test(TP,FN,TN,FP),halt.'\n# print(settings.name, settings.trial, res)\n# # save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])\n\n\ndef train_(settings):\n t1 = time.time()\n prog = runner(settings)\n t2 = time.time()\n if prog != None:\n prog = prog_to_code(prog)\n save_prog(settings, prog, t2-t1)\n\ndef train_lists(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/train/{settings.trial}.pl'\n settings.bias_file = f'{path}/data/{settings.task}/all-bias.pl'\n settings.bk_file = 
f'{path}/data/bk-lists.pl'\n train_(settings)\n\ndef train_iggp(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/train/exs.pl'\n settings.bias_file = f'{path}/data/{settings.task}/bias.pl'\n settings.bk_file = f'{path}/data/{settings.task}/data/train/bk.pl'\n train_(settings)\n\ndef train_trains(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/train/{settings.trial}.pl'\n settings.bias_file = f'{path}/data/bias-trains.pl'\n settings.bk_file = f'{path}/data/bk-trains.pl'\n train_(settings)\n\ndef test_lists(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/test/{settings.trial}.pl'\n settings.bias_file = f'{path}/data/{settings.task}/all-bias.pl'\n settings.bk_file = f'{path}/data/bk-lists.pl'\n\ndef test_iggp(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/test/exs.pl'\n settings.bias_file = f'{path}/data/{settings.task}/bias.pl'\n settings.bk_file = f'{path}/data/{settings.task}/data/test/bk.pl'\n\ndef test_trains(settings):\n settings.ex_file = f'{path}/data/{settings.task}/data/test/{settings.trial}.pl'\n settings.bias_file = f'{path}/data/{settings.task}/bias.pl'\n settings.bk_file = f'{path}/data/bk-trains.pl'\n\ndef get_metagol_settings(task, trial):\n settings = Settings(cmd_line=False)\n settings.task = task\n settings.trial = trial\n settings.name = 'metagol'\n return settings\n\ndef get_aleph_settings(task, trial):\n settings = Settings(cmd_line=False)\n settings.task = task\n settings.trial = trial\n settings.name = 'aleph'\n return settings\n\ndef get_settings(trial, task, baseline=False, constraints=True, chunking=True, lazy=True, optimistic=False):\n settings = Settings(cmd_line=False)\n settings.eval_timeout = 0.001\n settings.timeout = TIMEOUT\n settings.trial = trial\n settings.task = task\n settings.baseline = baseline\n settings.constraints = constraints\n settings.chunking = chunking\n settings.lazy = lazy\n settings.optimistic = optimistic\n if baseline:\n settings.name = f'popper'\n elif optimistic:\n settings.name = f'optimistic'\n elif constraints == False:\n settings.name = f'dumb'\n elif chunking == False:\n settings.name = 'no-chunking'\n elif lazy == False:\n settings.name = 'no-eagerness'\n else:\n settings.name = f'dcc'\n return settings\n\ndef train_popper(settings):\n if 'iggp' in settings.task:\n train_iggp(settings)\n elif 'train' in settings.task:\n train_trains(settings)\n else:\n train_lists(settings)\n\ndef test_popper(settings):\n print(get_time(),f'TEST: task:{settings.task}\\t task:{settings.name}\\t trial:{settings.trial}')\n if 'iggp' in settings.task:\n test_iggp(settings)\n elif 'train' in settings.task:\n test_trains(settings)\n else:\n test_lists(settings)\n test_(settings)\n\ndef myround(x):\n if x < 1:\n x = round(x,1)\n if x == 0:\n return 0\n return x\n return int(x)\n\ndef get_predictions(settings, stuff):\n prolog = Prolog()\n prolog.consult(settings.ex_file)\n prolog.consult(settings.bk_file)\n prolog.consult('test.pl')\n prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')\n res = list(prolog.query('get_predictions(S0,S1)'))[0]\n stuff['stuff'] = res['S0']+res['S1']\n\ndef get_acc_array(task, settings, trial):\n with multiprocessing.Manager() as manager:\n l = manager.dict()\n l['stuff'] = None\n p = Process(target=get_predictions, args=(settings, l))\n p.start()\n p.join()\n # print(l['stuff'])\n return l['stuff']\n\ndef get_accs(task, settings):\n path = f'data/{task}/results/'\n accs = []\n for trial in TRIALS:\n fname = 
f'{path}/{settings.name}-{trial}.csv'\n with open(fname) as f:\n for line in f:\n xs = line.split(',')\n if len(xs) > 1:\n # print(task, settings.name, line)\n tp, fn, tn, fp = int(xs[0]), int(xs[1]), int(xs[2]), int(xs[3])\n # print(tp,fn,tn,fp)\n # accs.append(tp / (tp+fn))\n accs.append((tp + tn) / (tp+fn+tn+fp))\n return int(np.mean(accs)*100), int(stats.sem(accs)*100)\n\ndef get_time_(task, settings, trial):\n settings.task = task\n settings.trial = trial\n fname = get_prog_file(settings)\n with open(fname) as f:\n for line in f:\n if line.startswith('%time'):\n return float(line.split(',')[1])\n\ndef get_times(task, settings):\n settings.task = task\n times = []\n for trial in TRIALS:\n settings.trial = trial\n fname = get_prog_file(settings)\n with open(fname) as f:\n for line in f:\n if line.startswith('%time'):\n times.append(float(line.split(',')[1]))\n return myround(np.mean(times)), myround(stats.sem(times))\n\ndef print_rows(systems, func):\n for task in TASKS:\n x = '\\\\tw{' + task + '}'\n for system in systems:\n value,err = func(task, system)\n x += f' & {value} $\\pm$ {err}'\n x+= ' \\\\\\\\'\n print(x)\n\n# def tests():\n# # dcc\n# x = get_settings(1, 1)\n# # popper\n# # y = get_settings(1, 1, baseline=True)\n# # no constraints\n# # y = get_settings(1, 1, constraints=False)\n# # lazy\n# # y = get_settings(1, 1, lazy=False)\n# # compression\n# y = get_settings(1, 1, chunking=False)\n# accs = {system:[] for system in [x, y]}\n# times = {system:[] for system in [x, y]}\n# for system in [x, y]:\n# for task in TASKS:\n# for trial in TRIALS:\n# acc = get_acc_(task, system, trial)\n# time = get_time_(task, system, trial)\n# accs[system].append(acc)\n# times[system].append(time)\n# # xs = accs[x]\n# # ys = accs[y]\n\n# # McN = math.pow((b-c),2) / (b+c)\n# # print(f'accuracies p-value: {1-stats.chi2.cdf(McN,1):.3f}')\n\n# xs = times[x]\n# ys = times[y]\n# print(xs)\n# print(ys)\n# res = stats.ttest_rel(xs, ys)\n# print(f'times p-value: {res.pvalue:.5f}')\n\ndef tests():\n # dcc\n x = get_settings(1, 1)\n # popper\n y = get_settings(1, 1, baseline=True)\n # no constraints\n # y = get_settings(1, 1, constraints=False)\n # lazy\n # y = get_settings(1, 1, lazy=False)\n # compression\n # y = get_settings(1, 1, chunking=False)\n\n\n # TRIALS = [2]\n # T\n # ACCS\n predictions = {system:[] for system in [x, y]}\n for settings in [x, y]:\n for task in TASKS:\n settings.task = task\n for trial in TRIALS:\n settings.trial = trial\n if 'iggp' in task:\n test_iggp(settings)\n elif 'train' in task:\n test_trains(settings)\n else:\n test_lists(settings)\n predictions[settings].extend(get_acc_array(task, settings, trial))\n xs = predictions[x]\n ys = predictions[y]\n print('xs',xs)\n print('ys',ys)\n b = sum(1.0 for (x, y) in zip(xs, ys) if x == 1 and y == 0)\n c = sum(1.0 for (x, y) in zip(xs, ys) if x == 0 and y == 1)\n print(b, c)\n McN = math.pow((b-c),2) / (b+c)\n print(f'accuracies p-value: {1-stats.chi2.cdf(McN,1):.3f}')\n\n\n # times = {system:[] for system in [x, y]}\n # TIMES\n # for system in [x, y]:\n # for task in TASKS:\n # for trial in TRIALS:\n # time = get_time_(task, system, trial)\n # times[system].append(time)\n\n # xs = times[x]\n # ys = times[y]\n # print(xs)\n # print(ys)\n # res = stats.ttest_rel(xs, ys)\n # print(f'times p-value: {res.pvalue:.5f}')\n\ndef print_results1():\n systems = []\n # dcc\n systems.append(get_settings(1, 1))\n # popper\n systems.append(get_settings(1, 1, baseline=True))\n # aleph\n systems.append(get_aleph_settings(1, 1))\n # metagol\n 
systems.append(get_metagol_settings(1, 1))\n print_rows(systems, get_accs)\n print('TIMES'*10)\n print_rows(systems, get_times)\n\ndef print_results2():\n systems = []\n # dcc\n systems.append(get_settings(1, 1))\n # optimistic dcc\n systems.append(get_settings(1, 1, optimistic=True))\n # dcc without constraints\n systems.append(get_settings(1, 1, constraints=False))\n # dcc without lazy coverage\n systems.append(get_settings(1, 1, lazy=False))\n # dcc without chunking\n systems.append(get_settings(1, 1, chunking=False))\n\n print_rows(systems, get_accs)\n print('TIMES'*10)\n print_rows(systems, get_times)\n\ndef train_aux(job):\n print(get_time(), f'TRAIN {job.name}: task:{job.task}\\t trial:{job.trial}')\n if job.name == 'metagol':\n train_metagol(job)\n elif job.name == 'aleph':\n train_aleph(job)\n else:\n train_popper(job)\n\ndef do_it(mode):\n jobs = []\n\n for trial in TRIALS:\n for task in TASKS:\n # aleph\n jobs.append(get_aleph_settings(task, trial))\n # metagol\n jobs.append(get_metagol_settings(task, trial))\n # popper\n jobs.append(get_settings(trial, task, baseline=True))\n # dcc\n jobs.append(get_settings(trial, task))\n # optimistic dcc\n jobs.append(get_settings(trial, task, optimistic=True))\n # dcc without constraints\n jobs.append(get_settings(trial, task, constraints=False))\n # dcc without chunking\n jobs.append(get_settings(trial, task, chunking=False))\n # dcc without lazy coverage\n jobs.append(get_settings(trial, task, lazy=False))\n if mode == 'train':\n partmap(train_aux, jobs)\n elif mode == 'test':\n print('testing')\n partmap(test_popper, jobs)\n elif mode == 'results':\n print_results1()\n print('----------')\n print_results2()\n \nif __name__ == '__main__':\n # pass\n x = sys.argv[1]\n if x == 'gen':\n gen_train_data()\n gen_list_data()\n\n do_it(x)" ]
[ [ "scipy.stats.chi2.cdf", "scipy.stats.sem", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
oojo12/aavae
[ "dd92797d34c8f0cbd4d7d0bb846f0a9f32f5e189" ]
[ "src/models/projection.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass ProjectionHeadAE(nn.Module):\n def __init__(self, input_dim=2048, hidden_dim=2048, output_dim=128):\n super(ProjectionHeadAE, self).__init__()\n\n self.projection_head = nn.Sequential(\n nn.Linear(input_dim, hidden_dim, bias=True),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, output_dim, bias=False)\n )\n\n def forward(self, x):\n return self.projection_head(x)\n\n\nclass ProjectionHeadVAE(nn.Module):\n def __init__(self, input_dim=2048, hidden_dim=2048, output_dim=128):\n super(ProjectionHeadVAE, self).__init__()\n\n self.first_layer = nn.Sequential(\n nn.Linear(input_dim, hidden_dim, bias=True),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(),\n )\n\n self.mu = nn.Linear(hidden_dim, output_dim, bias=False)\n self.logvar = nn.Linear(hidden_dim, output_dim, bias=False)\n\n def forward(self, x):\n x = self.first_layer(x)\n return self.mu(x), self.logvar(x)\n" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.BatchNorm1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
borgwang/reinforce_py
[ "41f67327ae7e1bf87d4648e3ea5f406466c532c9" ]
[ "algorithms/A3C/atari/atari_env_deprecated.py" ]
[ "import os\n\nimport gym\nimport numpy as np\n\nfrom skimage.color import rgb2gray\nfrom skimage.transform import resize\n\n\nclass Atari(object):\n s_dim = [84, 84, 1]\n a_dim = 3\n\n def __init__(self, args, record_video=False):\n self.env = gym.make('BreakoutNoFrameskip-v4')\n self.ale = self.env.env.ale # ale interface\n if record_video:\n video_dir = os.path.join(args.save_path, 'videos')\n if not os.path.exists(video_dir):\n os.makedirs(video_dir)\n self.env = gym.wrappers.Monitor(\n self.env, video_dir, video_callable=lambda x: True, resume=True)\n self.ale = self.env.env.env.ale\n\n self.screen_size = Atari.s_dim[:2] # 84x84\n self.noop_max = 30\n self.frame_skip = 4\n self.frame_feq = 4\n self.s_dim = Atari.s_dim\n self.a_dim = Atari.a_dim\n\n self.action_space = [1, 2, 3] # Breakout specify\n self.done = True\n\n def new_round(self):\n if not self.done: # dead but not done\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n obs = self.preprocess(obs)\n else: # terminal\n self.env.reset()\n # No-op\n for _ in range(np.random.randint(1, self.noop_max + 1)):\n obs, _, done, _ = self.env.step(0)\n obs = self.preprocess(obs)\n return obs\n\n def preprocess(self, observ):\n return resize(rgb2gray(observ), self.screen_size)\n\n def step(self, action):\n observ, reward, dead = None, 0, False\n for _ in range(self.frame_skip):\n lives_before = self.ale.lives()\n o, r, self.done, _ = self.env.step(self.action_space[action])\n lives_after = self.ale.lives()\n reward += r\n if lives_before > lives_after:\n dead = True\n break\n observ = self.preprocess(o)\n observ = np.reshape(observ, newshape=self.screen_size + [1])\n self.state = np.append(self.state[:, :, 1:], observ, axis=2)\n\n return self.state, reward, dead, self.done\n" ]
[ [ "numpy.reshape", "numpy.append", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nicrie/xeofs
[ "4c0ed49b45794ce0abb641c98b82638b2faa4828" ]
[ "tests/models/test_rotator_wrapper.py" ]
[ "import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport pytest\n\nfrom xeofs.models.eof import EOF\nfrom xeofs.pandas.eof import EOF as pdEOF\nfrom xeofs.xarray.eof import EOF as xrEOF\nfrom xeofs.models.rotator import Rotator\nfrom xeofs.pandas.rotator import Rotator as pdRotator\nfrom xeofs.xarray.rotator import Rotator as xrRotator\n\n\[email protected]('n_rot, power, scaling', [\n (2, 1, 0),\n (5, 1, 1),\n (7, 1, 2),\n (2, 2, 0),\n (5, 2, 1),\n (7, 2, 2),\n])\ndef test_wrapper_solutions(n_rot, power, scaling, sample_array):\n # Solutions of numpy, pandas and xarray wrapper are the same\n X = sample_array\n df = pd.DataFrame(X)\n da = xr.DataArray(X)\n # Perform analysis with all three wrappers\n numpy_model = EOF(X)\n numpy_model.solve()\n numpy_rot = Rotator(numpy_model, n_rot=n_rot, power=power)\n\n pandas_model = pdEOF(df)\n pandas_model.solve()\n pandas_rot = pdRotator(pandas_model, n_rot=n_rot, power=power)\n\n xarray_model = xrEOF(da, dim='dim_0')\n xarray_model.solve()\n xarray_rot = xrRotator(xarray_model, n_rot=n_rot, power=power)\n\n # Explained variance\n desired_expvar = numpy_rot.explained_variance()\n actual_pandas_expvar = pandas_rot.explained_variance().squeeze()\n actual_xarray_expvar = xarray_rot.explained_variance()\n # Explained variance ratio\n desired_expvar_ratio = numpy_rot.explained_variance_ratio()\n actual_pandas_expvar_ratio = pandas_rot.explained_variance_ratio().squeeze()\n actual_xarray_expvar_ratio = xarray_rot.explained_variance_ratio()\n # PCs\n desired_pcs = numpy_rot.pcs(scaling=scaling)\n actual_pandas_pcs = pandas_rot.pcs(scaling=scaling).values\n actual_xarray_pcs = xarray_rot.pcs(scaling=scaling).values\n # EOFs\n desired_eofs = numpy_rot.eofs(scaling=scaling)\n actual_pandas_eofs = pandas_rot.eofs(scaling=scaling).values\n actual_xarray_eofs = xarray_rot.eofs(scaling=scaling).values\n # EOFs as correlation\n desired_eofs_corr = numpy_rot.eofs_as_correlation()\n actual_pandas_eofs_corr = pandas_rot.eofs_as_correlation()\n actual_xarray_eofs_corr = xarray_rot.eofs_as_correlation()\n # Reconstructed X\n desired_Xrec = numpy_rot.reconstruct_X()\n actual_pandas_Xrec = pandas_rot.reconstruct_X()\n actual_xarray_Xrec = xarray_rot.reconstruct_X()\n # Projection onto EOFs\n desired_proj = numpy_rot.project_onto_eofs(X, scaling=scaling)\n actual_pandas_proj = pandas_rot.project_onto_eofs(df, scaling=scaling)\n actual_xarray_proj = xarray_rot.project_onto_eofs(da, scaling=scaling)\n\n np.testing.assert_allclose(actual_pandas_expvar, desired_expvar)\n np.testing.assert_allclose(actual_pandas_expvar_ratio, desired_expvar_ratio)\n np.testing.assert_allclose(actual_pandas_pcs, desired_pcs)\n np.testing.assert_allclose(actual_pandas_eofs, desired_eofs)\n np.testing.assert_allclose(actual_pandas_eofs_corr[0], desired_eofs_corr[0])\n np.testing.assert_allclose(actual_pandas_eofs_corr[1], desired_eofs_corr[1])\n np.testing.assert_allclose(actual_pandas_Xrec, desired_Xrec)\n np.testing.assert_allclose(actual_pandas_proj, desired_proj)\n\n np.testing.assert_allclose(actual_xarray_expvar, desired_expvar)\n np.testing.assert_allclose(actual_xarray_expvar_ratio, desired_expvar_ratio)\n np.testing.assert_allclose(actual_xarray_pcs, desired_pcs)\n np.testing.assert_allclose(actual_xarray_eofs, desired_eofs)\n np.testing.assert_allclose(actual_xarray_eofs_corr[0], desired_eofs_corr[0])\n np.testing.assert_allclose(actual_xarray_eofs_corr[1], desired_eofs_corr[1])\n np.testing.assert_allclose(actual_xarray_Xrec, desired_Xrec)\n 
np.testing.assert_allclose(actual_xarray_proj, desired_proj)\n\n\[email protected]('n_rot, power, scaling', [\n (2, 1, 0),\n (5, 1, 1),\n (7, 1, 2),\n (2, 2, 0),\n (5, 2, 1),\n (7, 2, 2),\n])\ndef test_wrapper_multivariate_solutions(n_rot, power, scaling, sample_array):\n # Solutions of numpy, pandas and xarray wrapper are the same\n X = sample_array\n X1 = X[:, :10]\n X2 = X[:, 10:]\n df1 = pd.DataFrame(X1)\n df2 = pd.DataFrame(X2)\n da1 = xr.DataArray(X1)\n da2 = xr.DataArray(X2)\n # Perform analysis with all three wrappers\n numpy_model = EOF([X1, X2])\n numpy_model.solve()\n numpy_rot = Rotator(numpy_model, n_rot=n_rot, power=power)\n\n pandas_model = pdEOF([df1, df2])\n pandas_model.solve()\n pandas_rot = pdRotator(pandas_model, n_rot=n_rot, power=power)\n\n xarray_model = xrEOF([da1, da2], dim='dim_0')\n xarray_model.solve()\n xarray_rot = xrRotator(xarray_model, n_rot=n_rot, power=power)\n\n # Explained variance\n desired_expvar = numpy_rot.explained_variance()\n actual_pandas_expvar = pandas_rot.explained_variance().squeeze()\n actual_xarray_expvar = xarray_rot.explained_variance()\n # Explained variance ratio\n desired_expvar_ratio = numpy_rot.explained_variance_ratio()\n actual_pandas_expvar_ratio = pandas_rot.explained_variance_ratio().squeeze()\n actual_xarray_expvar_ratio = xarray_rot.explained_variance_ratio()\n # PCs\n desired_pcs = numpy_rot.pcs(scaling=scaling)\n actual_pandas_pcs = pandas_rot.pcs(scaling=scaling)\n actual_xarray_pcs = xarray_rot.pcs(scaling=scaling)\n # EOFs\n desired_eofs = numpy_rot.eofs(scaling=scaling)\n actual_pandas_eofs = pandas_rot.eofs(scaling=scaling)\n actual_xarray_eofs = xarray_rot.eofs(scaling=scaling)\n # EOFs as correlation\n desired_eofs_corr = numpy_rot.eofs_as_correlation()\n actual_pandas_eofs_corr = pandas_rot.eofs_as_correlation()\n actual_xarray_eofs_corr = xarray_rot.eofs_as_correlation()\n # Reconstructed X\n desired_Xrec = numpy_rot.reconstruct_X()\n actual_pandas_Xrec = pandas_rot.reconstruct_X()\n actual_xarray_Xrec = xarray_rot.reconstruct_X()\n # Projection onto EOFs\n desired_proj = numpy_rot.project_onto_eofs([X1, X2], scaling=scaling)\n actual_pandas_proj = pandas_rot.project_onto_eofs([df1, df2], scaling=scaling)\n actual_xarray_proj = xarray_rot.project_onto_eofs([da1, da2], scaling=scaling)\n\n np.testing.assert_allclose(actual_pandas_expvar, desired_expvar)\n np.testing.assert_allclose(actual_pandas_expvar_ratio, desired_expvar_ratio)\n np.testing.assert_allclose(actual_pandas_pcs, desired_pcs)\n np.testing.assert_allclose(actual_pandas_eofs[0], desired_eofs[0])\n np.testing.assert_allclose(actual_pandas_eofs[1], desired_eofs[1])\n np.testing.assert_allclose(actual_pandas_eofs_corr[0][0], desired_eofs_corr[0][0])\n np.testing.assert_allclose(actual_pandas_eofs_corr[0][1], desired_eofs_corr[0][1])\n np.testing.assert_allclose(actual_pandas_eofs_corr[1][0], desired_eofs_corr[1][0])\n np.testing.assert_allclose(actual_pandas_eofs_corr[1][1], desired_eofs_corr[1][1])\n np.testing.assert_allclose(actual_pandas_Xrec[0], desired_Xrec[0])\n np.testing.assert_allclose(actual_pandas_Xrec[1], desired_Xrec[1])\n np.testing.assert_allclose(actual_pandas_proj, desired_proj)\n\n np.testing.assert_allclose(actual_xarray_expvar, desired_expvar)\n np.testing.assert_allclose(actual_xarray_expvar_ratio, desired_expvar_ratio)\n np.testing.assert_allclose(actual_xarray_pcs, desired_pcs)\n np.testing.assert_allclose(actual_xarray_eofs[0], desired_eofs[0])\n np.testing.assert_allclose(actual_xarray_eofs[1], desired_eofs[1])\n 
np.testing.assert_allclose(actual_xarray_eofs_corr[0][0], desired_eofs_corr[0][0])\n np.testing.assert_allclose(actual_xarray_eofs_corr[0][1], desired_eofs_corr[0][1])\n np.testing.assert_allclose(actual_xarray_eofs_corr[1][0], desired_eofs_corr[1][0])\n np.testing.assert_allclose(actual_xarray_eofs_corr[1][1], desired_eofs_corr[1][1])\n np.testing.assert_allclose(actual_xarray_Xrec[0], desired_Xrec[0])\n np.testing.assert_allclose(actual_xarray_Xrec[1], desired_Xrec[1])\n np.testing.assert_allclose(actual_xarray_proj, desired_proj)\n" ]
[ [ "pandas.DataFrame", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
safecloud-project/recast
[ "e9138580594569cbbc7d325e8cd4b1740667edac" ]
[ "pyproxy/pyproxy/metadata.py" ]
[ "\"\"\"\nMetadata management for the files and blocks stored in playcloud\n\"\"\"\nimport datetime\nimport logging\nimport json\nimport random\nimport socket\nimport time\n\nimport enum\nimport IPy\nimport numpy\nimport redis\n\nLOGGER = logging.getLogger(\"metadata\")\n\ndef compute_block_key(path, index, length=2):\n \"\"\"\n Computes a block key from a file path and an index.\n Args:\n path(str): Path to the file related to the blocloggerk\n index(int): index of the block\n length(int, optional): Length of the index part of the key for zero filling (Defaults to 2)\n Returns:\n str: Block key\n \"\"\"\n return path + \"-\" + str(index).zfill(length)\n\ndef uniform_random_selection(t, n):\n \"\"\"\n Args:\n t(int): Number of pointers\n n(int): Number of blocks avaialable\n Returns:\n list(int): A list of indices that can be used for random selection of t\n elements in a list of n elements\n \"\"\"\n if t >= n:\n return [index for index in xrange(n)]\n difference = n - t\n if difference < t:\n selected = [index for index in xrange(n)]\n while len(selected) > t:\n selected.pop(random.randint(0, len(selected) - 1))\n return selected\n selected = []\n while len(selected) < t:\n index = random.randint(0, n - 1)\n if index not in selected:\n selected.append(index)\n return selected\n\ndef normal_selection(t, n, std=1000):\n \"\"\"\n Select pointer indices using normal distribution\n Args:\n t(int): Number of pointers\n n(int): Number of blocks\n std(int, optional): Standard deviation (defaults to 1000)\n Returns:\n list(int): A list of unique indices ranging from 0 to (n - 1) selected\n using normal distribution\n \"\"\"\n selected = []\n if t >= n:\n return [element for element in xrange(n)]\n std = min(n, std)\n difference = n - t\n if difference < t:\n selected = [index for index in xrange(n)]\n while len(selected) > t:\n index = int(round(numpy.random.normal(len(selected), min(std, len(selected)))))\n if index < 0 or index >= len(selected): # Checking that we are withtin bounds\n continue\n selected.pop(index)\n return selected\n while len(selected) < t:\n index = int(round(numpy.random.normal(n, std)))\n if index < 0 or index >= n: # Checking that we are withtin bounds\n continue\n if index in selected:\n continue\n selected.append(index)\n return selected\n\n\nclass BlockType(enum.Enum):\n \"\"\"\n Informs on the type of block and its use where DATA blocks are needed for\n decoding while PARITY blocks are needed for reconstruction\n \"\"\"\n DATA = 0\n PARITY = 1\n\nclass MetaBlock(object):\n \"\"\"\n A class that represents a data block\n \"\"\"\n def __init__(self, key, providers=None, creation_date=None,\n block_type=BlockType.DATA, checksum=None, entangled_with=None,\n size=0):\n \"\"\"\n MetaBlock constructor\n Args:\n key (str): Key under which the block is stored\n providers (list(str), optional): Ids of the providers\n creation_date (datetime.datetime, optional): Time of creation of the\n block, defaults to\n current time\n block_type (BlockType, optional): Type of the block\n checksum (bytes, optional): SHA256 digest of the data\n entangled_with(list(str), optional): List of documents the block is\n entangled with\n \"\"\"\n self.key = key\n if providers:\n self.providers = providers\n else:\n self.providers = []\n if creation_date is None:\n self.creation_date = datetime.datetime.now()\n else:\n self.creation_date = creation_date\n self.block_type = block_type\n self.checksum = checksum\n if entangled_with:\n self.entangled_with = entangled_with\n else:\n self.entangled_with = 
[]\n self.size = size\n\n def __json__(self):\n \"\"\"\n Returns a representation of a MetaBlock as a serializable dictionary\n Returns:\n dict: Returns a representation of a MetaBlock as a serializable dictionary\n \"\"\"\n return {\n \"key\": self.key,\n \"providers\": [provider for provider in self.providers],\n \"creation_date\": self.creation_date.isoformat(),\n \"block_type\": self.block_type.name,\n \"checksum\": convert_binary_to_hex_digest(self.checksum),\n \"entangled_with\": self.entangled_with\n }\n\n def __str__(self):\n \"\"\"\n Returns a string representation of a Metadata object\n Return:\n str: a string representation of a Metadata object\n \"\"\"\n return json.dumps(self.__json__())\n\ndef convert_binary_to_hex_digest(binary_digest):\n \"\"\"\n Converts a binary digest from hashlib.sha256.digest\n Args:\n binary_digest(str): Binary digest from hashlib.sha256.digest()\n Returns:\n str: Equivalent of the hexdigest for the same input\n \"\"\"\n return \"\".join([\"{:02x}\".format(ord(c)) for c in binary_digest])\n\nclass MetaDocument(object):\n \"\"\"\n A class describing how a file has been stored in the system\n \"\"\"\n\n def __init__(self, path, original_size=0):\n \"\"\"\n Constructor for Metadata objects\n Args:\n path(string): Path to the file in the system\n original_size(int): Original size of the file in bytes\n \"\"\"\n self.path = path\n self.creation_date = datetime.datetime.now()\n self.blocks = []\n self.entangling_blocks = []\n self.original_size = original_size\n\n def __json__(self):\n \"\"\"\n Returns a representation of a MetaBlock as a serializable dictionary\n Returns:\n dict: Returns a representation of a MetaBlock as a serializable dictionary\n \"\"\"\n return {\n \"path\": self.path,\n \"creation_date\": self.creation_date.isoformat(),\n \"blocks\": [block.__json__() for block in self.blocks],\n \"entangling_blocks\": self.entangling_blocks,\n \"original_size\": self.original_size\n }\n\n def __str__(self):\n \"\"\"\n \"\"\"\n return json.dumps(self.__json__())\n\ndef extract_entanglement_data(block_data):\n \"\"\"\n Extract and list the entangling information from the blocks header\n Args:\n block_data(str): A data block with an entanglement header\n Returns:\n list((str, int)): A list of the blocks used for entanglement\n \"\"\"\n header_delimiter = chr(29)\n pos = block_data.find(header_delimiter)\n if pos <= 0:\n return \"\"\n raw_header = block_data[:pos]\n formatted_header = json.loads(raw_header)\n return formatted_header\n\ndef extract_document_size(block_data):\n \"\"\"\n Args:\n block_data(str): A data block with an entanglement header\n Returns:\n int: The size of the original document\n \"\"\"\n if not block_data or not isinstance(block_data, str):\n raise ValueError(\"argument block_data must be a non-empty sequence of bytes\")\n header_delimiter = chr(29)\n start = block_data.find(header_delimiter) + 1\n end = block_data.find(header_delimiter, start)\n return int(block_data[start:end])\n\ndef split_blocks_field(path, joined_keys):\n pos = 0\n extracted_keys = []\n while pos != -1 and pos < len(joined_keys):\n start = joined_keys.find(path, pos)\n if start == -1:\n break\n end = joined_keys.find(\",\", start + len(path))\n if end == -1:\n extracted_keys.append(joined_keys[start:])\n else:\n extracted_keys.append(joined_keys[start:end])\n pos = end\n return extracted_keys\n\nclass Files(object):\n \"\"\"\n Represents metadata stored in the cluster\n \"\"\"\n FILE_PREFIX = \"files:\"\n BLOCK_PREFIX = \"blocks:\"\n READ_BUFFER_SIZE = 100\n 
CONNECTION_POOLS = {}\n\n @staticmethod\n def get_pool(host, port):\n \"\"\"\n Gets an existing connection pool to a given server or creates a new one\n Args:\n host(str): Host of the redis server\n port(int): Port number the resdis server is listening on\n Returns:\n BlockingConnectionPool: A blocking redis connection pool\n \"\"\"\n if not isinstance(host, (str, unicode)) or not host:\n raise ValueError(\"host argument must be a non empty string\")\n if not isinstance(port, int) or port <= 0 or port > 65535:\n raise ValueError(\"port argument must be an integer between 0 and 65535\")\n if not host in Files.CONNECTION_POOLS:\n Files.CONNECTION_POOLS[host] = {}\n if not port in Files.CONNECTION_POOLS[host]:\n pool = redis.ConnectionPool(host=host,\n port=port,\n db=0,\n max_connections=128,\n socket_keepalive=True)\n Files.CONNECTION_POOLS[host][port] = pool\n return Files.CONNECTION_POOLS[host][port]\n\n def __init__(self, host=\"metadata\", port=6379, pointer_selector=normal_selection):\n try:\n ip_address = str(IPy.IP(host))\n except ValueError:\n ip_address = socket.gethostbyname(host)\n pool = Files.get_pool(ip_address, port)\n self.redis = redis.StrictRedis(connection_pool=pool,\n encoding=None,\n socket_keepalive=True)\n self.select_pointers = pointer_selector\n\n def exists(self, path):\n \"\"\"\n Checks if a file is in the metadata\n Args:\n path(str): Path to the file\n Returns:\n bool: True if the file exists in the documents\n \"\"\"\n if not path or not isinstance(path, (str, unicode)):\n raise ValueError(\"path argument must be a non empty string\")\n return self.redis.exists(\"{:s}{:s}\".format(Files.FILE_PREFIX, path))\n\n def get(self, path):\n \"\"\"\n Returns a Metadata object stored under a given path.\n Args:\n path(str): The key the Metadata object was stored under\n Returns:\n MetaDocument: The Metadata object stored under the key\n Raises:\n ValueError: If path is an empty string\n \"\"\"\n if not path or not isinstance(path, (str, unicode)):\n raise ValueError(\"path argument must be a valid non-empty string\")\n return self.get_files([path])[0]\n\n def get_files(self, paths):\n \"\"\"\n Returns a Metadata object stored under a given path.\n Args:\n paths(list(str)): The key the Metadata object was stored under\n Returns:\n MetaDocument: The Metadata object stored under the key\n Raises:\n ValueError: If the paths argument is an empty list or if one of the\n paths is an empty string\n KeyError: If one of the paths requested does not exist\n \"\"\"\n if not paths:\n raise ValueError(\"path argument must be a valid list of string\")\n\n pipeline = self.redis.pipeline()\n translated_paths = []\n # Pipeline command to check that all paths exist\n for path in paths:\n if not path:\n raise ValueError(\"path in paths list must be a valid non-empty string\")\n file_key = \"{:s}{:s}\".format(Files.FILE_PREFIX, path)\n translated_paths.append(file_key)\n pipeline.exists(file_key)\n # Check result from pipelined exists requests to make sure that all the\n # paths exist and pipeline command to get file hashes\n for index, in_database in enumerate(pipeline.execute()):\n if not in_database:\n raise KeyError(\"path {:s} not found\".format(paths[index]))\n pipeline.hgetall(translated_paths[index])\n hashes = pipeline.execute()\n metadata = []\n keys = []\n for hsh in hashes:\n mtdt = Files.parse_metadata(hsh)\n path = hsh.get(\"path\")\n keys += split_blocks_field(path, hsh.get(\"blocks\"))\n metadata.append(mtdt)\n blocks = self.get_blocks(keys)\n step = len(blocks) / 
len(metadata)\n for index in xrange(0, len(metadata)):\n metadata[index].blocks = blocks[index * step:(index * step) + step]\n return sorted(metadata, key=lambda mtdt: mtdt.path)\n\n def get_block(self, key):\n \"\"\"\n Returns a block from the database\n Returns:\n key(str): The key under which a block is stored\n Returns:\n MetaBlock: The MetaBlock that was retrieved\n \"\"\"\n return self.get_blocks([key])[0]\n\n def get_blocks(self, keys):\n \"\"\"\n Returns multiple blocks from the database\n Args:\n keys(list(str)): A list of keys under witch the blocks to fetch are stored\n Returns:\n list(MetaBlock): The MetaBlocks that were retrieved\n Raises:\n KeyError: If one of the keys does not exist\n \"\"\"\n pipeline = self.redis.pipeline()\n translated_keys = [\"{:s}{:s}\".format(Files.BLOCK_PREFIX, key) for key in keys]\n for key in translated_keys:\n pipeline.exists(key)\n for index, is_in_database in enumerate(pipeline.execute()):\n if not is_in_database:\n raise KeyError(\"key {:s} not found ({:d} = {:s})\".format(keys[index], index, translated_keys[index]))\n pipeline.hgetall(translated_keys[index])\n blocks = [Files.parse_metablock(hsh) for hsh in pipeline.execute()]\n return sorted(blocks, key=lambda block: block.key)\n\n def put(self, path, metadata):\n \"\"\"\n Stores a Metadata object using the given path as the key\n Args:\n metadata(MetaDocument): The object to store\n Returns:\n str: The key under which the object was stored\n \"\"\"\n start = time.clock()\n if not path:\n raise ValueError(\"path argument must be a valid non-empty string\")\n if not metadata:\n raise ValueError(\"metadata argument must be a valid Metadata object\")\n entangling_block_keys = [compute_block_key(eb[0], eb[1]) for eb in metadata.entangling_blocks]\n entangling_blocks = self.get_blocks(entangling_block_keys)\n\n pipeline = self.redis.pipeline(transaction=True)\n for block in entangling_blocks:\n block.entangled_with.append(path)\n pipeline.hset(\"{:s}{:s}\".format(Files.BLOCK_PREFIX, block.key),\n \"entangled_with\",\n \",\".join(sorted(block.entangled_with)))\n meta_hash = {\n \"path\": metadata.path,\n \"creation_date\": str(metadata.creation_date),\n \"original_size\": metadata.original_size,\n \"blocks\": \",\".join([block.key for block in metadata.blocks]),\n \"entangling_blocks\": json.dumps(metadata.entangling_blocks)\n }\n block_keys = []\n for block in metadata.blocks:\n block_hash = {\n \"key\": block.key,\n \"creation_date\": str(block.creation_date),\n \"providers\": \",\".join(sorted(block.providers)),\n \"block_type\": block.block_type.name,\n \"checksum\": block.checksum,\n \"entangled_with\": \",\".join(sorted(block.entangled_with)),\n \"size\": block.size\n }\n metablock_key = \"{:s}{:s}\".format(Files.BLOCK_PREFIX, block.key)\n timestamp = (block.creation_date - datetime.datetime(1970, 1, 1)).total_seconds()\n block_keys.append(timestamp)\n block_keys.append(block.key)\n pipeline.hmset(metablock_key, block_hash)\n pipeline.zadd(\"block_index\", *block_keys)\n pipeline.hmset(\"files:{:s}\".format(path), meta_hash)\n timestamp = (metadata.creation_date - datetime.datetime(1970, 1, 1)).total_seconds()\n pipeline.zadd(\"file_index\", timestamp, path)\n pipeline.execute()\n end = time.clock()\n elapsed = end - start\n LOGGER.debug(\"Storing metadata for {:s} took {:f} seconds\".format(path, elapsed))\n return path\n\n @staticmethod\n def parse_metablock(record):\n \"\"\"\n Parses a metablock from an object\n Args:\n record(dict): A dictionary describing the metablock\n Returns:\n 
MetaBlock: The parsed MetaBlock\n \"\"\"\n key = record.get(\"key\")\n try:\n creation_date = datetime.datetime.strptime(record.get(\"creation_date\"),\"%Y-%m-%d %H:%M:%S.%f\")\n except:\n creation_date = datetime.datetime.strptime(record.get(\"creation_date\"),\"%Y-%m-%d %H:%M:%S\")\n providers = record.get(\"providers\").strip()\n if providers:\n providers = providers.split(\",\")\n else:\n providers = []\n\n entangled_with = record.get(\"entangled_with\", \"\").strip()\n if entangled_with:\n entangled_with = entangled_with.split(\",\")\n else:\n entangled_with = []\n\n block_type = BlockType[record.get(\"block_type\")]\n checksum = record.get(\"checksum\")\n size = int(record.get(\"size\", \"0\"))\n metablock = MetaBlock(key,\n creation_date=creation_date,\n providers=providers,\n block_type=block_type,\n checksum=checksum,\n entangled_with=entangled_with,\n size=size)\n return metablock\n\n @staticmethod\n def parse_metadata(record):\n \"\"\"\n Parses metadata information from a record.\n Args:\n record(dict): A dictionary describing the metadata\n Returns:\n MetaDocument: The parsed Metadata\n \"\"\"\n path = record.get(\"path\")\n original_size = int(record.get(\"original_size\"))\n creation_date = datetime.datetime.strptime(record.get(\"creation_date\"),\n \"%Y-%m-%d %H:%M:%S.%f\")\n metadata = MetaDocument(path, original_size=original_size)\n metadata.creation_date = creation_date\n metadata.entangling_blocks = json.loads(record.get(\"entangling_blocks\"))\n return metadata\n\n def keys(self):\n \"\"\"\n Returns a list of all the files stored in the system\n Returns:\n list(str): The list of files in the system\n \"\"\"\n return self.redis.zrange(\"file_index\", 0, -1)\n\n def list_blocks(self):\n \"\"\"\n Returns a list of the blocks in the system\n Returns:\n list(str): A list of all the blocks in the system\n \"\"\"\n return self.redis.zrange(\"block_index\", 0, -1)\n\n def values(self):\n \"\"\"\n Returns all files metadata objects\n Returns:\n list(MetaDocument): All the metadata object stored in the system\n \"\"\"\n filenames = self.keys()\n if not filenames:\n return []\n return self.get_files(filenames)\n\n def select_random_blocks(self, requested):\n \"\"\"\n Returns up to blocks_desired randomly selected metablocks from the index\n Args:\n requested(int): The number of random blocks to select\n Returns:\n list(MetaBlock): randomly selected blocks\n \"\"\"\n start = time.clock()\n blocks_desired = requested\n blocks_available = self.redis.zcard(\"block_index\")\n\n if blocks_available <= blocks_desired:\n block_keys = self.redis.zrange(\"block_index\", 0, blocks_available)\n return [self.get_block(key) for key in block_keys]\n\n selected_indexes = self.select_pointers(blocks_desired, blocks_available)\n\n selected_keys = []\n for index in selected_indexes:\n selected_key = self.redis.zrange(\"block_index\", index, index + 1)[0]\n selected_keys.append(selected_key)\n random_blocks = self.get_blocks(selected_keys)\n end = time.clock()\n elapsed = end - start\n LOGGER.debug(\"Took {:f} seconds to select random blocks\".format(elapsed))\n return random_blocks\n\n def get_entanglement_graph(self):\n \"\"\"\n Scan the database to return the entanglement graph\n Returns:\n dict(str, list): The entanglement graph\n \"\"\"\n graph = {}\n filenames = self.keys()\n for filename in filenames:\n metadata = self.get(filename)\n creation_date = str(metadata.creation_date)\n entangling_blocks = json.dumps(metadata.entangling_blocks)\n blocks = str([[block.key, block.providers[0]] for 
block in metadata.blocks])\n graph[filename] = [\n creation_date,\n entangling_blocks,\n blocks\n ]\n return graph\n\n def has_been_entangled_enough(self, block_key, pointers):\n \"\"\"\n Tests whether a block can have its replicas deleted due to a high enough\n number of pointers directed at it.\n Returns True if it is entangled with enough documents to have its replicas\n deleted, False otherwise.\n Args:\n block_key(str): Path of the block\n pointers(int): The number of documents that use the block as part of\n their entanglement\n Returns:\n bool: Whether the replicas of the block can be erased\n Raises:\n ValueError:\n * if path is not of type `str` or empty\n * if pointers is not of type `int` or is lower than 0\n \"\"\"\n if not block_key or not isinstance(block_key, str):\n raise ValueError(\"path argument must be a valid non-empty string\")\n if not isinstance(pointers, int) or pointers < 0:\n raise ValueError(\"pointers argument must be a valid integer greater or equal to 0\")\n metablock = self.get_block(block_key)\n return len(metablock.entangled_with) >= pointers\n\n def get_blocks_from_provider(self, provider):\n \"\"\"\n Returns the list of blocks located on a given provider\n Args:\n provider(str): Name of the provider\n Returns:\n list(MetaBlock): The list of blocks located on the provider\n \"\"\"\n if not isinstance(provider, str) or not provider:\n raise ValueError(\"provider argument must be a non empty string\")\n block_names = self.list_blocks()\n if not block_names:\n return []\n number_of_blocks = len(block_names)\n step = min(number_of_blocks, Files.READ_BUFFER_SIZE)\n blocks_from_provider = []\n for index in xrange(0, number_of_blocks, step):\n current_range = block_names[index:index + step]\n blocks = self.get_blocks(current_range)\n for block in blocks:\n if provider in block.providers:\n blocks_from_provider.append(block)\n return blocks_from_provider\n\n def get_number_of_blocks_available(self):\n \"\"\"\n Return the number of blocks in the system\n Returns:\n int: number of blocks in the system\n \"\"\"\n return self.redis.zcard(\"block_index\")\n" ]
[ [ "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MeteoSwiss/mwr_raw2l1
[ "6f8d8b80c203bcbd7f42a53b618ae63f321b68cb" ]
[ "mwr_raw2l1/readers/reader_rpg_base.py" ]
[ "import struct\n\nimport numpy as np\n\nfrom mwr_raw2l1.errors import FileTooLong, FileTooShort, MissingTimeInput, TimerefError, UnknownFileType\nfrom mwr_raw2l1.log import logger\nfrom mwr_raw2l1.readers.reader_rpg_helpers import interpret_angle, interpret_coord, interpret_time\nfrom mwr_raw2l1.utils.file_utils import get_binary\n\nBYTE_ORDER = '<' # byte order in all RPG files assumed little-endian #TODO: ask Harald whether this is true (PC/Unix)\n\nFILETYPE_CONFS = { # assign metadata to each known filecode\n # BRT files\n 666666: dict(type='brt', anglever=1, formatchar_angle='f'),\n 666667: dict(type='brt', anglever=1, formatchar_angle='f'),\n 666000: dict(type='brt', anglever=2, formatchar_angle='i'),\n 667000: dict(type='brt', anglever=2, formatchar_angle='i'),\n # BLB files\n 567845847: dict(type='blb', structver=1),\n 567845848: dict(type='blb', structver=2),\n # IRT files\n 671112495: dict(type='irt', structver=1),\n 671112496: dict(type='irt', structver=2, anglever=1, formatchar_angle='f'),\n 671112000: dict(type='irt', structver=2, anglever=2, formatchar_angle='i'),\n # MET files\n 599658943: dict(type='met', structver=1),\n 599658944: dict(type='met', structver=2),\n # HKD files\n 837854832: dict(type='hkd'),\n}\n\n\nclass BaseReader(object):\n def __init__(self, filename, accept_localtime=False):\n self.filename = filename\n self.accept_localtime = accept_localtime\n self.data = {}\n self.data_bin = None\n self.byte_offset = 0 # counter for consumed bytes, increased by each method\n self.filecode = None\n self.filestruct = None\n\n def run(self):\n \"\"\"do whole read-in from files and interpretation and checking of data\"\"\"\n logger.info('Reading data from ' + self.filename)\n self.data_bin = get_binary(self.filename)\n self.read() # fills self.data\n self.check_data()\n del self.data_bin # after read() all contents of data_bin have been interpreted to data\n del self.byte_offset # after read() has checked that all data have been read, this quantity is useless\n\n def read(self):\n \"\"\"read and interpret all data in self.data_bin\"\"\"\n\n # sequence must be preserved as self.byte_offset is increased by each method, hence they are all semi-private\n self._read_filecode()\n self.interpret_filecode()\n self._read_header()\n self.interpret_header()\n self._read_meas()\n self.interpret_raw_data()\n if self.byte_offset < len(self.data_bin):\n raise FileTooLong('Not all bytes consumed. Interpreted {} bytes during read-in but {} contains {}'.format(\n self.byte_offset, self.filename, len(self.data_bin)))\n\n def decode_binary(self, encoding_pattern, byte_order=BYTE_ORDER):\n \"\"\"decode next variables from binary stream and write to dict self.data + augment self.byte_offset\n\n Args:\n encoding_pattern: a list of tuples or lists containing the individual variable description\n e.g. 
[dict(name='n_meas', type='i', shape=(1,)), dict(name='Tb', type='f', shape=(n_freq,)), ...]\n \"\"\"\n for enc in encoding_pattern:\n full_type = byte_order + np.prod(enc['shape']) * enc['type']\n out = struct.unpack_from(full_type, self.data_bin, self.byte_offset)\n self.byte_offset += struct.calcsize(full_type) # multiplication with shape already done for full type\n\n if len(out) == 1: # extract from tuple if it has only one element, otherwise return tuple\n self.data[enc['name']] = out[0]\n else:\n self.data[enc['name']] = out\n\n def decode_binary_np(self, encoding_pattern, n_entries, byte_order=BYTE_ORDER):\n \"\"\"decode from binary stream via :class:`numpy.ndarray` to write to dict self.data + augment self.byte_offset\n\n Args:\n encoding_pattern: a list of tuples or lists containing the individual variable description for one time step\n e.g. [dict(name='time_raw', type='i', shape=(1,)), dict(name='Tb', type='f', shape=(n_freq,)), ...]\n \"\"\"\n dtype_np = np.dtype([(ep['name'], byte_order+ep['type'], ep['shape']) for ep in encoding_pattern])\n names = [ep['name'] for ep in encoding_pattern]\n bytes_per_var = np.array([struct.calcsize(ep['type']) * np.prod(ep['shape']) for ep in encoding_pattern])\n\n byte_offset_start = self.byte_offset\n n_bytes = bytes_per_var.sum() * n_entries\n self.byte_offset += n_bytes\n if len(self.data_bin) < self.byte_offset:\n err_msg = 'number of bytes in file {} does not match the one inferred from n_meas'.format(self.filename)\n logger.error(err_msg)\n raise FileTooShort(err_msg)\n\n arr = np.frombuffer(self.data_bin[byte_offset_start: self.byte_offset], dtype=dtype_np)\n for idx, name in enumerate(names):\n if encoding_pattern[idx]['shape'] == (1,): # variables which only have a time dimension shall not be 2d\n self.data[name] = arr[name].flatten()\n else:\n self.data[name] = arr[name]\n\n def interpret_filecode(self):\n \"\"\"assign configuration for read in of file with corresponding file code\"\"\"\n try:\n self.filestruct = FILETYPE_CONFS[self.filecode]\n except KeyError:\n raise UnknownFileType('reader not specified for files with filecode {:d} as used in {:s}'.format(\n self.filecode, self.filename))\n\n def interpret_header(self):\n \"\"\"interpret data read in with _read_header\"\"\"\n # transform frequency and ir_wavelength to 1D-numpy array for later import to xarray in Measurement class\n for var in ['frequency', 'ir_wavelength', 'scan_ele']:\n if var in self.data.keys():\n self.data[var] = np.array(self.data[var]).ravel()\n\n def interpret_raw_data(self):\n \"\"\"interpret data read in with _read_meas (e.g. 
get ele/azi from pointing code or datetime from timecode)\"\"\"\n # interpret time\n try: # assume data-dict in all subclasses contains time\n self.data['time'] = interpret_time(self.data['time_raw'])\n except KeyError as err:\n raise MissingTimeInput('Did not find {} in read-in data from file {}'.format(err, self.filename))\n\n # interpret ele/azi\n if 'pointing_raw' in self.data.keys():\n self.data['ele'], self.data['azi'] = interpret_angle(self.data['pointing_raw'], self.filestruct['anglever'])\n\n # interpret lat/lon\n for coord in ('lon_raw', 'lat_raw'):\n if coord in self.data.keys():\n self.data[coord[0:3]] = interpret_coord(self.data[coord])\n\n # set zeros in brightness temperatures to NaN\n for var in ('Tb', 'Tb_scan', 'IRT' 'tb', 'tb_scan', 'irt'):\n if var in self.data.keys():\n if (self.data[var] == 0).any():\n if self.data[var].flags['WRITEABLE'] is False: # make a copy if variable is not writeable\n self.data[var] = self.data[var].copy()\n self.data[var][self.data[var] == 0] = np.nan\n\n def check_data(self):\n \"\"\"general checks for the consistency of the data which can be applied to all file type readers\"\"\"\n if not self.accept_localtime and self.data['timeref'] == 0:\n raise TimerefError('Time encoded in local time but UTC required by \"accept_localtime\"')\n\n def _read_filecode(self):\n \"\"\"read filecode from binary data. first of the _read... methods to be executed (according to order in file)\"\"\"\n self.filecode = struct.unpack_from('<i', self.data_bin, self.byte_offset)[0]\n self.byte_offset += 4\n\n def _read_header(self):\n \"\"\"read header from binary data. second of the _read... methods to be executed (according to order in file)\"\"\"\n pass\n\n def _read_meas(self):\n \"\"\"read measurement from binary. third of the _read... methods to be executed (according to order in file)\"\"\"\n pass\n" ]
[ [ "numpy.prod", "numpy.frombuffer", "numpy.array", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wesselb/gpar
[ "70f5cb7cd2dec075e33dd7d9cd133b5bc1798777" ]
[ "examples/paper/ml_data/evaluate_neural_net.py" ]
[ "import numpy as np\nfrom keras.datasets import mnist\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l1_l2\nfrom keras.utils import np_utils\n\n\ndef build_model(params):\n n_hidden_layers = int(np.round(params[\"n_hidden_layers\"]))\n n_neurons = int(np.round(params[\"n_neurons\"]))\n log_l1_weight_reg = params[\"log_l1_weight_reg\"]\n log_l2_weight_reg = params[\"log_l2_weight_reg\"]\n prob_drop_out = float(params[\"prob_drop_out\"])\n log_l_rate = params[\"log_learning_rate\"]\n\n model = Sequential()\n model.add(\n Dense(\n n_neurons,\n input_shape=(784,),\n W_regularizer=l1_l2(\n l1=np.exp(log_l1_weight_reg), l2=np.exp(log_l2_weight_reg)\n ),\n )\n )\n model.add(Activation(\"relu\"))\n model.add(Dropout(prob_drop_out))\n for i in range(n_hidden_layers - 1):\n model.add(\n Dense(\n n_neurons,\n W_regularizer=l1_l2(\n l1=np.exp(log_l1_weight_reg), l2=np.exp(log_l2_weight_reg)\n ),\n )\n )\n model.add(Activation(\"relu\"))\n model.add(Dropout(prob_drop_out))\n n_classes = 10\n model.add(Dense(n_classes))\n model.add(Activation(\"softmax\"))\n\n adam = Adam(lr=np.exp(log_l_rate), beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n model.compile(loss=\"categorical_crossentropy\", optimizer=adam)\n\n return model\n\n\ndef fit_model(x_train, y_train, x_test, y_test, x_val, y_val, params):\n nb_epoch = 150\n batch_size = 4000\n model = build_model(params)\n history = model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=nb_epoch,\n verbose=2,\n validation_data=(x_val, y_val),\n )\n\n return history\n\n\ndef get_results(params):\n nb_classes = 10\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.reshape(60000, 784)\n x_test = x_test.reshape(10000, 784)\n x_train = x_train.astype(\"float32\")\n x_test = x_test.astype(\"float32\")\n x_train /= 255\n x_test /= 255\n\n state = np.random.get_state()\n np.random.seed(0)\n perm = np.random.permutation(60000)\n i_train = perm[0:50000]\n i_val = perm[50000:60000]\n np.random.set_state(state)\n\n x_val = x_train[i_val, :]\n y_val = y_train[i_val]\n x_train = x_train[i_train, :]\n y_train = y_train[i_train]\n\n y_train = np_utils.to_categorical(y_train, nb_classes)\n y_test = np_utils.to_categorical(y_test, nb_classes)\n y_val = np_utils.to_categorical(y_val, nb_classes)\n\n history = fit_model(x_train, y_train, x_test, y_test, x_val, y_val, params)\n\n results = {\n \"params\": params,\n \"val_loss\": history.history[\"val_loss\"],\n \"train_loss\": history.history[\"loss\"],\n \"epochs\": history.epoch,\n }\n\n return results\n\n\nif __name__ == \"__main__\":\n params = {\n \"n_hidden_layers\": 2,\n \"n_neurons\": 100,\n \"log_l1_weight_reg\": -10,\n \"log_l2_weight_reg\": -10,\n \"prob_drop_out\": 0.2,\n \"log_learning_rate\": -10,\n }\n get_results(params=params)\n" ]
[ [ "numpy.random.get_state", "numpy.random.seed", "numpy.round", "numpy.random.permutation", "numpy.random.set_state", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
moneypi/rcnn_mxnet_debug
[ "ee918c878ccee7877f87072f9a86218fa77de2f6" ]
[ "symdata/anchor.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport cv2\nfrom symdata.bbox import bbox_overlaps, bbox_transform\n\n\nclass AnchorGenerator:\n def __init__(self, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n self._num_anchors = len(anchor_scales) * len(anchor_ratios)\n self._feat_stride = feat_stride\n self._base_anchors = self._generate_base_anchors(feat_stride, np.array(anchor_scales), np.array(anchor_ratios))\n\n def generate(self, feat_height, feat_width):\n shift_x = np.arange(0, feat_width) * self._feat_stride\n shift_y = np.arange(0, feat_height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = self._num_anchors\n K = shifts.shape[0]\n all_anchors = self._base_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))\n all_anchors = all_anchors.reshape((K * A, 4))\n return all_anchors\n\n @staticmethod\n def _generate_base_anchors(base_size, scales, ratios):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)\n anchors = np.vstack([AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)\n for i in range(ratio_anchors.shape[0])])\n return anchors\n\n @staticmethod\n def _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n @staticmethod\n def _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\n @staticmethod\n def _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n @staticmethod\n def _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n 
\"\"\"\n w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\nclass AnchorSampler:\n def __init__(self, batch_rois=256, fg_fraction=0.5, fg_overlap=0.7, bg_overlap=0.3):\n self._num_batch = batch_rois\n self._num_fg = int(batch_rois * fg_fraction)\n self._fg_overlap = fg_overlap\n self._bg_overlap = bg_overlap\n\n def assign(self, img, anchors, gt_boxes, im_height, im_width):\n num_anchors = anchors.shape[0]\n\n # filter out padded gt_boxes\n valid_labels = np.where(gt_boxes[:, -1] > 0)[0]\n gt_boxes = gt_boxes[valid_labels]\n\n # filter out anchors outside the region\n inds_inside = np.where((anchors[:, 0] >= 0) &\n (anchors[:, 2] < im_width) &\n (anchors[:, 1] >= 0) &\n (anchors[:, 3] < im_height))[0]\n anchors = anchors[inds_inside, :]\n\n # for anchor in anchors:\n # tmp = img.copy()\n # cv2.rectangle(tmp, (int(anchor[0]), int(anchor[1])), (int(anchor[2]), int(anchor[3])),\n # (255, 0, 0), 1)\n # cv2.imshow(\"tmp\", tmp)\n # cv2.waitKey(10)\n\n # for gt_box in gt_boxes:\n # cv2.rectangle(img, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])),\n # (255, 0, 0), 1)\n # cv2.imshow(\"tmp\", img)\n # cv2.waitKey(200)\n\n num_valid = len(inds_inside)\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.ones((num_valid,), dtype=np.float32) * -1\n bbox_targets = np.zeros((num_valid, 4), dtype=np.float32)\n bbox_weights = np.zeros((num_valid, 4), dtype=np.float32)\n\n # sample for positive labels\n if gt_boxes.size > 0:\n # overlap between the anchors and the gt boxes\n # overlaps (ex, gt)\n overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))\n gt_max_overlaps = overlaps.max(axis=0)\n\n # fg anchors: anchor with highest overlap for each gt; or overlap > iou thresh\n fg_inds = np.where((overlaps >= self._fg_overlap) | (overlaps == gt_max_overlaps))[0]\n\n # subsample to num_fg\n if len(fg_inds) > self._num_fg:\n fg_inds = np.random.choice(fg_inds, size=self._num_fg, replace=False)\n\n # bg anchor: anchor with overlap < iou thresh but not highest overlap for some gt\n bg_inds = np.where((overlaps < self._bg_overlap) & (overlaps < gt_max_overlaps))[0]\n\n if len(bg_inds) > self._num_batch - len(fg_inds):\n bg_inds = np.random.choice(bg_inds, size=self._num_batch - len(fg_inds), replace=False)\n\n # assign label\n labels[fg_inds] = 1\n labels[bg_inds] = 0\n\n # assign to argmax overlap\n argmax_overlaps = overlaps.argmax(axis=1)\n\n # for anchor in anchors[bg_inds, :]:\n # tmp = img.copy()\n # cv2.rectangle(tmp, (int(anchor[0]), int(anchor[1])), (int(anchor[2]), int(anchor[3])),\n # (255, 0, 0), 1)\n # cv2.imshow(\"tmp2\", tmp)\n # cv2.waitKey(200)\n\n bbox_targets[fg_inds, :] = bbox_transform(anchors[fg_inds, :], gt_boxes[argmax_overlaps[fg_inds], :])\n\n # only fg anchors has bbox_targets\n bbox_weights[fg_inds, :] = 1\n else:\n # randomly draw bg anchors\n bg_inds = np.random.choice(np.arange(num_valid), size=self._num_batch, replace=False)\n labels[bg_inds] = 0\n\n all_labels = np.ones((num_anchors,), dtype=np.float32) * -1\n all_labels[inds_inside] = labels\n all_bbox_targets = np.zeros((num_anchors, 4), dtype=np.float32)\n all_bbox_targets[inds_inside, :] = bbox_targets\n all_bbox_weights = np.zeros((num_anchors, 4), dtype=np.float32)\n all_bbox_weights[inds_inside, :] = bbox_weights\n\n return all_labels, all_bbox_targets, all_bbox_weights\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.meshgrid", "numpy.random.choice", "numpy.arange", "numpy.ones", "numpy.round", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ShiminLei/LA-Dialog-Generation-System
[ "c44fb8399d75e9a7e0f08dd12d6c05e2e87653c4" ]
[ "meld-utt-skip.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\n\nimport torch\n\nfrom laed import evaluators, utt_skip_utils\nfrom laed import main as engine\nfrom laed.dataset import corpora\nfrom laed.dataset import data_loaders\nfrom laed.models import sent_models\nfrom laed.utils import str2bool, prepare_dirs_loggers, get_time, process_config\n\narg_lists = []\nparser = argparse.ArgumentParser()\n\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\ndef get_config():\n config, unparsed = parser.parse_known_args()\n return config, unparsed\n\n\n# Data\ndata_arg = add_argument_group('Data')\ndata_arg.add_argument('--data_dir', type=str, nargs='+', default=['data/meld'])\ndata_arg.add_argument('--log_dir', type=str, default='logs')\n\n# Network\nnet_arg = add_argument_group('Network')\nnet_arg.add_argument('--y_size', type=int, default=20)\nnet_arg.add_argument('--k', type=int, default=10)\nnet_arg.add_argument('--use_mutual', type=str2bool, default=True)\nnet_arg.add_argument('--use_reg_kl', type=str2bool, default=True)\n\nnet_arg.add_argument('--rnn_cell', type=str, default='gru')\nnet_arg.add_argument('--embed_size', type=int, default=200)\nnet_arg.add_argument('--utt_type', type=str, default='rnn')\nnet_arg.add_argument('--enc_cell_size', type=int, default=512)\nnet_arg.add_argument('--dec_cell_size', type=int, default=512)\nnet_arg.add_argument('--bi_enc_cell', type=str2bool, default=False)\nnet_arg.add_argument('--max_utt_len', type=int, default=40)\nnet_arg.add_argument('--max_dec_len', type=int, default=40)\nnet_arg.add_argument('--max_vocab_cnt', type=int, default=10000)\nnet_arg.add_argument('--num_layer', type=int, default=1)\nnet_arg.add_argument('--use_attn', type=str2bool, default=False)\nnet_arg.add_argument('--attn_type', type=str, default='cat')\n\n# Training / test parameters\ntrain_arg = add_argument_group('Training')\ntrain_arg.add_argument('--op', type=str, default='adam')\ntrain_arg.add_argument('--backward_size', type=int, default=5)\ntrain_arg.add_argument('--step_size', type=int, default=1)\ntrain_arg.add_argument('--grad_clip', type=float, default=3.0)\ntrain_arg.add_argument('--init_w', type=float, default=0.1)\ntrain_arg.add_argument('--init_lr', type=float, default=0.001)\ntrain_arg.add_argument('--momentum', type=float, default=0.1)\ntrain_arg.add_argument('--lr_hold', type=int, default=1)\ntrain_arg.add_argument('--lr_decay', type=float, default=0.6)\ntrain_arg.add_argument('--dropout', type=float, default=0.3)\ntrain_arg.add_argument('--improve_threshold', type=float, default=0.996)\ntrain_arg.add_argument('--patient_increase', type=float, default=3.0)\ntrain_arg.add_argument('--early_stop', type=str2bool, default=True)\ntrain_arg.add_argument('--max_epoch', type=int, default=50)\n\n\n# MISC\nmisc_arg = add_argument_group('Misc')\nmisc_arg.add_argument('--save_model', type=str2bool, default=True)\nmisc_arg.add_argument('--use_gpu', type=str2bool, default=True)\nmisc_arg.add_argument('--print_step', type=int, default=500)\nmisc_arg.add_argument('--fix_batch', type=str2bool, default=False)\nmisc_arg.add_argument('--include_eod', type=str2bool, default=True)\nmisc_arg.add_argument('--ckpt_step', type=int, default=1500)\nmisc_arg.add_argument('--batch_size', type=int, default=60)\nmisc_arg.add_argument('--preview_batch_num', type=int, default=1)\nmisc_arg.add_argument('--gen_type', type=str, default='greedy')\nmisc_arg.add_argument('--avg_type', 
type=str, default='word')\nmisc_arg.add_argument('--beam_size', type=int, default=10)\nmisc_arg.add_argument('--forward_only', type=str2bool, default=False)\ndata_arg.add_argument('--load_sess', type=str, default=\"2018-05-28T14-08-32-meld-utt-skip.py\")\n\n\nlogger = logging.getLogger()\n\n\ndef main(config):\n prepare_dirs_loggers(config, os.path.basename(__file__))\n\n corpus_client = corpora.MELDCorpus(config)\n dial_corpus = corpus_client.get_corpus()\n train_dial, valid_dial, test_dial = dial_corpus['train'],\\\n dial_corpus['valid'],\\\n dial_corpus['test']\n\n evaluator = evaluators.BleuEvaluator(os.path.basename(__file__))\n # create data loader that feed the deep models\n train_feed = data_loaders.MELDSkipLoader(\"Train\", train_dial, config)\n valid_feed = data_loaders.MELDSkipLoader(\"Valid\", valid_dial, config)\n test_feed = data_loaders.MELDSkipLoader(\"Test\", test_dial, config)\n model = sent_models.DiVST(corpus_client, config)\n\n if config.forward_only:\n test_file = os.path.join(config.log_dir, config.load_sess,\n \"{}-test-{}.txt\".format(get_time(), config.gen_type))\n dump_file = os.path.join(config.log_dir, config.load_sess,\n \"{}-z.pkl\".format(get_time()))\n model_file = os.path.join(config.log_dir, config.load_sess, \"model\")\n else:\n test_file = os.path.join(config.session_dir,\n \"{}-test-{}.txt\".format(get_time(), config.gen_type))\n dump_file = os.path.join(config.session_dir, \"{}-z.pkl\".format(get_time()))\n model_file = os.path.join(config.session_dir, \"model\")\n\n if config.use_gpu:\n model.cuda()\n\n if config.forward_only is False:\n try:\n engine.train(model, train_feed, valid_feed,\n test_feed, config, evaluator, gen=utt_skip_utils.generate)\n except KeyboardInterrupt:\n print(\"Training stopped by keyboard.\")\n\n config.batch_size = 50\n model.load_state_dict(torch.load(model_file))\n engine.validate(model, test_feed, config)\n\n with open(os.path.join(dump_file), \"wb\") as f:\n print(\"Dumping test to {}\".format(dump_file))\n utt_skip_utils.dump_latent(model, test_feed, config, f, num_batch=None)\n\n with open(os.path.join(test_file), \"wb\") as f:\n print(\"Saving test to {}\".format(test_file))\n utt_skip_utils.generate(model, test_feed, config, evaluator, num_batch=None, dest_f=f)\n\n\nif __name__ == \"__main__\":\n config, unparsed = get_config()\n config = process_config(config)\n main(config)\n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hammamqassim/QC-App-Oriented-Benchmarks
[ "e8e6c41a6b8378820d5b88653cad91fe6e855bc1" ]
[ "phase-estimation/qiskit/pe_benchmark.py" ]
[ "\"\"\"\nPhase Estimation Benchmark Program - Qiskit\n\"\"\"\n\nimport sys\nimport time\n\nimport numpy as np\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\n\nsys.path[1:1] = [\"_common\", \"_common/qiskit\", \"quantum-fourier-transform/qiskit\"]\nsys.path[1:1] = [\"../../_common\", \"../../_common/qiskit\", \"../../quantum-fourier-transform/qiskit\"]\nimport execute as ex\nimport metrics as metrics\nfrom qft_benchmark import inv_qft_gate\n\nnp.random.seed(0)\n\nverbose = False\n\n# saved subcircuits circuits for printing\nQC_ = None\nQFTI_ = None\nU_ = None\n\n############### Circuit Definition\n\ndef PhaseEstimation(num_qubits, theta):\n \n qr = QuantumRegister(num_qubits)\n \n num_counting_qubits = num_qubits - 1 # only 1 state qubit\n \n cr = ClassicalRegister(num_counting_qubits)\n qc = QuantumCircuit(qr, cr)\n\n # initialize counting qubits in superposition\n for i in range(num_counting_qubits):\n qc.h(qr[i])\n\n # change to |1> in state qubit, so phase will be applied by cphase gate\n qc.x(num_counting_qubits)\n\n qc.barrier()\n\n repeat = 1\n for j in reversed(range(num_counting_qubits)):\n # controlled operation: adds phase exp(i*2*pi*theta*repeat) to the state |1>\n # does nothing to state |0>\n cp, _ = CPhase(2*np.pi*theta, repeat)\n qc.append(cp, [j, num_counting_qubits])\n repeat *= 2\n\n #Define global U operator as the phase operator\n _, U = CPhase(2*np.pi*theta, 1)\n\n qc.barrier()\n \n # inverse quantum Fourier transform only on counting qubits\n qc.append(inv_qft_gate(num_counting_qubits), qr[:num_counting_qubits])\n \n qc.barrier()\n \n # measure counting qubits\n qc.measure([qr[m] for m in range(num_counting_qubits)], list(range(num_counting_qubits)))\n\n # save smaller circuit example for display\n global QC_, U_, QFTI_\n if QC_ == None or num_qubits <= 5:\n if num_qubits < 9: QC_ = qc\n if U_ == None or num_qubits <= 5:\n if num_qubits < 9: U_ = U\n if QFTI_ == None or num_qubits <= 5:\n if num_qubits < 9: QFTI_ = inv_qft_gate(num_counting_qubits)\n return qc\n\n#Construct the phase gates and include matching gate representation as readme circuit\ndef CPhase(angle, exponent):\n\n qc = QuantumCircuit(1, name=f\"U^{exponent}\")\n qc.p(angle*exponent, 0)\n phase_gate = qc.to_gate().control(1)\n\n return phase_gate, qc\n\n# Analyze and print measured results\ndef analyze_and_print_result(qc, result, num_counting_qubits, theta, num_shots):\n\n # get results as times a particular theta was measured\n counts = bitstring_to_theta(result.get_counts(qc), num_counting_qubits) \n \n if verbose: print(f\"For theta value {theta}, measured: {counts}\")\n\n # correct distribution is measuring theta 100% of the time\n correct_dist = {theta: 1.0}\n\n # generate thermal_dist with amplitudes instead, to be comparable to correct_dist\n bit_thermal_dist = metrics.uniform_dist(num_counting_qubits)\n thermal_dist = bitstring_to_theta(bit_thermal_dist, num_counting_qubits)\n\n # use our polarization fidelity rescaling\n fidelity = metrics.polarization_fidelity(counts, correct_dist, thermal_dist)\n\n return counts, fidelity\n\ndef bitstring_to_theta(counts, num_counting_qubits):\n theta_counts = {}\n for key in counts.keys():\n r = counts[key]\n theta = int(key,2) / (2**num_counting_qubits)\n if theta not in theta_counts.keys():\n theta_counts[theta] = 0\n theta_counts[theta] += r\n return theta_counts\n\n################ Benchmark Loop\n\n# Execute program with default parameters\ndef run(min_qubits=3, max_qubits=8, max_circuits=3, num_shots=100,\n 
backend_id='qasm_simulator', provider_backend=None,\n hub=\"ibm-q\", group=\"open\", project=\"main\", exec_options=None):\n\n print(\"Phase Estimation Benchmark Program - Qiskit\")\n\n num_state_qubits = 1 # default, not exposed to users, cannot be changed in current implementation\n\n # validate parameters (smallest circuit is 3 qubits)\n num_state_qubits = max(1, num_state_qubits)\n if max_qubits < num_state_qubits + 2:\n print(f\"ERROR: PE Benchmark needs at least {num_state_qubits + 2} qubits to run\")\n return\n min_qubits = max(max(3, min_qubits), num_state_qubits + 2)\n #print(f\"min, max, state = {min_qubits} {max_qubits} {num_state_qubits}\")\n\n # Initialize metrics module\n metrics.init_metrics()\n\n # Define custom result handler\n def execution_handler(qc, result, num_qubits, theta, num_shots):\n\n # determine fidelity of result set\n num_counting_qubits = int(num_qubits) - 1\n counts, fidelity = analyze_and_print_result(qc, result, num_counting_qubits, float(theta), num_shots)\n metrics.store_metric(num_qubits, theta, 'fidelity', fidelity)\n\n # Initialize execution module using the execution result handler above and specified backend_id\n ex.init_execution(execution_handler)\n ex.set_execution_target(backend_id, provider_backend=provider_backend,\n hub=hub, group=group, project=project, exec_options=exec_options)\n\n # Execute Benchmark Program N times for multiple circuit sizes\n # Accumulate metrics asynchronously as circuits complete\n for num_qubits in range(min_qubits, max_qubits + 1):\n \n # as circuit width grows, the number of counting qubits is increased\n num_counting_qubits = num_qubits - num_state_qubits - 1\n\n # determine number of circuits to execute for this group\n num_circuits = min(2 ** (num_counting_qubits), max_circuits)\n \n print(f\"************\\nExecuting [{num_circuits}] circuits with num_qubits = {num_qubits}\")\n \n # determine range of secret strings to loop over\n if 2**(num_counting_qubits) <= max_circuits:\n theta_range = [i/(2**(num_counting_qubits)) for i in list(range(num_circuits))]\n else:\n theta_range = [i/(2**(num_counting_qubits)) for i in np.random.choice(2**(num_counting_qubits), num_circuits, False)]\n\n # loop over limited # of random theta choices\n for theta in theta_range:\n # create the circuit for given qubit size and theta, store time metric\n ts = time.time()\n\n qc = PhaseEstimation(num_qubits, theta)\n metrics.store_metric(num_qubits, theta, 'create_time', time.time() - ts)\n\n # collapse the 3 sub-circuit levels used in this benchmark (for qiskit)\n qc2 = qc.decompose().decompose().decompose()\n \n # submit circuit for execution on target (simulator, cloud simulator, or hardware)\n ex.submit_circuit(qc2, num_qubits, theta, num_shots)\n\n # Wait for some active circuits to complete; report metrics when groups complete\n ex.throttle_execution(metrics.finalize_group)\n\n # Wait for all active circuits to complete; report metrics when groups complete\n ex.finalize_execution(metrics.finalize_group)\n\n # print a sample circuit\n print(\"Sample Circuit:\"); print(QC_ if QC_ != None else \" ... too large!\")\n print(\"\\nPhase Operator 'U' = \"); print(U_ if U_ != None else \" ... too large!\")\n print(\"\\nInverse QFT Circuit =\"); print(QFTI_ if QFTI_ != None else \" ... too large!\")\n\n # Plot metrics for all circuit sizes\n metrics.plot_metrics(\"Benchmark Results - Phase Estimation - Qiskit\")\n\n\n# if main, execute method\nif __name__ == '__main__': run()\n" ]
[ [ "numpy.random.seed", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]