repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
apurvak/tapas | [
"2987658c3b65c5ab6e698d6c57823dc30d3d0f96"
] | [
"tapas/experiments/table_retriever_experiment.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Table retriever experiment.\"\"\"\n\nimport csv\nimport functools\nimport os\nimport traceback\nfrom typing import Text, Optional\n\nfrom absl import app\nfrom absl import flags\nfrom tapas.models import table_retriever_model\nfrom tapas.scripts import eval_table_retriever_utils\nfrom tapas.utils import experiment_utils # pylint: disable=unused-import\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"data_format\", \"tfrecord\", \"The input data format.\")\n\nflags.DEFINE_multi_string(\n \"input_file_train\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_multi_string(\n \"input_file_eval\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_multi_string(\n \"input_file_predict\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"prediction_output_dir\", None,\n \"If not none or empty writes predictions to this directory. Otherwise \"\n \"writes predictions to model_dir.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded. Must match data generation.\")\n\nflags.DEFINE_integer(\"minutes_to_sleep_before_predictions\", 5,\n \"Time in minutes to sleep before starting to predict.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_string(\n \"eval_name\", \"default\",\n \"Name of the current evaluation set. Will be used in Tensorboard.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_float(\n \"grad_clipping\", None, \"If not None, gradients greater in absolute value\"\n \"than this number are clipped.\")\n\nflags.DEFINE_integer(\n \"down_projection_dim\", 0, \"Representation dimension of the query/table\"\n \"after down projection. If smaller than 1, no projection occurs.\")\n\nflags.DEFINE_bool(\n \"init_from_single_encoder\", True, \"If true, expects to load\"\n \"a checkpoint of a single encoder, that would be used to\"\n \"initialize both encoders.\")\n\nflags.DEFINE_integer(\"max_query_length\", 128,\n \"The query is capped to this length.\")\n\nflags.DEFINE_string(\n \"compression_type\",\n \"\",\n \"Compression to use when reading tfrecords. '' for no compression.\",\n)\n\nflags.DEFINE_integer(\n \"evaluated_checkpoint_step\", None,\n \"The step for a specific model checkpoint to be evaluated. 
If None, then\"\n \"all checkpoints are used.\")\n\nflags.DEFINE_string(\n \"evaluated_checkpoint_metric\", None,\n \"The metric used to chose a model checkpoint to be evaluated. If None, then\"\n \"all checkpoints are used.\")\n\nflags.DEFINE_bool(\"use_out_of_core_negatives\", False,\n \"If true, use all the negatives when\"\n \"using many TPU cores.\")\n\nflags.DEFINE_bool(\"mask_repeated_tables\", False,\n \"If true, mask tables that are repeated within a batch.\")\n\nflags.DEFINE_bool(\"mask_repeated_questions\", False,\n \"If true, mask questions that are repeated within a batch.\")\n\nflags.DEFINE_bool(\n \"ignore_table_content\", False,\n \"If true, use only the table headers to represent the table.\")\n\nflags.DEFINE_bool(\n \"use_mined_negatives\", False,\n \"If true, use mined negatives that should be given as\"\n \"additional table features.\")\n\nflags.DEFINE_list(\"disabled_features\", [],\n \"Features that should be disabled (for ablation studies).\")\n\n\ndef _get_test_input_fn(name, input_file):\n \"\"\"Gets input_fn for eval/predict modes.\"\"\"\n if input_file is None:\n return None\n input_fn = functools.partial(\n table_retriever_model.input_fn,\n name=name,\n file_patterns=input_file,\n data_format=FLAGS.data_format,\n is_training=False,\n max_seq_length=FLAGS.max_seq_length,\n compression_type=FLAGS.compression_type,\n use_mined_negatives=FLAGS.use_mined_negatives,\n include_id=True)\n return input_fn\n\n\ndef _predict_and_export_metrics(\n mode,\n input_fn,\n checkpoint_path,\n step,\n estimator,\n output_dir,\n):\n \"\"\"Exports model predictions and calculates precision@k.\"\"\"\n tf.logging.info(\"Running predictor for step %d.\", step)\n result = estimator.predict(input_fn=input_fn, checkpoint_path=checkpoint_path)\n output_predict_file = os.path.join(output_dir, f\"{mode}_results_{step}.tsv\")\n write_predictions(result, output_predict_file)\n\n # Compute precision@k.\n if (not FLAGS.evaluated_checkpoint_step or\n not FLAGS.evaluated_checkpoint_metric):\n p_at_k = eval_table_retriever_utils.eval_precision_at_k(\n query_prediction_files=output_predict_file,\n table_prediction_files=output_predict_file,\n make_tables_unique=True)\n experiment_utils.save_metrics(output_dir, mode, step, p_at_k)\n\n\ndef write_predictions(predictions,\n output_predict_file):\n \"\"\"Writes predictions to an output TSV file.\n\n Predictions header: [query_id, query_rep, table_id, table_rep]\n Args:\n predictions: model predictions\n output_predict_file: Path for wrinting the predicitons.\n \"\"\"\n with tf.io.gfile.GFile(output_predict_file, \"w\") as write_file:\n header = [\n \"query_id\",\n \"query_rep\",\n \"table_id\",\n \"table_rep\",\n ]\n writer = csv.DictWriter(write_file, fieldnames=header, delimiter=\"\\t\")\n writer.writeheader()\n\n for prediction in predictions:\n query_id = prediction[\"query_id\"]\n table_id = prediction[\"table_id\"]\n query_rep = prediction[\"query_rep\"]\n table_rep = prediction[\"table_rep\"]\n\n prediction_to_write = {\n \"query_id\": query_id[0].decode(\"utf-8\"),\n \"query_rep\": query_rep.tolist(),\n \"table_id\": table_id[0].decode(\"utf-8\"),\n \"table_rep\": table_rep.tolist(),\n }\n writer.writerow(prediction_to_write)\n\n\ndef main(_):\n bert_config = experiment_utils.bert_config_from_flags()\n total_steps = experiment_utils.num_train_steps()\n retriever_config = table_retriever_model.RetrieverConfig(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=total_steps,\n 
num_warmup_steps=experiment_utils.num_warmup_steps(),\n use_tpu=FLAGS.use_tpu,\n grad_clipping=FLAGS.grad_clipping,\n down_projection_dim=FLAGS.down_projection_dim,\n init_from_single_encoder=FLAGS.init_from_single_encoder,\n max_query_length=FLAGS.max_query_length,\n mask_repeated_tables=FLAGS.mask_repeated_tables,\n mask_repeated_questions=FLAGS.mask_repeated_questions,\n use_out_of_core_negatives=FLAGS.use_out_of_core_negatives,\n ignore_table_content=FLAGS.ignore_table_content,\n disabled_features=FLAGS.disabled_features,\n use_mined_negatives=FLAGS.use_mined_negatives,\n )\n\n model_fn = table_retriever_model.model_fn_builder(retriever_config)\n estimator = experiment_utils.build_estimator(model_fn)\n\n if FLAGS.do_train:\n tf.io.gfile.makedirs(FLAGS.model_dir)\n bert_config.to_json_file(os.path.join(FLAGS.model_dir, \"bert_config.json\"))\n retriever_config.to_json_file(\n os.path.join(FLAGS.model_dir, \"tapas_config.json\"))\n train_input_fn = functools.partial(\n table_retriever_model.input_fn,\n name=\"train\",\n file_patterns=FLAGS.input_file_train,\n data_format=FLAGS.data_format,\n is_training=True,\n max_seq_length=FLAGS.max_seq_length,\n compression_type=FLAGS.compression_type,\n use_mined_negatives=FLAGS.use_mined_negatives,\n include_id=False)\n estimator.train(input_fn=train_input_fn, max_steps=total_steps)\n\n eval_input_fn = _get_test_input_fn(\"eval\", FLAGS.input_file_eval)\n if FLAGS.do_eval:\n if eval_input_fn is None:\n raise ValueError(\"No input_file_eval specified!\")\n for _, checkpoint in experiment_utils.iterate_checkpoints(\n model_dir=estimator.model_dir,\n total_steps=total_steps,\n marker_file_prefix=os.path.join(estimator.model_dir,\n f\"eval_{FLAGS.eval_name}\"),\n minutes_to_sleep=FLAGS.minutes_to_sleep_before_predictions):\n tf.logging.info(\"Running eval: %s\", FLAGS.eval_name)\n try:\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=FLAGS.num_eval_steps,\n name=FLAGS.eval_name,\n checkpoint_path=checkpoint)\n tf.logging.info(\"Eval result:\\n%s\", result)\n except (ValueError, tf.errors.NotFoundError):\n tf.logging.error(\"Error getting predictions for checkpoint %s: %s\",\n checkpoint, traceback.format_exc())\n\n if FLAGS.do_predict:\n predict_input_fn = _get_test_input_fn(\"predict\", FLAGS.input_file_predict)\n if FLAGS.prediction_output_dir:\n prediction_output_dir = FLAGS.prediction_output_dir\n tf.io.gfile.makedirs(prediction_output_dir)\n else:\n prediction_output_dir = estimator.model_dir\n\n marker_file_prefix = os.path.join(prediction_output_dir, \"predict\")\n # When two separate jobs are launched we don't want conflicting markers.\n if predict_input_fn is not None:\n marker_file_prefix += \"_test\"\n if eval_input_fn is not None:\n marker_file_prefix += \"_dev\"\n\n single_step = FLAGS.evaluated_checkpoint_step\n if FLAGS.evaluated_checkpoint_metric:\n single_step = experiment_utils.get_best_step_for_metric(\n estimator.model_dir, FLAGS.evaluated_checkpoint_metric)\n for current_step, checkpoint in experiment_utils.iterate_checkpoints(\n model_dir=estimator.model_dir,\n total_steps=total_steps,\n marker_file_prefix=marker_file_prefix,\n single_step=single_step):\n try:\n if predict_input_fn is not None:\n _predict_and_export_metrics(\n mode=\"predict\",\n input_fn=predict_input_fn,\n checkpoint_path=checkpoint,\n step=current_step,\n estimator=estimator,\n output_dir=prediction_output_dir)\n\n if eval_input_fn is not None:\n _predict_and_export_metrics(\n mode=\"eval\",\n input_fn=eval_input_fn,\n 
checkpoint_path=checkpoint,\n step=current_step,\n estimator=estimator,\n output_dir=prediction_output_dir)\n except (ValueError, tf.errors.NotFoundError):\n tf.logging.error(\"Error getting predictions for checkpoint %s: %s\",\n checkpoint, traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.io.gfile.makedirs",
"tensorflow.compat.v1.io.gfile.GFile",
"tensorflow.compat.v1.logging.info"
]
] |
zmxdream/Paddle | [
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c"
] | [
"python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py",
"python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, skip_check_grad_ci\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\n\n\[email protected]_if_not_cpu()\nclass TestStack2DOneDNNOp(OpTest):\n def initDefaultParameters(self):\n self.num_inputs = 4\n self.input_dim = (2, 2)\n self.axis = 1\n self.dtype = np.float32\n\n def initParameters(self):\n pass\n\n def getInputNames(self):\n input_names = []\n for i in range(self.num_inputs):\n input_names.append('x{}'.format(i))\n return input_names\n\n def setUp(self):\n self.initDefaultParameters()\n self.initParameters()\n self.op_type = 'stack'\n self.op_inputs = []\n\n for i in range(self.num_inputs):\n self.op_inputs.append(\n np.random.random(size=self.input_dim).astype(np.float32))\n\n input_list = []\n input_names = self.getInputNames()\n for i in range(self.num_inputs):\n input_list.append((input_names[i], self.op_inputs[i]))\n\n self.inputs = {'X': input_list}\n self.outputs = {'Y': np.stack(self.op_inputs, axis=self.axis)}\n self.attrs = {'axis': self.axis, 'use_mkldnn': True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n # JUST FOR CI TO PASS, GRAD IS NOT IMPLEMENTED YET\n def test_check_grad(self):\n pass\n\n\nclass TestStack1DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (100)\n self.axis = 0\n\n\nclass TestStack1DAxis1OneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (100)\n self.axis = 1\n\n\nclass TestStack2DAxisLastOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (13, 24)\n self.num_inputs = 5\n self.axis = -1\n\n\nclass TestStack3DAxisNegativeOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (10, 128, 128)\n self.axis = -2\n\n\nclass TestStack3DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (10, 128, 128)\n self.num_inputs = 3\n self.axis = 1\n\n\nclass TestStack4DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (2, 2, 2, 2)\n self.num_inputs = 3\n self.axis = 4\n\n\nclass TestStack5DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (2, 3, 4, 5, 6)\n self.num_inputs = 6\n self.axis = 0\n\n\nif __name__ == \"__main__\":\n paddle.enable_static()\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid as fluid\nimport unittest\nimport numpy as np\nimport os\nimport six\nfrom paddle.fluid.reader import keep_data_loader_order\n\nkeep_data_loader_order(False)\n\n\ndef create_reader(shape, batch_number):\n def __impl__():\n idx = 0\n for _ in six.moves.range(batch_number):\n yield np.ones(shape).astype('float32') * idx,\n idx += 1\n\n return __impl__\n\n\nclass DataLoaderKeepOrderTestBase(unittest.TestCase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 10000\n\n def setUp(self):\n self.epoch_num = 3\n self.batch_num = 40\n self.shape = [3, 4, 5]\n self.initParameters()\n\n def clear_visited(self):\n self.visited = set()\n\n def build_network(self, places):\n input_data = fluid.data(shape=self.shape, dtype='float32', name=\"input\")\n loader = fluid.io.DataLoader.from_generator(\n capacity=16, feed_list=[input_data], iterable=self.iterable)\n\n fc = fluid.layers.fc(input_data, size=10)\n loss = fluid.layers.reduce_mean(fc)\n\n loader.set_batch_generator(\n create_reader(self.shape, self.batch_num),\n places=places if loader.iterable else None)\n\n return input_data, loss, loader\n\n def assertInputData(self, batch_id, input_data, dev_cnt,\n check_visited=True):\n if isinstance(input_data, list):\n self.assertTrue(len(input_data), dev_cnt)\n start_val = dev_cnt * batch_id\n for each_input_dict in input_data:\n input_tensor = np.array(each_input_dict[\"input\"])\n self.assertEqual(self.shape, list(input_tensor.shape))\n\n num = input_tensor.flatten()[0]\n equal = (input_tensor == num).all()\n self.assertTrue(equal)\n if check_visited:\n self.assertTrue(num not in self.visited)\n self.visited.add(num)\n\n start_val += 1\n else:\n self.assertEqual(\n list(input_data.shape),\n [self.shape[0] * dev_cnt] + self.shape[1:])\n start_val = dev_cnt * batch_id\n for idx in six.moves.range(dev_cnt):\n data_part = input_data[idx * self.shape[0]:(idx + 1) *\n self.shape[0], :]\n num = data_part.flatten()[0]\n self.assertTrue((data_part == num).all())\n if check_visited:\n self.assertTrue(num not in self.visited)\n self.visited.add(num)\n\n start_val += 1\n\n def get_places(self):\n place_list = [fluid.cpu_places(1), fluid.cpu_places(4)]\n if fluid.is_compiled_with_cuda():\n if os.name == \"nt\":\n place_list.extend([fluid.cuda_places(0)])\n else:\n place_list.extend(\n [fluid.cuda_places(0), fluid.cuda_places([0, 1])])\n return place_list\n\n def test_main(self):\n for p in self.get_places():\n use_compiled_program_list = [True] if len(p) > 1 else [False, True]\n for use_compiled_program in use_compiled_program_list:\n self.run_main_with_place(p, use_compiled_program)\n\n def run_main_with_place(self, places, use_compiled_program=True):\n with fluid.scope_guard(fluid.Scope()):\n with fluid.program_guard(fluid.Program(), fluid.Program()):\n input_data, loss, loader = self.build_network(places)\n fetch_list = [input_data]\n\n exe = 
fluid.Executor(places[0])\n exe.run(fluid.default_startup_program())\n\n dev_cnt = len(places)\n if dev_cnt > 1:\n self.assertTrue(use_compiled_program)\n\n main_program = fluid.default_main_program()\n if use_compiled_program:\n main_program = fluid.CompiledProgram(\n main_program).with_data_parallel(\n loss_name=loss.name, places=places)\n\n max_batch_num = min(self.break_num,\n int(self.batch_num / dev_cnt))\n\n if loader.iterable:\n early_break = False\n for epoch_id in six.moves.range(self.epoch_num):\n early_break = False\n self.clear_visited()\n batch_id = 0\n for data in loader():\n if batch_id >= self.break_num:\n early_break = True\n break\n self.assertInputData(\n batch_id, data, dev_cnt, check_visited=False)\n fetch_val, = exe.run(program=main_program,\n feed=data,\n fetch_list=fetch_list)\n self.assertInputData(batch_id, fetch_val, dev_cnt)\n batch_id += 1\n\n if dev_cnt == 1:\n self.assertEqual(batch_id, max_batch_num)\n else:\n self.assertLessEqual(batch_id, max_batch_num)\n\n if early_break:\n loader._reset()\n else:\n for epoch_id in six.moves.range(self.epoch_num):\n batch_id = 0\n self.clear_visited()\n loader.start()\n try:\n while True:\n if batch_id >= self.break_num:\n loader.reset()\n break\n fetch_val, = exe.run(program=main_program,\n fetch_list=fetch_list)\n self.assertInputData(batch_id, fetch_val,\n dev_cnt)\n batch_id += 1\n except fluid.core.EOFException:\n loader.reset()\n\n if dev_cnt == 1:\n self.assertEqual(batch_id, max_batch_num)\n else:\n self.assertLessEqual(batch_id, max_batch_num)\n\n\nclass IterableDataLoaderKeepOrderTest2(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 10000\n\n\nclass IterableDataLoaderKeepOrderTest3(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 2\n\n\nclass IterableDataLoaderKeepOrderTest4(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 2\n\n\nclass IterableDataLoaderKeepOrderTest5(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 0\n\n\nclass IterableDataLoaderKeepOrderTest6(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 0\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.stack",
"numpy.random.random"
],
[
"numpy.array",
"numpy.ones"
]
] |
Polas/omim | [
"03558b418b338f506fbf3aa72ddf15187a2005ee"
] | [
"search/search_quality/scoring_model.py"
] | [
"#!/usr/bin/env python3\n\nfrom math import exp, log\nfrom scipy.stats import pearsonr, t\nfrom sklearn import svm\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.utils import resample\nimport argparse\nimport collections\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport random\nimport sys\n\n\nMAX_DISTANCE_METERS = 2e6\nMAX_RANK = 255.0\nMAX_POPULARITY = 255.0\nRELEVANCES = {'Harmful': -3, 'Irrelevant': 0, 'Relevant': 1, 'Vital': 3}\nNAME_SCORES = ['Zero', 'Substring', 'Prefix', 'Full Match']\nSEARCH_TYPES = ['POI', 'Building', 'Street', 'Unclassified', 'Village', 'City', 'State', 'Country']\nFEATURES = ['DistanceToPivot', 'Rank', 'Popularity', 'Rating', 'FalseCats', 'ErrorsMade', 'MatchedFraction',\n 'AllTokensUsed', 'ExactCountryOrCapital'] + NAME_SCORES + SEARCH_TYPES\n\nBOOTSTRAP_ITERATIONS = 10000\n\n\ndef transform_name_score(value, categories_match):\n if categories_match == 1:\n return 'Zero'\n else:\n return value\n\n\ndef normalize_data(data):\n transform_distance = lambda v: min(v, MAX_DISTANCE_METERS) / MAX_DISTANCE_METERS\n\n data['DistanceToPivot'] = data['DistanceToPivot'].apply(transform_distance)\n data['Rank'] = data['Rank'].apply(lambda v: v / MAX_RANK)\n data['Popularity'] = data['Popularity'].apply(lambda v: v / MAX_POPULARITY)\n data['Relevance'] = data['Relevance'].apply(lambda v: RELEVANCES[v])\n\n cats = data['PureCats'].combine(data['FalseCats'], max)\n\n # TODO (@y, @m): do forward/backward/subset selection of features\n # instead of this merging. It would be great to conduct PCA on\n # the features too.\n data['NameScore'] = data['NameScore'].combine(cats, transform_name_score)\n\n # Adds dummy variables to data for NAME_SCORES.\n for ns in NAME_SCORES:\n data[ns] = data['NameScore'].apply(lambda v: int(ns == v))\n\n # Adds dummy variables to data for SEARCH_TYPES.\n\n # We unify BUILDING with POI here, as we don't have enough\n # training data to distinguish between them. Remove following\n # line as soon as the model will be changed or we will have enough\n # training data.\n data['SearchType'] = data['SearchType'].apply(lambda v: v if v != 'Building' else 'POI')\n for st in SEARCH_TYPES:\n data[st] = data['SearchType'].apply(lambda v: int(st == v))\n\n\ndef compute_ndcg(relevances):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n array of scores.\n \"\"\"\n\n dcg = sum(r / log(2 + i, 2) for i, r in enumerate(relevances))\n dcg_norm = sum(r / log(2 + i, 2) for i, r in enumerate(sorted(relevances, reverse=True)))\n return dcg / dcg_norm if dcg_norm != 0 else 0\n\n\ndef compute_ndcgs_without_ws(data):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n data. Returns an array of ndcg scores in the shape [num groups of\n features].\n \"\"\"\n\n grouped = data.groupby(data['SampleId'], sort=False).groups\n\n ndcgs = []\n for id in grouped:\n indices = grouped[id]\n relevances = np.array(data.ix[indices]['Relevance'])\n ndcgs.append(compute_ndcg(relevances))\n\n return ndcgs\n\n\ndef compute_ndcgs_for_ws(data, ws):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n data and an array of coeffs in a linear model. 
Returns an array of\n ndcg scores in the shape [num groups of features].\n \"\"\"\n\n data_scores = np.array([np.dot(data.ix[i][FEATURES], ws) for i in data.index])\n grouped = data.groupby(data['SampleId'], sort=False).groups\n\n ndcgs = []\n for id in grouped:\n indices = grouped[id]\n\n relevances = np.array(data.ix[indices]['Relevance'])\n scores = data_scores[indices]\n\n # Reoders relevances in accordance with decreasing scores.\n relevances = relevances[scores.argsort()[::-1]]\n ndcgs.append(compute_ndcg(relevances))\n\n return ndcgs\n\n\ndef transform_data(data):\n \"\"\"\n By a given data computes x and y that can be used as an input to a\n linear SVM.\n \"\"\"\n\n grouped = data.groupby(data['SampleId'], sort=False)\n\n xs, ys = [], []\n\n # k is used to create a balanced samples set for better linear\n # separation.\n k = 1\n for _, group in grouped:\n features, relevances = group[FEATURES], group['Relevance']\n\n n, total = len(group), 0\n for _, (i, j) in enumerate(itertools.combinations(range(n), 2)):\n dr = relevances.iloc[j] - relevances.iloc[i]\n y = np.sign(dr)\n if y == 0:\n continue\n\n x = np.array(features.iloc[j]) - np.array(features.iloc[i])\n\n # Need to multiply x by average drop in NDCG when i-th and\n # j-th are exchanged.\n x *= abs(dr * (1 / log(j + 2, 2) - 1 / log(i + 2, 2)))\n\n # This is needed to prevent disbalance in classes sizes.\n if y != k:\n x = np.negative(x)\n y = -y\n\n xs.append(x)\n ys.append(y)\n total += 1\n k = -k\n\n # Scales this group of features to equalize different search\n # queries.\n for i in range(-1, -total, -1):\n xs[i] = xs[i] / total\n return xs, ys\n\n\ndef show_pearson_statistics(xs, ys, features):\n \"\"\"\n Shows info about Pearson coefficient between features and\n relevancy.\n \"\"\"\n\n print('***** Correlation table *****')\n print('H0 - feature not is correlated with relevancy')\n print('H1 - feature is correlated with relevancy')\n print()\n\n cs, ncs = [], []\n for i, f in enumerate(features):\n zs = [x[i] for x in xs]\n (c, p) = pearsonr(zs, ys)\n\n correlated = p < 0.05\n print('{}: pearson={:.3f}, P(H1)={}'.format(f, c, 1 - p))\n if correlated:\n cs.append(f)\n else:\n ncs.append(f)\n\n print()\n print('Correlated:', cs)\n print('Non-correlated:', ncs)\n\n\ndef raw_output(features, ws):\n \"\"\"\n Prints feature-coeff pairs to the standard output.\n \"\"\"\n\n print('{:<20}{}'.format('Feature', 'Value'))\n print()\n for f, w in zip(features, ws):\n print('{:<20}{:.5f}'.format(f, w))\n\n\ndef print_const(name, value):\n print('double constexpr k{} = {:.7f};'.format(name, value))\n\n\ndef print_array(name, size, values):\n print('double constexpr {}[{}] = {{'.format(name, size))\n print(',\\n'.join(' {:.7f} /* {} */'.format(w, f) for (f, w) in values))\n print('};')\n\ndef cpp_output(features, ws):\n \"\"\"\n Prints feature-coeff pairs in the C++-compatible format.\n \"\"\"\n\n ns, st = [], []\n\n for f, w in zip(features, ws):\n if f in NAME_SCORES:\n ns.append((f, w))\n elif f in SEARCH_TYPES:\n st.append((f, w))\n else:\n print_const(f, w)\n print_array('kNameScore', 'NameScore::NAME_SCORE_COUNT', ns)\n print_array('kType', 'Model::TYPE_COUNT', st)\n\n\ndef show_bootstrap_statistics(clf, X, y, features):\n num_features = len(features)\n\n coefs = []\n for i in range(num_features):\n coefs.append([])\n\n for _ in range(BOOTSTRAP_ITERATIONS):\n X_sample, y_sample = resample(X, y)\n clf.fit(X_sample, y_sample)\n for i, c in enumerate(get_normalized_coefs(clf)):\n coefs[i].append(c)\n\n poi_index = 
features.index('POI')\n building_index = features.index('Building')\n coefs[building_index] = coefs[poi_index]\n\n intervals = []\n\n print()\n print('***** Bootstrap statistics *****')\n print('{:<20}{:<20}{:<10}{:<10}'.format('Feature', '95% interval', 't-value', 'Pr(>|t|)'))\n print()\n for i, cs in enumerate(coefs):\n values = np.array(cs)\n lo = np.percentile(values, 2.5)\n hi = np.percentile(values, 97.5)\n interval = '({:.3f}, {:.3f})'.format(lo, hi)\n tv = np.mean(values) / np.std(values)\n pr = (1.0 - t.cdf(x=abs(tv), df=len(values))) * 0.5\n\n stv = '{:.3f}'.format(tv)\n spr = '{:.3f}'.format(pr)\n print('{:<20}{:<20}{:<10}{:<10}'.format(features[i], interval, stv, spr))\n\n\ndef get_normalized_coefs(clf):\n ws = clf.coef_[0]\n max_w = max(abs(w) for w in ws)\n return np.divide(ws, max_w)\n\n\ndef main(args):\n data = pd.read_csv(sys.stdin)\n\n # Drop categorial requests cause we use different ranking model for them.\n data.drop(data[data['IsCategorialRequest'] == 1].index, inplace=True)\n data.reset_index(inplace=True, drop=True)\n data.drop(columns=['IsCategorialRequest', 'HasName'], inplace=True)\n\n normalize_data(data)\n\n ndcgs = compute_ndcgs_without_ws(data);\n print('Current NDCG: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))\n print()\n\n xs, ys = transform_data(data)\n\n clf = svm.LinearSVC(random_state=args.seed)\n cv = KFold(n_splits=5, shuffle=True, random_state=args.seed)\n\n # \"C\" stands for the regularizer constant.\n grid = {'C': np.power(10.0, np.arange(-5, 6))}\n gs = GridSearchCV(clf, grid, scoring='roc_auc', cv=cv)\n gs.fit(xs, ys)\n\n print('Best params: {}'.format(gs.best_params_))\n\n ws = get_normalized_coefs(gs.best_estimator_)\n\n # Following code restores coeffs for merged features.\n ws[FEATURES.index('Building')] = ws[FEATURES.index('POI')]\n\n ndcgs = compute_ndcgs_for_ws(data, ws)\n\n print('NDCG mean: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))\n print('ROC AUC: {:.3f}'.format(gs.best_score_))\n\n if args.pearson:\n print()\n show_pearson_statistics(xs, ys, FEATURES)\n\n print()\n print('***** Linear model weights *****')\n if args.cpp:\n cpp_output(FEATURES, ws)\n else:\n raw_output(FEATURES, ws)\n\n if args.bootstrap:\n show_bootstrap_statistics(clf, xs, ys, FEATURES)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', help='random seed', type=int)\n parser.add_argument('--pearson', help='show pearson statistics', action='store_true')\n parser.add_argument('--cpp', help='generate output in the C++ format', action='store_true')\n parser.add_argument('--bootstrap', help='show bootstrap confidence intervals', action='store_true')\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.divide",
"scipy.stats.pearsonr",
"numpy.sign",
"numpy.mean",
"pandas.read_csv",
"sklearn.svm.LinearSVC",
"numpy.arange",
"sklearn.model_selection.GridSearchCV",
"numpy.negative",
"sklearn.model_selection.KFold",
"numpy.array",
"numpy.std",
"numpy.dot",
"numpy.percentile",
"sklearn.utils.resample"
]
] |
meramossepu1/groundmotion-processing | [
"5cc19023b94e5b5b718590ce8cd05a22a4088a67",
"5cc19023b94e5b5b718590ce8cd05a22a4088a67"
] | [
"tests/gmprocess/metrics/imt/fas_arithmetic_mean_test.py",
"gmprocess/core/stationtrace.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# stdlib imports\nimport os.path\nimport re\n\n# third party imports\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\n\n# Local imports\nfrom gmprocess.metrics.station_summary import StationSummary\nfrom gmprocess.core.stationstream import StationStream\nfrom gmprocess.core.stationtrace import StationTrace\n\n\ndef test_fas():\n \"\"\"\n Testing based upon the work provided in\n https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb\n \"\"\"\n ddir = os.path.join(\"data\", \"testdata\")\n datadir = pkg_resources.resource_filename(\"gmprocess\", ddir)\n\n \"\"\"\n Note: the testing data in the fas_*_.pkl files now uses the convention\n of using the next power of 2 for the number of FFT points. The original\n files based on thee Jupyter notebook above just used the length of the\n traces for the FFT.\n \"\"\"\n\n fas_file = os.path.join(datadir, \"fas_arithmetic_mean.pkl\")\n p1 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL067.AT2\")\n p2 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL337.AT2\")\n\n stream = StationStream([])\n for idx, fpath in enumerate([p1, p2]):\n with open(fpath, encoding=\"utf-8\") as file_obj:\n for _ in range(3):\n next(file_obj)\n meta = re.findall(r\"[.0-9]+\", next(file_obj))\n dt = float(meta[1])\n accels = np.array(\n [col for line in file_obj for col in line.split()], dtype=float\n )\n trace = StationTrace(\n data=accels,\n header={\n \"channel\": \"H\" + str(idx),\n \"delta\": dt,\n \"units\": \"acc\",\n \"standard\": {\n \"corner_frequency\": np.nan,\n \"station_name\": \"\",\n \"source\": \"json\",\n \"instrument\": \"\",\n \"instrument_period\": np.nan,\n \"source_format\": \"json\",\n \"comments\": \"\",\n \"structure_type\": \"\",\n \"sensor_serial_number\": \"\",\n \"source_file\": \"\",\n \"process_level\": \"raw counts\",\n \"process_time\": \"\",\n \"horizontal_orientation\": np.nan,\n \"vertical_orientation\": np.nan,\n \"units\": \"acc\",\n \"units_type\": \"acc\",\n \"instrument_sensitivity\": np.nan,\n \"instrument_damping\": np.nan,\n },\n },\n )\n stream.append(trace)\n\n for tr in stream:\n response = {\"input_units\": \"counts\", \"output_units\": \"cm/s^2\"}\n tr.setProvenance(\"remove_response\", response)\n\n target_df = pd.read_pickle(fas_file)\n ind_vals = target_df.index.values\n per = np.unique([float(i[0].split(\")\")[0].split(\"(\")[1]) for i in ind_vals])\n freqs = 1 / per\n imts = [\"fas\" + str(p) for p in per]\n summary = StationSummary.from_stream(\n stream, [\"arithmetic_mean\"], imts, bandwidth=30\n )\n\n pgms = summary.pgms\n # pgms.to_pickle(fas_file)\n for idx, f in enumerate(freqs):\n fstr = \"FAS(%.3f)\" % (1 / f)\n fval1 = pgms.loc[fstr, \"ARITHMETIC_MEAN\"].Result\n fval2 = target_df.loc[fstr, \"ARITHMETIC_MEAN\"].Result\n np.testing.assert_allclose(fval1, fval2, rtol=1e-5, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n test_fas()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# stdlib imports\nimport json\nimport copy\nimport logging\nfrom datetime import datetime\nimport getpass\nimport re\nimport inspect\n\n# third party imports\nimport numpy as np\nfrom obspy.core.trace import Trace\nimport prov\nimport prov.model\nfrom obspy.core.utcdatetime import UTCDateTime\nimport pandas as pd\n\n# local imports\nfrom gmprocess.utils.config import get_config\nfrom gmprocess.io.seedname import get_units_type\n\nUNITS = {\"acc\": \"cm/s^2\", \"vel\": \"cm/s\"}\nREVERSE_UNITS = {\"cm/s^2\": \"acc\", \"cm/s\": \"vel\"}\n\nPROCESS_LEVELS = {\n \"V0\": \"raw counts\",\n \"V1\": \"uncorrected physical units\",\n \"V2\": \"corrected physical units\",\n \"V3\": \"derived time series\",\n}\n\nREV_PROCESS_LEVELS = {\n \"raw counts\": \"V0\",\n \"uncorrected physical units\": \"V1\",\n \"corrected physical units\": \"V2\",\n \"derived time series\": \"V3\",\n}\n\nLENGTH_CONVERSIONS = {\"nm\": 1e9, \"um\": 1e6, \"mm\": 1e3, \"cm\": 1e2, \"m\": 1}\n\n# when checking to see if a channel is vertical,\n# 90 - abs(dip) must be less than or equal to this value\n# (i.e., dip must ne close to )\nMAX_DIP_OFFSET = 0.1\n\n# NOTE: if required is True then this means that the value must be\n# filled in with a value that does NOT match the default.\nSTANDARD_KEYS = {\n \"source_file\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"source\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"horizontal_orientation\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"vertical_orientation\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"station_name\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"instrument_period\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"instrument_damping\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"process_time\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"process_level\": {\n \"type\": str,\n \"required\": True,\n \"default\": list(PROCESS_LEVELS.values()),\n },\n \"sensor_serial_number\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"instrument\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"structure_type\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"corner_frequency\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"units\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"units_type\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"source_format\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"instrument_sensitivity\": {\n \"type\": float,\n \"required\": False,\n \"default\": np.nan,\n },\n \"comments\": {\"type\": str, \"required\": False, \"default\": \"\"},\n}\n\nINT_TYPES = [\n np.dtype(\"int8\"),\n np.dtype(\"int16\"),\n np.dtype(\"int32\"),\n np.dtype(\"int64\"),\n np.dtype(\"uint8\"),\n np.dtype(\"uint16\"),\n np.dtype(\"uint32\"),\n np.dtype(\"uint64\"),\n]\n\nFLOAT_TYPES = [np.dtype(\"float32\"), np.dtype(\"float64\")]\n\nTIMEFMT = \"%Y-%m-%dT%H:%M:%SZ\"\nTIMEFMT_MS = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nNS_PREFIX = \"seis_prov\"\nNS_SEIS = (NS_PREFIX, \"http://seisprov.org/seis_prov/0.1/#\")\n\nMAX_ID_LEN = 12\n\nPROV_TIME_FMT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nACTIVITIES = {\n \"waveform_simulation\": {\"code\": \"ws\", \"label\": \"Waveform Simulation\"},\n \"taper\": {\"code\": \"tp\", \"label\": \"Taper\"},\n \"stack_cross_correlations\": {\"code\": \"sc\", \"label\": \"Stack Cross 
Correlations\"},\n \"simulate_response\": {\"code\": \"sr\", \"label\": \"Simulate Response\"},\n \"rotate\": {\"code\": \"rt\", \"label\": \"Rotate\"},\n \"resample\": {\"code\": \"rs\", \"label\": \"Resample\"},\n \"remove_response\": {\"code\": \"rr\", \"label\": \"Remove Response\"},\n \"pad\": {\"code\": \"pd\", \"label\": \"Pad\"},\n \"normalize\": {\"code\": \"nm\", \"label\": \"Normalize\"},\n \"multiply\": {\"code\": \"nm\", \"label\": \"Multiply\"},\n \"merge\": {\"code\": \"mg\", \"label\": \"Merge\"},\n \"lowpass_filter\": {\"code\": \"lp\", \"label\": \"Lowpass Filter\"},\n \"interpolate\": {\"code\": \"ip\", \"label\": \"Interpolate\"},\n \"integrate\": {\"code\": \"ig\", \"label\": \"Integrate\"},\n \"highpass_filter\": {\"code\": \"hp\", \"label\": \"Highpass Filter\"},\n \"divide\": {\"code\": \"dv\", \"label\": \"Divide\"},\n \"differentiate\": {\"code\": \"df\", \"label\": \"Differentiate\"},\n \"detrend\": {\"code\": \"dt\", \"label\": \"Detrend\"},\n \"decimate\": {\"code\": \"dc\", \"label\": \"Decimate\"},\n \"cut\": {\"code\": \"ct\", \"label\": \"Cut\"},\n \"cross_correlate\": {\"code\": \"co\", \"label\": \"Cross Correlate\"},\n \"calculate_adjoint_source\": {\"code\": \"ca\", \"label\": \"Calculate Adjoint Source\"},\n \"bandstop_filter\": {\"code\": \"bs\", \"label\": \"Bandstop Filter\"},\n \"bandpass_filter\": {\"code\": \"bp\", \"label\": \"Bandpass Filter\"},\n}\n\n\nclass StationTrace(Trace):\n \"\"\"Subclass of Obspy Trace object which holds more metadata.\n\n ObsPy provides a Trace object that serves as a container for waveform data\n from a single channel, as well as some basic metadata about the waveform\n start/end times, number of points, sampling rate/interval, and\n network/station/channel/location information.\n\n gmprocess subclasses the Trace object with a StationTrace object, which\n provides the following additional features:\n\n - Validation that length of data matches the number of points in the\n metadata.\n - Validation that required values are set in metadata.\n - A `fail` method which can be used by processing routines to mark when\n processing of the StationTrace has failed some sort of check (signal\n to noise ratio, etc.)\n - A `free_field` property which can be used to query the object to\n ensure that its data comes from a free-field sensor. Note: this is\n not always known reliably, and different people have have different\n definitions of the term free_field. When possible, we define a\n mapping between location code and the free_field property. For\n example, see the LOCATION_CODES variable core.py in\n `gmprocess.io.fdsn`.\n - Methods (e.g., `getProvenance`, `setProvenance`) for tracking\n processing steps that have been performed. 
These are aligned with the\n SEIS-PROV standard for processing provenance, described here:\n http://seismicdata.github.io/SEIS-PROV/_generated_details.html#activities\n - Methods (e.g., `getParameter` and `setParameter`) for tracking of\n arbitrary metadata in the form of a dictionary as trace property\n (self.parameters).\n \"\"\"\n\n def __init__(self, data=np.array([]), header=None, inventory=None, config=None):\n \"\"\"Construct a StationTrace instance.\n\n Args:\n data (ndarray):\n numpy array of points.\n header (dict-like):\n Dictionary of metadata (see trace.stats docs).\n inventory (Inventory):\n Obspy Inventory object.\n config (dict):\n Dictionary containing configuration.\n If None, retrieve global config.\n \"\"\"\n prov_response = None\n if config is None:\n config = get_config()\n if inventory is None and header is None:\n raise ValueError(\n \"Cannot create StationTrace without header info or Inventory\"\n )\n elif inventory is not None and header is not None:\n # End up here if the format was read in with ObsPy and an\n # inventory was able to be constructed (e.g., miniseed+StationXML)\n try:\n seed_id = \"%s.%s.%s.%s\" % (\n header[\"network\"],\n header[\"station\"],\n header[\"location\"],\n header[\"channel\"],\n )\n start_time = header[\"starttime\"]\n (response, standard, coords, format_specific) = _stats_from_inventory(\n data, inventory, seed_id, start_time\n )\n header[\"response\"] = response\n header[\"coordinates\"] = coords\n header[\"standard\"] = standard\n header[\"format_specific\"] = format_specific\n except BaseException as e:\n raise ValueError(\n \"Failed to construct required metadata from inventory \"\n \"and input header data with exception: %s\" % e\n )\n elif inventory is None and header is not None and \"standard\" not in header:\n # End up here for ObsPy without an inventory (e.g., SAC).\n # This assumes that all of our readers include the \"standard\" key\n # in the header and that ObsPy one's do not.\n\n # NOTE: we are assuming that an ObsPy file that does NOT have an\n # inventory has been converted to cm/s^2 via the configurable\n # conversion factor in the config file.\n prov_response = {\"input_units\": \"counts\", \"output_units\": \"cm/s^2\"}\n try:\n (response, standard, coords, format_specific) = _stats_from_header(\n header, config\n )\n header[\"response\"] = response\n header[\"coordinates\"] = coords\n header[\"standard\"] = standard\n header[\"format_specific\"] = format_specific\n except BaseException:\n raise ValueError(\n \"Failed to construct required metadata from header data.\"\n )\n\n # Sometimes the channel names do not indicate which one is the\n # Z channel. 
If we have vertical_orientation information, then\n # let's get that and change the vertical channel to end in Z.\n # NOTE: `vertical_orientation` here is defined as the angle\n # from horizontal (aka, dip), not inclination.\n if not np.isnan(header[\"standard\"][\"vertical_orientation\"]):\n delta = np.abs(np.abs(header[\"standard\"][\"vertical_orientation\"]) - 90.0)\n is_z = header[\"channel\"].endswith(\"Z\")\n if delta < MAX_DIP_OFFSET and not is_z:\n header[\"channel\"] = header[\"channel\"][0:-1] + \"Z\"\n\n # Apply conversion factor if one was specified for this format\n if (\n \"format_specific\" in header\n and \"conversion_factor\" in header[\"format_specific\"]\n ):\n data *= header[\"format_specific\"][\"conversion_factor\"]\n\n super(StationTrace, self).__init__(data=data, header=header)\n self.provenance = []\n if prov_response is not None:\n self.setProvenance(\"remove_response\", prov_response)\n self.parameters = {}\n self.cached = {}\n self.validate()\n\n @property\n def free_field(self):\n \"\"\"Is this station a free-field station?\n\n Returns:\n bool: True if a free-field sensor, False if not.\n \"\"\"\n stype = self.stats.standard[\"structure_type\"]\n non_free = [\n \"building\",\n \"bridge\",\n \"dam\",\n \"borehole\",\n \"hole\",\n \"crest\",\n \"toe\",\n \"foundation\",\n \"body\",\n \"roof\",\n \"floor\",\n ]\n for ftype in non_free:\n if re.search(ftype, stype.lower()) is not None:\n return False\n\n return True\n\n def fail(self, reason):\n \"\"\"Note that a check on this StationTrace failed for a given reason.\n\n This method will set the parameter \"failure\", and store the reason\n provided, plus the name of the calling function.\n\n Args:\n reason (str):\n Reason given for failure.\n\n \"\"\"\n istack = inspect.stack()\n calling_module = istack[1][3]\n self.setParameter(\"failure\", {\"module\": calling_module, \"reason\": reason})\n trace_id = \"%s\" % self.id\n logging.info(\"%s - %s - %s\" % (calling_module, trace_id, reason))\n\n def validate(self):\n \"\"\"Ensure that all required metadata fields have been set.\n\n Raises:\n KeyError:\n - When standard dictionary is missing required fields\n - When standard values are of the wrong type\n - When required values are set to a default.\n ValueError:\n - When number of points in header does not match data length.\n \"\"\"\n # here's something we thought obspy would do...\n # verify that npts matches length of data\n if self.stats.npts != len(self.data):\n raise ValueError(\n \"Number of points in header does not match the number of \"\n \"points in the data.\"\n )\n\n if \"remove_response\" not in self.getProvenanceKeys():\n self.stats.standard.units = \"raw counts\"\n else:\n self.stats.standard.units = REVERSE_UNITS[\n self.getProvenance(\"remove_response\")[0][\"output_units\"]\n ]\n\n # are all of the defined standard keys in the standard dictionary?\n req_keys = set(STANDARD_KEYS.keys())\n std_keys = set(list(self.stats.standard.keys()))\n if not req_keys <= std_keys:\n missing = str(req_keys - std_keys)\n raise KeyError(\n 'Missing standard values in StationTrace header: \"%s\"' % missing\n )\n type_errors = []\n required_errors = []\n for key in req_keys:\n keydict = STANDARD_KEYS[key]\n value = self.stats.standard[key]\n required = keydict[\"required\"]\n vtype = keydict[\"type\"]\n default = keydict[\"default\"]\n if not isinstance(value, vtype):\n type_errors.append(key)\n if required:\n if isinstance(default, list):\n if value not in default:\n required_errors.append(key)\n if value == 
default:\n required_errors.append(key)\n\n type_error_msg = \"\"\n if len(type_errors):\n fmt = 'The following standard keys have the wrong type: \"%s\"'\n tpl = \",\".join(type_errors)\n type_error_msg = fmt % tpl\n\n required_error_msg = \"\"\n if len(required_errors):\n fmt = 'The following standard keys are required: \"%s\"'\n tpl = \",\".join(required_errors)\n required_error_msg = fmt % tpl\n\n error_msg = type_error_msg + \"\\n\" + required_error_msg\n if len(error_msg.strip()):\n raise KeyError(error_msg)\n\n def getProvenanceKeys(self):\n \"\"\"Get a list of all available provenance keys.\n\n Returns:\n list: List of available provenance keys.\n \"\"\"\n if not len(self.provenance):\n return []\n pkeys = []\n for provdict in self.provenance:\n pkeys.append(provdict[\"prov_id\"])\n return pkeys\n\n def getProvenance(self, prov_id):\n \"\"\"Get seis-prov compatible attributes whose id matches prov_id.\n\n See http://seismicdata.github.io/SEIS-PROV/_generated_details.html\n\n Args:\n prov_id (str):\n Provenance ID (see URL above).\n\n Returns:\n list: Sequence of prov_attribute dictionaries (see URL above).\n \"\"\"\n matching_prov = []\n if not len(self.provenance):\n return matching_prov\n for provdict in self.provenance:\n if provdict[\"prov_id\"] == prov_id:\n matching_prov.append(provdict[\"prov_attributes\"])\n return matching_prov\n\n def setProvenance(self, prov_id, prov_attributes):\n \"\"\"Update a trace's provenance information.\n\n Args:\n trace (obspy.core.trace.Trace):\n Trace of strong motion dataself.\n prov_id (str):\n Activity prov:id (see URL above).\n prov_attributes (dict or list):\n Activity attributes for the given key.\n \"\"\"\n provdict = {\"prov_id\": prov_id, \"prov_attributes\": prov_attributes}\n self.provenance.append(provdict)\n self.validate()\n\n def getAllProvenance(self):\n \"\"\"Get internal list of processing history.\n\n Returns:\n list:\n Sequence of dictionaries containing fields:\n - prov_id Activity prov:id (see URL above).\n - prov_attributes Activity attributes for the given key.\n \"\"\"\n return self.provenance\n\n def getProvenanceDocument(self, base_prov=None):\n \"\"\"Generate a provenance document.\n\n Args:\n base_prov:\n Base provenance document.\n\n Returns:\n Provenance document.\n \"\"\"\n if base_prov is None:\n pr = prov.model.ProvDocument()\n pr.add_namespace(*NS_SEIS)\n pr = _get_person_agent(pr)\n pr = _get_software_agent(pr)\n pr = _get_waveform_entity(self, pr)\n else:\n pr = _get_waveform_entity(self, copy.deepcopy(base_prov))\n sequence = 1\n for provdict in self.getAllProvenance():\n provid = provdict[\"prov_id\"]\n prov_attributes = provdict[\"prov_attributes\"]\n if provid not in ACTIVITIES:\n fmt = \"Unknown or invalid processing parameter %s\"\n logging.debug(fmt % provid)\n continue\n pr = _get_activity(pr, provid, prov_attributes, sequence)\n sequence += 1\n return pr\n\n def setProvenanceDocument(self, provdoc):\n software = {}\n person = {}\n for record in provdoc.get_records():\n ident = record.identifier.localpart\n parts = ident.split(\"_\")\n sptype = parts[1]\n # hashid = '_'.join(parts[2:])\n # sp, sptype, hashid = ident.split('_')\n if sptype == \"sa\":\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if isinstance(attr_val, prov.identifier.Identifier):\n attr_val = attr_val.uri\n software[key] = attr_val\n elif sptype == \"pp\":\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if isinstance(attr_val, prov.identifier.Identifier):\n attr_val = 
attr_val.uri\n person[key] = attr_val\n elif sptype == \"wf\": # waveform tag\n continue\n else: # these are processing steps\n params = {}\n sptype = \"\"\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if key == \"label\":\n continue\n elif key == \"type\":\n _, sptype = attr_val.split(\":\")\n continue\n if isinstance(attr_val, datetime):\n attr_val = UTCDateTime(attr_val)\n params[key] = attr_val\n self.setProvenance(sptype, params)\n self.setParameter(\"software\", software)\n self.setParameter(\"user\", person)\n\n def hasParameter(self, param_id):\n \"\"\"Check to see if Trace contains a given parameter.\n\n Args:\n param_id (str): Name of parameter to check.\n\n Returns:\n bool: True if parameter is set, False if not.\n \"\"\"\n return param_id in self.parameters\n\n def setParameter(self, param_id, param_attributes):\n \"\"\"Add to the StationTrace's set of arbitrary metadata.\n\n Args:\n param_id (str):\n Key for parameters dictionary.\n param_attributes (dict or list):\n Parameters for the given key.\n \"\"\"\n self.parameters[param_id] = param_attributes\n\n def setCached(self, name, array_dict):\n \"\"\"Store a dictionary of arrays in StationTrace.\n\n Args:\n name (str):\n Name of data dictionary to be stored.\n array_dict (dict):\n Dictionary with:\n - key array name\n - value as numpy array\n \"\"\"\n self.cached[name] = array_dict\n\n def getCached(self, name):\n \"\"\"Retrieve a dictionary of arrays.\n\n Args:\n name (str):\n Name of dictionary to retrieve.\n Returns:\n dict: Dictionary of arrays (see setSpectrum).\n \"\"\"\n if name not in self.cached:\n raise KeyError(\"%s not in set of spectra arrays.\" % name)\n return self.cached[name]\n\n def hasCached(self, name):\n \"\"\"Check if StationTrace has cached attribute.\"\"\"\n if name not in self.cached:\n return False\n return True\n\n def getCachedNames(self):\n \"\"\"Return list of arrays that have been cached.\n\n Returns:\n list: List of cached arrays in this StationTrace.\n \"\"\"\n return list(self.cached.keys())\n\n def getParameterKeys(self):\n \"\"\"Get a list of all available parameter keys.\n\n Returns:\n list: List of available parameter keys.\n \"\"\"\n return list(self.parameters.keys())\n\n def getParameter(self, param_id):\n \"\"\"Retrieve some arbitrary metadata.\n\n Args:\n param_id (str):\n Key for parameters dictionary.\n\n Returns:\n dict or list:\n Parameters for the given key.\n \"\"\"\n if param_id not in self.parameters:\n raise KeyError(\"Parameter %s not found in StationTrace\" % param_id)\n return self.parameters[param_id]\n\n def getProvDataFrame(self):\n columns = [\"Process Step\", \"Process Attribute\", \"Process Value\"]\n df = pd.DataFrame(columns=columns)\n values = []\n attributes = []\n steps = []\n indices = []\n index = 0\n for activity in self.getAllProvenance():\n provid = activity[\"prov_id\"]\n provstep = ACTIVITIES[provid][\"label\"]\n prov_attrs = activity[\"prov_attributes\"]\n steps += [provstep] * len(prov_attrs)\n indices += [index] * len(prov_attrs)\n for key, value in prov_attrs.items():\n attributes.append(key)\n if isinstance(value, UTCDateTime):\n value = value.datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n values.append(str(value))\n index += 1\n\n mdict = {\n \"Index\": indices,\n \"Process Step\": steps,\n \"Process Attribute\": attributes,\n \"Process Value\": values,\n }\n df = pd.DataFrame(mdict)\n return df\n\n def getProvSeries(self):\n \"\"\"Return a pandas Series containing the processing history for the\n trace.\n\n BO.NGNH31.HN2 
Remove Response input_units counts\n - output_units cm/s^2\n - Taper side both\n - window_type Hann\n - taper_width 0.05\n\n Returns:\n Series:\n Pandas Series (see above).\n \"\"\"\n tpl = (self.stats.network, self.stats.station, self.stats.channel)\n recstr = \"%s.%s.%s\" % tpl\n values = []\n attributes = []\n steps = []\n for activity in self.getAllProvenance():\n provid = activity[\"prov_id\"]\n provstep = ACTIVITIES[provid][\"label\"]\n prov_attrs = activity[\"prov_attributes\"]\n steps += [provstep] * len(prov_attrs)\n for key, value in prov_attrs.items():\n attributes.append(key)\n values.append(str(value))\n records = [recstr] * len(attributes)\n index = [records, steps, attributes]\n row = pd.Series(values, index=index)\n return row\n\n def __str__(self, id_length=None, indent=0):\n \"\"\"\n Extends Trace __str__.\n \"\"\"\n # set fixed id width\n\n if id_length:\n out = \"%%-%ds\" % (id_length)\n trace_id = out % self.id\n else:\n trace_id = \"%s\" % self.id\n out = \"\"\n # output depending on delta or sampling rate bigger than one\n if self.stats.sampling_rate < 0.1:\n if hasattr(self.stats, \"preview\") and self.stats.preview:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(delta).1f s, %(npts)d samples [preview]\"\n )\n else:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \" + \"%(delta).1f s, %(npts)d samples\"\n )\n else:\n if hasattr(self.stats, \"preview\") and self.stats.preview:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(sampling_rate).1f Hz, %(npts)d samples [preview]\"\n )\n else:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(sampling_rate).1f Hz, %(npts)d samples\"\n )\n # check for masked array\n if np.ma.count_masked(self.data):\n out += \" (masked)\"\n if self.hasParameter(\"failure\"):\n out += \" (failed)\"\n else:\n out += \" (passed)\"\n ind_str = \" \" * indent\n return ind_str + trace_id + out % (self.stats)\n\n\ndef _stats_from_inventory(data, inventory, seed_id, start_time):\n if len(inventory.source):\n if inventory.sender is not None and inventory.sender != inventory.source:\n source = \"%s,%s\" % (inventory.source, inventory.sender)\n else:\n source = inventory.source\n\n network_code, station_code, location_code, channel_code = seed_id.split(\".\")\n\n selected_inventory = inventory.select(\n network=network_code,\n station=station_code,\n location=location_code,\n channel=channel_code,\n time=start_time,\n )\n\n station = selected_inventory.networks[0].stations[0]\n channel = station.channels[0]\n\n coords = {\n \"latitude\": channel.latitude,\n \"longitude\": channel.longitude,\n \"elevation\": channel.elevation,\n }\n\n standard = {}\n\n # things we'll never get from an inventory object\n standard[\"corner_frequency\"] = np.nan\n standard[\"instrument_damping\"] = np.nan\n standard[\"instrument_period\"] = np.nan\n standard[\"structure_type\"] = \"\"\n standard[\"process_time\"] = \"\"\n\n if data.dtype in INT_TYPES:\n standard[\"process_level\"] = \"raw counts\"\n else:\n standard[\"process_level\"] = \"uncorrected physical units\"\n\n standard[\"source\"] = source\n standard[\"source_file\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n if channel.sensor is not None:\n standard[\"instrument\"] = \"%s %s %s %s\" % (\n channel.sensor.type,\n channel.sensor.manufacturer,\n channel.sensor.model,\n channel.sensor.description,\n )\n if channel.sensor.serial_number is not None:\n standard[\"sensor_serial_number\"] = 
channel.sensor.serial_number\n else:\n standard[\"sensor_serial_number\"] = \"\"\n\n if channel.azimuth is not None:\n standard[\"horizontal_orientation\"] = channel.azimuth\n else:\n standard[\"horizontal_orientation\"] = np.nan\n\n if channel.dip is not None:\n # Note: vertical orientatin is defined here as angle from horizontal\n standard[\"vertical_orientation\"] = channel.dip\n else:\n standard[\"vertical_orientation\"] = np.nan\n\n standard[\"units_type\"] = get_units_type(channel_code)\n\n if len(channel.comments):\n comments = \" \".join(\n channel.comments[i].value for i in range(len(channel.comments))\n )\n standard[\"comments\"] = comments\n else:\n standard[\"comments\"] = \"\"\n standard[\"station_name\"] = \"\"\n if station.site.name != \"None\":\n standard[\"station_name\"] = station.site.name\n # extract the remaining standard info and format_specific info\n # from a JSON string in the station description.\n\n format_specific = {}\n if station.description is not None and station.description != \"None\":\n jsonstr = station.description\n try:\n big_dict = json.loads(jsonstr)\n standard.update(big_dict[\"standard\"])\n format_specific = big_dict[\"format_specific\"]\n except json.decoder.JSONDecodeError:\n format_specific[\"description\"] = jsonstr\n\n if \"source_format\" not in standard or standard[\"source_format\"] is None:\n standard[\"source_format\"] = \"fdsn\"\n\n standard[\"instrument_sensitivity\"] = np.nan\n response = None\n if channel.response is not None:\n response = channel.response\n if hasattr(response, \"instrument_sensitivity\"):\n units = response.instrument_sensitivity.input_units\n if \"/\" in units:\n num, denom = units.split(\"/\")\n if num.lower() not in LENGTH_CONVERSIONS:\n raise KeyError(\n \"Sensitivity input units of %s are not supported.\" % units\n )\n conversion = LENGTH_CONVERSIONS[num.lower()]\n sensitivity = response.instrument_sensitivity.value * conversion\n response.instrument_sensitivity.value = sensitivity\n standard[\"instrument_sensitivity\"] = sensitivity\n else:\n standard[\n \"instrument_sensitivity\"\n ] = response.instrument_sensitivity.value\n\n return (response, standard, coords, format_specific)\n\n\ndef _stats_from_header(header, config):\n if \"_format\" in header and header._format.lower() == \"sac\":\n # The plan is to add separate if blocks to support the different\n # formats as we encounter them here. 
See the SAC header documentation\n # here:\n # http://ds.iris.edu/files/sac-manual/manual/file_format.html\n\n # Todo: add support for SAC with PZ file.\n\n coords = {\n \"latitude\": header[\"sac\"][\"stla\"],\n \"longitude\": header[\"sac\"][\"stlo\"],\n \"elevation\": header[\"sac\"][\"stel\"],\n }\n standard = {}\n standard[\"corner_frequency\"] = np.nan\n standard[\"instrument_damping\"] = np.nan\n standard[\"instrument_period\"] = np.nan\n standard[\"structure_type\"] = \"\"\n standard[\"process_time\"] = \"\"\n standard[\"process_level\"] = \"uncorrected physical units\"\n standard[\"source\"] = config[\"read\"][\"sac_source\"]\n standard[\"source_file\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n standard[\"horizontal_orientation\"] = float(header[\"sac\"][\"cmpaz\"])\n # Note: vertical orientatin is defined here as angle from horizontal\n standard[\"vertical_orientation\"] = 90.0 - float(header[\"sac\"][\"cmpinc\"])\n utype = get_units_type(header[\"channel\"])\n standard[\"units_type\"] = utype\n standard[\"units\"] = UNITS[utype]\n standard[\"comments\"] = \"\"\n standard[\"station_name\"] = \"\"\n standard[\"station_name\"] = header[\"station\"]\n format_specific = {\n \"conversion_factor\": float(config[\"read\"][\"sac_conversion_factor\"])\n }\n standard[\"source_format\"] = header._format\n standard[\"instrument_sensitivity\"] = np.nan\n response = None\n else:\n raise Exception(\"Format unsuppored without StationXML file.\")\n\n return (response, standard, coords, format_specific)\n\n\ndef _get_software_agent(pr, gmprocess_version):\n \"\"\"Get the seis-prov entity for the gmprocess software.\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n gmprocess_version (str):\n gmprocess version.\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with gmprocess software name/version.\n \"\"\"\n software = \"gmprocess\"\n hashstr = \"0000001\"\n agent_id = \"seis_prov:sp001_sa_%s\" % hashstr\n giturl = \"https://github.com/usgs/groundmotion-processing\"\n pr.agent(\n agent_id,\n other_attributes=(\n (\n (\"prov:label\", software),\n (\n \"prov:type\",\n prov.identifier.QualifiedName(prov.constants.PROV, \"SoftwareAgent\"),\n ),\n (\"seis_prov:software_name\", software),\n (\"seis_prov:software_version\", gmprocess_version),\n (\n \"seis_prov:website\",\n prov.model.Literal(giturl, prov.constants.XSD_ANYURI),\n ),\n )\n ),\n )\n return pr\n\n\ndef _get_person_agent(pr, config=None):\n \"\"\"Get the seis-prov entity for the user software.\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n config (dict):\n Configuration options.\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with gmprocess software name/version.\n \"\"\"\n username = getpass.getuser()\n if config is None:\n config = get_config()\n fullname = \"\"\n email = \"\"\n if \"user\" in config:\n if \"name\" in config[\"user\"]:\n fullname = config[\"user\"][\"name\"]\n if \"email\" in config[\"user\"]:\n email = config[\"user\"][\"email\"]\n hashstr = \"0000001\"\n person_id = \"seis_prov:sp001_pp_%s\" % hashstr\n pr.agent(\n person_id,\n other_attributes=(\n (\n (\"prov:label\", username),\n (\n \"prov:type\",\n prov.identifier.QualifiedName(prov.constants.PROV, \"Person\"),\n ),\n (\"seis_prov:name\", fullname),\n (\"seis_prov:email\", email),\n )\n ),\n )\n return pr\n\n\ndef _get_waveform_entity(trace, pr):\n \"\"\"Get the 
seis-prov entity for an input Trace.\n\n Args:\n trace (Trace):\n Input Obspy Trace object.\n pr (Prov):\n prov.model.ProvDocument\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with waveform entity information.\n \"\"\"\n tpl = (\n trace.stats.network.lower(),\n trace.stats.station.lower(),\n trace.stats.channel.lower(),\n )\n waveform_hash = \"%s_%s_%s\" % tpl\n waveform_id = \"seis_prov:sp001_wf_%s\" % waveform_hash\n pr.entity(\n waveform_id,\n other_attributes=(\n (\n (\"prov:label\", \"Waveform Trace\"),\n (\"prov:type\", \"seis_prov:waveform_trace\"),\n )\n ),\n )\n return pr\n\n\ndef _get_activity(pr, activity, attributes, sequence):\n \"\"\"Get the seis-prov entity for an input processing \"activity\".\n\n See\n http://seismicdata.github.io/SEIS-PROV/_generated_details.html#activities\n\n for details on the types of activities that are possible to capture.\n\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n activity (str):\n The prov:id for the input activity.\n attributes (dict):\n The attributes associated with the activity.\n sequence (int):\n Integer used to identify the order in which the activities were\n performed.\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with input activity.\n \"\"\"\n activity_dict = ACTIVITIES[activity]\n hashid = \"%07i\" % sequence\n code = activity_dict[\"code\"]\n label = activity_dict[\"label\"]\n activity_id = \"sp%03i_%s_%s\" % (sequence, code, hashid)\n pr_attributes = [(\"prov:label\", label), (\"prov:type\", \"seis_prov:%s\" % activity)]\n for key, value in attributes.items():\n if isinstance(value, float):\n value = prov.model.Literal(value, prov.constants.XSD_DOUBLE)\n elif isinstance(value, int):\n value = prov.model.Literal(value, prov.constants.XSD_INT)\n elif isinstance(value, UTCDateTime):\n value = prov.model.Literal(\n value.strftime(TIMEFMT), prov.constants.XSD_DATETIME\n )\n\n att_tuple = (\"seis_prov:%s\" % key, value)\n pr_attributes.append(att_tuple)\n pr.activity(\"seis_prov:%s\" % activity_id, other_attributes=pr_attributes)\n return pr\n"
] | [
[
"pandas.read_pickle",
"numpy.testing.assert_allclose"
],
[
"pandas.Series",
"numpy.dtype",
"pandas.DataFrame",
"numpy.abs",
"numpy.ma.count_masked",
"numpy.isnan",
"numpy.array"
]
] |
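The provenance-export method at the top of this record collects parallel lists of record ids, processing steps, and attribute names, then wraps the values in a pandas Series keyed by a three-level index. A minimal sketch of that indexing pattern, with invented station and step names (not taken from the record):

```python
import pandas as pd

# Invented example values; the record above fills these lists from its
# provenance activities, one entry per (step, attribute) pair.
records = ["NP.STA01.HNE"] * 4
steps = ["Remove Response", "Remove Response", "Taper", "Taper"]
attributes = ["input_units", "output_units", "window_type", "width"]
values = ["counts", "cm/s^2", "Hann", "0.05"]

# A list of equal-length lists becomes a 3-level MultiIndex, so the Series
# can be sliced by record, by processing step, or by attribute name.
row = pd.Series(values, index=[records, steps, attributes])
print(row["NP.STA01.HNE"])
```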
romybu22/chameleon-smart-sampling | [
"d0f0588ed9d38e9c133482a68e84379c21892080"
] | [
"acr_module/acr/preprocessing/doc2vec_adressa.py"
] | [
"import argparse\nimport pandas as pd\nimport numpy as np\nimport re\nimport nltk\nfrom sklearn.preprocessing import LabelEncoder\n\n\nfrom ..utils import serialize\nfrom .tokenization import tokenize_articles, nan_to_str, convert_tokens_to_int, get_words_freq\n\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom nltk.tokenize import word_tokenize\n\n\ndef create_args_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--input_articles_csv_path', default='',\n help='Input path of the news CSV file.')\n\n parser.add_argument(\n '--output_article_content_embeddings', default='',\n help='')\n return parser\n\ndef load_input_csv(path):\n news_df = pd.read_csv(path, encoding = 'utf-8' \n #,nrows=1000\n )\n #Making sure articles are sorted by there encoded id\n news_df.sort_values('id_encoded', inplace=True)\n return news_df\n\n'''\ndef process_cat_features(dataframe):\n article_id_encoder = LabelEncoder()\n dataframe['id_encoded'] = article_id_encoder.fit_transform(dataframe['id'])\n\n #category_id_encoder = LabelEncoder()\n #dataframe['categoryid_encoded'] = category_id_encoder.fit_transform(dataframe['categoryid'])\n\n #domainid_encoder = LabelEncoder()\n #dataframe['domainid_encoded'] = domainid_encoder.fit_transform(dataframe['domainid'])\n\n\n return article_id_encoder#, category_id_encoder, domainid_encoder\n\n\ndef save_article_cat_encoders(output_path, article_id_encoder, category_id_encoder, domainid_encoder):\n to_serialize = {'article_id': article_id_encoder, \n 'category_id': category_id_encoder, \n 'publisher_id': domainid_encoder}\n serialize(output_path, to_serialize)\n'''\n\ndef tokenize_norwegian_article(text, first_sentences=12, max_words_length=1000):\n #Removing pipes for correct sentence tokenization\n text = text.replace('|', '.')\n words_tokenized = []\n sent_count = 0\n for sentence in nltk.tokenize.sent_tokenize(text, language='norwegian'): \n sent_tokenized = nltk.tokenize.word_tokenize(sentence, language='norwegian')\n if len(sent_tokenized) >= 3 and sent_tokenized[-1] in ['.', '!', '?', ';'] and \\\n sent_tokenized != ['Saken', 'oppdateres', '.']: \n sent_count += 1\n words_tokenized.extend(sent_tokenized) \n if sent_count == first_sentences:\n break\n return words_tokenized[:max_words_length]\n\n\ndef export_article_content_embeddings(content_article_embeddings, output_article_content_embeddings):\n output_path = output_article_content_embeddings\n print('Exporting ACR Label Encoders, Article metadata and embeddings to {}'.format(output_path))\n #to_serialize = (acr_label_encoders, articles_metadata_df, content_article_embeddings)\n to_serialize = content_article_embeddings\n serialize(output_path, to_serialize)\n\n\ndef main():\n parser = create_args_parser()\n args = parser.parse_args()\n\n print('Loading news article CSV: {}'.format(args.input_articles_csv_path))\n news_df = load_input_csv(args.input_articles_csv_path)\n print('N. 
docs: {}'.format(len(news_df)))\n\n '''\n print('Encoding categorical features')\n article_id_encoder, category_id_encoder, domainid_encoder = process_cat_features(news_df)\n print('Exporting LabelEncoders of categorical features: {}'.format(args.output_label_encoders))\n save_article_cat_encoders(args.output_label_encoders, \n article_id_encoder, \n category_id_encoder, \n domainid_encoder)\n '''\n\n print('Tokenizing articles...')\n tokenized_articles = tokenize_articles(news_df['text_highlights'].values, tokenization_fn=tokenize_norwegian_article)\n\n #print('Computing word frequencies...')\n #words_freq = get_words_freq(tokenized_articles)\n #print('Corpus vocabulary size: {}'.format(len(words_freq)))\n\n print('Processing documents...')\n tagged_data = [TaggedDocument(words=w, tags=[i]) for i, w in enumerate(tokenized_articles)] \n\n\n print('Training doc2vec')\n max_epochs = 30\n vec_size = 250\n alpha = 0.025\n model = Doc2Vec(vector_size=vec_size,\n alpha=alpha, \n min_alpha=alpha, \n window=5,\n negative=5,\n min_count=2, \n max_vocab_size=100000,\n dm = 1,\n dm_mean=1,\n workers=6)\n \n model.build_vocab(tagged_data)\n\n for epoch in range(max_epochs):\n print('iteration {0}'.format(epoch))\n model.train(tagged_data,\n total_examples=model.corpus_count,\n epochs=1) #model.iter)\n # decrease the learning rate\n model.alpha -= 0.0002\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\n\n del tokenized_articles\n\n\n #print('Encoding categorical features')\n #article_id_encoder = process_cat_features(news_df)\n\n print('Concatenating article content embeddings, making sure that they are sorted by the encoded article id')\n article_content_embeddings = np.vstack([model.docvecs[i-1] for i in news_df['id_encoded'].values]) \n embedding_for_padding_article = np.mean(article_content_embeddings, axis=0)\n content_article_embeddings_with_padding = np.vstack([embedding_for_padding_article, article_content_embeddings])\n del article_content_embeddings\n\n #Checking if content articles embedding size correspond to the last article_id\n assert content_article_embeddings_with_padding.shape[0] == news_df['id_encoded'].tail(1).values[0]+1\n\n print('Exporting article content embeddings')\n del news_df\n export_article_content_embeddings(content_article_embeddings_with_padding, args.output_article_content_embeddings)\n\n #Ps: To experiment with these doc2vec embeddings, it is necessary to deserialize \"acr_articles_metadata_embeddings.pickle\", substitute the content_article_embedding and serialize for further usage by NAR module\n #This is made by acr_module/notebooks/ACR_Results_Visualization_Gcom_doc2vec.ipynb\n\nif __name__ == '__main__':\n main()\n\n\n'''\nDATA_DIR=/media/data/projects/personal/doutorado/adressa_news/data_transformed && \\\npython3 -m acr.preprocessing.doc2vec_adressa \\\n --input_articles_csv_path ${DATA_DIR}/articles_tfrecords_v4_first_12_sent.csv \\\n --output_article_content_embeddings ${DATA_DIR}/pickles_v4/article_content_embeddings_v4_doc2vec.pickle\n\n#--input_articles_csv_path ${DATA_DIR}/adressa_articles.csv \\\n#--output_article_content_embeddings ${DATA_DIR}/pickles/article_content_embeddings_doc2vec.pickle \n'''"
] | [
[
"pandas.read_csv",
"numpy.mean",
"numpy.vstack"
]
] |
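The doc2vec script in this record tags each tokenized article with its position and trains gensim's Doc2Vec over 30 manually decayed epochs. A small self-contained sketch of the same TaggedDocument/train pattern on toy tokens, written against gensim >= 4 (where per-document vectors live under `model.dv`; the record itself uses the older `model.docvecs` accessor):

```python
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

# Two toy "articles", already tokenized; tags mirror the enumerate() pattern above.
docs = [TaggedDocument(words=["nyheter", "fra", "oslo"], tags=[0]),
        TaggedDocument(words=["sport", "fra", "bergen"], tags=[1])]

model = Doc2Vec(vector_size=16, window=2, min_count=1, dm=1, epochs=20, workers=1)
model.build_vocab(docs)
model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)

print(model.dv[0].shape)  # one 16-dimensional embedding per tagged document
```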
mabrahamdevops/python_notebooks | [
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330"
] | [
"notebooks/__code/radial_profile/event_handler.py"
] | [
"import numpy as np\nimport pyqtgraph as pg\nfrom qtpy import QtGui\n\nfrom __code._utilities.parent import Parent\nfrom __code.radial_profile.display import Display\n\n\nclass EventHandler(Parent):\n\n def file_index_changed(self):\n file_index = self.parent.ui.slider.value()\n live_image = self.parent.get_selected_image(file_index)\n\n _view = self.parent.ui.image_view.getView()\n _view_box = _view.getViewBox()\n _state = _view_box.getState()\n\n first_update = False\n if self.parent.histogram_level == []:\n first_update = True\n _histo_widget = self.parent.ui.image_view.getHistogramWidget()\n self.parent.histogram_level = _histo_widget.getLevels()\n\n _image = np.transpose(live_image)\n self.parent.ui.image_view.setImage(_image)\n self.parent.live_image = _image\n _view_box.setState(_state)\n\n if not first_update:\n _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])\n\n def guide_color_changed(self):\n red = self.parent.ui.guide_red_slider.value()\n green = self.parent.ui.guide_green_slider.value()\n blue = self.parent.ui.guide_blue_slider.value()\n alpha = self.parent.ui.guide_alpha_slider.value()\n self.parent.guide_color_slider['red'] = red\n self.parent.guide_color_slider['green'] = green\n self.parent.guide_color_slider['blue'] = blue\n self.parent.guide_color_slider['alpha'] = alpha\n self.circle_center_changed()\n\n self.parent.ui.image_view.removeItem(self.parent.line_view_binning)\n\n o_display = Display(parent=self.parent)\n o_display.grid()\n\n def circle_center_changed(self):\n if self.parent.ui.sector_full_circle.isChecked():\n if self.parent.sector_g:\n self.parent.ui.image_view.removeItem(self.parent.sector_g)\n return\n\n x0 = float(self.parent.ui.circle_x.text())\n y0 = float(self.parent.ui.circle_y.text())\n from_angle = np.float(str(self.parent.ui.sector_from_value.text()))\n to_angle = np.float(str(self.parent.ui.sector_to_value.text()))\n\n self.calculate_corners_angles()\n self.update_angle_label_position()\n\n [y1, x1] = self.calculate_sector_xy_position(angle=from_angle, x0=x0, y0=y0)\n [y2, x2] = self.calculate_sector_xy_position(angle=to_angle, x0=x0, y0=y0)\n\n pos = np.array([[x0, y0], [x1, y1], [x2, y2]])\n adj = np.array([[0, 1], [1, 2], [2, 0]])\n\n symbols = ['+', 'o', 'o']\n\n lines = np.array([(255, 0, 0, 255, 2), (255, 0, 0, 0, 1), (255, 0, 0, 255, 2)],\n dtype=[('red', np.ubyte), ('green', np.ubyte), ('blue', np.ubyte), ('alpha', np.ubyte),\n ('width', float)])\n\n if self.parent.sector_g:\n self.parent.ui.image_view.removeItem(self.parent.sector_g)\n self.parent.sector_g = pg.GraphItem()\n self.parent.ui.image_view.addItem(self.parent.sector_g)\n self.parent.sector_g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False)\n \n def update_angle_label_position(self):\n x0 = np.int(str(self.parent.ui.circle_x.text()))\n y0 = np.int(str(self.parent.ui.circle_y.text()))\n\n # add angle 0, 90, 180 and 270 labels\n if self.parent.angle_0 is None:\n self.parent.angle_0 = pg.TextItem(text=u'0\\u00b0', anchor=(0, 1))\n self.parent.angle_90 = pg.TextItem(text=u'90\\u00b0', anchor=(0, 1))\n self.parent.angle_180 = pg.TextItem(text=u'180\\u00b0', anchor=(0, 0))\n self.parent.angle_270 = pg.TextItem(text=u'270\\u00b0', anchor=(1, 1))\n\n self.parent.ui.image_view.addItem(self.parent.angle_0)\n self.parent.ui.image_view.addItem(self.parent.angle_90)\n self.parent.ui.image_view.addItem(self.parent.angle_180)\n self.parent.ui.image_view.addItem(self.parent.angle_270)\n\n self.parent.angle_0.setPos(np.int(x0), 
0)\n self.parent.angle_90.setPos(self.parent.height, y0)\n self.parent.angle_180.setPos(x0, self.parent.width)\n self.parent.angle_270.setPos(0, y0)\n \n def calculate_sector_xy_position(self, angle=0, x0=0, y0=0):\n x = np.NaN\n y = np.NaN\n\n angle_top_right = self.parent.corners['top_right']\n angle_bottom_right = self.parent.corners['bottom_right']\n angle_bottom_left = self.parent.corners['bottom_left']\n angle_top_left = self.parent.corners['top_left']\n\n # print(\"angle_top_right: {}\".format(angle_top_right))\n # print(\"angle_bottom_right: {}\".format(angle_bottom_right))\n # print(\"angle_bottom_left: {}\".format(angle_bottom_left))\n # print(\"angle_top_left: {}\".format(angle_top_left))\n\n if (angle_top_right <= angle) and \\\n (angle <= angle_bottom_right):\n # right\n\n # get x\n x = self.parent.height\n\n # get y\n _angle = np.abs(90 - angle)\n\n if angle == 90:\n y = 0\n else:\n angle_rad = np.deg2rad(_angle)\n y = np.tan(angle_rad) * (self.parent.height - x0)\n\n if angle <= 90:\n y = y0 - y\n else:\n y = y0 + y\n\n elif angle_bottom_right < angle < angle_bottom_left:\n # bottom\n\n # get y\n y = self.parent.width\n\n # get x\n _angle = np.abs(180 - angle)\n\n if angle == 180:\n x = 0\n else:\n angle_rad = np.deg2rad(_angle)\n x = (y - y0) * np.tan(angle_rad)\n\n if angle <= 180:\n x = x0 + x\n else:\n x = x0 - x\n\n elif angle_bottom_left <= angle <= angle_top_left:\n # left\n\n # get x\n x = 0\n\n # get y\n _angle = np.abs(270 - angle)\n\n if angle == 270:\n y = 0\n else:\n angle_rad = np.deg2rad(_angle)\n y = np.tan(angle_rad) * x0\n\n if angle <= 270:\n y = y0 + y\n else:\n y = y0 - y\n\n else:\n # top\n\n # get y\n y = 0\n\n # get x\n b_right_part = True\n if angle > angle_top_left:\n angle = np.abs(360 - angle)\n b_right_part = False\n\n if angle == 0:\n x = 0\n else:\n angle_rad = np.deg2rad(angle)\n x = y0 * np.tan(angle_rad)\n\n if b_right_part:\n x = x0 + x\n else:\n x = x0 - x\n\n return [y, x]\n \n def calculate_corners_angles(self):\n '''top vertical being angle 0'''\n\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n\n width = self.parent.width\n height = self.parent.height\n # width = self.parent.height\n # height = self.parent.width\n\n theta_tr = np.NaN # angle top right\n theta_br = np.NaN # bottom right\n theta_bl = np.NaN # bottom left\n theta_tl = np.NaN # top left\n\n theta_tr = np.arctan((width - x0) / y0)\n theta_tr_deg = np.rad2deg(theta_tr)\n\n theta_br = np.pi - np.arctan((width - x0) / (height - y0))\n theta_br_deg = np.rad2deg(theta_br)\n\n theta_bl = np.pi + np.arctan(x0 / (height - y0))\n theta_bl_deg = np.rad2deg(theta_bl)\n\n theta_tl = 2 * np.pi - np.arctan(x0 / y0)\n theta_tl_deg = np.rad2deg(theta_tl)\n\n self.parent.corners['top_right'] = theta_tr_deg\n self.parent.corners['bottom_right'] = theta_br_deg\n self.parent.corners['bottom_left'] = theta_bl_deg\n self.parent.corners['top_left'] = theta_tl_deg\n \n def sector_radio_button_changed(self):\n is_full_circle = self.parent.ui.sector_full_circle.isChecked()\n if is_full_circle:\n _status_sector = False\n self.remove_angle_label()\n else:\n _status_sector = True\n self.update_angle_label_position()\n\n self.parent.ui.sector_from_label.setEnabled(_status_sector)\n self.parent.ui.sector_from_value.setEnabled(_status_sector)\n self.parent.ui.sector_from_units.setEnabled(_status_sector)\n self.parent.ui.sector_to_label.setEnabled(_status_sector)\n self.parent.ui.sector_to_value.setEnabled(_status_sector)\n 
self.parent.ui.sector_to_units.setEnabled(_status_sector)\n self.parent.ui.from_angle_slider.setEnabled(_status_sector)\n self.parent.ui.to_angle_slider.setEnabled(_status_sector)\n self.parent.sector_changed()\n\n def remove_angle_label(self):\n if self.parent.angle_0:\n self.parent.ui.image_view.removeItem(self.parent.angle_0)\n\n if self.parent.angle_90:\n self.parent.ui.image_view.removeItem(self.parent.angle_90)\n\n if self.parent.angle_180:\n self.parent.ui.image_view.removeItem(self.parent.angle_180)\n\n if self.parent.angle_270:\n self.parent.ui.image_view.removeItem(self.parent.angle_270)\n\n self.parent.angle_0 = None\n self.parent.angle_90 = None\n self.parent.angle_180 = None\n self.parent.angle_270 = None\n\n def update_max_radius_item(self):\n is_max_radius_selected = self.parent.ui.max_radius_radioButton.isChecked()\n self.max_radius_handler(is_max_radius_selected=is_max_radius_selected)\n\n def max_radius_handler(self, is_max_radius_selected=None):\n if self.parent.max_radius_item:\n self.parent.ui.image_view.removeItem(self.parent.max_radius_item)\n\n if is_max_radius_selected:\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n max_radius = self.parent.ui.max_radius_slider.value()\n\n _pen = QtGui.QPen()\n _pen.setColor(QtGui.QColor(0, 0, 255))\n _pen.setWidth(0.4)\n\n self.parent.max_radius_item = pg.CircleROI([x0 - max_radius, y0 - max_radius],\n [2*max_radius, 2*max_radius],\n movable=False,\n resizable=False,\n pen=_pen)\n handles = self.parent.max_radius_item.getHandles()\n self.parent.ui.image_view.addItem(self.parent.max_radius_item)\n for _handle in handles:\n self.parent.max_radius_item.removeHandle(_handle)\n\n def retrieve_max_radius_possible(self):\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n width = self.parent.width\n height = self.parent.height\n\n def lenght_is(x=0, y=0):\n return np.sqrt(x**2 + y**2)\n\n # to top left distance\n x = x0\n y = y0\n top_left = lenght_is(x=x, y=y)\n\n # to top right distance\n x = width - x0\n y = y0\n top_right = lenght_is(x=x, y=y)\n\n # to bottom left corner\n x = x0\n y = height - y0\n bottom_left = lenght_is(x=x, y=y)\n\n # to bottom right corner\n x = width - x0\n y = height - y0\n bottom_right = lenght_is(x=x, y=y)\n\n max_distance = np.max([top_left, top_right, bottom_left, bottom_right])\n return max_distance\n\n def update_max_radius_value(self):\n max_radius = self.retrieve_max_radius_possible()\n current_radius_value = self.parent.ui.max_radius_slider.value()\n if current_radius_value > max_radius:\n self.parent.ui.max_radius_slider.setValue(max_radius)\n self.parent.ui.max_radius_slider.setMaximum(max_radius)\n"
] | [
[
"numpy.sqrt",
"numpy.transpose",
"numpy.rad2deg",
"numpy.arctan",
"numpy.abs",
"numpy.max",
"numpy.tan",
"numpy.deg2rad",
"numpy.array",
"numpy.int"
]
] |
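calculate_corners_angles() in this record is the geometric core: it converts the circle centre and image size into the angles of the four image corners, with the vertical through the centre taken as 0 degrees, and the sector code then compares a requested angle against those corners. A standalone sketch of that geometry under the same convention (note that the np.float/np.int/np.NaN aliases in the original have since been removed from NumPy; plain Python floats behave the same here):

```python
import numpy as np

def corner_angles(x0, y0, width, height):
    """Angles (degrees) of the four image corners as seen from (x0, y0),
    with the vertical through the centre taken as 0 degrees."""
    top_right = np.rad2deg(np.arctan((width - x0) / y0))
    bottom_right = np.rad2deg(np.pi - np.arctan((width - x0) / (height - y0)))
    bottom_left = np.rad2deg(np.pi + np.arctan(x0 / (height - y0)))
    top_left = np.rad2deg(2 * np.pi - np.arctan(x0 / y0))
    return top_right, bottom_right, bottom_left, top_left

# Centred in a square image the corners sit at 45, 135, 225 and 315 degrees.
print(corner_angles(x0=256.0, y0=256.0, width=512, height=512))
```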
pything/draugr | [
"2fda662f2fa97236e4495a6af2b8237516fa428b"
] | [
"draugr/visualisation/matplotlib_utilities/styles/cyclers.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Christian Heider Nielsen\"\n__doc__ = r\"\"\"\n\n Created on 18-02-2021\n \"\"\"\n\n__all__ = [\n \"monochrome_hatch_cycler\",\n \"simple_hatch_cycler\",\n \"monochrome_line_no_marker_cycler\",\n \"monochrome_line_cycler\",\n]\n\nfrom matplotlib import cycler\n\nfrom draugr.visualisation.matplotlib_utilities.styles.hatching import (\n four_times_denser_hatch,\n)\nfrom draugr.visualisation.matplotlib_utilities.styles.lines import (\n line_styles,\n marker_styles,\n)\n\nsimple_hatch_cycler = cycler(\"hatch\", four_times_denser_hatch)\nmonochrome_hatch_cycler = (\n cycler(\"color\", \"w\")\n * cycler(\"facecolor\", \"w\")\n * cycler(\"edgecolor\", \"k\")\n * simple_hatch_cycler\n)\n\nmonochrome_line_no_marker_cycler = cycler(\"color\", [\"k\"]) * cycler(\n \"linestyle\", line_styles\n)\n\nmonochrome_line_cycler = (\n cycler(\"color\", [\"k\"])\n * cycler(\"linestyle\", line_styles)\n * cycler(\"marker\", marker_styles)\n)\n\nif __name__ == \"__main__\":\n print([a for _, a in zip(range(10), monochrome_line_cycler)])\n"
] | [
[
"matplotlib.cycler"
]
] |
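The cyclers in this record are built by multiplying single-property cyclers, which takes the Cartesian product of their value lists. A sketch of the same construction with explicit example lists (the record's `line_styles`/`marker_styles` come from draugr modules not shown here), applied as the default axes property cycle:

```python
import matplotlib.pyplot as plt
from matplotlib import cycler

# Example style lists standing in for draugr's styles.lines module.
line_styles = ["-", "--", ":", "-."]
marker_styles = [".", "o", "s"]

# Multiplying cyclers yields their Cartesian product: 1 * 4 * 3 = 12 combinations.
monochrome_line_cycler = (cycler("color", ["k"])
                          * cycler("linestyle", line_styles)
                          * cycler("marker", marker_styles))

plt.rc("axes", prop_cycle=monochrome_line_cycler)  # later plots cycle through them
print(len(list(monochrome_line_cycler)))
```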
emarkou/scikit-learn | [
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025"
] | [
"examples/compose/plot_compare_reduction.py",
"sklearn/utils/random.py",
"benchmarks/bench_plot_randomized_svd.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n=================================================================\nSelecting dimensionality reduction with Pipeline and GridSearchCV\n=================================================================\n\nThis example constructs a pipeline that does dimensionality\nreduction followed by prediction with a support vector\nclassifier. It demonstrates the use of ``GridSearchCV`` and\n``Pipeline`` to optimize over different classes of estimators in a\nsingle CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality\nreductions are compared to univariate feature selection during\nthe grid search.\n\nAdditionally, ``Pipeline`` can be instantiated with the ``memory``\nargument to memoize the transformers within the pipeline, avoiding to fit\nagain the same transformers over and over.\n\nNote that the use of ``memory`` to enable caching becomes interesting when the\nfitting of a transformer is costly.\n\n# %%\nIllustration of ``Pipeline`` and ``GridSearchCV``\n###############################################################################\n\nThis section illustrates the use of a ``Pipeline`` with ``GridSearchCV``\n\"\"\"\n\n# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.decomposition import PCA, NMF\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nprint(__doc__)\n\npipe = Pipeline([\n # the reduce_dim stage is populated by the param_grid\n ('reduce_dim', 'passthrough'),\n ('classify', LinearSVC(dual=False, max_iter=10000))\n])\n\nN_FEATURES_OPTIONS = [2, 4, 8]\nC_OPTIONS = [1, 10, 100, 1000]\nparam_grid = [\n {\n 'reduce_dim': [PCA(iterated_power=7), NMF()],\n 'reduce_dim__n_components': N_FEATURES_OPTIONS,\n 'classify__C': C_OPTIONS\n },\n {\n 'reduce_dim': [SelectKBest(chi2)],\n 'reduce_dim__k': N_FEATURES_OPTIONS,\n 'classify__C': C_OPTIONS\n },\n]\nreducer_labels = ['PCA', 'NMF', 'KBest(chi2)']\n\ngrid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)\nX, y = load_digits(return_X_y=True)\ngrid.fit(X, y)\n\nmean_scores = np.array(grid.cv_results_['mean_test_score'])\n# scores are in the order of param_grid iteration, which is alphabetical\nmean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))\n# select score for best C\nmean_scores = mean_scores.max(axis=0)\nbar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *\n (len(reducer_labels) + 1) + .5)\n\nplt.figure()\nCOLORS = 'bgrcmyk'\nfor i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):\n plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])\n\nplt.title(\"Comparing feature reduction techniques\")\nplt.xlabel('Reduced number of features')\nplt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)\nplt.ylabel('Digit classification accuracy')\nplt.ylim((0, 1))\nplt.legend(loc='upper left')\n\nplt.show()\n\n# %%\n# Caching transformers within a ``Pipeline``\n###############################################################################\n# It is sometimes worthwhile storing the state of a specific transformer\n# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers\n# such situations. Therefore, we use the argument ``memory`` to enable caching.\n#\n# .. 
warning::\n# Note that this example is, however, only an illustration since for this\n# specific case fitting PCA is not necessarily slower than loading the\n# cache. Hence, use the ``memory`` constructor parameter when the fitting\n# of a transformer is costly.\n\nfrom joblib import Memory\nfrom shutil import rmtree\n\n# Create a temporary folder to store the transformers of the pipeline\nlocation = 'cachedir'\nmemory = Memory(location=location, verbose=10)\ncached_pipe = Pipeline([('reduce_dim', PCA()),\n ('classify', LinearSVC(dual=False, max_iter=10000))],\n memory=memory)\n\n# This time, a cached pipeline will be used within the grid search\n\n\n# Delete the temporary cache before exiting\nmemory.clear(warn=False)\nrmtree(location)\n\n# %%\n# The ``PCA`` fitting is only computed at the evaluation of the first\n# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The\n# other configurations of ``C`` will trigger the loading of the cached ``PCA``\n# estimator data, leading to save processing time. Therefore, the use of\n# caching the pipeline using ``memory`` is highly beneficial when fitting\n# a transformer is costly.\n",
"# Author: Hamzeh Alsalhi <[email protected]>\n#\n# License: BSD 3 clause\nimport numpy as np\nimport scipy.sparse as sp\nimport array\n\nfrom . import check_random_state\nfrom ._random import sample_without_replacement\n\n__all__ = ['sample_without_replacement']\n\n\ndef _random_choice_csc(n_samples, classes, class_probability=None,\n random_state=None):\n \"\"\"Generate a sparse random matrix given column class distributions\n\n Parameters\n ----------\n n_samples : int,\n Number of samples to draw in each column.\n\n classes : list of size n_outputs of arrays of size (n_classes,)\n List of classes for each column.\n\n class_probability : list of size n_outputs of arrays of \\\n shape (n_classes,), default=None\n Class distribution of each column. If None, uniform distribution is\n assumed.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the sampled classes.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n random_matrix : sparse csc matrix of size (n_samples, n_outputs)\n\n \"\"\"\n data = array.array('i')\n indices = array.array('i')\n indptr = array.array('i', [0])\n\n for j in range(len(classes)):\n classes[j] = np.asarray(classes[j])\n if classes[j].dtype.kind != 'i':\n raise ValueError(\"class dtype %s is not supported\" %\n classes[j].dtype)\n classes[j] = classes[j].astype(np.int64, copy=False)\n\n # use uniform distribution if no class_probability is given\n if class_probability is None:\n class_prob_j = np.empty(shape=classes[j].shape[0])\n class_prob_j.fill(1 / classes[j].shape[0])\n else:\n class_prob_j = np.asarray(class_probability[j])\n\n if not np.isclose(np.sum(class_prob_j), 1.0):\n raise ValueError(\"Probability array at index {0} does not sum to \"\n \"one\".format(j))\n\n if class_prob_j.shape[0] != classes[j].shape[0]:\n raise ValueError(\"classes[{0}] (length {1}) and \"\n \"class_probability[{0}] (length {2}) have \"\n \"different length.\".format(j,\n classes[j].shape[0],\n class_prob_j.shape[0]))\n\n # If 0 is not present in the classes insert it with a probability 0.0\n if 0 not in classes[j]:\n classes[j] = np.insert(classes[j], 0, 0)\n class_prob_j = np.insert(class_prob_j, 0, 0.0)\n\n # If there are nonzero classes choose randomly using class_probability\n rng = check_random_state(random_state)\n if classes[j].shape[0] > 1:\n p_nonzero = 1 - class_prob_j[classes[j] == 0]\n nnz = int(n_samples * p_nonzero)\n ind_sample = sample_without_replacement(n_population=n_samples,\n n_samples=nnz,\n random_state=random_state)\n indices.extend(ind_sample)\n\n # Normalize probabilities for the nonzero elements\n classes_j_nonzero = classes[j] != 0\n class_probability_nz = class_prob_j[classes_j_nonzero]\n class_probability_nz_norm = (class_probability_nz /\n np.sum(class_probability_nz))\n classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),\n rng.rand(nnz))\n data.extend(classes[j][classes_j_nonzero][classes_ind])\n indptr.append(len(indices))\n\n return sp.csc_matrix((data, indices, indptr),\n (n_samples, len(classes)),\n dtype=int)\n",
"\"\"\"\nBenchmarks on the power iterations phase in randomized SVD.\n\nWe test on various synthetic and real datasets the effect of increasing\nthe number of power iterations in terms of quality of approximation\nand running time. A number greater than 0 should help with noisy matrices,\nwhich are characterized by a slow spectral decay.\n\nWe test several policy for normalizing the power iterations. Normalization\nis crucial to avoid numerical issues.\n\nThe quality of the approximation is measured by the spectral norm discrepancy\nbetween the original input matrix and the reconstructed one (by multiplying\nthe randomized_svd's outputs). The spectral norm is always equivalent to the\nlargest singular value of a matrix. (3) justifies this choice. However, one can\nnotice in these experiments that Frobenius and spectral norms behave\nvery similarly in a qualitative sense. Therefore, we suggest to run these\nbenchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to\ncompute.\n\nThe benchmarks follow.\n\n(a) plot: time vs norm, varying number of power iterations\n data: many datasets\n goal: compare normalization policies and study how the number of power\n iterations affect time and norm\n\n(b) plot: n_iter vs norm, varying rank of data and number of components for\n randomized_SVD\n data: low-rank matrices on which we control the rank\n goal: study whether the rank of the matrix and the number of components\n extracted by randomized SVD affect \"the optimal\" number of power iterations\n\n(c) plot: time vs norm, varying datasets\n data: many datasets\n goal: compare default configurations\n\nWe compare the following algorithms:\n- randomized_svd(..., power_iteration_normalizer='none')\n- randomized_svd(..., power_iteration_normalizer='LU')\n- randomized_svd(..., power_iteration_normalizer='QR')\n- randomized_svd(..., power_iteration_normalizer='auto')\n- fbpca.pca() from https://github.com/facebook/fbpca (if installed)\n\nConclusion\n----------\n- n_iter=2 appears to be a good default value\n- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU\n gives similar errors to QR but is cheaper. That's what 'auto' implements.\n\nReferences\n----------\n(1) Finding structure with randomness: Stochastic algorithms for constructing\n approximate matrix decompositions\n Halko, et al., 2009 https://arxiv.org/abs/0909.4061\n\n(2) A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert\n\n(3) An implementation of a randomized algorithm for principal component\n analysis\n A. Szlam et al. 
2014\n\"\"\"\n\n# Author: Giorgio Patrini\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nimport gc\nimport pickle\nfrom time import time\nfrom collections import defaultdict\nimport os.path\n\nfrom sklearn.utils._arpack import _init_arpack_v0\nfrom sklearn.utils import gen_batches\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.datasets import make_low_rank_matrix, make_sparse_uncorrelated\nfrom sklearn.datasets import (fetch_lfw_people,\n fetch_openml,\n fetch_20newsgroups_vectorized,\n fetch_olivetti_faces,\n fetch_rcv1)\n\ntry:\n import fbpca\n fbpca_available = True\nexcept ImportError:\n fbpca_available = False\n\n# If this is enabled, tests are much slower and will crash with the large data\nenable_spectral_norm = False\n\n# TODO: compute approximate spectral norms with the power method as in\n# Estimating the largest eigenvalues by the power and Lanczos methods with\n# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on\n# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.\n# This approximation is a very fast estimate of the spectral norm, but depends\n# on starting random vectors.\n\n# Determine when to switch to batch computation for matrix norms,\n# in case the reconstructed (dense) matrix is too large\nMAX_MEMORY = int(2e9)\n\n# The following datasets can be downloaded manually from:\n# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat\nCIFAR_FOLDER = \"./cifar-10-batches-py/\"\nSVHN_FOLDER = \"./SVHN/\"\n\ndatasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',\n 'mnist_784', 'CIFAR', 'a3a', 'SVHN', 'uncorrelated matrix']\n\nbig_sparse_datasets = ['big sparse matrix', 'rcv1']\n\n\ndef unpickle(file_name):\n with open(file_name, 'rb') as fo:\n return pickle.load(fo, encoding='latin1')[\"data\"]\n\n\ndef handle_missing_dataset(file_folder):\n if not os.path.isdir(file_folder):\n print(\"%s file folder not found. 
Test skipped.\" % file_folder)\n return 0\n\n\ndef get_data(dataset_name):\n print(\"Getting dataset: %s\" % dataset_name)\n\n if dataset_name == 'lfw_people':\n X = fetch_lfw_people().data\n elif dataset_name == '20newsgroups':\n X = fetch_20newsgroups_vectorized().data[:, :100000]\n elif dataset_name == 'olivetti_faces':\n X = fetch_olivetti_faces().data\n elif dataset_name == 'rcv1':\n X = fetch_rcv1().data\n elif dataset_name == 'CIFAR':\n if handle_missing_dataset(CIFAR_FOLDER) == \"skip\":\n return\n X1 = [unpickle(\"%sdata_batch_%d\" % (CIFAR_FOLDER, i + 1))\n for i in range(5)]\n X = np.vstack(X1)\n del X1\n elif dataset_name == 'SVHN':\n if handle_missing_dataset(SVHN_FOLDER) == 0:\n return\n X1 = sp.io.loadmat(\"%strain_32x32.mat\" % SVHN_FOLDER)['X']\n X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]\n X = np.vstack(X2)\n del X1\n del X2\n elif dataset_name == 'low rank matrix':\n X = make_low_rank_matrix(n_samples=500, n_features=int(1e4),\n effective_rank=100, tail_strength=.5,\n random_state=random_state)\n elif dataset_name == 'uncorrelated matrix':\n X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,\n random_state=random_state)\n elif dataset_name == 'big sparse matrix':\n sparsity = int(1e6)\n size = int(1e6)\n small_size = int(1e4)\n data = np.random.normal(0, 1, int(sparsity/10))\n data = np.repeat(data, 10)\n row = np.random.uniform(0, small_size, sparsity)\n col = np.random.uniform(0, small_size, sparsity)\n X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))\n del data\n del row\n del col\n else:\n X = fetch_openml(dataset_name).data\n return X\n\n\ndef plot_time_vs_s(time, norm, point_labels, title):\n plt.figure()\n colors = ['g', 'b', 'y']\n for i, l in enumerate(sorted(norm.keys())):\n if l != \"fbpca\":\n plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())\n else:\n plt.plot(time[l], norm[l], label=l, marker='^', c='red')\n\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, -20),\n textcoords='offset points', ha='right', va='bottom')\n plt.legend(loc=\"upper right\")\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"running time [s]\")\n\n\ndef scatter_time_vs_s(time, norm, point_labels, title):\n plt.figure()\n size = 100\n for i, l in enumerate(sorted(norm.keys())):\n if l != \"fbpca\":\n plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, -80),\n textcoords='offset points', ha='right',\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n va='bottom', size=11, rotation=90)\n else:\n plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, 30),\n textcoords='offset points', ha='right',\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n va='bottom', size=11, rotation=90)\n\n plt.legend(loc=\"best\")\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"running time [s]\")\n\n\ndef plot_power_iter_vs_s(power_iter, s, title):\n plt.figure()\n for l in sorted(s.keys()):\n plt.plot(power_iter, s[l], label=l, marker='o')\n plt.legend(loc=\"lower right\", prop={'size': 10})\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"n_iter\")\n\n\ndef svd_timing(X, n_comps, n_iter, n_oversamples,\n 
power_iteration_normalizer='auto', method=None):\n \"\"\"\n Measure time for decomposition\n \"\"\"\n print(\"... running SVD ...\")\n if method is not 'fbpca':\n gc.collect()\n t0 = time()\n U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,\n power_iteration_normalizer,\n random_state=random_state, transpose=False)\n call_time = time() - t0\n else:\n gc.collect()\n t0 = time()\n # There is a different convention for l here\n U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,\n l=n_oversamples+n_comps)\n call_time = time() - t0\n\n return U, mu, V, call_time\n\n\ndef norm_diff(A, norm=2, msg=True, random_state=None):\n \"\"\"\n Compute the norm diff with the original matrix, when randomized\n SVD is called with *params.\n\n norm: 2 => spectral; 'fro' => Frobenius\n \"\"\"\n\n if msg:\n print(\"... computing %s norm ...\" % norm)\n if norm == 2:\n # s = sp.linalg.norm(A, ord=2) # slow\n v0 = _init_arpack_v0(min(A.shape), random_state)\n value = sp.sparse.linalg.svds(A,\n k=1,\n return_singular_vectors=False,\n v0=v0)\n else:\n if sp.sparse.issparse(A):\n value = sp.sparse.linalg.norm(A, ord=norm)\n else:\n value = sp.linalg.norm(A, ord=norm)\n return value\n\n\ndef scalable_frobenius_norm_discrepancy(X, U, s, V):\n # if the input is not too big, just call scipy\n if X.shape[0] * X.shape[1] < MAX_MEMORY:\n A = X - U.dot(np.diag(s).dot(V))\n return norm_diff(A, norm='fro')\n\n print(\"... computing fro norm by batches...\")\n batch_size = 1000\n Vhat = np.diag(s).dot(V)\n cum_norm = .0\n for batch in gen_batches(X.shape[0], batch_size):\n M = X[batch, :] - U[batch, :].dot(Vhat)\n cum_norm += norm_diff(M, norm='fro', msg=False)\n return np.sqrt(cum_norm)\n\n\ndef bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):\n\n all_time = defaultdict(list)\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n all_frobenius = defaultdict(list)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n\n for pi in power_iter:\n for pm in ['none', 'LU', 'QR']:\n print(\"n_iter = %d on sklearn - %s\" % (pi, pm))\n U, s, V, time = svd_timing(X, n_comps, n_iter=pi,\n power_iteration_normalizer=pm,\n n_oversamples=n_oversamples)\n label = \"sklearn - %s\" % pm\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if fbpca_available:\n print(\"n_iter = %d on fbca\" % (pi))\n U, s, V, time = svd_timing(X, n_comps, n_iter=pi,\n power_iteration_normalizer=pm,\n n_oversamples=n_oversamples,\n method='fbpca')\n label = \"fbpca\"\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if enable_spectral_norm:\n title = \"%s: spectral norm diff vs running time\" % (dataset_name)\n plot_time_vs_s(all_time, all_spectral, power_iter, title)\n title = \"%s: Frobenius norm diff vs running time\" % (dataset_name)\n plot_time_vs_s(all_time, all_frobenius, power_iter, title)\n\n\ndef bench_b(power_list):\n\n n_samples, n_features = 1000, 10000\n data_params = {'n_samples': n_samples, 'n_features': n_features,\n 'tail_strength': .7, 'random_state': random_state}\n 
dataset_name = \"low rank matrix %d x %d\" % (n_samples, n_features)\n ranks = [10, 50, 100]\n\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n all_frobenius = defaultdict(list)\n for rank in ranks:\n X = make_low_rank_matrix(effective_rank=rank, **data_params)\n if enable_spectral_norm:\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n\n for n_comp in [int(rank/2), rank, rank*2]:\n label = \"rank=%d, n_comp=%d\" % (rank, n_comp)\n print(label)\n for pi in power_list:\n U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,\n power_iteration_normalizer='LU')\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) /\n X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if enable_spectral_norm:\n title = \"%s: spectral norm diff vs n power iteration\" % (dataset_name)\n plot_power_iter_vs_s(power_iter, all_spectral, title)\n title = \"%s: Frobenius norm diff vs n power iteration\" % (dataset_name)\n plot_power_iter_vs_s(power_iter, all_frobenius, title)\n\n\ndef bench_c(datasets, n_comps):\n all_time = defaultdict(list)\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n all_frobenius = defaultdict(list)\n\n for dataset_name in datasets:\n X = get_data(dataset_name)\n if X is None:\n continue\n\n if enable_spectral_norm:\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n n_comps = np.minimum(n_comps, np.min(X.shape))\n\n label = \"sklearn\"\n print(\"%s %d x %d - %s\" %\n (dataset_name, X.shape[0], X.shape[1], label))\n U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,\n method=label)\n\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if fbpca_available:\n label = \"fbpca\"\n print(\"%s %d x %d - %s\" %\n (dataset_name, X.shape[0], X.shape[1], label))\n U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,\n method=label)\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if len(all_time) == 0:\n raise ValueError(\"No tests ran. 
Aborting.\")\n\n if enable_spectral_norm:\n title = \"normalized spectral norm diff vs running time\"\n scatter_time_vs_s(all_time, all_spectral, datasets, title)\n title = \"normalized Frobenius norm diff vs running time\"\n scatter_time_vs_s(all_time, all_frobenius, datasets, title)\n\n\nif __name__ == '__main__':\n random_state = check_random_state(1234)\n\n power_iter = np.linspace(0, 6, 7, dtype=int)\n n_comps = 50\n\n for dataset_name in datasets:\n X = get_data(dataset_name)\n if X is None:\n continue\n print(\" >>>>>> Benching sklearn and fbpca on %s %d x %d\" %\n (dataset_name, X.shape[0], X.shape[1]))\n bench_a(X, dataset_name, power_iter, n_oversamples=2,\n n_comps=np.minimum(n_comps, np.min(X.shape)))\n\n print(\" >>>>>> Benching on simulated low rank matrix with variable rank\")\n bench_b(power_iter)\n\n print(\" >>>>>> Benching sklearn and fbpca default configurations\")\n bench_c(datasets + big_sparse_datasets, n_comps)\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.ylim",
"sklearn.feature_selection.SelectKBest",
"numpy.array",
"sklearn.datasets.load_digits",
"sklearn.decomposition.NMF",
"matplotlib.pyplot.xlabel",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.bar"
],
[
"numpy.empty",
"numpy.insert",
"numpy.asarray",
"numpy.sum"
],
[
"scipy.sparse.linalg.svds",
"numpy.diag",
"sklearn.datasets.make_sparse_uncorrelated",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.vstack",
"scipy.linalg.norm",
"sklearn.utils.validation.check_random_state",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.annotate",
"sklearn.datasets.fetch_openml",
"matplotlib.pyplot.suptitle",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"sklearn.utils.gen_batches",
"scipy.io.loadmat",
"numpy.random.uniform",
"sklearn.datasets.fetch_lfw_people",
"sklearn.datasets.fetch_rcv1",
"numpy.repeat",
"sklearn.datasets.fetch_20newsgroups_vectorized",
"numpy.min",
"sklearn.datasets.fetch_olivetti_faces",
"sklearn.datasets.make_low_rank_matrix",
"scipy.sparse.linalg.norm",
"matplotlib.pyplot.legend",
"scipy.sparse.issparse",
"scipy.sparse.csr_matrix",
"matplotlib.pyplot.show",
"numpy.sqrt",
"matplotlib.pyplot.xlabel",
"sklearn.utils.extmath.randomized_svd"
]
] |
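The benchmark file in this record times sklearn's randomized_svd under different power-iteration normalizers. A minimal call on a small random matrix, using the same keyword parameters that svd_timing() sweeps over (randomized_svd is sklearn's public extmath API, so these names should hold across recent versions):

```python
import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 80))

# Oversampling, number of power iterations, and the normalizer applied
# between iterations -- the three knobs the benchmark varies.
U, s, Vt = randomized_svd(X, n_components=10, n_oversamples=10, n_iter=2,
                          power_iteration_normalizer="LU", random_state=0)
print(U.shape, s.shape, Vt.shape)  # (200, 10) (10,) (10, 80)
```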
junhyeokahn/RLBasicAlgorithm | [
"25e3e471336cb7855e28c0f9905e2214afdeb2e4"
] | [
"DP/PolicyEvaluation.py"
] | [
"import numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\")\n\nfrom lib.envs.gridworld import GridworldEnv\n\nenv = GridworldEnv()\n\ndef policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n \"\"\"\n Evaluate a policy given an environment and a full description of the\n environment's dynamics.\n\n Args:\n policy: [S, A] shaped matrix representing the policy.\n\n env: OpenAI env. env.P represents the transition probabilities of the\n environment. env.P[s][a] is a (prob, next_state, reward, done) tuple.\n\n theta: We stop evaluation once our value function change is less than\n theta for all states.\n\n discount_factor: gamma discount factor.\n\n Returns:\n Vector of length env.nS representing the value function.\n \"\"\"\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n while True:\n delta=0\n for s in range(env.nS):\n v=0\n for a, action_prob in enumerate(policy[s]):\n for transition_prob, next_state, reward, done in env.P[s][a]:\n v += action_prob * (reward + discount_factor*V[next_state])\n delta = max(delta, np.abs(v-V[s]))\n V[s] = v\n\n if delta<theta:\n break\n return np.array(V)\n\nrandom_policy = np.ones([env.nS, env.nA]) / env.nA\nv = policy_eval(random_policy, env)\n\n# Test: Make sure the evaluated policy is what we expected\nexpected_v = np.array([ 0, -14, -20, -22,\n -14, -18, -20, -20,\n -20, -20, -18, -14,\n -22, -20, -14, 0 ])\nnp.testing.assert_array_almost_equal(v, expected_v, decimal=2)\n\nprint(\"Reshaped Grid Value Function:\")\nprint(v.reshape(env.shape))\nprint(\"\")\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.abs",
"numpy.testing.assert_array_almost_equal",
"numpy.array"
]
] |
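policy_eval() above sweeps the Bellman expectation backup V(s) <- sum_a pi(a|s) sum_s' p(s'|s,a) [r + gamma V(s')] state by state using env.P. When the dynamics are available as dense arrays, the same backup can be written as one vectorized update; a toy two-state sketch with a made-up MDP (not the gridworld):

```python
import numpy as np

# Hypothetical 2-state, 2-action MDP: transition tensor P[s, a, s'] and
# reward tensor R[s, a, s'].
P = np.array([[[0.8, 0.2], [0.1, 0.9]],
              [[0.5, 0.5], [0.0, 1.0]]])
R = np.array([[[0.0, 1.0], [0.0, 0.0]],
              [[1.0, 0.0], [0.0, 2.0]]])
pi = np.full((2, 2), 0.5)      # uniform random policy, like random_policy above
gamma, V = 0.9, np.zeros(2)

for _ in range(500):           # fixed number of sweeps instead of a theta test
    # V_new[s] = sum_{a,s'} pi[s,a] * P[s,a,s'] * (R[s,a,s'] + gamma * V[s'])
    V = np.einsum("sa,sat,sat->s", pi, P, R + gamma * V)
print(V)
```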
pmartincalvo/osmnx | [
"15eddc0672f0ec951ada1b89eb417df44d35636e"
] | [
"osmnx/utils.py"
] | [
"import sys\nimport os\nimport datetime as dt\nimport unicodedata\nimport networkx as nx\nimport numpy as np\nimport logging as lg\nfrom . import settings\n\n\ndef citation():\n \"\"\"\n Print the OSMnx package's citation information.\n\n Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing,\n and Visualizing Complex Street Networks. Computers, Environment and Urban\n Systems, 65(126-139). https://doi.org/10.1016/j.compenvurbsys.2017.05.004\n \"\"\"\n\n cite = (\"To cite OSMnx, use:\\n\\n\"\n \"Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing, \"\n \"and Visualizing Complex Street Networks. Computers, Environment and Urban \"\n \"Systems, 65(126-139). https://doi.org/10.1016/j.compenvurbsys.2017.05.004\"\n \"\\n\\n\"\n \"BibTeX entry for LaTeX users:\\n\\n\"\n\n \"@article{boeing_osmnx_2017,\\n\"\n \" title = {{OSMnx}: {New} {Methods} for {Acquiring}, {Constructing}, {Analyzing}, and {Visualizing} {Complex} {Street} {Networks}},\\n\"\n \" volume = {65},\\n\"\n \" doi = {10.1016/j.compenvurbsys.2017.05.004},\\n\"\n \" number = {126-139},\\n\"\n \" journal = {Computers, Environment and Urban Systems},\\n\"\n \" author = {Boeing, Geoff},\\n\"\n \" year = {2017}\\n\"\n \"}\")\n\n print(cite)\n\n\ndef make_str(value):\n \"\"\"\n Convert a passed-in value to unicode if Python 2, or string if Python 3.\n\n Parameters\n ----------\n value : any\n the value to convert to unicode/string\n\n Returns\n -------\n unicode or string\n \"\"\"\n if (sys.version_info > (3, 0)):\n # python 3.x has no unicode type, so if error, use str type\n return str(value)\n else:\n # for python 2.x compatibility, use unicode\n return unicode(value)\n\n\ndef config(data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n use_cache=settings.use_cache,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_tags_node=settings.useful_tags_node,\n useful_tags_path=settings.useful_tags_path,\n osm_xml_node_attrs=settings.osm_xml_node_attrs,\n osm_xml_node_tags=settings.osm_xml_node_tags,\n osm_xml_way_attrs=settings.osm_xml_way_attrs,\n osm_xml_way_tags=settings.osm_xml_way_tags,\n default_access=settings.default_access,\n default_crs=settings.default_crs,\n default_user_agent=settings.default_user_agent,\n default_referer=settings.default_referer,\n default_accept_language=settings.default_accept_language,\n nominatim_endpoint=settings.nominatim_endpoint,\n nominatim_key=settings.nominatim_key,\n overpass_endpoint=settings.overpass_endpoint,\n all_oneway=settings.all_oneway):\n \"\"\"\n Configure osmnx by setting the default global vars to desired values.\n\n Parameters\n ---------\n data_folder : string\n where to save and load data files\n logs_folder : string\n where to write the log files\n imgs_folder : string\n where to save figures\n cache_folder : string\n where to save the http response cache\n use_cache : bool\n if True, use a local cache to save/retrieve http responses instead of\n calling API repetitively for the same request URL\n log_file : bool\n if true, save log output to a log file in logs_folder\n log_console : bool\n if true, print log output to the console\n log_level : int\n one of the logger.level constants\n log_name : string\n name of the logger\n useful_tags_node : list\n a list of useful OSM tags to attempt to save from node elements\n 
useful_tags_path : list\n a list of useful OSM tags to attempt to save from path elements\n default_access : string\n default filter for OSM \"access\" key\n default_crs : string\n default CRS to set when creating graphs\n default_user_agent : string\n HTTP header user-agent\n default_referer : string\n HTTP header referer\n default_accept_language : string\n HTTP header accept-language\n nominatim_endpoint : string\n which API endpoint to use for nominatim queries\n nominatim_key : string\n your API key, if you are using an endpoint that requires one\n overpass_endpoint : string\n which API endpoint to use for overpass queries\n all_oneway : boolean\n if True, forces all paths to be loaded as oneway ways, preserving\n the original order of nodes stored in the OSM way XML.\n\n Returns\n -------\n None\n \"\"\"\n\n # set each global variable to the passed-in parameter value\n settings.use_cache = use_cache\n settings.cache_folder = cache_folder\n settings.data_folder = data_folder\n settings.imgs_folder = imgs_folder\n settings.logs_folder = logs_folder\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_tags_node = useful_tags_node\n settings.useful_tags_path = useful_tags_path\n settings.useful_tags_node = list(set(useful_tags_node + osm_xml_node_attrs + osm_xml_node_tags))\n settings.useful_tags_path = list(set(useful_tags_path + osm_xml_way_attrs + osm_xml_way_tags))\n settings.osm_xml_node_attrs = osm_xml_node_attrs\n settings.osm_xml_node_tags = osm_xml_node_tags\n settings.osm_xml_way_attrs = osm_xml_way_attrs\n settings.osm_xml_way_tags = osm_xml_way_tags\n settings.default_access = default_access\n settings.default_crs = default_crs\n settings.default_user_agent = default_user_agent\n settings.default_referer = default_referer\n settings.default_accept_language = default_accept_language\n settings.nominatim_endpoint = nominatim_endpoint\n settings.nominatim_key = nominatim_key\n settings.overpass_endpoint = overpass_endpoint\n settings.all_oneway = all_oneway\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n log('Configured osmnx')\n\n\ndef great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):\n \"\"\"\n Vectorized function to calculate the great-circle distance between two\n points or between vectors of points, using haversine.\n\n Parameters\n ----------\n lat1 : float or array of float\n lng1 : float or array of float\n lat2 : float or array of float\n lng2 : float or array of float\n earth_radius : numeric\n radius of earth in units in which distance will be returned (default is\n meters)\n\n Returns\n -------\n distance : float or vector of floats\n distance or vector of distances from (lat1, lng1) to (lat2, lng2) in\n units of earth_radius\n \"\"\"\n\n phi1 = np.deg2rad(lat1)\n phi2 = np.deg2rad(lat2)\n d_phi = phi2 - phi1\n\n theta1 = np.deg2rad(lng1)\n theta2 = np.deg2rad(lng2)\n d_theta = theta2 - theta1\n\n h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2\n h = np.minimum(1.0, h) # protect against floating point errors\n\n arc = 2 * np.arcsin(np.sqrt(h))\n\n # return distance in units of earth_radius\n distance = arc * earth_radius\n return distance\n\n\ndef euclidean_dist_vec(y1, x1, y2, x2):\n \"\"\"\n Vectorized function to calculate the euclidean distance between two points\n or between vectors of points.\n\n Parameters\n 
----------\n y1 : float or array of float\n x1 : float or array of float\n y2 : float or array of float\n x2 : float or array of float\n\n Returns\n -------\n distance : float or array of float\n distance or vector of distances from (x1, y1) to (x2, y2) in graph units\n \"\"\"\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance\n\n\ndef log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n # if logging to file is turned on\n if settings.log_file:\n # get the current logger (or create a new one, if none), then log\n # message at requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print to\n # the console\n if settings.log_console:\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout. this prevents\n # logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for console display so it doesn't break\n # windows terminals\n message = unicodedata.normalize('NFKD', make_str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n\n\ndef get_logger(level=None, name=None, filename=None):\n \"\"\"\n Create a logger or return the current one if already instantiated.\n\n Parameters\n ----------\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n logger.logger\n \"\"\"\n\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if not getattr(logger, 'handler_set', None):\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime('%Y_%m_%d')\n log_filename = os.path.join(settings.logs_folder, '{}_{}.log'.format(filename, todays_date))\n\n # if the logs folder does not already exist, create it\n if not os.path.exists(settings.logs_folder):\n os.makedirs(settings.logs_folder)\n\n # create file handler and log formatter and set them up\n handler = lg.FileHandler(log_filename, encoding='utf-8')\n formatter = lg.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.handler_set = True\n\n return logger\n\n\ndef get_unique_nodes_ordered_from_way(way_edges_df):\n \"\"\"\n Function to recover the original order of nodes from a dataframe\n of edges associated with a single OSM way.\n\n Parameters\n ----------\n way_edges_df : pandas.DataFrame()\n Dataframe containing columns 'u' and 'v' 
corresponding to\n origin/desitination nodes.\n\n Returns\n -------\n unique_ordered_nodes : list\n An ordered list of unique node IDs\n\n NOTE: If the edges do not all connect (e.g. [(1, 2), (2,3),\n (10, 11), (11, 12), (12, 13)]), then this method will return\n only those nodes associated with the largest component of\n connected edges, even if subsequent connected chunks are contain\n more total nodes. This is done to ensure a proper topological\n representation of nodes in the XML way records because if there\n are unconnected components, the sorting algorithm cannot recover\n their original order. I don't believe that we would ever encounter\n this kind of disconnected structure of nodes within a given way,\n but as best I could tell it is not explicitly forbidden in the\n OSM XML design schema. I'm using a print statement right now to\n tell the user whether or not any nodes have been dropped and\n how many.\n \"\"\"\n\n G = nx.MultiDiGraph()\n all_nodes = list(way_edges_df['u'].values) + \\\n list(way_edges_df['v'].values)\n\n G.add_nodes_from(all_nodes)\n G.add_edges_from(way_edges_df[['u', 'v']].values)\n wccs = nx.weakly_connected_components(G)\n largest_wcc = max(wccs, key=len)\n node_subset = set(largest_wcc)\n\n # NOTE: this code (L387-403) is copied from geo_utils.py\n # which cannot be imported here without triggering a\n # circular import error. This should be fixed next time the\n # code base is refactored\n\n # copy nodes into new graph\n G2 = G.__class__()\n G2.add_nodes_from((n, G.nodes[n]) for n in node_subset)\n\n # copy edges to new graph, including parallel edges\n if G2.is_multigraph:\n G2.add_edges_from((n, nbr, key, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, keydict in nbrs.items() if nbr in node_subset\n for key, d in keydict.items())\n else:\n G2.add_edges_from((n, nbr, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, d in nbrs.items() if nbr in node_subset)\n\n # update graph attribute dict, and return graph\n G2.graph.update(G.graph)\n\n unique_ordered_nodes = list(nx.topological_sort(G2))\n num_unique_nodes = len(np.unique(all_nodes))\n\n if len(unique_ordered_nodes) < num_unique_nodes:\n print('Recovered order for {0} of {1} nodes'.format(\n len(unique_ordered_nodes), num_unique_nodes))\n\n return unique_ordered_nodes"
] | [
[
"numpy.cos",
"numpy.deg2rad",
"numpy.sqrt",
"numpy.sin",
"numpy.unique",
"numpy.minimum"
]
] |
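Illustrative sketch (not part of the dataset row above, and not taken from the quoted repository): a minimal, self-contained exercise of the haversine logic that the osmnx `great_circle_vec` helper in the preceding `code` field implements. The function body mirrors the quoted implementation; the sample coordinates and the printed figure are assumed example values.

    import numpy as np

    def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):
        # vectorized haversine, mirroring the quoted osmnx helper
        phi1, phi2 = np.deg2rad(lat1), np.deg2rad(lat2)
        d_phi = phi2 - phi1
        d_theta = np.deg2rad(lng2) - np.deg2rad(lng1)
        h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2
        h = np.minimum(1.0, h)  # guard against floating-point overshoot before arcsin
        return 2 * np.arcsin(np.sqrt(h)) * earth_radius

    # distance in meters between two illustrative points; array inputs work the same way
    print(great_circle_vec(37.77, -122.42, 34.05, -118.24))  # roughly 5.6e5 m

The `apis` list for the row above (numpy.cos, numpy.deg2rad, numpy.sqrt, numpy.sin, numpy.unique, numpy.minimum) is consistent with this computation: apart from `numpy.unique`, every listed call appears in the haversine distance path.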
bert9bert/statsmodels | [
"898ddfc483c45bb0f8e5156dd8506abda84c9b63"
] | [
"statsmodels/discrete/discrete_model.py"
] | [
"\"\"\"\nLimited dependent variable and qualitative variables.\n\nIncludes binary outcomes, count data, (ordered) ordinal data and limited\ndependent variables.\n\nGeneral References\n--------------------\n\nA.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.\n Cambridge, 1998\n\nG.S. Madalla. `Limited-Dependent and Qualitative Variables in Econometrics`.\n Cambridge, 1983.\n\nW. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.\n\"\"\"\nfrom __future__ import division\n\n__all__ = [\"Poisson\", \"Logit\", \"Probit\", \"MNLogit\", \"NegativeBinomial\"]\n\nfrom statsmodels.compat.python import lmap, lzip, range\nimport numpy as np\nfrom scipy.special import gammaln\nfrom scipy import stats, special, optimize # opt just for nbin\nimport statsmodels.tools.tools as tools\nfrom statsmodels.tools import data as data_tools\nfrom statsmodels.tools.decorators import (resettable_cache,\n cache_readonly)\nfrom statsmodels.regression.linear_model import OLS\nfrom scipy import stats, special, optimize # opt just for nbin\nfrom scipy.stats import nbinom\nfrom statsmodels.tools.sm_exceptions import PerfectSeparationError\nfrom statsmodels.tools.numdiff import (approx_fprime, approx_hess,\n approx_hess_cs, approx_fprime_cs)\nimport statsmodels.base.model as base\nfrom statsmodels.base.data import handle_data # for mnlogit\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.compat.numpy import np_matrix_rank\nfrom pandas.core.api import get_dummies\n\nfrom statsmodels.base.l1_slsqp import fit_l1_slsqp\ntry:\n import cvxopt\n have_cvxopt = True\nexcept ImportError:\n have_cvxopt = False\n\n#TODO: When we eventually get user-settable precision, we need to change\n# this\nFLOAT_EPS = np.finfo(float).eps\n\n#TODO: add options for the parameter covariance/variance\n# ie., OIM, EIM, and BHHH see Green 21.4\n\n_discrete_models_docs = \"\"\"\n\"\"\"\n\n_discrete_results_docs = \"\"\"\n %(one_line_description)s\n\n Parameters\n ----------\n model : A DiscreteModel instance\n params : array-like\n The parameters of a fitted model.\n hessian : array-like\n The hessian of the fitted model.\n scale : float\n A scale parameter for the covariance matrix.\n\n Returns\n -------\n *Attributes*\n\n aic : float\n Akaike information criterion. `-2*(llf - p)` where `p` is the number\n of regressors including the intercept.\n bic : float\n Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the\n number of regressors including the intercept.\n bse : array\n The standard errors of the coefficients.\n df_resid : float\n See model definition.\n df_model : float\n See model definition.\n fitted_values : array\n Linear predictor XB.\n llf : float\n Value of the loglikelihood\n llnull : float\n Value of the constant-only loglikelihood\n llr : float\n Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`\n llr_pvalue : float\n The chi-squared probability of getting a log-likelihood ratio\n statistic greater than llr. llr has a chi-squared distribution\n with degrees of freedom `df_model`.\n prsquared : float\n McFadden's pseudo-R-squared. `1 - (llf / llnull)`\n%(extra_attr)s\"\"\"\n\n_l1_results_attr = \"\"\" nnz_params : Integer\n The number of nonzero parameters in the model. 
Train with\n trim_params == True or else numerical error will distort this.\n trimmed : Boolean array\n trimmed[i] == True if the ith parameter was trimmed from the model.\"\"\"\n\n\n# helper for MNLogit (will be generally useful later)\n\ndef _numpy_to_dummies(endog):\n if endog.dtype.kind in ['S', 'O']:\n endog_dummies, ynames = tools.categorical(endog, drop=True,\n dictnames=True)\n elif endog.ndim == 2:\n endog_dummies = endog\n ynames = range(endog.shape[1])\n else:\n endog_dummies, ynames = tools.categorical(endog, drop=True,\n dictnames=True)\n return endog_dummies, ynames\n\n\ndef _pandas_to_dummies(endog):\n if endog.ndim == 2:\n if endog.shape[1] == 1:\n yname = endog.columns[0]\n endog_dummies = get_dummies(endog.iloc[:, 0])\n else: # series\n yname = 'y'\n endog_dummies = endog\n else:\n yname = endog.name\n endog_dummies = get_dummies(endog)\n ynames = endog_dummies.columns.tolist()\n\n return endog_dummies, ynames, yname\n\n\n#### Private Model Classes ####\n\n\nclass DiscreteModel(base.LikelihoodModel):\n \"\"\"\n Abstract class for discrete choice models.\n\n This class does not do anything itself but lays out the methods and\n call signature expected of child classes in addition to those of\n statsmodels.model.LikelihoodModel.\n \"\"\"\n def __init__(self, endog, exog, **kwargs):\n super(DiscreteModel, self).__init__(endog, exog, **kwargs)\n self.raise_on_perfect_prediction = True\n\n def initialize(self):\n \"\"\"\n Initialize is called by\n statsmodels.model.LikelihoodModel.__init__\n and should contain any preprocessing that needs to be done for a model.\n \"\"\"\n # assumes constant\n self.df_model = float(np_matrix_rank(self.exog) - 1)\n self.df_resid = (float(self.exog.shape[0] -\n np_matrix_rank(self.exog)))\n\n def cdf(self, X):\n \"\"\"\n The cumulative distribution function of the model.\n \"\"\"\n raise NotImplementedError\n\n def pdf(self, X):\n \"\"\"\n The probability density (mass) function of the model.\n \"\"\"\n raise NotImplementedError\n\n def _check_perfect_pred(self, params, *args):\n endog = self.endog\n fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))\n if (self.raise_on_perfect_prediction and\n np.allclose(fittedvalues - endog, 0)):\n msg = \"Perfect separation detected, results not available\"\n raise PerfectSeparationError(msg)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n \"\"\"\n Fit the model using maximum likelihood.\n\n The rest of the docstring is from\n statsmodels.base.model.LikelihoodModel.fit\n \"\"\"\n if callback is None:\n callback = self._check_perfect_pred\n else:\n pass # make a function factory to have multiple call-backs\n\n mlefit = super(DiscreteModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n return mlefit # up to subclasses to wrap results\n\n fit.__doc__ += base.LikelihoodModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=True,\n callback=None, alpha=0, trim_mode='auto',\n auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,\n qc_verbose=False, **kwargs):\n \"\"\"\n Fit the model using a regularized maximum likelihood.\n The regularization method AND the solver used is determined by the\n argument method.\n\n Parameters\n ----------\n start_params : array-like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is 
an array of zeros.\n method : 'l1' or 'l1_cvxopt_cp'\n See notes for details.\n maxiter : Integer or 'defined_by_method'\n Maximum number of iterations to perform.\n If 'defined_by_method', then use method defaults (see notes).\n full_output : bool\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : bool\n Set to True to print convergence messages.\n fargs : tuple\n Extra arguments passed to the likelihood function, i.e.,\n loglike(x,*args)\n callback : callable callback(xk)\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n retall : bool\n Set to True to return list of solutions at each iteration.\n Available in Results object's mle_retvals attribute.\n alpha : non-negative scalar or numpy array (same size as parameters)\n The weight multiplying the l1 penalty term\n trim_mode : 'auto, 'size', or 'off'\n If not 'off', trim (set to zero) parameters that would have been\n zero if the solver reached the theoretical minimum.\n If 'auto', trim params using the Theory above.\n If 'size', trim params if they have very small absolute value\n size_trim_tol : float or 'auto' (default = 'auto')\n For use when trim_mode == 'size'\n auto_trim_tol : float\n For sue when trim_mode == 'auto'. Use\n qc_tol : float\n Print warning and don't allow auto trim when (ii) (above) is\n violated by this much.\n qc_verbose : Boolean\n If true, print out a full QC report upon failure\n\n Notes\n -----\n Extra parameters are not penalized if alpha is given as a scalar.\n An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.\n\n Optional arguments for the solvers (available in Results.mle_settings)::\n\n 'l1'\n acc : float (default 1e-6)\n Requested accuracy as used by slsqp\n 'l1_cvxopt_cp'\n abstol : float\n absolute accuracy (default: 1e-7).\n reltol : float\n relative accuracy (default: 1e-6).\n feastol : float\n tolerance for feasibility conditions (default: 1e-7).\n refinement : int\n number of iterative refinement steps when solving KKT\n equations (default: 1).\n\n\n Optimization methodology\n\n With :math:`L` the negative log likelihood, we solve the convex but\n non-smooth problem\n\n .. math:: \\\\min_\\\\beta L(\\\\beta) + \\\\sum_k\\\\alpha_k |\\\\beta_k|\n\n via the transformation to the smooth, convex, constrained problem\n in twice as many variables (adding the \"added variables\" :math:`u_k`)\n\n .. math:: \\\\min_{\\\\beta,u} L(\\\\beta) + \\\\sum_k\\\\alpha_k u_k,\n\n subject to\n\n .. math:: -u_k \\\\leq \\\\beta_k \\\\leq u_k.\n\n With :math:`\\\\partial_k L` the derivative of :math:`L` in the\n :math:`k^{th}` parameter direction, theory dictates that, at the\n minimum, exactly one of two conditions holds:\n\n (i) :math:`|\\\\partial_k L| = \\\\alpha_k` and :math:`\\\\beta_k \\\\neq 0`\n (ii) :math:`|\\\\partial_k L| \\\\leq \\\\alpha_k` and :math:`\\\\beta_k = 0`\n\n \"\"\"\n ### Set attributes based on method\n if method in ['l1', 'l1_cvxopt_cp']:\n cov_params_func = self.cov_params_func_l1\n else:\n raise Exception(\"argument method == %s, which is not handled\"\n % method)\n\n ### Bundle up extra kwargs for the dictionary kwargs. 
These are\n ### passed through super(...).fit() as kwargs and unpacked at\n ### appropriate times\n alpha = np.array(alpha)\n assert alpha.min() >= 0\n try:\n kwargs['alpha'] = alpha\n except TypeError:\n kwargs = dict(alpha=alpha)\n kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])\n kwargs['trim_mode'] = trim_mode\n kwargs['size_trim_tol'] = size_trim_tol\n kwargs['auto_trim_tol'] = auto_trim_tol\n kwargs['qc_tol'] = qc_tol\n kwargs['qc_verbose'] = qc_verbose\n\n ### Define default keyword arguments to be passed to super(...).fit()\n if maxiter == 'defined_by_method':\n if method == 'l1':\n maxiter = 1000\n elif method == 'l1_cvxopt_cp':\n maxiter = 70\n\n ## Parameters to pass to super(...).fit()\n # For the 'extra' parameters, pass all that are available,\n # even if we know (at this point) we will only use one.\n extra_fit_funcs = {'l1': fit_l1_slsqp}\n if have_cvxopt and method == 'l1_cvxopt_cp':\n from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp\n extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp\n elif method.lower() == 'l1_cvxopt_cp':\n message = (\"Attempt to use l1_cvxopt_cp failed since cvxopt \"\n \"could not be imported\")\n\n if callback is None:\n callback = self._check_perfect_pred\n else:\n pass # make a function factory to have multiple call-backs\n\n mlefit = super(DiscreteModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs,\n cov_params_func=cov_params_func, **kwargs)\n\n return mlefit # up to subclasses to wrap results\n\n def cov_params_func_l1(self, likelihood_model, xopt, retvals):\n \"\"\"\n Computes cov_params on a reduced parameter space\n corresponding to the nonzero parameters resulting from the\n l1 regularized fit.\n\n Returns a full cov_params matrix, with entries corresponding\n to zero'd values set to np.nan.\n \"\"\"\n H = likelihood_model.hessian(xopt)\n trimmed = retvals['trimmed']\n nz_idx = np.nonzero(trimmed == False)[0]\n nnz_params = (trimmed == False).sum()\n if nnz_params > 0:\n H_restricted = H[nz_idx[:, None], nz_idx]\n # Covariance estimate for the nonzero params\n H_restricted_inv = np.linalg.inv(-H_restricted)\n else:\n H_restricted_inv = np.zeros(0)\n\n cov_params = np.nan * np.ones(H.shape)\n cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv\n\n return cov_params\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n \"\"\"\n raise NotImplementedError\n\n def _derivative_exog(self, params, exog=None, dummy_idx=None,\n count_idx=None):\n \"\"\"\n This should implement the derivative of the non-linear function\n \"\"\"\n raise NotImplementedError\n\nclass BinaryModel(DiscreteModel):\n\n def __init__(self, endog, exog, **kwargs):\n super(BinaryModel, self).__init__(endog, exog, **kwargs)\n if (not issubclass(self.__class__, MultinomialModel) and\n not np.all((self.endog >= 0) & (self.endog <= 1))):\n raise ValueError(\"endog must be in the unit interval.\")\n\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n\n Parameters\n ----------\n params : array-like\n Fitted parameters of the model.\n exog : array-like\n 1d or 2d array of exogenous values. If not supplied, the\n whole exog attribute of the model is used.\n linear : bool, optional\n If True, returns the linear predictor dot(exog,params). 
Else,\n returns the value of the cdf at the linear predictor.\n\n Returns\n -------\n array\n Fitted values at exog.\n \"\"\"\n if exog is None:\n exog = self.exog\n if not linear:\n return self.cdf(np.dot(exog, params))\n else:\n return np.dot(exog, params)\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n bnryfit = super(BinaryModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1BinaryResults(self, bnryfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1BinaryResultsWrapper(discretefit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predict.\n\n Transform can be 'dydx' or 'eydx'. Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n dF = self.pdf(np.dot(exog, params))[:,None] * exog\n if 'ey' in transform:\n dF /= self.predict(params, exog)[:,None]\n return dF\n\n def _derivative_exog(self, params, exog=None, transform='dydx',\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects returns dF(XB) / dX where F(.) 
is\n the predicted probabilities\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n \"\"\"\n #note, this form should be appropriate for\n ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit\n if exog is None:\n exog = self.exog\n margeff = np.dot(self.pdf(np.dot(exog, params))[:,None],\n params[None,:])\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None]\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff\n\nclass MultinomialModel(BinaryModel):\n\n def _handle_data(self, endog, exog, missing, hasconst, **kwargs):\n if data_tools._is_using_ndarray_type(endog, None):\n endog_dummies, ynames = _numpy_to_dummies(endog)\n yname = 'y'\n elif data_tools._is_using_pandas(endog, None):\n endog_dummies, ynames, yname = _pandas_to_dummies(endog)\n else:\n endog = np.asarray(endog)\n endog_dummies, ynames = _numpy_to_dummies(endog)\n yname = 'y'\n\n if not isinstance(ynames, dict):\n ynames = dict(zip(range(endog_dummies.shape[1]), ynames))\n\n self._ynames_map = ynames\n data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)\n data.ynames = yname # overwrite this to single endog name\n data.orig_endog = endog\n self.wendog = data.endog\n\n # repeating from upstream...\n for key in kwargs:\n try:\n setattr(self, key, data.__dict__.pop(key))\n except KeyError:\n pass\n return data\n\n def initialize(self):\n \"\"\"\n Preprocesses the data for MNLogit.\n \"\"\"\n super(MultinomialModel, self).initialize()\n # This is also a \"whiten\" method in other models (eg regression)\n self.endog = self.endog.argmax(1) # turn it into an array of col idx\n self.J = self.wendog.shape[1]\n self.K = self.exog.shape[1]\n self.df_model *= (self.J-1) # for each J - 1 equation.\n self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n\n Parameters\n ----------\n params : array-like\n 2d array of fitted parameters of the model. Should be in the\n order returned from the model.\n exog : array-like\n 1d or 2d array of exogenous values. If not supplied, the\n whole exog attribute of the model is used. If a 1d array is given\n it assumed to be 1 row of exogenous variables. If you only have\n one regressor and would like to do prediction, you must provide\n a 2d array with shape[1] == 1.\n linear : bool, optional\n If True, returns the linear predictor dot(exog,params). 
Else,\n returns the value of the cdf at the linear predictor.\n\n Notes\n -----\n Column 0 is the base case, the rest conform to the rows of params\n shifted up one for the base case.\n \"\"\"\n if exog is None: # do here to accomodate user-given exog\n exog = self.exog\n if exog.ndim == 1:\n exog = exog[None]\n pred = super(MultinomialModel, self).predict(params, exog, linear)\n if linear:\n pred = np.column_stack((np.zeros(len(exog)), pred))\n return pred\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n if start_params is None:\n start_params = np.zeros((self.K * (self.J-1)))\n else:\n start_params = np.asarray(start_params)\n callback = lambda x : None # placeholder until check_perfect_pred\n # skip calling super to handle results from LikelihoodModel\n mnfit = base.LikelihoodModel.fit(self, start_params = start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n mnfit.params = mnfit.params.reshape(self.K, -1, order='F')\n mnfit = MultinomialResults(self, mnfit)\n return MultinomialResultsWrapper(mnfit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n if start_params is None:\n start_params = np.zeros((self.K * (self.J-1)))\n else:\n start_params = np.asarray(start_params)\n mnfit = DiscreteModel.fit_regularized(\n self, start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n mnfit.params = mnfit.params.reshape(self.K, -1, order='F')\n mnfit = L1MultinomialResults(self, mnfit)\n return L1MultinomialResultsWrapper(mnfit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predicted probabilities for each\n choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.\n The zero derivatives for the base category are not included.\n\n Transform can be 'dydx' or 'eydx'. 
Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n if params.ndim == 1: # will get flatted from approx_fprime\n params = params.reshape(self.K, self.J-1, order='F')\n\n eXB = np.exp(np.dot(exog, params))\n sum_eXB = (1 + eXB.sum(1))[:,None]\n J, K = lmap(int, [self.J, self.K])\n repeat_eXB = np.repeat(eXB, J, axis=1)\n X = np.tile(exog, J-1)\n # this is the derivative wrt the base level\n F0 = -repeat_eXB * X / sum_eXB ** 2\n # this is the derivative wrt the other levels when\n # dF_j / dParams_j (ie., own equation)\n #NOTE: this computes too much, any easy way to cut down?\n F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)\n F1 = F1.transpose((1,0,2)) # put the nobs index first\n\n # other equation index\n other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)\n F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \\\n (sum_eXB**2)).transpose((1,0,2))[:, other_idx]\n dFdX = np.concatenate((F0[:, None,:], F1), axis=1)\n\n if 'ey' in transform:\n dFdX /= self.predict(params, exog)[:, :, None]\n return dFdX\n\n def _derivative_exog(self, params, exog=None, transform='dydx',\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects returns dF(XB) / dX where F(.) is\n the predicted probabilities\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n\n For Multinomial models the marginal effects are\n\n P[j] * (params[j] - sum_k P[k]*params[k])\n\n It is returned unshaped, so that each row contains each of the J\n equations. This makes it easier to take derivatives of this for\n standard errors. If you want average marginal effects you can do\n margeff.reshape(nobs, K, J, order='F).mean(0) and the marginal effects\n for choice J are in column J\n \"\"\"\n J = int(self.J) # number of alternative choices\n K = int(self.K) # number of variables\n #note, this form should be appropriate for\n ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit\n if exog is None:\n exog = self.exog\n if params.ndim == 1: # will get flatted from approx_fprime\n params = params.reshape(K, J-1, order='F')\n zeroparams = np.c_[np.zeros(K), params] # add base in\n\n cdf = self.cdf(np.dot(exog, params))\n margeff = np.array([cdf[:,[j]]* (zeroparams[:,j]-np.array([cdf[:,[i]]*\n zeroparams[:,i] for i in range(int(J))]).sum(0))\n for j in range(J)])\n margeff = np.transpose(margeff, (1,2,0))\n # swap the axes to make sure margeff are in order nobs, K, J\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None,:]\n\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff.reshape(len(exog), -1, order='F')\n\nclass CountModel(DiscreteModel):\n def __init__(self, endog, exog, offset=None, exposure=None, missing='none',\n **kwargs):\n super(CountModel, self).__init__(endog, exog, missing=missing,\n offset=offset,\n exposure=exposure, **kwargs)\n if exposure is not None:\n self.exposure = np.log(self.exposure)\n self._check_inputs(self.offset, self.exposure, self.endog)\n if offset is None:\n delattr(self, 
'offset')\n if exposure is None:\n delattr(self, 'exposure')\n\n def _check_inputs(self, offset, exposure, endog):\n if offset is not None and offset.shape[0] != endog.shape[0]:\n raise ValueError(\"offset is not the same length as endog\")\n\n if exposure is not None and exposure.shape[0] != endog.shape[0]:\n raise ValueError(\"exposure is not the same length as endog\")\n\n def _get_init_kwds(self):\n # this is a temporary fixup because exposure has been transformed\n # see #1609\n kwds = super(CountModel, self)._get_init_kwds()\n if 'exposure' in kwds and kwds['exposure'] is not None:\n kwds['exposure'] = np.exp(kwds['exposure'])\n return kwds\n\n def predict(self, params, exog=None, exposure=None, offset=None,\n linear=False):\n \"\"\"\n Predict response variable of a count model given exogenous variables.\n\n Notes\n -----\n If exposure is specified, then it will be logged by the method.\n The user does not need to log it first.\n \"\"\"\n #TODO: add offset tp\n if exog is None:\n exog = self.exog\n offset = getattr(self, 'offset', 0)\n exposure = getattr(self, 'exposure', 0)\n\n else:\n if exposure is None:\n exposure = 0\n else:\n exposure = np.log(exposure)\n if offset is None:\n offset = 0\n\n if not linear:\n return np.exp(np.dot(exog, params[:exog.shape[1]]) + exposure + offset) # not cdf\n else:\n return np.dot(exog, params[:exog.shape[1]]) + exposure + offset\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predict.\n\n Transform can be 'dydx' or 'eydx'. Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n #NOTE: this handles offset and exposure\n dF = self.predict(params, exog)[:,None] * exog\n if 'ey' in transform:\n dF /= self.predict(params, exog)[:,None]\n return dF\n\n def _derivative_exog(self, params, exog=None, transform=\"dydx\",\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects. 
These are the marginal effects\n d F(XB) / dX\n For the Poisson model F(XB) is the predicted counts rather than\n the probabilities.\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n \"\"\"\n # group 3 poisson, nbreg, zip, zinb\n if exog is None:\n exog = self.exog\n margeff = self.predict(params, exog)[:,None] * params[None,:]\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None]\n\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n cntfit = super(CountModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n discretefit = CountResults(self, cntfit)\n return CountResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1CountResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1CountResultsWrapper(discretefit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\nclass OrderedModel(DiscreteModel):\n pass\n\n#### Public Model Classes ####\n\nclass Poisson(CountModel):\n __doc__ = \"\"\"\n Poisson model for count data\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' :\n \"\"\"offset : array_like\n Offset is added to the linear prediction with coefficient equal to 1.\n exposure : array_like\n Log(exposure) is added to the linear prediction with coefficient\n equal to 1.\n\n \"\"\" + base._missing_param_doc}\n\n\n def cdf(self, X):\n \"\"\"\n Poisson model cumulative distribution function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the model. See notes.\n\n Returns\n -------\n The value of the Poisson CDF at each point.\n\n Notes\n -----\n The CDF is defined as\n\n .. math:: \\\\exp\\\\left(-\\\\lambda\\\\right)\\\\sum_{i=0}^{y}\\\\frac{\\\\lambda^{i}}{i!}\n\n where :math:`\\\\lambda` assumes the loglinear model. I.e.,\n\n .. 
math:: \\\\ln\\\\lambda_{i}=X\\\\beta\n\n The parameter `X` is :math:`X\\\\beta` in the above formula.\n \"\"\"\n y = self.endog\n return stats.poisson.cdf(y, np.exp(X))\n\n def pdf(self, X):\n \"\"\"\n Poisson model probability mass function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the model. See notes.\n\n Returns\n -------\n pdf : ndarray\n The value of the Poisson probability mass function, PMF, for each\n point of X.\n\n Notes\n --------\n The PMF is defined as\n\n .. math:: \\\\frac{e^{-\\\\lambda_{i}}\\\\lambda_{i}^{y_{i}}}{y_{i}!}\n\n where :math:`\\\\lambda` assumes the loglinear model. I.e.,\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n\n The parameter `X` is :math:`x_{i}\\\\beta` in the above formula.\n \"\"\"\n y = self.endog\n return np.exp(stats.poisson.logpmf(y, np.exp(X)))\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood of Poisson model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n --------\n .. math:: \\\\ln L=\\\\sum_{i=1}^{n}\\\\left[-\\\\lambda_{i}+y_{i}x_{i}^{\\\\prime}\\\\beta-\\\\ln y_{i}!\\\\right]\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n XB = np.dot(self.exog, params) + offset + exposure\n endog = self.endog\n return np.sum(-np.exp(XB) + endog*XB - gammaln(endog+1))\n\n def loglikeobs(self, params):\n \"\"\"\n Loglikelihood for observations of Poisson model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n --------\n .. 
math:: \\\\ln L_{i}=\\\\left[-\\\\lambda_{i}+y_{i}x_{i}^{\\\\prime}\\\\beta-\\\\ln y_{i}!\\\\right]\n\n for observations :math:`i=1,...,n`\n\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n XB = np.dot(self.exog, params) + offset + exposure\n endog = self.endog\n #np.sum(stats.poisson.logpmf(endog, np.exp(XB)))\n return -np.exp(XB) + endog*XB - gammaln(endog+1)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n cntfit = super(CountModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n if 'cov_type' in kwargs:\n cov_kwds = kwargs.get('cov_kwds', {})\n kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}\n else:\n kwds = {}\n discretefit = PoissonResults(self, cntfit, **kwds)\n return PoissonResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1PoissonResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1PoissonResultsWrapper(discretefit)\n\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"fit the model subject to linear equality constraints\n\n The constraints are of the form `R params = q`\n where R is the constraint_matrix and q is the vector of\n constraint_values.\n\n The estimation creates a new model with transformed design matrix,\n exog, and converts the results back to the original parameterization.\n\n Parameters\n ----------\n constraints : formula expression or tuple\n If it is a tuple, then the constraint needs to be given by two\n arrays (constraint_matrix, constraint_value), i.e. (R, q).\n Otherwise, the constraints can be given as strings or list of\n strings.\n see t_test for details\n start_params : None or array_like\n starting values for the optimization. 
`start_params` needs to be\n given in the original parameter space and are internally\n transformed.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the transformed model.\n\n Returns\n -------\n results : Results instance\n\n \"\"\"\n\n #constraints = (R, q)\n # TODO: temporary trailing underscore to not overwrite the monkey\n # patched version\n # TODO: decide whether to move the imports\n from patsy import DesignInfo\n from statsmodels.base._constraints import fit_constrained\n\n # same pattern as in base.LikelihoodModel.t_test\n lc = DesignInfo(self.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n # TODO: add start_params option, need access to tranformation\n # fit_constrained needs to do the transformation\n params, cov, res_constr = fit_constrained(self, R, q,\n start_params=start_params,\n fit_kwds=fit_kwds)\n #create dummy results Instance, TODO: wire up properly\n res = self.fit(maxiter=0, method='nm', disp=0,\n warn_convergence=False) # we get a wrapper back\n res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)\n res.mle_retvals['iterations'] = res_constr.mle_retvals.get(\n 'iterations', np.nan)\n res.mle_retvals['converged'] = res_constr.mle_retvals['converged']\n res._results.params = params\n res._results.normalized_cov_params = cov\n k_constr = len(q)\n res._results.df_resid += k_constr\n res._results.df_model -= k_constr\n res._results.constraints = lc\n res._results.k_constr = k_constr\n res._results.results_constrained = res_constr\n return res\n\n\n def score(self, params):\n \"\"\"\n Poisson model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left(y_{i}-\\\\lambda_{i}\\\\right)x_{i}\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + offset + exposure)\n return np.dot(self.endog - L, X)\n\n def score_obs(self, params):\n \"\"\"\n Poisson model Jacobian of the log-likelihood for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray (nobs, k_vars)\n The score vector of the model evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left(y_{i}-\\\\lambda_{i}\\\\right)x_{i}\n\n for observations :math:`i=1,...,n`\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + offset + exposure)\n return (self.endog - L)[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Poisson model Hessian matrix of the loglikelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\sum_{i=1}^{n}\\\\lambda_{i}x_{i}x_{i}^{\\\\prime}\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + exposure + offset)\n return -np.dot(L*X.T, X)\n\nclass Logit(BinaryModel):\n __doc__ = \"\"\"\n Binary choice logit model\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc}\n\n def cdf(self, X):\n \"\"\"\n The logistic cumulative distribution function\n\n Parameters\n ----------\n X : array-like\n `X` is the linear predictor of the logit model. See notes.\n\n Returns\n -------\n 1/(1 + exp(-X))\n\n Notes\n ------\n In the logit model,\n\n .. math:: \\\\Lambda\\\\left(x^{\\\\prime}\\\\beta\\\\right)=\\\\text{Prob}\\\\left(Y=1|x\\\\right)=\\\\frac{e^{x^{\\\\prime}\\\\beta}}{1+e^{x^{\\\\prime}\\\\beta}}\n \"\"\"\n X = np.asarray(X)\n return 1/(1+np.exp(-X))\n\n def pdf(self, X):\n \"\"\"\n The logistic probability density function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the logit model. See notes.\n\n Returns\n -------\n pdf : ndarray\n The value of the Logit probability mass function, PMF, for each\n point of X. ``np.exp(-x)/(1+np.exp(-X))**2``\n\n Notes\n -----\n In the logit model,\n\n .. math:: \\\\lambda\\\\left(x^{\\\\prime}\\\\beta\\\\right)=\\\\frac{e^{-x^{\\\\prime}\\\\beta}}{\\\\left(1+e^{-x^{\\\\prime}\\\\beta}\\\\right)^{2}}\n \"\"\"\n X = np.asarray(X)\n return np.exp(-X)/(1+np.exp(-X))**2\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of logit model.\n\n Parameters\n -----------\n params : array-like\n The parameters of the logit model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n ------\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Lambda\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n logistic distribution is symmetric.\n \"\"\"\n q = 2*self.endog - 1\n X = self.exog\n return np.sum(np.log(self.cdf(q*np.dot(X,params))))\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of logit model for each observation.\n\n Parameters\n -----------\n params : array-like\n The parameters of the logit model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n ------\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Lambda\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`q=2y-1`. This simplification comes from the fact that the\n logistic distribution is symmetric.\n \"\"\"\n q = 2*self.endog - 1\n X = self.exog\n return np.log(self.cdf(q*np.dot(X,params)))\n\n def score(self, params):\n \"\"\"\n Logit model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params: array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left(y_{i}-\\\\Lambda_{i}\\\\right)x_{i}\n \"\"\"\n\n y = self.endog\n X = self.exog\n L = self.cdf(np.dot(X,params))\n return np.dot(y - L,X)\n\n def score_obs(self, params):\n \"\"\"\n Logit model Jacobian of the log-likelihood for each observation\n\n Parameters\n ----------\n params: array-like\n The parameters of the model\n\n Returns\n -------\n jac : ndarray, (nobs, k_vars)\n The derivative of the loglikelihood for each observation evaluated\n at `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left(y_{i}-\\\\Lambda_{i}\\\\right)x_{i}\n\n for observations :math:`i=1,...,n`\n\n \"\"\"\n\n y = self.endog\n X = self.exog\n L = self.cdf(np.dot(X, params))\n return (y - L)[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Logit model Hessian matrix of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\sum_{i}\\\\Lambda_{i}\\\\left(1-\\\\Lambda_{i}\\\\right)x_{i}x_{i}^{\\\\prime}\n \"\"\"\n X = self.exog\n L = self.cdf(np.dot(X,params))\n return -np.dot(L*(1-L)*X.T,X)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n bnryfit = super(Logit, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n discretefit = LogitResults(self, bnryfit)\n return BinaryResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\nclass Probit(BinaryModel):\n __doc__ = \"\"\"\n Binary choice Probit model\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc}\n\n def cdf(self, X):\n \"\"\"\n Probit (Normal) cumulative distribution function\n\n Parameters\n ----------\n X : array-like\n The linear predictor of the model (XB).\n\n Returns\n --------\n cdf : ndarray\n The cdf evaluated at `X`.\n\n Notes\n -----\n This function is just an alias for scipy.stats.norm.cdf\n \"\"\"\n return stats.norm._cdf(X)\n\n def pdf(self, X):\n \"\"\"\n Probit (Normal) probability density function\n\n Parameters\n ----------\n X : array-like\n The linear predictor of the model (XB).\n\n Returns\n --------\n pdf : ndarray\n The value of the normal density function for each point of X.\n\n Notes\n -----\n This function is just an alias for scipy.stats.norm.pdf\n\n \"\"\"\n X = np.asarray(X)\n return stats.norm._pdf(X)\n\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of probit model (i.e., the normal distribution).\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n -----\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n Where :math:`q=2y-1`. 
This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n\n q = 2*self.endog - 1\n X = self.exog\n return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)),\n FLOAT_EPS, 1)))\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of probit model for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n -----\n .. math:: \\\\ln L_{i}=\\\\ln\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n\n q = 2*self.endog - 1\n X = self.exog\n return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))\n\n\n def score(self, params):\n \"\"\"\n Probit model score (gradient) vector\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left[\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\\\\right]x_{i}\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n y = self.endog\n X = self.exog\n XB = np.dot(X,params)\n q = 2*y - 1\n # clip to get rid of invalid divide complaint\n L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)\n return np.dot(L,X)\n\n def score_obs(self, params):\n \"\"\"\n Probit model Jacobian for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n jac : ndarray, (nobs, k_vars)\n The derivative of the loglikelihood for each observation evaluated\n at `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left[\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\\\\right]x_{i}\n\n for observations :math:`i=1,...,n`\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n y = self.endog\n X = self.exog\n XB = np.dot(X,params)\n q = 2*y - 1\n # clip to get rid of invalid divide complaint\n L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)\n return L[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Probit model Hessian matrix of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\lambda_{i}\\\\left(\\\\lambda_{i}+x_{i}^{\\\\prime}\\\\beta\\\\right)x_{i}x_{i}^{\\\\prime}\n\n where\n\n .. 
math:: \\\\lambda_{i}=\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\n\n and :math:`q=2y-1`\n \"\"\"\n X = self.exog\n XB = np.dot(X,params)\n q = 2*self.endog - 1\n L = q*self.pdf(q*XB)/self.cdf(q*XB)\n return np.dot(-L*(L+XB)*X.T,X)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n bnryfit = super(Probit, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n discretefit = ProbitResults(self, bnryfit)\n return BinaryResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\nclass MNLogit(MultinomialModel):\n __doc__ = \"\"\"\n Multinomial logit model\n\n Parameters\n ----------\n endog : array-like\n `endog` is an 1-d vector of the endogenous response. `endog` can\n contain strings, ints, or floats. Note that if it contains strings,\n every distinct string will be a category. No stripping of whitespace\n is done.\n exog : array-like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user. See `statsmodels.tools.add_constant`.\n %(extra_params)s\n\n Attributes\n ----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n J : float\n The number of choices for the endogenous variable. Note that this\n is zero-indexed.\n K : float\n The actual number of parameters for the exogenous design. Includes\n the constant if the design has one.\n names : dict\n A dictionary mapping the column number in `wendog` to the variables\n in `endog`.\n wendog : array\n An n x j array where j is the number of unique categories in `endog`.\n Each column of j is a dummy variable indicating the category of\n each observation. See `names` for a dictionary mapping each column to\n its category.\n\n Notes\n -----\n See developer notes for further information on `MNLogit` internals.\n \"\"\" % {'extra_params' : base._missing_param_doc}\n\n def pdf(self, eXB):\n \"\"\"\n NotImplemented\n \"\"\"\n raise NotImplementedError\n\n def cdf(self, X):\n \"\"\"\n Multinomial logit cumulative distribution function.\n\n Parameters\n ----------\n X : array\n The linear predictor of the model XB.\n\n Returns\n --------\n cdf : ndarray\n The cdf evaluated at `X`.\n\n Notes\n -----\n In the multinomial logit model.\n .. math:: \\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\n \"\"\"\n eXB = np.column_stack((np.ones(len(X)), np.exp(X)))\n return eXB/eXB.sum(1)[:,None]\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of the multinomial logit model.\n\n Parameters\n ----------\n params : array-like\n The parameters of the multinomial logit model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n ------\n .. 
math:: \\\\ln L=\\\\sum_{i=1}^{n}\\\\sum_{j=0}^{J}d_{ij}\\\\ln\\\\left(\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)\n\n where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0\n if not.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n d = self.wendog\n logprob = np.log(self.cdf(np.dot(self.exog,params)))\n return np.sum(d * logprob)\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of the multinomial logit model for each observation.\n\n Parameters\n ----------\n params : array-like\n The parameters of the multinomial logit model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n ------\n .. math:: \\\\ln L_{i}=\\\\sum_{j=0}^{J}d_{ij}\\\\ln\\\\left(\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0\n if not.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n d = self.wendog\n logprob = np.log(self.cdf(np.dot(self.exog,params)))\n return d * logprob\n\n def score(self, params):\n \"\"\"\n Score matrix for multinomial logit model log-likelihood\n\n Parameters\n ----------\n params : array\n The parameters of the multinomial logit model.\n\n Returns\n --------\n score : ndarray, (K * (J-1),)\n The 2-d score vector, i.e. the first derivative of the\n loglikelihood function, of the multinomial logit model evaluated at\n `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta_{j}}=\\\\sum_{i}\\\\left(d_{ij}-\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)x_{i}\n\n for :math:`j=1,...,J`\n\n In the multinomial model the score matrix is K x J-1 but is returned\n as a flattened array to work with the solvers.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,\n params))[:,1:]\n #NOTE: might need to switch terms if params is reshaped\n return np.dot(firstterm.T, self.exog).flatten()\n\n def loglike_and_score(self, params):\n \"\"\"\n Returns log likelihood and score, efficiently reusing calculations.\n\n Note that both of these returned quantities will need to be negated\n before being minimized by the maximum likelihood fitting machinery.\n\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))\n loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))\n firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]\n score_array = np.dot(firstterm.T, self.exog).flatten()\n return loglike_value, score_array\n\n def score_obs(self, params):\n \"\"\"\n Jacobian matrix for multinomial logit model log-likelihood\n\n Parameters\n ----------\n params : array\n The parameters of the multinomial logit model.\n\n Returns\n --------\n jac : ndarray, (nobs, k_vars*(J-1))\n The derivative of the loglikelihood for each observation evaluated\n at `params` .\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta_{j}}=\\\\left(d_{ij}-\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)x_{i}\n\n for :math:`j=1,...,J`, for observations :math:`i=1,...,n`\n\n In the multinomial model the score vector is K x (J-1) but is returned\n as a flattened array. The Jacobian has the observations in rows and\n the flatteded array of derivatives in columns.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,\n params))[:,1:]\n #NOTE: might need to switch terms if params is reshaped\n return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1)\n\n def hessian(self, params):\n \"\"\"\n Multinomial logit Hessian matrix of the log-likelihood\n\n Parameters\n -----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (J*K, J*K)\n The Hessian, second derivative of loglikelihood function with\n respect to the flattened parameters, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta_{j}\\\\partial\\\\beta_{l}}=-\\\\sum_{i=1}^{n}\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\left[\\\\boldsymbol{1}\\\\left(j=l\\\\right)-\\\\frac{\\\\exp\\\\left(\\\\beta_{l}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right]x_{i}x_{l}^{\\\\prime}\n\n where\n :math:`\\\\boldsymbol{1}\\\\left(j=l\\\\right)` equals 1 if `j` = `l` and 0\n otherwise.\n\n The actual Hessian matrix has J**2 * K x K elements. Our Hessian\n is reshaped to be square (J*K, J*K) so that the solvers can use it.\n\n This implementation does not take advantage of the symmetry of\n the Hessian and could probably be refactored for speed.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n X = self.exog\n pr = self.cdf(np.dot(X,params))\n partials = []\n J = self.wendog.shape[1] - 1\n K = self.exog.shape[1]\n for i in range(J):\n for j in range(J): # this loop assumes we drop the first col.\n if i == j:\n partials.append(\\\n -np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))\n else:\n partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))\n H = np.array(partials)\n # the developer's notes on multinomial should clear this math up\n H = np.transpose(H.reshape(J,J,K,K), (0,2,1,3)).reshape(J*K,J*K)\n return H\n\n\n#TODO: Weibull can replaced by a survival analsysis function\n# like stat's streg (The cox model as well)\n#class Weibull(DiscreteModel):\n# \"\"\"\n# Binary choice Weibull model\n#\n# Notes\n# ------\n# This is unfinished and untested.\n# \"\"\"\n##TODO: add analytic hessian for Weibull\n# def initialize(self):\n# pass\n#\n# def cdf(self, X):\n# \"\"\"\n# Gumbell (Log Weibull) cumulative distribution function\n# \"\"\"\n## return np.exp(-np.exp(-X))\n# return stats.gumbel_r.cdf(X)\n# # these two are equivalent.\n# # Greene table and discussion is incorrect.\n#\n# def pdf(self, X):\n# \"\"\"\n# Gumbell (LogWeibull) probability distribution function\n# \"\"\"\n# return stats.gumbel_r.pdf(X)\n#\n# def loglike(self, params):\n# \"\"\"\n# Loglikelihood of Weibull distribution\n# \"\"\"\n# X = self.exog\n# cdf = self.cdf(np.dot(X,params))\n# y = self.endog\n# return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))\n#\n# def score(self, params):\n# y = self.endog\n# 
X = self.exog\n# F = self.cdf(np.dot(X,params))\n# f = self.pdf(np.dot(X,params))\n# term = (y*f/F + (1 - y)*-f/(1-F))\n# return np.dot(term,X)\n#\n# def hessian(self, params):\n# hess = nd.Jacobian(self.score)\n# return hess(params)\n#\n# def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):\n## The example had problems with all zero start values, Hessian = 0\n# if start_params is None:\n# start_params = OLS(self.endog, self.exog).fit().params\n# mlefit = super(Weibull, self).fit(start_params=start_params,\n# method=method, maxiter=maxiter, tol=tol)\n# return mlefit\n#\n\nclass NegativeBinomial(CountModel):\n __doc__ = \"\"\"\n Negative Binomial Model for count data\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n\n References\n ----------\n\n References:\n\n Greene, W. 2008. \"Functional forms for the negtive binomial model\n for count data\". Economics Letters. Volume 99, Number 3, pp.585-590.\n Hilbe, J.M. 2011. \"Negative binomial regression\". Cambridge University\n Press.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' :\n \"\"\"loglike_method : string\n Log-likelihood type. 'nb2','nb1', or 'geometric'.\n Fitted value :math:`\\\\mu`\n Heterogeneity parameter :math:`\\\\alpha`\n\n - nb2: Variance equal to :math:`\\\\mu + \\\\alpha\\\\mu^2` (most common)\n - nb1: Variance equal to :math:`\\\\mu + \\\\alpha\\\\mu`\n - geometric: Variance equal to :math:`\\\\mu + \\\\mu^2`\n offset : array_like\n Offset is added to the linear prediction with coefficient equal to 1.\n exposure : array_like\n Log(exposure) is added to the linear prediction with coefficient\n equal to 1.\n\n \"\"\" + base._missing_param_doc}\n def __init__(self, endog, exog, loglike_method='nb2', offset=None,\n exposure=None, missing='none', **kwargs):\n super(NegativeBinomial, self).__init__(endog, exog, offset=offset,\n exposure=exposure,\n missing=missing, **kwargs)\n self.loglike_method = loglike_method\n self._initialize()\n if loglike_method in ['nb2', 'nb1']:\n self.exog_names.append('alpha')\n self.k_extra = 1\n else:\n self.k_extra = 0\n # store keys for extras if we need to recreate model instance\n # we need to append keys that don't go to super\n self._init_keys.append('loglike_method')\n\n def _initialize(self):\n if self.loglike_method == 'nb2':\n self.hessian = self._hessian_nb2\n self.score = self._score_nbin\n self.loglikeobs = self._ll_nb2\n self._transparams = True # transform lnalpha -> alpha in fit\n elif self.loglike_method == 'nb1':\n self.hessian = self._hessian_nb1\n self.score = self._score_nb1\n self.loglikeobs = self._ll_nb1\n self._transparams = True # transform lnalpha -> alpha in fit\n elif self.loglike_method == 'geometric':\n self.hessian = self._hessian_geom\n self.score = self._score_geom\n self.loglikeobs = self._ll_geometric\n else:\n raise NotImplementedError(\"Likelihood type must nb1, nb2 or \"\n \"geometric\")\n\n # Workaround to pickle instance methods\n def __getstate__(self):\n odict = self.__dict__.copy() # copy the dict since we change it\n del odict['hessian']\n del odict['score']\n del odict['loglikeobs']\n return odict\n\n def __setstate__(self, indict):\n self.__dict__.update(indict)\n self._initialize()\n\n def _ll_nbin(self, params, alpha, Q=0):\n endog = self.endog\n mu = self.predict(params)\n size = 1/alpha * mu**Q\n prob = size/(size+mu)\n coeff = (gammaln(size+endog) - gammaln(endog+1) -\n 
gammaln(size))\n llf = coeff + size*np.log(prob) + endog*np.log(1-prob)\n return llf\n\n def _ll_nb2(self, params):\n if self._transparams: # got lnalpha during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n return self._ll_nbin(params[:-1], alpha, Q=0)\n\n def _ll_nb1(self, params):\n if self._transparams: # got lnalpha during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n return self._ll_nbin(params[:-1], alpha, Q=1)\n\n def _ll_geometric(self, params):\n # we give alpha of 1 because it's actually log(alpha) where alpha=0\n return self._ll_nbin(params, 1, 0)\n\n def loglike(self, params):\n r\"\"\"\n Loglikelihood for negative binomial model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model. If `loglike_method` is nb1 or\n nb2, then the ancillary parameter is expected to be the\n last element.\n\n Returns\n -------\n llf : float\n The loglikelihood value at `params`\n\n Notes\n -----\n Following notation in Greene (2008), with negative binomial\n heterogeneity parameter :math:`\\alpha`:\n\n .. math::\n\n \\lambda_i &= exp(X\\beta) \\\\\n \\theta &= 1 / \\alpha \\\\\n g_i &= \\theta \\lambda_i^Q \\\\\n w_i &= g_i/(g_i + \\lambda_i) \\\\\n r_i &= \\theta / (\\theta+\\lambda_i) \\\\\n ln \\mathcal{L}_i &= ln \\Gamma(y_i+g_i) - ln \\Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)\n\n where :math`Q=0` for NB2 and geometric and :math:`Q=1` for NB1.\n For the geometric, :math:`\\alpha=0` as well.\n\n \"\"\"\n llf = np.sum(self.loglikeobs(params))\n return llf\n\n def _score_geom(self, params):\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n dparams = exog * (y-mu)/(mu+1)\n return dparams.sum(0)\n\n def _score_nbin(self, params, Q=0):\n \"\"\"\n Score vector for NB2 model\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n params = params[:-1]\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n a1 = 1/alpha * mu**Q\n if Q: # nb1\n dparams = exog*mu/alpha*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n dalpha = ((alpha*(y - mu*np.log(1/(alpha + 1)) -\n mu*(special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha) + 1)) -\n mu*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha)))/\n (alpha**2*(alpha + 1))).sum()\n\n else: # nb2\n dparams = exog*a1 * (y-mu)/(mu+a1)\n da1 = -alpha**-2\n dalpha = (special.digamma(a1+y) - special.digamma(a1) + np.log(a1)\n - np.log(a1+mu) - (a1+y)/(a1+mu) + 1).sum()*da1\n\n #multiply above by constant outside sum to reduce rounding error\n if self._transparams:\n return np.r_[dparams.sum(0), dalpha*alpha]\n else:\n return np.r_[dparams.sum(0), dalpha]\n\n def _score_nb1(self, params):\n return self._score_nbin(params, Q=1)\n\n def _hessian_geom(self, params):\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim, dim))\n const_arr = mu*(1+y)/(mu+1)**2\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *\n const_arr, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n return hess_arr\n\n\n def _hessian_nb1(self, params):\n \"\"\"\n Hessian of NB1 model.\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n\n params = 
params[:-1]\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n a1 = mu/alpha\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim+1,dim+1))\n #const_arr = a1*mu*(a1+y)/(mu+a1)**2\n # not all of dparams\n dparams = exog/alpha*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n\n dmudb = exog*mu\n xmu_alpha = exog*mu/alpha\n trigamma = (special.polygamma(1, mu/alpha + y) -\n special.polygamma(1, mu/alpha))\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] +\n xmu_alpha[:,i,None] * xmu_alpha[:,j,None] *\n trigamma, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n\n # for dl/dparams dalpha\n da1 = -alpha**-2\n dldpda = np.sum(-mu/alpha * dparams + exog*mu/alpha *\n (-trigamma*mu/alpha**2 - 1/(alpha+1)), axis=0)\n\n hess_arr[-1,:-1] = dldpda\n hess_arr[:-1,-1] = dldpda\n\n # for dl/dalpha dalpha\n digamma_part = (special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n\n log_alpha = np.log(1/(alpha+1))\n alpha3 = alpha**3\n alpha2 = alpha**2\n mu2 = mu**2\n dada = ((alpha3*mu*(2*log_alpha + 2*digamma_part + 3) -\n 2*alpha3*y + alpha2*mu2*trigamma +\n 4*alpha2*mu*(log_alpha + digamma_part) +\n alpha2 * (2*mu - y) +\n 2*alpha*mu2*trigamma +\n 2*alpha*mu*(log_alpha + digamma_part) +\n mu2*trigamma)/(alpha**4*(alpha2 + 2*alpha + 1)))\n hess_arr[-1,-1] = dada.sum()\n\n return hess_arr\n\n def _hessian_nb2(self, params):\n \"\"\"\n Hessian of NB2 model.\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n a1 = 1/alpha\n params = params[:-1]\n\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim+1,dim+1))\n const_arr = a1*mu*(a1+y)/(mu+a1)**2\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *\n const_arr, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n\n # for dl/dparams dalpha\n da1 = -alpha**-2\n dldpda = np.sum(mu*exog*(y-mu)*da1/(mu+a1)**2 , axis=0)\n hess_arr[-1,:-1] = dldpda\n hess_arr[:-1,-1] = dldpda\n\n # for dl/dalpha dalpha\n #NOTE: polygamma(1,x) is the trigamma function\n da2 = 2*alpha**-3\n dalpha = da1 * (special.digamma(a1+y) - special.digamma(a1) +\n np.log(a1) - np.log(a1+mu) - (a1+y)/(a1+mu) + 1)\n dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -\n special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +\n (y - mu)/(mu + a1)**2)).sum()\n hess_arr[-1,-1] = dada\n\n return hess_arr\n\n #TODO: replace this with analytic where is it used?\n def score_obs(self, params):\n sc = approx_fprime_cs(params, self.loglikeobs)\n return sc\n\n def fit(self, start_params=None, method='bfgs', maxiter=35,\n full_output=1, disp=1, callback=None,\n cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):\n\n # Note: don't let super handle robust covariance because it has\n # transformed params\n\n if self.loglike_method.startswith('nb') and method not in ['newton',\n 'ncg']:\n self._transparams = True # in case same Model instance is refit\n elif self.loglike_method.startswith('nb'): # method is newton/ncg\n self._transparams = False # because we need to step in alpha space\n\n if start_params is None:\n # Use poisson fit as first guess.\n #TODO, Warning: this assumes exposure is 
logged\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n mod_poi = Poisson(self.endog, self.exog, offset=offset)\n start_params = mod_poi.fit(disp=0).params\n if self.loglike_method.startswith('nb'):\n start_params = np.append(start_params, 0.1)\n mlefit = super(NegativeBinomial, self).fit(start_params=start_params,\n maxiter=maxiter, method=method, disp=disp,\n full_output=full_output, callback=lambda x:x,\n **kwargs)\n # TODO: Fix NBin _check_perfect_pred\n if self.loglike_method.startswith('nb'):\n # mlefit is a wrapped counts results\n self._transparams = False # don't need to transform anymore now\n # change from lnalpha to alpha\n if method not in [\"newton\", \"ncg\"]:\n mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])\n\n nbinfit = NegativeBinomialResults(self, mlefit._results)\n result = NegativeBinomialResultsWrapper(nbinfit)\n else:\n result = mlefit\n\n if cov_kwds is None:\n cov_kwds = {} #TODO: make this unnecessary ?\n result._get_robustcov_results(cov_type=cov_type,\n use_self=True, use_t=use_t, **cov_kwds)\n return result\n\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n\n if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and\n alpha != 0):\n # don't penalize alpha if alpha is scalar\n k_params = self.exog.shape[1] + self.k_extra\n alpha = alpha * np.ones(k_params)\n alpha[-1] = 0\n\n # alpha for regularized poisson to get starting values\n alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha\n\n self._transparams = False\n if start_params is None:\n # Use poisson fit as first guess.\n #TODO, Warning: this assumes exposure is logged\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n mod_poi = Poisson(self.endog, self.exog, offset=offset)\n start_params = mod_poi.fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=0, callback=callback,\n alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params\n if self.loglike_method.startswith('nb'):\n start_params = np.append(start_params, 0.1)\n\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1NegativeBinomialResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n\n return L1NegativeBinomialResultsWrapper(discretefit)\n\n\n### Results Class ###\n\nclass DiscreteResults(base.LikelihoodModelResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for the discrete dependent variable models.\",\n \"extra_attr\" : \"\"}\n\n def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,\n use_t=None):\n #super(DiscreteResults, self).__init__(model, params,\n # np.linalg.inv(-hessian), scale=1.)\n self.model = model\n self.df_model = model.df_model\n self.df_resid = model.df_resid\n self._cache = resettable_cache()\n self.nobs = 
model.exog.shape[0]\n self.__dict__.update(mlefit.__dict__)\n\n if not hasattr(self, 'cov_type'):\n # do this only if super, i.e. mlefit didn't already add cov_type\n # robust covariance\n if use_t is not None:\n self.use_t = use_t\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description' : 'Standard Errors assume that the ' +\n 'covariance matrix of the errors is correctly ' +\n 'specified.'}\n else:\n if cov_kwds is None:\n cov_kwds = {}\n from statsmodels.base.covtype import get_robustcov_results\n get_robustcov_results(self, cov_type=cov_type, use_self=True,\n **cov_kwds)\n\n\n\n def __getstate__(self):\n try:\n #remove unpicklable callback\n self.mle_settings['callback'] = None\n except (AttributeError, KeyError):\n pass\n return self.__dict__\n\n @cache_readonly\n def prsquared(self):\n return 1 - self.llf/self.llnull\n\n @cache_readonly\n def llr(self):\n return -2*(self.llnull - self.llf)\n\n @cache_readonly\n def llr_pvalue(self):\n return stats.chisqprob(self.llr, self.df_model)\n\n @cache_readonly\n def llnull(self):\n\n model = self.model\n kwds = model._get_init_kwds()\n # TODO: what parameters to pass to fit?\n mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)\n # TODO: consider catching and warning on convergence failure?\n # in the meantime, try hard to converge. see\n # TestPoissonConstrained1a.test_smoke\n res_null = mod_null.fit(disp=0, warn_convergence=False,\n maxiter=10000)\n return res_null.llf\n\n @cache_readonly\n def fittedvalues(self):\n return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]])\n\n @cache_readonly\n def aic(self):\n return -2*(self.llf - (self.df_model+1))\n\n @cache_readonly\n def bic(self):\n return -2*self.llf + np.log(self.nobs)*(self.df_model+1)\n\n def _get_endog_name(self, yname, yname_list):\n if yname is None:\n yname = self.model.endog_names\n if yname_list is None:\n yname_list = self.model.endog_names\n return yname, yname_list\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Parameters\n ----------\n at : str, optional\n Options are:\n\n - 'overall', The average of the marginal effects at each\n observation.\n - 'mean', The marginal effects at the mean of each regressor.\n - 'median', The marginal effects at the median of each regressor.\n - 'zero', The marginal effects at zero for each regressor.\n - 'all', The marginal effects at each observation. If `at` is all\n only margeff will be available from the returned object.\n\n Note that if `exog` is specified, then marginal effects for all\n variables not specified by `exog` are calculated using the `at`\n option.\n method : str, optional\n Options are:\n\n - 'dydx' - dy/dx - No transformation is made and marginal effects\n are returned. This is the default.\n - 'eyex' - estimate elasticities of variables in `exog` --\n d(lny)/d(lnx)\n - 'dyex' - estimate semielasticity -- dy/d(lnx)\n - 'eydx' - estimate semeilasticity -- d(lny)/dx\n\n Note that tranformations are done after each observation is\n calculated. Semi-elasticities for binary variables are computed\n using the midpoint method. 'dyex' and 'eyex' do not make sense\n for discrete variables.\n atexog : array-like, optional\n Optionally, you can provide the exogenous variables over which to\n get the marginal effects. 
This should be a dictionary with the key\n as the zero-indexed column number and the value of the dictionary.\n Default is None for all independent variables less the constant.\n dummy : bool, optional\n If False, treats binary variables (if present) as continuous. This\n is the default. Else if True, treats binary variables as\n changing from 0 to 1. Note that any variable that is either 0 or 1\n is treated as binary. Each binary variable is treated separately\n for now.\n count : bool, optional\n If False, treats count variables (if present) as continuous. This\n is the default. Else if True, the marginal effect is the\n change in probabilities when each observation is increased by one.\n\n Returns\n -------\n DiscreteMargins : marginal effects instance\n Returns an object that holds the marginal effects, standard\n errors, confidence intervals, etc. See\n `statsmodels.discrete.discrete_margins.DiscreteMargins` for more\n information.\n\n Notes\n -----\n When using after Poisson, returns the expected number of events\n per period, assuming that the model is loglinear.\n \"\"\"\n from statsmodels.discrete.discrete_margins import DiscreteMargins\n return DiscreteMargins(self, (at, method, atexog, dummy, count))\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05,\n yname_list=None):\n \"\"\"Summarize the Regression Results\n\n Parameters\n -----------\n yname : string, optional\n Default is `y`\n xname : list of strings, optional\n Default is `var_##` for ## in p the number of regressors\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', [self.model.__class__.__name__]),\n ('Method:', ['MLE']),\n ('Date:', None),\n ('Time:', None),\n #('No. iterations:', [\"%d\" % self.mle_retvals['iterations']]),\n ('converged:', [\"%s\" % self.mle_retvals['converged']])\n ]\n\n top_right = [('No. 
Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None),\n ('Pseudo R-squ.:', [\"%#6.4g\" % self.prsquared]),\n ('Log-Likelihood:', None),\n ('LL-Null:', [\"%#8.5g\" % self.llnull]),\n ('LLR p-value:', [\"%#6.4g\" % self.llr_pvalue])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Regression Results\"\n\n #boiler plate\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n yname, yname_list = self._get_endog_name(yname, yname_list)\n # for top of table\n smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],\n yname=yname, xname=xname, title=title)\n # for parameters, etc\n smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n if hasattr(self, 'constraints'):\n smry.add_extra_txt(['Model has been estimated subject to linear '\n 'equality constraints.'])\n\n #diagnostic table not used yet\n #smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n # yname=yname, xname=xname,\n # title=\"\")\n return smry\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental function to summarize regression results\n\n Parameters\n -----------\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n yname : string\n Name of the dependent variable (optional)\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n\n if hasattr(self, 'constraints'):\n smry.add_text('Model has been estimated subject to linear '\n 'equality constraints.')\n\n return smry\n\n\n\nclass CountResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for count data\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid(self):\n \"\"\"\n Residuals\n\n Notes\n -----\n The residuals for Count models are defined as\n\n .. math:: y - p\n\n where :math:`p = \\\\exp(X\\\\beta)`. 
Any exposure and offset variables\n are also handled.\n \"\"\"\n return self.model.endog - self.predict()\n\nclass NegativeBinomialResults(CountResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for NegativeBinomial 1 and 2\",\n \"extra_attr\" : \"\"}\n\n @cache_readonly\n def lnalpha(self):\n return np.log(self.params[-1])\n\n @cache_readonly\n def lnalpha_std_err(self):\n return self.bse[-1] / self.params[-1]\n\n @cache_readonly\n def aic(self):\n # + 1 because we estimate alpha\n k_extra = getattr(self.model, 'k_extra', 0)\n return -2*(self.llf - (self.df_model + self.k_constant + k_extra))\n\n @cache_readonly\n def bic(self):\n # + 1 because we estimate alpha\n k_extra = getattr(self.model, 'k_extra', 0)\n return -2*self.llf + np.log(self.nobs)*(self.df_model +\n self.k_constant + k_extra)\n\nclass L1CountResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for count data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n #discretefit = CountResults(self, cntfit)\n\n def __init__(self, model, cntfit):\n super(L1CountResults, self).__init__(model, cntfit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = cntfit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n # update degrees of freedom\n self.model.df_model = self.nnz_params - 1\n self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)\n # adjust for extra parameter in NegativeBinomial nb1 and nb2\n # extra parameter is not included in df_model\n k_extra = getattr(self.model, 'k_extra', 0)\n self.model.df_model -= k_extra\n self.model.df_resid += k_extra\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\nclass PoissonResults(CountResults):\n def predict_prob(self, n=None, exog=None, exposure=None, offset=None,\n transform=True):\n \"\"\"\n Return predicted probability of each count level for each observation\n\n Parameters\n ----------\n n : array-like or int\n The counts for which you want the probabilities. If n is None\n then the probabilities for each count from 0 to max(y) are\n given.\n\n Returns\n -------\n ndarray\n A nobs x n array where len(`n`) columns are indexed by the count\n n. If n is None, then column 0 is the probability that each\n observation is 0, column 1 is the probability that each\n observation is 1, etc.\n \"\"\"\n if n is not None:\n counts = np.atleast_2d(n)\n else:\n counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))\n mu = self.predict(exog=exog, exposure=exposure, offset=offset,\n transform=transform, linear=False)[:,None]\n # uses broadcasting\n return stats.poisson.pmf(counts, mu)\n\nclass L1PoissonResults(L1CountResults, PoissonResults):\n pass\n\nclass L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults):\n pass\n\nclass OrderedResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" : \"A results class for ordered discrete data.\" , \"extra_attr\" : \"\"}\n pass\n\nclass BinaryResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" : \"A results class for binary data\", \"extra_attr\" : \"\"}\n\n def pred_table(self, threshold=.5):\n \"\"\"\n Prediction table\n\n Parameters\n ----------\n threshold : scalar\n Number between 0 and 1. 
Threshold above which a prediction is\n considered 1 and below which a prediction is considered 0.\n\n Notes\n ------\n pred_table[i,j] refers to the number of times \"i\" was observed and\n the model predicted \"j\". Correct predictions are along the diagonal.\n \"\"\"\n model = self.model\n actual = model.endog\n pred = np.array(self.predict() > threshold, dtype=float)\n bins = np.array([0, 0.5, 1])\n return np.histogram2d(actual, pred, bins=bins)[0]\n\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05,\n yname_list=None):\n smry = super(BinaryResults, self).summary(yname, xname, title, alpha,\n yname_list)\n fittedvalues = self.model.cdf(self.fittedvalues)\n absprederror = np.abs(self.model.endog - fittedvalues)\n predclose_sum = (absprederror < 1e-4).sum()\n predclose_frac = predclose_sum / len(fittedvalues)\n\n #add warnings/notes\n etext = []\n if predclose_sum == len(fittedvalues): #nobs?\n wstr = \"Complete Separation: The results show that there is\"\n wstr += \"complete separation.\\n\"\n wstr += \"In this case the Maximum Likelihood Estimator does \"\n wstr += \"not exist and the parameters\\n\"\n wstr += \"are not identified.\"\n etext.append(wstr)\n elif predclose_frac > 0.1: # TODO: get better diagnosis\n wstr = \"Possibly complete quasi-separation: A fraction \"\n wstr += \"%4.2f of observations can be\\n\" % predclose_frac\n wstr += \"perfectly predicted. This might indicate that there \"\n wstr += \"is complete\\nquasi-separation. In this case some \"\n wstr += \"parameters will not be identified.\"\n etext.append(wstr)\n if etext:\n smry.add_extra_txt(etext)\n return smry\n summary.__doc__ = DiscreteResults.summary.__doc__\n\n @cache_readonly\n def resid_dev(self):\n \"\"\"\n Deviance residuals\n\n Notes\n -----\n Deviance residuals are defined\n\n .. math:: d_j = \\\\pm\\\\left(2\\\\left[Y_j\\\\ln\\\\left(\\\\frac{Y_j}{M_jp_j}\\\\right) + (M_j - Y_j\\\\ln\\\\left(\\\\frac{M_j-Y_j}{M_j(1-p_j)} \\\\right) \\\\right] \\\\right)^{1/2}\n\n where\n\n :math:`p_j = cdf(X\\\\beta)` and :math:`M_j` is the total number of\n observations sharing the covariate pattern :math:`j`.\n\n For now :math:`M_j` is always set to 1.\n \"\"\"\n #These are the deviance residuals\n #model = self.model\n endog = self.model.endog\n #exog = model.exog\n # M = # of individuals that share a covariate pattern\n # so M[i] = 2 for i = two share a covariate pattern\n M = 1\n p = self.predict()\n #Y_0 = np.where(exog == 0)\n #Y_M = np.where(exog == M)\n #NOTE: Common covariate patterns are not yet handled\n res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \\\n endog*np.sqrt(2*M*np.abs(np.log(p)))\n return res\n\n @cache_readonly\n def resid_pearson(self):\n \"\"\"\n Pearson residuals\n\n Notes\n -----\n Pearson residuals are defined to be\n\n .. math:: r_j = \\\\frac{(y - M_jp_j)}{\\\\sqrt{M_jp_j(1-p_j)}}\n\n where :math:`p_j=cdf(X\\\\beta)` and :math:`M_j` is the total number of\n observations sharing the covariate pattern :math:`j`.\n\n For now :math:`M_j` is always set to 1.\n \"\"\"\n # Pearson residuals\n #model = self.model\n endog = self.model.endog\n #exog = model.exog\n # M = # of individuals that share a covariate pattern\n # so M[i] = 2 for i = two share a covariate pattern\n # use unique row pattern?\n M = 1\n p = self.predict()\n return (endog - M*p)/np.sqrt(M*p*(1-p))\n\n @cache_readonly\n def resid_response(self):\n \"\"\"\n The response residuals\n\n Notes\n -----\n Response residuals are defined to be\n\n .. 
math:: y - p\n\n where :math:`p=cdf(X\\\\beta)`.\n \"\"\"\n return self.model.endog - self.predict()\n\nclass LogitResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for Logit Model\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid_generalized(self):\n \"\"\"\n Generalized residuals\n\n Notes\n -----\n The generalized residuals for the Logit model are defined\n\n .. math:: y - p\n\n where :math:`p=cdf(X\\\\beta)`. This is the same as the `resid_response`\n for the Logit model.\n \"\"\"\n # Generalized residuals\n return self.model.endog - self.predict()\n\nclass ProbitResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for Probit Model\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid_generalized(self):\n \"\"\"\n Generalized residuals\n\n Notes\n -----\n The generalized residuals for the Probit model are defined\n\n .. math:: y\\\\frac{\\\\phi(X\\\\beta)}{\\\\Phi(X\\\\beta)}-(1-y)\\\\frac{\\\\phi(X\\\\beta)}{1-\\\\Phi(X\\\\beta)}\n \"\"\"\n # generalized residuals\n model = self.model\n endog = model.endog\n XB = self.predict(linear=True)\n pdf = model.pdf(XB)\n cdf = model.cdf(XB)\n return endog * pdf/cdf - (1-endog)*pdf/(1-cdf)\n\nclass L1BinaryResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"Results instance for binary data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n def __init__(self, model, bnryfit):\n super(L1BinaryResults, self).__init__(model, bnryfit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = bnryfit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n self.model.df_model = self.nnz_params - 1\n self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\n\nclass MultinomialResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for multinomial data\", \"extra_attr\" : \"\"}\n def _maybe_convert_ynames_int(self, ynames):\n # see if they're integers\n try:\n for i in ynames:\n if ynames[i] % 1 == 0:\n ynames[i] = str(int(ynames[i]))\n except TypeError:\n pass\n return ynames\n\n def _get_endog_name(self, yname, yname_list, all=False):\n \"\"\"\n If all is False, the first variable name is dropped\n \"\"\"\n model = self.model\n if yname is None:\n yname = model.endog_names\n if yname_list is None:\n ynames = model._ynames_map\n ynames = self._maybe_convert_ynames_int(ynames)\n # use range below to ensure sortedness\n ynames = [ynames[key] for key in range(int(model.J))]\n ynames = ['='.join([yname, name]) for name in ynames]\n if not all:\n yname_list = ynames[1:] # assumes first variable is dropped\n else:\n yname_list = ynames\n return yname, yname_list\n\n def pred_table(self):\n \"\"\"\n Returns the J x J prediction table.\n\n Notes\n -----\n pred_table[i,j] refers to the number of times \"i\" was observed and\n the model predicted \"j\". 
Correct predictions are along the diagonal.\n \"\"\"\n ju = self.model.J - 1 # highest index\n # these are the actual, predicted indices\n #idx = lzip(self.model.endog, self.predict().argmax(1))\n bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju]))\n return np.histogram2d(self.model.endog, self.predict().argmax(1),\n bins=bins)[0]\n\n @cache_readonly\n def bse(self):\n bse = np.sqrt(np.diag(self.cov_params()))\n return bse.reshape(self.params.shape, order='F')\n\n @cache_readonly\n def aic(self):\n return -2*(self.llf - (self.df_model+self.model.J-1))\n\n @cache_readonly\n def bic(self):\n return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1)\n\n def conf_int(self, alpha=.05, cols=None):\n confint = super(DiscreteResults, self).conf_int(alpha=alpha,\n cols=cols)\n return confint.transpose(2,0,1)\n\n def margeff(self):\n raise NotImplementedError(\"Use get_margeff instead\")\n\n @cache_readonly\n def resid_misclassified(self):\n \"\"\"\n Residuals indicating which observations are misclassified.\n\n Notes\n -----\n The residuals for the multinomial model are defined as\n\n .. math:: argmax(y_i) \\\\neq argmax(p_i)\n\n where :math:`argmax(y_i)` is the index of the category for the\n endogenous variable and :math:`argmax(p_i)` is the index of the\n predicted probabilities for each category. That is, the residual\n is a binary indicator that is 0 if the category with the highest\n predicted probability is the same as that of the observed variable\n and 1 otherwise.\n \"\"\"\n # it's 0 or 1 - 0 for correct prediction and 1 for a missed one\n return (self.model.wendog.argmax(1) !=\n self.predict().argmax(1)).astype(float)\n\n def summary2(self, alpha=0.05, float_format=\"%.4f\"):\n \"\"\"Experimental function to summarize regression results\n\n Parameters\n -----------\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary\n results\n\n \"\"\"\n\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_dict(summary2.summary_model(self))\n # One data frame per value of endog\n eqn = self.params.shape[1]\n confint = self.conf_int(alpha)\n for i in range(eqn):\n coefs = summary2.summary_params(self, alpha, self.params[:,i],\n self.bse[:,i], self.tvalues[:,i], self.pvalues[:,i],\n confint[i])\n # Header must show value of endog\n level_str = self.model.endog_names + ' = ' + str(i)\n coefs[level_str] = coefs.index\n coefs = coefs.ix[:,[-1,0,1,2,3,4,5]]\n smry.add_df(coefs, index=False, header=True, float_format=float_format)\n smry.add_title(results=self)\n return smry\n\n\nclass L1MultinomialResults(MultinomialResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for multinomial data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n def __init__(self, model, mlefit):\n super(L1MultinomialResults, self).__init__(model, mlefit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = mlefit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n\n #Note: J-1 constants\n self.model.df_model = self.nnz_params - (self.model.J - 1)\n self.model.df_resid = float(self.model.endog.shape[0] 
- self.nnz_params)\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\n\n#### Results Wrappers ####\n\nclass OrderedResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(OrderedResultsWrapper, OrderedResults)\n\nclass CountResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(CountResultsWrapper, CountResults)\n\nclass NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(NegativeBinomialResultsWrapper,\n NegativeBinomialResults)\n\nclass PoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\n #_methods = {\n # \"predict_prob\" : \"rows\",\n # }\n #_wrap_methods = lm.wrap.union_dicts(\n # lm.RegressionResultsWrapper._wrap_methods,\n # _methods)\nwrap.populate_wrapper(PoissonResultsWrapper, PoissonResults)\n\nclass L1CountResultsWrapper(lm.RegressionResultsWrapper):\n pass\n\nclass L1PoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\n #_methods = {\n # \"predict_prob\" : \"rows\",\n # }\n #_wrap_methods = lm.wrap.union_dicts(\n # lm.RegressionResultsWrapper._wrap_methods,\n # _methods)\nwrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults)\n\nclass L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1NegativeBinomialResultsWrapper,\n L1NegativeBinomialResults)\n\nclass BinaryResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\"resid_dev\" : \"rows\",\n \"resid_generalized\" : \"rows\",\n \"resid_pearson\" : \"rows\",\n \"resid_response\" : \"rows\"\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(BinaryResultsWrapper, BinaryResults)\n\nclass L1BinaryResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults)\n\nclass MultinomialResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\"resid_misclassified\" : \"rows\"}\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults)\n\nclass L1MultinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults)\n\n\nif __name__==\"__main__\":\n import numpy as np\n import statsmodels.api as sm\n# Scratch work for negative binomial models\n# dvisits was written using an R package, I can provide the dataset\n# on request until the copyright is cleared up\n#TODO: request permission to use dvisits\n data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True)\n# note that this has missing values for Accident\n endog = data2['doctorco']\n exog = data2[['sex','age','agesq','income','levyplus','freepoor',\n 'freerepa','illness','actdays','hscore','chcond1',\n 'chcond2']].view(float, np.ndarray).reshape(len(data2),-1)\n exog = sm.add_constant(exog, prepend=True)\n poisson_mod = Poisson(endog, exog)\n poisson_res = poisson_mod.fit()\n# nb2_mod = NegBinTwo(endog, exog)\n# nb2_res = nb2_mod.fit()\n# solvers hang (with no error and no maxiter warn...)\n# haven't derived hessian (though it will be block diagonal) to check\n# newton, note that Lawless (1987) has the derivations\n# appear to be something wrong with the score?\n# according to Lawless, traditionally the likelihood is maximized wrt to B\n# and a gridsearch on a to determin ahat?\n# or the Breslow approach, which is 2 step iterative.\n nb2_params = [-2.190,.217,-.216,.609,-.142,.118,-.497,.145,.214,.144,\n .038,.099,.190,1.077] # alpha is 
last\n # taken from Cameron and Trivedi\n# the below is from Cameron and Trivedi as well\n# endog2 = np.array(endog>=1, dtype=float)\n# skipped for now, binary poisson results look off?\n data = sm.datasets.randhie.load()\n nbreg = NegativeBinomial\n mod = nbreg(data.endog, data.exog.view((float,9)))\n#FROM STATA:\n params = np.asarray([-.05654133, -.21214282, .0878311, -.02991813, .22903632,\n .06210226, .06799715, .08407035, .18532336])\n bse = [0.0062541, 0.0231818, 0.0036942, 0.0034796, 0.0305176, 0.0012397,\n 0.0198008, 0.0368707, 0.0766506]\n lnalpha = .31221786\n mod.loglike(np.r_[params,np.exp(lnalpha)])\n poiss_res = Poisson(data.endog, data.exog.view((float,9))).fit()\n func = lambda x: -mod.loglike(x)\n grad = lambda x: -mod.score(x)\n from scipy import optimize\n# res1 = optimize.fmin_l_bfgs_b(func, np.r_[poiss_res.params,.1],\n# approx_grad=True)\n res1 = optimize.fmin_bfgs(func, np.r_[poiss_res.params,.1], fprime=grad)\n from statsmodels.tools.numdiff import approx_hess_cs\n# np.sqrt(np.diag(-np.linalg.inv(approx_hess_cs(np.r_[params,lnalpha], mod.loglike))))\n#NOTE: this is the hessian in terms of alpha _not_ lnalpha\n hess_arr = mod.hessian(res1)\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.asarray",
"numpy.size",
"numpy.log",
"scipy.stats.poisson.pmf",
"numpy.allclose",
"pandas.core.api.get_dummies",
"numpy.transpose",
"numpy.append",
"numpy.concatenate",
"numpy.abs",
"numpy.genfromtxt",
"numpy.histogram2d",
"scipy.stats.norm._pdf",
"numpy.nonzero",
"numpy.linspace",
"numpy.sqrt",
"numpy.tile",
"numpy.eye",
"numpy.atleast_2d",
"scipy.special.gammaln",
"numpy.zeros",
"numpy.repeat",
"numpy.triu_indices",
"numpy.all",
"numpy.max",
"numpy.finfo",
"numpy.empty",
"scipy.stats.chisqprob",
"numpy.linalg.inv",
"numpy.exp",
"scipy.special.digamma",
"scipy.optimize.fmin_bfgs",
"numpy.array",
"numpy.dot",
"scipy.special.polygamma",
"scipy.stats.norm._cdf"
]
] |
volpatto/UQpy | [
"acbe1d6e655e98917f56b324f019881ea9ccca82"
] | [
"example/Bayesian/More advanced examples with FE models - Sfepy/material_homogenization.py"
] | [
"#!/usr/bin/env python\n\n# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\nsys.path.append('.')\n\nimport matplotlib as mlp\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PolyCollection\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n\nimport numpy as np\n\nfrom sfepy.base.base import Struct, output\nfrom sfepy.base.log import Log\nfrom sfepy import data_dir\n\nclass MaterialSimulator(object):\n\n @staticmethod\n def create_app(filename, is_homog=False, **kwargs):\n from sfepy.base.conf import ProblemConf, get_standard_keywords\n from sfepy.homogenization.homogen_app import HomogenizationApp\n from sfepy.applications import PDESolverApp\n\n required, other = get_standard_keywords()\n if is_homog:\n required.remove('equations')\n\n conf = ProblemConf.from_file(filename, required, other,\n define_args=kwargs)\n options = Struct(output_filename_trunk=None,\n save_ebc=False,\n save_ebc_nodes=False,\n save_regions=False,\n save_regions_as_groups=False,\n save_field_meshes=False,\n solve_not=False,\n )\n output.set_output(filename='sfepy_log.txt', quiet=True)\n\n if is_homog:\n app = HomogenizationApp(conf, options, 'material_opt_micro:')\n\n else:\n app = PDESolverApp(conf, options, 'material_opt_macro:')\n\n app.conf.opt_data = {}\n opts = conf.options\n if hasattr(opts, 'parametric_hook'): # Parametric study.\n parametric_hook = conf.get_function(opts.parametric_hook)\n app.parametrize(parametric_hook)\n\n return app\n\n def __init__(self, macro_fn, micro_fn, phis, plot_meshes_bool=False):\n self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)\n self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)\n self.phis = phis\n self.plot_meshes_bool = plot_meshes_bool\n\n @staticmethod\n def rotate_mat(D, angle):\n s = np.sin(angle)\n c = np.cos(angle)\n s2 = s**2\n c2 = c**2\n sc = s * c\n T = np.array([[c2, 0, s2, 0, 2*sc,0],\n [0, 1, 0, 0, 0, 0],\n [s2, 0, c2, 0, -2*sc, 0],\n [0, 0, 0, c, 0, -s],\n [-sc, 0, sc, 0, c2 - s2, 0],\n [0, 0, 0, s, 0, c]])\n\n return np.dot(np.dot(T, D), T.T)\n\n def plot_meshes(self):\n # plot mesh for micro problem\n pb = self.micro_app.problem\n coors = pb.domain.mesh.coors\n #print(set(coors[:,2]))\n graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])\n graph_slice = np.zeros((graph.shape[0], 4))\n for j in range(graph.shape[0]):\n graph_slice[j,:] = graph[j,coors[graph[j,:],2] == 0]\n cells_matrix = pb.domain.regions['Ym'].get_cells()\n cells_fibers = pb.domain.regions['Yf'].get_cells()\n fig = plt.figure(figsize = (12, 5))\n ax = fig.add_subplot(121)\n pc = PolyCollection(verts=coors[graph[cells_matrix,0:4],:2], facecolors='white', \n edgecolors='black')\n ax.add_collection(pc)\n pc = PolyCollection(verts=coors[graph[cells_fibers,0:4],:2], facecolors='gray', \n edgecolors='black')\n ax.add_collection(pc)\n ax.axis('equal')\n ax.set_title('2D plot of microstructure')\n ax = fig.add_subplot(122, projection='3d')\n for e in range(graph.shape[0]):\n if e in cells_fibers:\n color = 'gray'\n else:\n color = 'white'\n tupleList = coors[graph[e,:],:]\n vertices = [[0, 1, 2, 3], [4, 5, 6, 7], \n [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]\n verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] \n for ix in range(len(vertices))]\n pc3d = Poly3DCollection(verts=verts, facecolors=color, \n edgecolors='black', linewidths=1, alpha=0.5)\n 
ax.add_collection3d(pc3d)\n ax.set_title('3D plot of microstructure')\n plt.show(fig)\n \n # plot mesh for macro problem\n pb = self.macro_app.problem\n coors = pb.domain.mesh.coors\n graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])\n fig2 = plt.figure(figsize=(5,6))\n ax = fig2.add_subplot(111, projection='3d')\n for e in range(graph.shape[0]):\n tupleList = coors[graph[e,:],:]\n vertices = [[0, 1, 2, 3], [4, 5, 6, 7], \n [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]\n verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] \n for ix in range(len(vertices))]\n pc3d = Poly3DCollection(verts=verts, facecolors='white', \n edgecolors='black', linewidths=1, alpha=0.5)\n ax.add_collection3d(pc3d)\n ax.set_xlim3d(-0.03, 0.03)\n ax.set_ylim3d(-0.01, 0.01)\n ax.set_zlim3d(-0.01, 0.1)\n ax.set_title('3D plot of macro system')\n plt.show(fig2)\n return None\n\n def mat_eval(self, x):\n mic_od = self.micro_app.conf.opt_data\n mac_od = self.macro_app.conf.opt_data\n\n mic_od['coefs'] = {}\n mic_od['mat_params'] = x_norm2real(x)\n self.micro_app()\n\n D = mic_od['D_homog']\n comp_k = []\n for phi in self.phis:\n #print('phi = %d' % phi)\n\n mac_od['D_homog'] = self.rotate_mat(D, np.deg2rad(phi))\n self.macro_app()\n\n comp_k.append(mac_od['k'])\n\n # added by Audrey: get a plot of a slice of the mesh\n if self.plot_meshes_bool:\n self.plot_meshes()\n \n return comp_k\n\ndef bounds():\n x_L = [120e9, 0.2, 2e9, 0.2]\n x_U = [200e9, 0.45, 8e9, 0.45]\n return x_L, x_U\n\ndef x_norm2real(x):\n x_L, x_U = np.array(bounds())\n return x * (x_U - x_L) + x_L\n\ndef x_real2norm(x):\n x_L, x_U = np.array(bounds())\n return (x - x_L) / (x_U - x_L)\n\nmicro_filename = data_dir + '/examples/homogenization/' + 'homogenization_opt.py'\nmacro_filename = data_dir + '/examples/homogenization/' + 'linear_elasticity_opt.py'\n\ndef one_simulation(x0, plot_meshes_bool=False):\n \"\"\"\n This function is the main callable here: it takes in as input the parameter vector, \n here x0=[E_fiber, nu_fiber, E_matrix, nu_matrix], and returns the simulated output \n (here slope of the force-elongation curve obtained during a tensile test), to be compared\n with the measured data.\n \"\"\"\n x0 = x0.reshape((-1, ))\n phis = [0, 30, 60, 90]\n #exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis,\n plot_meshes_bool=plot_meshes_bool)\n qoi = ms.mat_eval(x0)\n return qoi\n\ndef one_simulation_2params(x0, plot_meshes_bool=False):\n x0 = x0.reshape((-1, ))\n x0 = np.array([x0[0], 0.45, x0[1], 0.])\n phis = [0, 30, 60, 90]\n #exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis, plot_meshes_bool=plot_meshes_bool)\n\n qoi = ms.mat_eval(x0)\n return qoi\n\ndef one_simulation_2params_rvs(x0, plot_meshes_bool=False):\n x0 = x0.reshape((-1, ))\n x0 = np.array([x0[0], 0.45, x0[1], 0.])\n phis = [0, 30, 60, 90]\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis,\n plot_meshes_bool=plot_meshes_bool)\n\n qoi = ms.mat_eval(x0)\n qoi = np.tile(np.array(qoi), 100)\n return qoi\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.collections.PolyCollection",
"numpy.cos",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.deg2rad"
]
] |
robotsorcerer/LevelSetPy | [
"54064ee7fd0144e0d658dd4f6121cbc1fda664b9"
] | [
"ExplicitIntegration/Integration/ode_cfl_1.py"
] | [
"__all__ = [\"odeCFL1\"]\n\nimport cupy as cp\nimport numpy as np\nfrom LevelSetPy.Utilities import *\nfrom .ode_cfl_set import odeCFLset\nfrom .ode_cfl_call import odeCFLcallPostTimestep\n\ndef odeCFL1(schemeFunc, tspan, y0, options=None, schemeData=None):\n \"\"\"\n odeCFL1: integrate a CFL constrained ODE (eg a PDE by method of lines).\n\n [ t, y, schemeData ] = odeCFL1(schemeFunc, tspan, y0, options, schemeData)\n\n Integrates a system forward in time by CFL constrained timesteps\n using a first order forward Euler scheme\n (which happens to be the first order TVD RK scheme).\n\n parameters:\n schemeFunc\t Function handle to a CFL constrained ODE system\n (typically an approximation to an HJ term, see below).\n tspan Range of time over which to integrate (see below).\n y0 Initial condition vector\n (typically the data array in vector form).\n options An option structure generated by odeCFLset\n (use [] as a placeholder if necessary).\n schemeData Structure passed through to schemeFunc.\n\n\n t Output time(s) (see below).\n y Output state (see below).\n schemeData Output version of schemeData (see below).\n\n A CFL constrained ODE system is described by a function with prototype\n\n [ ydot, stepBound, schemeData ] = schemeFunc(t, y, schemeData)\n\n where t is the current time, y the current state vector and\n schemeData is passed directly through. The output stepBound\n is the maximum allowed time step that will be taken by this function\n (typically the option parameter factorCFL will choose a smaller step size).\n\n The time interval tspan may be given as\n 1) A two entry vector [ t0 tf ], in which case the output will\n be scalar t = tf and a row vector y = y(tf).\n 2) A vector with three or more entries, in which case the output will\n be column vector t = tspan and each row of y will be the solution\n at one of the times in tspan. Unlike Matlab's ode suite routines,\n this version just repeatedly calls version (1), so it is not\n particularly efficient.\n\n Depending on the options specified, the final time may not be reached.\n If integration terminates early, then t (in tspan case (1)) or t(end)\n (in tspan case(2)) will contain the final time reached.\n\n Note that using this routine for integrating HJ PDEs will usually\n require that the data array be turned into a vector before the call\n and reshaped into an array after the call. Option (2) for tspan should\n not be used in this case because of the excessive memory requirements\n for storing solutions at multiple timesteps.\n\n The output version of schemeData will normally be identical to the inp.t\n version, and therefore can be ignored. However, it is possible for\n schemeFunc or a PostTimestep routine (see odeCFLset) to modify the\n structure during integration, and the version of schemeData at tf is\n returned in this output argument.\n\n\n Copyright 2005 Ian M. Mitchell ([email protected]).\n This software is used, copied and distributed under the licensing\n agreement contained in the file LICENSE in the top directory of\n the distribution.\n\n Ian Mitchell, 5/14/03.\n Calling parameters modified to more closely match Matlab's ODE suite\n Ian Mitchell, 2/6/04.\n Modified to allow vector level sets. 
Ian Mitchell, 11/23/04.\n Modified to add terminalEvent option, Ian Mitchell, 1/30/05.\n\n Lekan Molu, 08/21/2021\n \"\"\"\n small = 100 * eps\n #---------------------------------------------------------------------------\n # Make sure we have the default options settings\n if not options:\n options = odeCFLset()\n\n # Number of timesteps to be returned.\n numT = len(tspan)\n #---------------------------------------------------------------------------\n # If we were asked to integrate forward to a final time.\n if(numT == 2):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Is this a vector level set integration?\n if(iscell(y0)):\n numY = len(y0)\n # We need a cell vector form of schemeFunc.\n if(iscell(schemeFunc)):\n schemeFuncCell = schemeFunc\n else:\n schemeFuncCell = [schemeFunc for i in range(numY)]\n else:\n # Set numY, but be careful: ((numY == 1) & iscell(y0)) is possible.\n numY = 1\n # We need a cell vector form of schemeFunc.\n schemeFuncCell = schemeFunc\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n t = tspan[0]\n steps = 0; startTime = cputime(); stepBound = np.zeros((numY), dtype=np.float64)\n ydot = cell(numY, 1); y = copy.copy(y0)\n\n while(tspan[1] - t >= small * np.abs(tspan[1])):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # First substep: Forward Euler from t_n to t_{n+1}.\n\n # Approximate the derivative and CFL restriction.\n for i in range(numY):\n ydot[i], stepBound[i], schemeData = schemeFuncCell[i](t, y, schemeData)\n # If this is a vector level set, rotate the lists of vector arguments.\n if(iscell(y)):\n y = y[1:]\n\n if(iscell(schemeData)):\n schemeData = schemeData[1:]\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Determine CFL bound on timestep, but not beyond the final time.\n # For vector level sets, use the most restrictive stepBound.\n # We'll use this fixed timestep for both substeps.\n deltaT = np.min(np.hstack((options.factorCFL*stepBound, \\\n tspan[1] - t, options.maxStep)))\n # If there is a terminal event function registered, we need\n # to maintain the info from the last timestep.\n if options.terminalEvent:\n yOld , tOld = y, t\n # Update time.\n t += deltaT\n # Update level set functions.\n if(iscell(y)):\n for i in range(numY):\n y1[i] +=(deltaT * ydot[i])\n else:\n y1 = y + deltaT * ydot[0]\n steps += 1\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # If there is one or more post-timestep routines, call them.\n if options.postTimestep:\n y, schemeData = odeCFLcallPostTimestep(t, y, schemeData, options)\n\n # If we are in single step mode, then do not repeat.\n if(strcmp(options.singleStep, 'on')):\n break\n\n # If there is a terminal event function, establish initial sign\n # of terminal event vector.\n if options.terminalEvent:\n eventValue, schemeData = options.terminalEvent(t, y, tOld, yOld, schemeData)\n\n if((steps > 1) and np.any(np.sign(eventValue) != np.sign(eventValueOld))):\n break\n else:\n eventValueOld = eventValue\n\n endTime = cputime()\n if(strcmp(options.stats, 'on')):\n info(f'{steps} steps in {(endTime-startTime):.2} seconds from {tspan[0]} to {t}.')\n elif(numT > 2):\n # If we were asked for the solution at multiple timesteps.\n t, y, schemeData = odeCFLmultipleSteps(schemeFunc, tspan, y0, options, schemeData)\n else:\n # Malformed time span.\n error('tspan must contain at least two entries')\n\n\n return t, y, schemeData\n"
] | [
[
"numpy.sign",
"numpy.hstack",
"numpy.abs",
"numpy.zeros"
]
] |
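The `odeCFL1` docstring above describes a forward-Euler integrator whose step size is capped by a CFL bound returned from the right-hand-side function. The sketch below reproduces just that control flow for a plain ODE, leaving out vector level sets, post-timestep callbacks and terminal events; the function names and the toy problem are assumptions for illustration, not part of LevelSetPy.

```python
import numpy as np

def forward_euler_cfl(rhs, t0, tf, y0, cfl_factor=0.5):
    """Integrate y' = rhs(t, y) with forward Euler, where rhs also returns
    the largest admissible time step (a CFL-style restriction)."""
    t, y = t0, np.asarray(y0, dtype=float)
    while tf - t > 1e-12 * abs(tf):
        ydot, step_bound = rhs(t, y)
        dt = min(cfl_factor * step_bound, tf - t)   # never step past tf
        y = y + dt * ydot
        t += dt
    return t, y

# Toy problem: y' = -10 y with a step bound of 0.1 (so dt = 0.05 here).
def rhs(t, y):
    return -10.0 * y, 0.1

t_end, y_end = forward_euler_cfl(rhs, 0.0, 1.0, [1.0])
print(t_end, y_end)   # the solution decays toward zero, as expected
```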
siqim/Machine-Learning-with-Graphs | [
"697d83bb206be0825ebaf0dad128b9eb24908705"
] | [
"examples/dataset.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on December 30, 2020\n\n@author: Siqi Miao\n\"\"\"\n\nimport torch\nfrom torch_sparse import SparseTensor\nimport torch_geometric.transforms as T\n\nfrom pathlib2 import Path\nimport scipy.io as sio\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom skmultilearn.model_selection import iterative_train_test_split\nfrom ogb.nodeproppred import PygNodePropPredDataset, Evaluator\n\n\nclass Dataset(object):\n def __init__(self, root, name, make_edge_index=False):\n\n self.root = root\n self.name = name\n self.make_edge_index = make_edge_index\n\n self.num_classes = None\n self.split_idx = None\n self.x = None\n self.y = None\n self.adj_t = None\n self.edge_index = None\n self.num_nodes = None\n self.criterion = None\n self.metric = None\n\n self.heterophily_dataset = ['chameleon', 'actor']\n\n if name == 'ogb':\n self.setup_ogb()\n elif name == 'wiki':\n self.setup_wiki()\n elif name in self.heterophily_dataset:\n self.setup_geom()\n else:\n raise KeyboardInterrupt\n\n def setup_ogb(self):\n\n dataset = PygNodePropPredDataset(name='ogbn-arxiv', root=self.root, transform=T.ToSparseTensor())\n data = dataset[0]\n\n self.metric = 'Accuracy'\n self.num_classes = dataset.num_classes\n self.split_idx = dataset.get_idx_split()\n\n self.x = data.x\n self.y = data.y\n self.adj_t = data.adj_t.to_symmetric()\n self.num_nodes = data.num_nodes\n\n if self.make_edge_index:\n row = self.adj_t.storage.row()\n col = self.adj_t.storage.col()\n self.edge_index = torch.stack((row, col), dim=0)\n\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def setup_wiki(self):\n\n mat = sio.loadmat(self.root / 'wiki' / 'POS.mat')\n\n self.metric = 'MicroF1'\n self.num_nodes = 4777\n self.num_classes = 40\n\n adj_t = mat['network'].tocoo()\n self.adj_t = SparseTensor(row=torch.LongTensor(adj_t.row), col=torch.LongTensor(adj_t.col),\n sparse_sizes=(self.num_nodes, self.num_nodes))\n\n if self.make_edge_index:\n row = self.adj_t.storage.row()\n col = self.adj_t.storage.col()\n self.edge_index = torch.stack((row, col), dim=0)\n\n self.y = torch.from_numpy(mat['group'].todense()).float()\n idx = torch.arange(self.y.shape[0]).view(-1, 1)\n train_idx, _, test_idx, _ = iterative_train_test_split(idx, self.y, test_size=0.1)\n self.split_idx = {'train': train_idx.view(-1), 'valid': test_idx.view(-1), 'test': test_idx.view(-1)}\n\n self.criterion = torch.nn.BCEWithLogitsLoss() # for multi-label classification\n\n def setup_geom(self):\n edge_file = self.root / self.name / 'out1_graph_edges.txt'\n feature_label_file = self.root / self.name / 'out1_node_feature_label.txt'\n\n self.metric = 'Accuracy'\n\n edges = edge_file.open('r').readlines()[1:]\n edges = torch.LongTensor([(lambda x: [int(x[0]), int(x[1])])(edge.strip().split('\\t')) for edge in edges])\n self.num_nodes = torch.max(edges).item() + 1\n self.adj_t = SparseTensor(row=torch.LongTensor(edges[:, 0]), col=torch.LongTensor(edges[:, 1]),\n sparse_sizes=(self.num_nodes, self.num_nodes))\n # self.adj_t = self.adj_t.to_symmetric()\n\n if self.make_edge_index:\n self.edge_index = edges.t()\n\n idx = []\n x = []\n y = []\n xy = feature_label_file.open('r').readlines()[1:]\n for line in xy:\n node_id, feature, label = line.strip().split('\\t')\n idx.append(int(node_id))\n\n if self.name == 'actor':\n one_hot = torch.zeros(932)\n pos_with_ones = list(map(int, feature.split(',')))\n one_hot[pos_with_ones] = 1\n x.append(one_hot.int().tolist())\n else:\n x.append(list(map(int, 
feature.split(','))))\n y.append(int(label))\n\n _, indices = torch.sort(torch.LongTensor(idx))\n self.x = torch.LongTensor(x)[indices]\n self.y = torch.LongTensor(y).view(-1, 1)[indices]\n self.num_classes = torch.max(self.y).item() + 1\n\n idx = torch.arange(self.y.shape[0]).view(-1, 1)\n train_idx, val_test_idx = train_test_split(idx, test_size=0.4, stratify=self.y)\n val_idx, test_idx = train_test_split(val_test_idx, test_size=0.5, stratify=self.y[val_test_idx.squeeze()])\n self.split_idx = {'train': train_idx.view(-1), 'valid': val_idx.view(-1), 'test': test_idx.view(-1)}\n\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def eval(self, y_true, logits, split_idx):\n\n if self.name == 'ogb':\n evaluator = Evaluator(name='ogbn-arxiv')\n y_pred = logits.argmax(dim=1, keepdim=True)\n train_acc = evaluator.eval({\n 'y_true': y_true[split_idx['train']],\n 'y_pred': y_pred[split_idx['train']],\n })['acc']\n valid_acc = evaluator.eval({\n 'y_true': y_true[split_idx['valid']],\n 'y_pred': y_pred[split_idx['valid']],\n })['acc']\n test_acc = evaluator.eval({\n 'y_true': y_true[split_idx['test']],\n 'y_pred': y_pred[split_idx['test']],\n })['acc']\n return train_acc, valid_acc, test_acc\n\n elif self.name == 'wiki':\n y_pred = torch.sigmoid(logits) > 0.5\n train_f1 = f1_score(y_true[split_idx['train']], y_pred[split_idx['train']], average='micro')\n valid_f1 = f1_score(y_true[split_idx['valid']], y_pred[split_idx['valid']], average='micro')\n test_f1 = f1_score(y_true[split_idx['test']], y_pred[split_idx['test']], average='micro')\n return train_f1, valid_f1, test_f1\n\n elif self.name in self.heterophily_dataset:\n y_pred = logits.argmax(dim=1, keepdim=True)\n train_acc = accuracy_score(y_true[split_idx['train']], y_pred[split_idx['train']])\n valid_acc = accuracy_score(y_true[split_idx['valid']], y_pred[split_idx['valid']])\n test_acc = accuracy_score(y_true[split_idx['test']], y_pred[split_idx['test']])\n return train_acc, valid_acc, test_acc\n\n\nif __name__ == '__main__':\n data = Dataset(root=Path('../dataset'), name='ogb', make_edge_index=True)\n"
] | [
[
"scipy.io.loadmat",
"torch.stack",
"torch.nn.CrossEntropyLoss",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"torch.arange",
"torch.nn.BCEWithLogitsLoss",
"torch.max",
"torch.zeros",
"torch.sigmoid",
"torch.LongTensor",
"sklearn.model_selection.train_test_split"
]
] |
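`setup_geom` in the entry above builds a 60/20/20 node split by calling `train_test_split` twice with stratification. A compact sketch of that two-stage split on toy labels (the labels and sizes here are made up for illustration):

```python
import numpy as np
from sklearn.model_selection import train_test_split

y = np.repeat(np.arange(4), 25)          # 100 toy nodes, 4 balanced classes
idx = np.arange(len(y))

# 60% train, then the remaining 40% halved into valid/test, both stratified.
train_idx, valtest_idx = train_test_split(idx, test_size=0.4, stratify=y, random_state=0)
val_idx, test_idx = train_test_split(valtest_idx, test_size=0.5,
                                     stratify=y[valtest_idx], random_state=0)

split_idx = {"train": train_idx, "valid": val_idx, "test": test_idx}
print({k: len(v) for k, v in split_idx.items()})   # {'train': 60, 'valid': 20, 'test': 20}
```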
yinonbaron/biomass_distribution | [
"783a8d2f59754bde9b0ea802512b131abbe7d8a0"
] | [
"plants/non_wood_biomass/non_wood_biomass.py"
] | [
"\n# coding: utf-8\n\n# # Estimating the fraction of plant biomass which is not woody\n# To estimate the total non-woody plant biomass, we rely on two methods. The first is to estimate the global average leaf and root mass fractions, and the second is by estimating the total biomass of roots and leaves.\n# \n# ## Method1 - fraction of leaves and roots\n# To estimate the global average leaf and root mass fractions, we rely on a recent meta-analysis which collected data on the lead, shoot and root mass fractions in several different biomes ([Poorter et al.](http://dx.doi.org/10.1111/j.1469-8137.2011.03952.x)). Here are the mean leaf, shoot, and root mass fractions in each biome:\n\n# In[1]:\n\n# Load dependencies\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import gmean\nimport sys\nsys.path.insert(0,'../../statistics_helper/')\nfrom fraction_helper import *\n\n# Load data from Poorter et al.\nfractions = pd.read_excel('non_wood_biomass_data.xlsx','Poorter',skiprows=1,index_col=0)\nfractions\n\n\n# We calculate weighted mean of leaf and root mass fractions. We use the fraction of total plant biomass in each biome as our weights from [Erb et al.](http://dx.doi.org/10.1038/ngeo2782) for the weighted mean. Here is the data from Erb et al.:\n\n# In[2]:\n\n# Load data on the total plant biomass in each biome from Erb et al.\nbiomes = pd.read_excel('non_wood_biomass_data.xlsx','Erb',skiprows=1)\nbiomes\n\n\n# The specific biomes in Erb et al. are not fully matching the biomes in Poorter et al., and thus we traslate between the biomes in the two studies:\n\n# In[3]:\n\n# Calculate the sum of the mass fractions of leaves and roots\nnon_wood_frac = (fractions['LMF']+fractions['RMF'])/fractions.sum(axis=1)\n\n# Calculate the total biomass of each biome by the biomes reported in Poorter et al.\ntot_biomass = biomes.groupby('Categories included in Poorter').sum()\n\n# For the temperate steppe, desert and mountain, we use the mean values from grassland and shrubland in Poorter et al.\nnon_wood_frac.loc['Grassland, shrubland'] = frac_mean(np.array([non_wood_frac.loc['Grassland'],non_wood_frac.loc['Shrubland']]))\n\n\n# Set the non-woody fraction as a column in the biome data\ntot_biomass['Non wood fraction'] = non_wood_frac\n\n# Calculate the weighed average of the non-woody biomass fraction\nmean_non_wood_frac = np.average(tot_biomass['Non wood fraction'], weights= tot_biomass['Total biomass [Gt C]'])\nprint('Our global average for non-woody mass fraction is ≈%.0f percent' %(mean_non_wood_frac*100))\n\n\n# Our estimate of the total non-woody plant biomass is the product of our best estimate of the total plant biomass and our estimate of the global average non-woody mass fraction:\n\n# In[4]:\n\n\n# Our best estimate for the total biomass\ntot_plant_biomass = 450e15\n\n# Multiply our estimate for the non-woody mass fraction by our estimate\n# of the total plant biomass\nmethod1_non_wood_biomass = mean_non_wood_frac*tot_plant_biomass\n\nprint('Our best estimate for the total non-wood plant biomass based on the fraction of roots and leaves is ≈%.0f Gt C' %(method1_non_wood_biomass/1e15))\n\n\n# ## Method2 - total biomass of leaves and roots\n# Our second method for estimating the total non-woody plant biomass is based on estimating the total biomass of roots and leaves. 
For roots, we rely on the estimate made by [Jackson et al.](http://dx.doi.org/10.1007/BF00333714):\n\n# In[5]:\n\nroots_jackson = 146e15\n\n\n# To estimate the total biomass of leaves, we rely on biome averages on the leaf area index (LAI) from [Asner et al.](http://dx.doi.org/10.1046/j.1466-822X.2003.00026.x). Here is the data from Asner et al.:\n\n# In[6]:\n\nbiome_LAI = pd.read_excel('non_wood_biomass_data.xlsx','Asner',skiprows=1,index_col=0)\nbiome_LAI\n\n\n# We use data on the area on each biome from the book \"Biogeochemistry\", and multiply the LAI in each biome by the total area of each biome to estimate the global leaf area:\n\n# In[7]:\n\n# Load biome area data\nbiome_area = pd.read_excel('non_wood_biomass_data.xlsx','Biome area',skiprows=1,index_col=0)\n\n# Calculate the mean LAI for boreal forests\nbiome_LAI.loc['Boreal forest'] = gmean(biome_LAI.loc[['Boreal DBL','Boreal ENL']])\n\n# Calculate the mean LAI for temperate forests\nbiome_LAI.loc['Temperate forest'] = gmean(biome_LAI.loc[['Temperate DBL','Temperate EBL','Temperate ENL']])\n\n# Calculate the mean LAI for tropical forests\nbiome_LAI.loc['Tropical forest'] = gmean(biome_LAI.loc[['Tropical DBL','Tropical EBL']])\n\n# Calculate the mean LAI for temperate grasslands\nbiome_LAI.loc['Temperate grassland'] = biome_LAI.loc['Grassland']\n\n# Calculate the mean LAI for tropical savanna\nbiome_LAI.loc['Tropical savanna'] = gmean(biome_LAI.loc[['Grassland','Shrubland']])\n\n# Multiply the mean LAI in each biome by the total area of each biome\ntot_leaf_area = (biome_LAI['LAI [m^2 m^-2]']*biome_area['Area [m^2]']).sum()\nprint('Our estimate for the total leaf area is ≈%.1e m^2' % tot_leaf_area)\n\n\n# To convert the total leaf area into total biomass of leaves, we use an estimate for the average leaf mass per area (LMA) from the Glopnet database [Wright et al.](http://dx.doi.org/10.1038/nature02403):\n\n# In[8]:\n\n# Load the glopnet data\nglopnet_data = pd.read_excel('non_wood_biomass_data.xlsx','glopnet_data')\n\n# Calculate the geometric mean of the LMA\ngeomean_LMA = 10**glopnet_data.loc[glopnet_data['GF']=='T',['log LMA']].mean()\n\n# Convert the global leaf area to global leaf biomass\ntot_leaf_biomass = tot_leaf_area*geomean_LMA/2\n\nprint('Our estimate for the global leaf biomass is ≈%.1f Gt C' %(tot_leaf_biomass/1e15))\n\n\n# We sum our estimates for the total biomass of roots and leaves to produce our estimate of the total non-woody plant biomass:\n\n# In[9]:\n\nmethod2_non_wood_biomass = tot_leaf_biomass + roots_jackson\nprint('Our best estimate for the total non-wood plant biomass based on estimates of the total biomass of roots and leaves is ≈%.0f Gt C' %(method2_non_wood_biomass/1e15))\n\n\n# We use the geometric mean of our estimates from the two methods as our best estimate for the total non-woody plant biomass:\n\n# In[10]:\n\nbest_non_wood_biomass = gmean([method1_non_wood_biomass,method2_non_wood_biomass])\nprint('Our best estimate for the total non-wood plant biomass is ≈%.0f Gt C' %(best_non_wood_biomass/1e15))\n\n\n# # Estimating the total belowground plant biomass\n# To estimate the total belowground plant biomass, we use the same procedure as for estimating the total non-woody plant biomass. 
We rely on two methods - the first is based on calculating the mean root mass fraction.\n# ## Method1 - fraction of roots\n# To estimate the global average root mass fractions, we rely on a recent meta-analysis which collected data on the lead, shoot and root mass fractions in several different biomes ([Poorter et al.](http://dx.doi.org/10.1111/j.1469-8137.2011.03952.x)). We calculate the global average root mass fraction by taking into account the relative plant biomass present in each biome, based on data from [Erb et al.](http://dx.doi.org/10.1038/ngeo2782).\n\n# In[11]:\n\n# Calculate the root mass fraction in each biome based on data from Poorter et al.\nroot_frac = (fractions['RMF'])/fractions.sum(axis=1)\n\n# For the temperate steppe, desert and mountain, we use the mean values from grassland and shrubland in Poorter et al.\nroot_frac.loc['Grassland, shrubland'] = frac_mean(np.array([root_frac.loc['Grassland'],root_frac.loc['Shrubland']]))\n\n\n# Set the root fraction as a column in the biome data\ntot_biomass['Root fraction'] = root_frac\n\n# Calculate the weighted average root mass fraction\nmean_root_frac = np.average(tot_biomass['Root fraction'], weights= tot_biomass['Total biomass [Gt C]'])\n\nprint('Our estimate for the global average root mass fraction is ≈%.1f percent' %(mean_root_frac*100))\n\n\n# To estimate the total biomass of roots, we multiply the global average root mass fraction by our best estimate for the total plant biomass:\n\n# In[12]:\n\nmethod1_root_biomass = mean_root_frac*tot_plant_biomass\n\nprint('Our estimate of the total root biomass based on the global average root mass fraction is ≈%0.1f Gt C' %(method1_root_biomass/1e15))\n\n\n# As a second source for estimating the global biomass of roots, we rely on the estimate in [Jackson et al.](http://dx.doi.org/10.1007/BF00333714). We use the geometric mean of the estimate from the two methods as our best estimate of the total biomass of roots, which we use as our best estimate for the total belowground plant biomass:\n\n# In[13]:\n\nbest_root_biomass = gmean([method1_root_biomass,roots_jackson])\n\nprint('Our best estimate for the total belowground plant biomass is ≈%0.1f Gt C' %(best_root_biomass/1e15))\n\n"
] | [
[
"numpy.array",
"pandas.read_excel",
"numpy.average",
"scipy.stats.gmean"
]
] |
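The notebook above combines a biomass-weighted mean fraction (method 1) with an independent absolute estimate (method 2) through a geometric mean. The sketch below shows only that arithmetic; all numbers are placeholders, not the values used in the notebook.

```python
import numpy as np
from scipy.stats import gmean

non_wood_frac = np.array([0.60, 0.25, 0.70])    # per-biome non-woody fraction (made up)
biome_biomass = np.array([300.0, 100.0, 50.0])  # per-biome plant biomass [Gt C] (made up)
total_plant_biomass = 450.0                     # Gt C

# Method 1: biomass-weighted mean fraction scaled by the total plant biomass.
mean_frac = np.average(non_wood_frac, weights=biome_biomass)
method1 = mean_frac * total_plant_biomass

# Method 2: an independent absolute estimate (also a placeholder here).
method2 = 190.0

best = gmean([method1, method2])                # combine the two methods
print(round(mean_frac, 3), round(method1, 1), round(best, 1))
```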
Davidyz/AutoStacker | [
"9f637891b9379b166e41597bcd44a8011561beea"
] | [
"modules/algo.py"
] | [
"import numpy as np\n\nfrom modules.imageRW import Image\nfrom typing import Iterator, Optional, List\nfrom __future__ import annotations\n\nclass InputException(Exception):\n pass\n\ndef mean(images: Iterator[Image], group_size: int) -> Iterator[Image|None]:\n stackImage: Image|None = None\n while True:\n try:\n i = next(images)\n for j in range(group_size):\n if stackImage:\n stackImage += i / group_size\n else:\n stackImage = np.zeros(i.shape, dtype=np.uint32).view(Image)\n stackImage += i.copy() / group_size\n stackImage.setExif(i.exif)\n yield stackImage\n stackImage = None\n except StopIteration:\n break\n\ndef maxBright(images: Iterator[Image], group_size: int) -> Iterator[Image|None]:\n stackImage: Image|None = None\n\n while True:\n try:\n i = next(images)\n for j in range(group_size):\n if stackImage:\n stackImage = np.array(np.maximum(stackImage, i)).view(Image)\n else:\n stackImage = np.zeros(i.shape, dtype=np.uint32).view(Image)\n stackImage = np.array(np.maximum(stackImage, i)).view(Image)\n stackImage.setExif(i.exif)\n yield stackImage\n stackImage = None\n except StopIteration:\n break\n\ndef mode(images: Iterator[Image]) -> List[Image]:\n modeArray = []\n return modeArray\n\nALGORITHMS = {'mean': mean,\n 'max': maxBright}\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.maximum",
"numpy.zeros"
]
] |
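The `mean` and `maxBright` generators above stack a stream of frames in fixed-size groups. A simplified numpy-only version of that idea, without the project's `Image` class or EXIF handling (both the helper name and the demo data are assumptions):

```python
import numpy as np

def stack_groups(frames, group_size, reducer="max"):
    """Yield one stacked frame per group of `group_size` inputs, reducing
    each group with either a per-pixel mean or a per-pixel maximum."""
    frames = iter(frames)
    while True:
        group = [f for _, f in zip(range(group_size), frames)]
        if not group:
            return
        block = np.stack(group).astype(np.float64)
        yield block.mean(axis=0) if reducer == "mean" else block.max(axis=0)

# Six 2x2 "images" stacked in groups of three.
demo = (np.full((2, 2), i, dtype=np.uint8) for i in range(6))
for stacked in stack_groups(demo, 3):
    print(stacked)        # first group -> all 2s, second group -> all 5s
```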
zoi-mibtp/pyDNase | [
"047d2f89af6109a530505b370782c4841d710cbf"
] | [
"pyDNase/scripts/dnase_average_profile.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport pyDNase\nimport numpy as np\nimport matplotlib as mpl\nfrom clint.textui import progress, puts\n#Required for headless operation\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\nparser = argparse.ArgumentParser(description='Plots average profile of DNase (or Tn5 for ATAC-seq) activity surrounding a list of regions in a BED file')\nparser.add_argument(\"-w\", \"--window_size\", help=\"Size of flanking area around centre of the regions to plot (default: 100)\",default=100,type=int)\nparser.add_argument(\"-bf\", \"--bias-file\", help=\"Location of the sorted, index\",default = None,type=str)\nparser.add_argument(\"-i\",action=\"store_true\", help=\"Ignores any strand information in BED file and plots data relative to reference strand\",default=False)\nparser.add_argument(\"-c\",action=\"store_true\", help=\"Combine the strand information into one graph\",default=False)\nparser.add_argument(\"-n\",action=\"store_true\", help=\"Normalise cut counts to a fraction peaks\",default=False)\nparser.add_argument(\"-b\",action=\"store_true\", help=\"Normalise for cutting bias\",default=False)\nparser.add_argument(\"-A\",action=\"store_true\", help=\"ATAC-seq mode\",default=False)\nparser.add_argument(\"regions\", help=\"BED file of the regions you want to generate the average profile for\")\nparser.add_argument(\"reads\", help=\"The BAM file containing the DNase-seq data\")\nparser.add_argument(\"output\", help=\"filename to write the output to\")\nargs = parser.parse_args()\n\nreads = pyDNase.BAMHandler(args.reads,ATAC=args.A)\nif args.b:\n if args.bias_file != None:\n freads = pyDNase.BAMHandlerWithBias(pyDNase.FASTAHandler(args.bias_file),args.reads,ATAC=args.A)\n else:\n raise ValueError(\"No FASTA file provided for bias correction!\")\nregions = pyDNase.GenomicIntervalSet(args.regions)\n\n\n\n#Set all strands to positive if \"ignore strands\" is enabled\nif args.i:\n for each in regions:\n each.strand = \"+\"\n\nputs(\"Resizing Regions to {0}\".format(args.window_size))\nregions.resizeRegions(args.window_size)\n\nfw = []\nrv = []\nputs(\"Reading Data from BAM file...\")\nfor each in progress.bar(regions):\n if sum(reads[each][\"+\"]) and sum(reads[each][\"-\"]):\n if args.b:\n try:\n fw.append(np.divide(reads[each][\"+\"],freads[each][\"+\"]))\n rv.append(np.divide(reads[each][\"-\"],freads[each][\"-\"]))\n except Exception:\n pass\n else:\n fw.append(reads[each][\"+\"])\n rv.append(reads[each][\"-\"])\n\nif args.n:\n fw = [list(map(float,i))for i in fw]\n rv = [list(map(float,i)) for i in rv]\n fw = [np.divide(np.subtract(i, min(i)), np.subtract(max(i) , min(i))) for i in fw]\n rv = [np.divide(np.subtract(i, min(i)), np.subtract(max(i) , min(i))) for i in rv]\n\nif args.c:\n plt.plot(np.add(np.mean(fw,axis=0),np.mean(rv,axis=0)),c=\"red\")\nelse:\n plt.plot(np.mean(fw,axis=0),c=\"red\")\n plt.plot(np.mean(rv,axis=0),c=\"blue\")\n\n#Pad the axis out reads bit\nrcParams['xtick.major.pad'] = 20 \nrcParams['ytick.major.pad'] = 20\n\n#Sort out the X axis ticks\nticks = [0,args.window_size,args.window_size*2]\nlabels = [-args.window_size,0,args.window_size]\nplt.xticks(ticks, labels)\n\n#Make the yaxis start from 0\nplt.gca().set_ylim(0)\n\n#Makes ticks only appear on the left hand side\nplt.gca().yaxis.set_ticks_position('left')\n\n#Remove top and right borders\nplt.gca().spines['top'].set_visible(False)\nplt.gca().spines['right'].set_visible(False)\n\nplt.gca().tick_params(axis='both', which='major', labelsize=28, 
pad=12)\n\nif args.bias_file:\n plt.gca().set_ylabel('Average DNase Activity\\n (Observed/Expected)',size=\"32\", multialignment='center')\nelse:\n if args.A:\n plt.gca().set_ylabel('Average Tn5 integrations',size=\"26\", multialignment='center')\n else:\n plt.gca().set_ylabel('Average DNase activity',size=\"26\", multialignment='center')\nplt.savefig(args.output,bbox_inches='tight')\n"
] | [
[
"numpy.divide",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.use",
"numpy.mean"
]
] |
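The `-n` option in the script above rescales each region's cut-count profile before averaging, so deep and shallow regions contribute equally. A small sketch of that min-max normalisation (toy profiles only; it assumes each profile is not constant, otherwise the denominator would be zero):

```python
import numpy as np

def minmax(profile):
    """Rescale a per-base cut-count profile to the [0, 1] range."""
    profile = np.asarray(profile, dtype=float)
    return (profile - profile.min()) / (profile.max() - profile.min())

profiles = [np.array([0, 2, 8, 2, 0]), np.array([10, 30, 90, 30, 10])]
average = np.mean([minmax(p) for p in profiles], axis=0)
print(average)   # both profiles now carry equal weight in the average
```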
remichartier/014_selfDrivingCarND_BehavioralCloningProject | [
"1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff"
] | [
"archiveOldVersions/generator_v02.py"
] | [
"#!/usr/bin/env python\n\n# History\n# v01 : adaptation from the one given by Udacity to work\n# v02 : adapt to commonFunctions_v10.py to use generator.\n# Start adding again everything from model_v12.py (image augmentation)\n\nimport os\nimport csv\nimport cv2\nimport numpy as np\nimport sklearn\n\nfrom math import ceil\nfrom random import shuffle\nfrom sklearn.model_selection import train_test_split\n\nfrom commonFunctions_v10 import get_lines_logfile \nfrom commonFunctions_v10 import get_info_from_lines\nfrom commonFunctions_v10 import flip_horizontally\n\nSTEER_CORRECTION_FACTOR = 0.2 # to tune up for left and right images/measurements\n\n# Set our batch size for fit generator\nbatch_len= 6\n\n# Reading CSV file, extracting lines.\nsamples = get_lines_logfile()\n\ntrain_samples, validation_samples = train_test_split(samples[1:], test_size=0.2)\n\n\ndef generator(samples, batch_size=batch_len):\n num_samples = len(samples)\n # print('num_samples : {}'.format(num_samples))\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n # correction : should go only until min(num_samples,offset+batch_size)\n batch_samples = samples[offset: min(num_samples,offset+batch_size)]\n\n # here will get both center, left, right images + their measurements.\n # if batch_size = 32 --> 32*3 = 96 images ....\n images, angles = get_info_from_lines(batch_samples,STEER_CORRECTION_FACTOR,nb_images=None)\n # data augmentation flip horizontally image + inverse measurements\n augm_images, augm_measurements = flip_horizontally(images,angles)\n images.extend(augm_images)\n angles.extend(augm_measurements)\n \n # Nvidia : need to convert images in YUV ...\n images = RGB2YUV(images)\n \n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# Set our batch size (*3 due to image center + left + right ....), then *2 due to flip of each images\nbatch_size=batch_len*3*2 #6*3*2 = 36 ....\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation, Dropout\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=(160,320,3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, \n steps_per_epoch=ceil(len(train_samples)/batch_size), \n validation_data=validation_generator, \n validation_steps=ceil(len(validation_samples)/batch_size), \n epochs=5, verbose=1)"
] | [
[
"numpy.array",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle"
]
] |
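The generator above feeds Keras `fit_generator` with shuffled batches and augments each batch by horizontal flipping. The sketch below keeps just that batching-plus-flip pattern with plain numpy arrays; the array shapes and names are illustrative assumptions.

```python
import numpy as np
from sklearn.utils import shuffle

def batch_generator(images, angles, batch_size=32):
    """Endless generator of shuffled (X, y) batches; every batch also gets a
    horizontally flipped copy of each image with the steering angle negated."""
    images = np.asarray(images)
    angles = np.asarray(angles, dtype=float)
    n = len(images)
    while True:
        images, angles = shuffle(images, angles)
        for start in range(0, n, batch_size):
            x = images[start:start + batch_size]
            y = angles[start:start + batch_size]
            x_aug = np.concatenate([x, x[:, :, ::-1, :]])   # flip the width axis
            y_aug = np.concatenate([y, -y])
            yield x_aug, y_aug

gen = batch_generator(np.random.rand(10, 8, 8, 3), np.linspace(-1, 1, 10), batch_size=4)
xb, yb = next(gen)
print(xb.shape, yb.shape)   # (8, 8, 8, 3) (8,)
```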
DanPorter/babelscan | [
"71fa43f13a8318efbcdb412c4fca533d4b6f9ec9"
] | [
"babelscan_unit_test.py"
] | [
"\"\"\"\nUnit test for babelscan\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport babelscan\n\n\nprint('####################################################')\nprint('############## babelscan unit tests ################')\nprint('####################################################')\nprint('\\n')\nprint(babelscan.module_info())\n\nfile = r\"C:\\Users\\dgpor\\Dropbox\\Python\\ExamplePeaks\\810002.nxs\" # eta scan with pilatus\ncv_file = r\"C:\\Users\\dgpor\\Dropbox\\Python\\ExamplePeaks\\857991.nxs\" # trajectory scan/ cvscan/ kthZebra\nim_file = r'C:\\\\Users\\\\dgpor\\\\OneDrive - Diamond Light Source Ltd\\\\I16\\\\Nexus_Format\\\\example_nexus\\\\872996.nxs' # hkl scan with data\ndat_file = r'C:\\\\Users\\\\dgpor\\\\OneDrive - Diamond Light Source Ltd\\\\I16\\\\Nexus_Format\\\\example_nexus\\\\872996.dat'\ndatadir = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\" # eta scan with pilatus\nrsmap = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\\872996-pilatus3_100k-files\\rsmap_872996_201215_101906.nxs\"\ni10_file = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\I10_nexus\\i10-578596.nxs\"\ni06_file = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\I06_example\\227980.dat\"\n\n\nprint('\\n\\n############ File Type Tests ##############')\nprint('standard I16 eta scan:')\nscan = babelscan.file_loader(file)\nprint(scan)\nprint('\\nI16 CV scan:')\nscan = babelscan.file_loader(cv_file)\nprint(scan)\nprint('\\nI16 hkl scan:')\nscan = babelscan.file_loader(im_file)\nprint(scan)\nprint('\\nI16 .dat file:')\nscan = babelscan.file_loader(dat_file)\nprint(scan)\nprint('\\nI16 rsmap file:')\nscan = babelscan.file_loader(rsmap)\nprint(scan)\nprint('\\nI10 Nexus file:')\nscan = babelscan.file_loader(i10_file)\nprint(scan)\nprint('\\nI06 .dat file:')\nscan = babelscan.file_loader(i06_file, scan_command_name='command')\nprint(scan)\n\n\nprint('\\n\\n############ Missing count_time Tests ##############')\nscan = babelscan.file_loader(file, debug='all')\nscan.add2namespace(['count_time', 'counttime', 'Time', 't'], None, 'count_time')\nprint(scan)\nprint('\\n\\n')\nprint(scan('count_time'))\nprint('\\n\\n')\nprint(scan('nroi[31,31]'))\n\n\nprint('\\n\\n############### FolderMonitor Tests ################')\nexp = babelscan.FolderMonitor(datadir)\nscan = exp.scan(0)\nprint(scan)\n\n\nprint('\\n\\n##################### Plot Tests ###################')\nscan = exp.scan(794940)\nx, y, dy, xlab, ylab = scan.get_plot_data('axes', 'nroi_peak[31,31]', '/count_time/Transmission', 'np.sqrt(x+0.1)')\n\nplt.figure()\nplt.errorbar(x, y, dy, fmt='-o')\nplt.xlabel(xlab)\nplt.ylabel(ylab)\nplt.title(scan.title())\n\nscan.plot.plot_image('sum', clim=[0, 100])\nplt.show()\n\nprint('\\n\\n##################### Fit Tests ###################')\nscan = exp(877619) # merlin\nscan.fit('axes', 'nroi_peak[31, 31]')\nscan.plot('axes', ['nroi_peak[31, 31]', 'fit'])\nprint(scan.string('amplitude'))\n\nscan = exp.scan(794940) # multipeak\nscan.fit.multi_peak_fit(npeaks=2)\nscan.plot('axes', ['signal', 'fit', 'p1_fit', 'p2_fit', 'bkg_fit'])\nplt.show()\n\nprint('\\n\\n################# MultiScan Tests ##################')\nscan_range = range(794932, 794947, 1) # datadir, sperp, spara, eta scans\nscans = exp.scans(scan_range, ['sperp', 'spara'])\nprint(scans)\n\n\nprint('\\n\\n################### Volume Tests ###################')\nscan = 
babelscan.file_loader(im_file)\nvolume = scan.volume()\nprint('%r, %s' % (scan, scan.find_image()))\nprint(volume)\nprint(np.max(volume))\nprint(volume.peak_search())\n\nscan1 = babelscan.file_loader(dat_file)\nvolume1 = scan1.volume()\nprint('\\n%r' % scan1)\nprint(volume1)\nprint(np.max(volume1))\nprint(volume1.peak_search())\n\nscan2 = babelscan.file_loader(file)\nvolume2 = scan2.volume()\nprint('\\n%r, %s' % (scan2, scan2.find_image()))\nprint(volume2)\nprint(np.max(volume2))\nprint(volume2.peak_search())\n\nscan3 = babelscan.file_loader(rsmap)\nvolume3 = scan3.volume()\nprint('\\n%r, %s' % (scan3, scan3.find_image()))\nprint(volume3)\nprint(np.max(volume3))\nprint(volume3.peak_search())\n\n# Volume plot\nvolume2.plot()\nam = np.array(volume2.argmax())\nprint('Volume argmax:', am, am - (10, 10, 10), am + (10, 10, 10))\nfrom babelscan.plotting_matplotlib import create_axes, labels\nax = create_axes()\nvolume2.plot.cut(am-(10,10,10), am+(10,10,10), axes=ax)\nlabels('Volume', 'pixels', 'value', legend=True, axes=ax)\nplt.show()\n\n\nprint('\\n\\n#################### Time Tests ####################')\nallscan = exp.allscannumbers()\nfor scn in allscan:\n scan = exp.scan(scn)\n scan.options(start_time_name=['start_time', 'TimeSec'], end_time_name=['end_time', 'TimeSec'])\n scan.add2namespace(['counttime', 'Time', 't'], other_names='count_time', default_value=0)\n start_time = scan.time_start()\n duration = scan.duration()\n print('#%s start: %s, duration: %s' % (scn, start_time, duration))\n\n\nprint('\\n\\n#################### .dat Tests ####################')\nexp.set_format('%d.dat')\nallscan = exp.allscannumbers()\nfor scn in allscan:\n scan = exp.scan(scn)\n scan.options(start_time_name=['start_time', 'TimeSec'], end_time_name=['end_time', 'TimeSec'])\n scan.add2namespace(['counttime', 'Time', 't'], other_names='count_time', default_value=0)\n start_time = scan.time_start()\n duration = scan.duration()\n print(scan)\n print('#%s start: %s, duration: %s' % (scn, start_time, duration))\n\n\nprint('\\n\\n########## More FolderMonitor Tests ################')\nexp = babelscan.FolderMonitor(datadir)\n# Add options\nexp.options(\n str_list=['scan_number', 'scan_command', 'axes', 'signal', 'start_time', 'end_time', 'count_time'],\n start_time_name=['start_time', 'TimeSec'],\n end_time_name=['end_time', 'TimeSec'],\n names={'count_time': ['Time', 'counttime', 't']},\n defaults={'count_time': 0, 'start_time': None, 'end_time': None}\n)\nallfiles = exp.allscanfiles()\nfor f in allfiles:\n print(exp.scan(f))\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.max",
"matplotlib.pyplot.xlabel"
]
] |
li-phone/DetectionCompetition | [
"a917f16790ec30358e3cfe1aa6e327a2070a1235"
] | [
"mmdet-v2/tools/third_party/useless/cocoutils/coco_check.py"
] | [
"import os\nimport json\nimport cv2 as cv\nimport numpy as np\nfrom tqdm import tqdm\n\ntry:\n from pandas import json_normalize\nexcept:\n from pandas.io.json import json_normalize\n\n\ndef load_dict(fname):\n with open(fname, \"r\") as fp:\n o = json.load(fp, )\n return o\n\n\ndef save_dict(fname, d, mode='w', **kwargs):\n # 持久化写入\n with open(fname, mode, encoding='utf-8') as fp:\n # json.dump(d, fp, cls=NpEncoder, indent=1, separators=(',', ': '))\n json.dump(d, fp, **kwargs)\n\n\ndef get_segmentation(points):\n return [points[0], points[1], points[2] + points[0], points[1],\n points[2] + points[0], points[3] + points[1], points[0], points[3] + points[1]]\n\n\ndef check_coco(src, dst, img_dir=None, replace=True):\n if not replace:\n print('There is an existed {}.'.format(dst))\n return\n coco = load_dict(src)\n cats = json_normalize(coco['categories'])\n cats = cats.sort_values(by='id')\n coco['categories'] = cats.to_dict('records')\n\n imgs = json_normalize(coco['images'])\n if 'image_id' in list(imgs.columns):\n imgs = imgs.rename(columns={'image_id': 'id'})\n imgs['file_name'] = imgs['file_name'].apply(lambda x: os.path.basename(x))\n imgs = imgs.sort_values(by='id')\n coco['images'] = imgs.to_dict('records')\n\n if 'annotations' in coco:\n anns = json_normalize(coco['annotations'])\n else:\n ann_fakes = [\n {\"area\": 100, \"iscrowd\": 0, \"image_id\": image['id'], \"bbox\": [0, 0, 10, 10], \"category_id\": 1, \"id\": 1}\n for image in coco['images']\n ]\n anns = json_normalize(ann_fakes)\n anns['id'] = list(range(anns.shape[0]))\n anns = anns.to_dict('records')\n for v in anns:\n if 'segmentation' not in v:\n seg = get_segmentation(v['bbox'])\n v['segmentation'] = [[float(_) for _ in seg]]\n coco['annotations'] = anns\n # check image shape\n if img_dir is not None:\n for i, v in tqdm(enumerate(coco['images'])):\n if os.path.exists(os.path.join(img_dir, v['file_name'])):\n img_ = cv.imread(os.path.join(img_dir, v['file_name']))\n height_, width_, _ = img_.shape\n else:\n height_, width_, _ = None, None, 3\n v['width'] = width_\n v['height'] = height_\n save_dict(dst, coco)\n print('check_coco done!')\n return dst\n\n\ndef check_box(coco, save_name, img_dir):\n if isinstance(coco, str):\n coco = load_dict(coco)\n images = {v['id']: v for v in coco['images']}\n cat2label = {v['id']: v['name'] for v in coco['categories']}\n annotations = {v['id']: v for v in coco['annotations']}\n error_boxes = []\n for k, v in annotations.items():\n b = v['bbox']\n image = images[v['image_id']]\n if not (0 <= b[0] <= image['width'] and 0 <= b[1] <= image['height'] and b[2] > 0 and b[3] > 0 \\\n and 0 <= b[0] + b[2] <= image['width'] and 0 <= b[1] + b[3] <= image['height']):\n error_boxes.append(v['id'])\n from draw_box import DrawBox\n draw = DrawBox(len(cat2label))\n\n def save_coco():\n coco['annotations'] = [v for k, v in annotations.items()]\n save_dict(save_name, coco)\n print('save done!')\n\n def help():\n print('Q: quit, Z: save, X: delete, *: stride\\n' \\\n 'W: up, A: left, S: down, D: right\\n' \\\n 'L: box left, R: box right, T: box top, B: box bottom\\n')\n\n stride = 10\n while len(error_boxes) > 0:\n print('error boxes size: ', len(error_boxes))\n v = annotations[error_boxes[0]]\n b = v['bbox']\n b = [b[0], b[1], b[2] + b[0], b[3] + b[1]]\n image = images[v['image_id']]\n src_img = cv.imread(os.path.join(img_dir, image['file_name']))\n cv.namedWindow('Error_Box', cv.WINDOW_NORMAL)\n direction = 0\n while True:\n img = draw.draw_box(src_img, [b], [cat2label[v['category_id']]])\n show_img 
= np.array(img).copy()\n cv.imshow(\"Error_Box\", show_img)\n key = cv.waitKeyEx(0)\n if key == 104:\n help()\n break\n elif key == 56:\n try:\n s = float(input('please input number: '))\n stride = s\n print('stride', stride)\n except:\n print('please input number!')\n elif key == 113:\n error_boxes.pop(0)\n break\n elif key == 120:\n ann_id = error_boxes[0]\n annotations.pop(ann_id)\n error_boxes.pop(0)\n b = [b[0], b[1], b[2] - b[0], b[3] - b[1]]\n v['bbox'] = b\n save_coco()\n break\n elif key == 122:\n error_boxes.pop(0)\n b = [b[0], b[1], b[2] - b[0], b[3] - b[1]]\n v['bbox'] = b\n save_coco()\n break\n elif key == 108:\n direction = 0\n elif key == 116:\n direction = 1\n elif key == 114:\n direction = 2\n elif key == 98:\n direction = 3\n elif key == 97:\n b[direction] -= stride\n b[direction] = max(b[direction], 0)\n elif key == 119:\n b[direction] -= stride\n b[direction] = max(b[direction], 0)\n elif key == 100:\n b[direction] += stride\n b[direction] = min(b[direction], show_img.shape[1])\n elif key == 115:\n b[direction] += stride\n b[direction] = min(b[direction], show_img.shape[0])\n save_coco()\n print('check_box done!')\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser(description='Check ann_file')\n parser.add_argument('ann_file', help='annotation file or test image directory')\n parser.add_argument('save_name', help='save_name')\n parser.add_argument('img_dir', help='img_dir')\n parser.add_argument('--check_type', default='coco,box', help='check_type')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n check_type = args.check_type.split(',')\n if 'coco' in check_type:\n args.ann_file = check_coco(args.ann_file, args.save_name, args.img_dir)\n if 'box' in check_type:\n check_box(args.ann_file, args.save_name, args.img_dir)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"pandas.io.json.json_normalize"
]
] |
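`check_coco` above backfills a four-corner `segmentation` polygon from each `[x, y, w, h]` box, and `check_box` flags boxes that fall outside their image. Both checks reduce to a few lines of arithmetic, sketched here with hypothetical helper names:

```python
def bbox_to_segmentation(bbox):
    """Turn a COCO-style [x, y, w, h] box into the flat 4-corner polygon
    [x1, y1, x2, y1, x2, y2, x1, y2] used in the 'segmentation' field."""
    x, y, w, h = bbox
    return [x, y, x + w, y, x + w, y + h, x, y + h]

def bbox_inside_image(bbox, width, height):
    """True if the box has positive size and lies fully inside the image."""
    x, y, w, h = bbox
    return w > 0 and h > 0 and x >= 0 and y >= 0 and x + w <= width and y + h <= height

print(bbox_to_segmentation([10, 20, 30, 40]))        # [10, 20, 40, 20, 40, 60, 10, 60]
print(bbox_inside_image([10, 20, 30, 40], 100, 50))  # False: 20 + 40 exceeds the height
```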
BaiduXLab/apollo | [
"2764e934b6d0da1342be781447348288ac84c5e9"
] | [
"modules/tools/create_map/create_map.py"
] | [
"#!/usr/bin/env python\n\n###############################################################################\n# Copyright 2017 The Apollo Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\"\"\"\nCreate base map from localization and mobileye lane detection\n\"\"\"\n\nimport argparse\nimport csv\nimport math\nimport numpy as np\nimport os\nimport rospy\nimport sys\n\nfrom modules.map.proto.map_pb2 import Map\nfrom modules.map.proto.map_lane_pb2 import LaneBoundaryType, Lane\nfrom modules.map.proto.map_road_pb2 import BoundaryEdge, Road\n\nfrom modules.routing.proto.routing_pb2 import LaneWaypoint\nfrom modules.routing.proto.poi_pb2 import POI, Landmark\n\nclass DataPoint:\n \"\"\"\n class of data sample (localization and mobileye lane detection)\n \"\"\"\n\n def __init__(self):\n self.pos_x = 0.0 # localization\n self.pos_y = 0.0\n self.pos_z = 0.0\n self.theta = 0.0 # heading\n self.dist_left = 0.0 # distance to left lane marking\n self.conf_left = 0 # confidence of left lane marking (0/1: low confidence, -1/-2: high confidence)\n self.dist_right = 0.0 # distance to right lane marking\n self.conf_right = 0 # confidence of right lane marking (0/1: low confidence, -1/-2: high confidence)\n self.width = 0.0 # lane width\n self.ratio = 0.0 # relative position within a lane (dist_left / width)\n self.center_x = 0.0 # point on the center line of current lane\n self.center_y = 0.0\n\ndef distance(x1, y1, x2, y2):\n \"\"\"\n l2 distance\n \"\"\"\n\n return math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))\n\ndef interpolate_width(data, default_width):\n \"\"\"\n fill 'width' field of all data samples by interpolation\n \"\"\"\n\n # Collect a set of consecutive entries with low confidence on left OR right lane detection \n intervals = []\n interval_begin = -1\n interval_end = -1\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 or entry.conf_right >= 0:\n if interval_begin < 0:\n interval_begin = index\n interval_end = index\n else:\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n interval_begin = -1\n interval_end = -1\n entry.width = entry.dist_left + entry.dist_right\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n\n # Iterate through intervals to interpolate width\n for interval in intervals:\n for index in range(interval[0], interval[1] + 1):\n if interval[0] == 0 and interval[1] == len(data) - 1:\n data[index].width = default_width\n else:\n if interval[0] == 0:\n data[index].width = data[interval[1] + 1].width\n elif interval[1] == len(data) - 1:\n data[index].width = data[interval[0] - 1].width\n else:\n alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)\n data[index].width = (1.0 - alpha) * data[interval[0] - 1].width + alpha * data[interval[1] + 1].width\n\n # Fill in dist_left/right and conf_left/right using interpolated width\n for (index, entry) in enumerate(data):\n if 
entry.conf_left >= 0 and entry.conf_right < 0:\n entry.dist_left = entry.width - entry.dist_right\n entry.conf_left = -1\n elif entry.conf_left < 0 and entry.conf_right >= 0:\n entry.dist_right = entry.width - entry.dist_left\n entry.conf_right = -1\n\ndef interpolate_ratio(data, default_ratio):\n \"\"\"\n fill 'ratio' field of all data samples by interpolation\n \"\"\"\n\n # Collect a set of consecutive entries with low confidence on left AND right lane detection \n intervals = []\n interval_begin = -1\n interval_end = -1\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 and entry.conf_right >= 0:\n if interval_begin < 0:\n interval_begin = index\n interval_end = index\n else:\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n interval_begin = -1\n interval_end = -1\n entry.ratio = float(entry.dist_left) / entry.width\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n\n # Iterate through intervals to interpolate ratio\n for interval in intervals:\n for index in range(interval[0], interval[1] + 1):\n if interval[0] == 0 and interval[1] == len(data) - 1:\n data[index].ratio = default_ratio\n else:\n if interval[0] == 0:\n data[index].ratio = data[interval[1] + 1].ratio\n elif interval[1] == len(data) - 1:\n data[index].ratio = data[interval[0] - 1].ratio\n else:\n alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)\n data[index].ratio = (1.0 - alpha) * data[interval[0] - 1].ratio + alpha * data[interval[1] + 1].ratio\n\n # Fill in dist_left/right and conf_left/right using interpolated ratio\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 and entry.conf_right >= 0:\n entry.dist_left = entry.width * entry.ratio\n entry.dist_right = entry.width - entry.dist_left\n entry.conf_left = -1\n entry.conf_right = -1\n\ndef compute_center(data):\n \"\"\"\n fill 'center_x' and 'center_y' fields of all data samples\n \"\"\"\n\n for entry in data:\n pos_x = entry.pos_x\n pos_y = entry.pos_y\n pos_z = entry.pos_z\n theta = entry.theta\n dist_left = entry.dist_left\n dist_right = entry.dist_right\n\n theta_left = theta + np.pi / 2.0\n pos_l_x = pos_x + dist_left * np.cos(theta_left)\n pos_l_y = pos_y + dist_left * np.sin(theta_left)\n\n theta_right = theta - np.pi / 2.0\n pos_r_x = pos_x + dist_right * np.cos(theta_right)\n pos_r_y = pos_y + dist_right * np.sin(theta_right)\n\n entry.center_x = (pos_l_x + pos_r_x) / 2.0\n entry.center_y = (pos_l_y + pos_r_y) / 2.0\n\ndef sample_data(data, sample_distance):\n \"\"\"\n sample 'data' at the interval of 'sample_distance'\n \"\"\"\n\n result = []\n\n if len(data) > 0:\n last_x = data[0].center_x\n last_y = data[0].center_y\n result.append(data[0])\n\n for entry in data[1:]:\n if distance(last_x, last_y, entry.center_x, entry.center_y) > sample_distance:\n result.append(entry)\n last_x = entry.center_x\n last_y = entry.center_y\n\n return result\n\ndef extract_data(data, dim):\n \"\"\"\n extract dimension 'dim' (center_x, center_y or width) of 'data' into a list\n \"\"\"\n\n result = []\n for entry in data:\n if dim == 'center_x':\n result.append(entry.center_x)\n elif dim == 'center_y':\n result.append(entry.center_y)\n elif dim == 'width':\n result.append(entry.width)\n return result\n\ndef laplacian_operator(data):\n \"\"\"\n apply laplacian operator on data\n \"\"\"\n\n lap = []\n lap.append(0.0)\n for index in range(1, len(data) - 1):\n lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])\n lap.append(0.0)\n return lap\n\ndef 
laplacian_smooth(data, alpha = 0.5, iterations = 3):\n \"\"\"\n apply laplacian smoothing on data\n \"\"\"\n\n for iteration in range(iterations):\n lap = laplacian_operator(data)\n for index in range(len(data)):\n data[index] += alpha * lap[index]\n\ndef update_data(data, dim, new_data):\n \"\"\"\n copy new_data to dimension 'dim' of 'data'\n \"\"\"\n\n for entry, new_entry in zip(data, new_data):\n if dim == 'center_x':\n entry.center_x = new_entry\n elif dim == 'center_y':\n entry.center_y = new_entry\n elif dim == 'width':\n entry.width = new_entry\n\ndef smooth_dimension(data, dim):\n \"\"\"\n smooth dimension 'dim' of 'data'\n \"\"\"\n\n extracted_data = extract_data(data, dim)\n if dim == 'width':\n laplacian_smooth(extracted_data, 1.0, 1000)\n else:\n laplacian_smooth(extracted_data, 1.0, 1000)\n update_data(data, dim, extracted_data)\n\ndef smooth_center_width(data):\n \"\"\"\n smooth centers and widths of data\n \"\"\"\n\n smooth_dimension(data, 'center_x')\n smooth_dimension(data, 'center_y')\n smooth_dimension(data, 'width')\n\ndef split_data(data, max_lane_length):\n \"\"\"\n split data into multiple lists, each of which is not longer than 'max_lane_length'\n \"\"\"\n\n result = []\n current = []\n total_length = 0.0\n\n if len(data) > 0:\n last_x = data[0].center_x\n last_y = data[0].center_y\n current.append(data[0])\n\n for entry in data[1:]:\n current.append(entry)\n\n d = distance(last_x, last_y, entry.center_x, entry.center_y)\n total_length += d\n\n if total_length > max_lane_length:\n result.append(current)\n\n current = []\n current.append(entry)\n total_length = 0.0\n\n last_x = entry.center_x\n last_y = entry.center_y\n\n if total_length > 0.0:\n result.append(current)\n\n return result\n\ndef create_lane(data, offset, lane_count, left_lanes, right_lanes):\n \"\"\"\n create a lane using 'data' whose lateral index is 'offset'\n offset = 0: center lane; offset < 0: left lanes; offset > 0: right lanes\n lane_count: longitutional index of lane (used for naming)\n left_lanes, right_lanes: number of left/right lanes (used for boundary types)\n \"\"\"\n\n total_length = 0.0\n total_left_length = 0.0\n total_right_length = 0.0\n\n lane = Lane()\n lane.id.id = \"lane_\" + str(lane_count) + \"_\" + str(offset)\n\n lane_central_curve_seg = lane.central_curve.segment.add()\n\n start_heading = data[0].theta\n\n lane_left_boundary_curve_seg = lane.left_boundary.curve.segment.add()\n lane_left_boundary_curve_seg.heading = float(start_heading)\n lane_left_boundary_curve_seg.s = 0.0\n\n lane_right_boundary_curve_seg = lane.right_boundary.curve.segment.add()\n lane_right_boundary_curve_seg.heading = float(start_heading)\n lane_right_boundary_curve_seg.s = 0.0\n\n last_l_x = 0.0\n last_l_y = 0.0\n\n last_c_x = 0.0\n last_c_y = 0.0\n\n last_r_x = 0.0\n last_r_y = 0.0\n\n for (index, entry) in enumerate(data):\n theta = entry.theta\n theta_left = theta + np.pi / 2.0\n theta_right = theta - np.pi / 2.0\n\n pos_c_x = entry.center_x\n pos_c_y = entry.center_y\n\n pos_l_x = pos_c_x + entry.width * (0.5 - offset) * np.cos(theta_left)\n pos_l_y = pos_c_y + entry.width * (0.5 - offset) * np.sin(theta_left)\n\n pos_r_x = pos_c_x + entry.width * (0.5 + offset) * np.cos(theta_right)\n pos_r_y = pos_c_y + entry.width * (0.5 + offset) * np.sin(theta_right)\n\n pos_c_x = (pos_l_x + pos_r_x) / 2.0\n pos_c_y = (pos_l_y + pos_r_y) / 2.0\n\n if index == 0:\n lane_central_curve_seg.start_position.x = pos_c_x\n lane_central_curve_seg.start_position.y = pos_c_y\n\n 
lane_left_boundary_curve_seg.start_position.x = pos_l_x\n lane_left_boundary_curve_seg.start_position.y = pos_l_y\n\n lane_right_boundary_curve_seg.start_position.x = pos_r_x\n lane_right_boundary_curve_seg.start_position.y = pos_r_y\n\n else:\n d = distance(last_c_x, last_c_y, pos_c_x, pos_c_y)\n total_length += d\n\n d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)\n total_left_length += d_left\n\n d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)\n total_right_length += d_right\n\n point = lane_central_curve_seg.line_segment.point.add()\n point.x = pos_c_x\n point.y = pos_c_y\n\n point = lane_left_boundary_curve_seg.line_segment.point.add()\n point.x = pos_l_x\n point.y = pos_l_y\n\n point = lane_right_boundary_curve_seg.line_segment.point.add() \n point.x = pos_r_x\n point.y = pos_r_y\n\n sample = lane.left_sample.add()\n sample.s = total_length\n sample.width = entry.width / 2.0\n\n sample = lane.right_sample.add()\n sample.s = total_length\n sample.width = entry.width / 2.0\n\n last_l_x = pos_l_x\n last_l_y = pos_l_y\n\n last_r_x = pos_r_x\n last_r_y = pos_r_y\n\n last_c_x = pos_c_x\n last_c_y = pos_c_y\n\n lane_central_curve_seg.length = total_length\n lane_left_boundary_curve_seg.length = total_left_length\n lane_right_boundary_curve_seg.length = total_right_length\n\n boundary_type = lane.left_boundary.boundary_type.add()\n boundary_type.s = 0.0\n if offset == -left_lanes:\n boundary_type.types.append(LaneBoundaryType.DOUBLE_YELLOW)\n else:\n boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)\n\n lane.left_boundary.length = total_left_length\n\n boundary_type = lane.right_boundary.boundary_type.add()\n boundary_type.s = 0.0\n if offset == right_lanes:\n boundary_type.types.append(LaneBoundaryType.CURB)\n else:\n boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)\n\n lane.right_boundary.length = total_right_length\n\n lane.length = total_length\n lane.speed_limit = 29.06\n lane.type = Lane.CITY_DRIVING\n lane.turn = Lane.NO_TURN\n\n return lane\n\ndef create_road(data, left_lanes, right_lanes):\n \"\"\"\n create a road using 'data'\n left_lanes, right_lanes: number of left/right lanes\n \"\"\"\n road = Road()\n road.id.id = \"road\"\n section = road.section.add()\n section.id.id = \"section\"\n\n left_edge = section.boundary.outer_polygon.edge.add()\n left_edge.type = BoundaryEdge.LEFT_BOUNDARY\n\n right_edge = section.boundary.outer_polygon.edge.add()\n right_edge.type = BoundaryEdge.RIGHT_BOUNDARY\n\n total_left_length = 0.0\n total_right_length = 0.0\n\n start_heading = data[0].theta\n\n left_edge_curve_seg = left_edge.curve.segment.add()\n left_edge_curve_seg.heading = float(start_heading)\n left_edge_curve_seg.s = 0.0\n\n right_edge_curve_seg = right_edge.curve.segment.add()\n right_edge_curve_seg.heading = float(start_heading)\n right_edge_curve_seg.s = 0.0\n\n last_l_x = 0.0\n last_l_y = 0.0\n\n last_r_x = 0.0\n last_r_y = 0.0\n\n for (index, entry) in enumerate(data):\n theta = entry.theta\n theta_left = theta + np.pi / 2.0\n theta_right = theta - np.pi / 2.0\n\n pos_l_x = entry.center_x + entry.width * (0.5 + left_lanes) * np.cos(theta_left)\n pos_l_y = entry.center_y + entry.width * (0.5 + left_lanes) * np.sin(theta_left)\n\n pos_r_x = entry.center_x + entry.width * (0.5 + right_lanes) * np.cos(theta_right)\n pos_r_y = entry.center_y + entry.width * (0.5 + right_lanes) * np.sin(theta_right)\n\n if index == 0:\n left_edge_curve_seg.start_position.x = pos_l_x\n left_edge_curve_seg.start_position.y = pos_l_y\n\n 
right_edge_curve_seg.start_position.x = pos_r_x\n right_edge_curve_seg.start_position.y = pos_r_y\n\n else:\n d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)\n total_left_length += d_left\n\n d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)\n total_right_length += d_right\n\n point = left_edge_curve_seg.line_segment.point.add()\n point.x = pos_l_x\n point.y = pos_l_y\n\n point = right_edge_curve_seg.line_segment.point.add() \n point.x = pos_r_x\n point.y = pos_r_y\n\n last_l_x = pos_l_x\n last_l_y = pos_l_y\n\n last_r_x = pos_r_x\n last_r_y = pos_r_y\n\n left_edge_curve_seg.length = total_left_length\n right_edge_curve_seg.length = total_right_length\n\n return road\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate Base Map from Recorded Localization and Mobileye Lane Detection')\n parser.add_argument(\n '-i',\n '--input_file',\n help='Recorded localization and mobileye lane detection in CSV format',\n type=str,\n default='/tmp/lane.csv')\n parser.add_argument(\n '--debug',\n help='Print debugging info in /tmp',\n action='store_true')\n parser.add_argument(\n '-o',\n '--output_file',\n help='Output file name of generated base map',\n type=str,\n default='modules/map/data/gen/base_map.txt')\n parser.add_argument(\n '-e',\n '--end_waypoint_file',\n help='Output file name of default end waypoint',\n type=str,\n default='modules/map/data/gen/default_end_way_point.txt')\n parser.add_argument(\n '--default_width',\n help='Default lane width in meters (only effective when mobileye lane detection fails for ALL frames)',\n type=float,\n default=3.5)\n parser.add_argument(\n '--sample_distance',\n help='minimum distance (in meters) of two adjacent samples of a lane',\n type=float,\n default=0.2)\n parser.add_argument(\n '--max_lane_length',\n help='maximum length (in meters) of a lane (longer lanes will be split)',\n type=float,\n default=100.0)\n parser.add_argument(\n '--left_lanes',\n help='Number of lanes on the left',\n type=int,\n default=0)\n parser.add_argument(\n '--right_lanes',\n help='Number of lanes on the right',\n type=int,\n default=0)\n args = vars(parser.parse_args())\n\n csv_file_name = args['input_file']\n map_file_name = args['output_file']\n waypoint_file_name = args['end_waypoint_file']\n default_width = args['default_width']\n debug_option = args['debug']\n sample_distance = args['sample_distance']\n max_lane_length = args['max_lane_length']\n left_lanes = args['left_lanes']\n right_lanes = args['right_lanes']\n\n default_ratio = 0.5\n temp_csv_file_name = '/tmp/lane_interpolation.csv'\n\n rows = []\n with open(csv_file_name, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n rows.append(row)\n\n # Extract data samples\n data = []\n for row in rows[1:]:\n entry = DataPoint()\n entry.pos_x = float(row[0])\n entry.pos_y = float(row[1])\n entry.pos_z = float(row[2])\n entry.theta = float(row[3])\n entry.dist_left = abs(float(row[4]))\n entry.conf_left = int(row[5])\n if entry.dist_left < 0.1:\n entry.conf_left = 0\n entry.dist_right = abs(float(row[6]))\n entry.conf_right = int(row[7])\n if entry.dist_right < 0.1:\n entry.conf_right = 0\n entry.width = default_width\n entry.ratio = default_ratio\n data.append(entry)\n\n # Fill in widths using interpolation\n interpolate_width(data, default_width)\n # Fill in ratios using interpolation\n interpolate_ratio(data, default_ratio)\n # Fill in centers\n compute_center(data)\n\n # Sample data at the interval of sample_distance\n data = sample_data(data, sample_distance)\n # 
Smooth center curves and widths\n smooth_center_width(data)\n\n # Output debug info if necessary\n if debug_option:\n with open(temp_csv_file_name, 'w') as csvfile:\n for row in data:\n csvfile.write(\n \"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n\" %\n (row.pos_x, row.pos_y, row.pos_z, row.theta, row.dist_left, row.conf_left, row.dist_right, row.conf_right, row.width, row.ratio, row.center_x, row.center_y))\n\n # Split data samples into lists with maximum length of max_lane_length\n list_data = split_data(data, max_lane_length)\n\n # Create individual lanes\n lane_sets = []\n for (lane_count, lane_data) in enumerate(list_data):\n lane_set = []\n for offset in range(-left_lanes, right_lanes + 1):\n lane_set.append(create_lane(lane_data, offset, lane_count, left_lanes, right_lanes))\n lane_sets.append(lane_set)\n\n # Create road\n road = create_road(data, left_lanes, right_lanes)\n\n # Create map\n mp = Map()\n mp.header.version = \"1.400000\"\n mp.header.date = \"20170919\"\n mp.header.district = \"101\"\n\n # Set up predecessors, successors, left/right neighbors\n for lane_count in range(len(lane_sets)):\n for lane_offset in range(len(lane_sets[lane_count])):\n if lane_count != 0:\n lane_sets[lane_count][lane_offset].predecessor_id.add().id = lane_sets[lane_count - 1][lane_offset].id.id\n if lane_count != len(lane_sets) - 1:\n lane_sets[lane_count][lane_offset].successor_id.add().id = lane_sets[lane_count + 1][lane_offset].id.id\n if lane_offset != 0:\n lane_sets[lane_count][lane_offset].left_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset - 1].id.id\n if lane_offset != len(lane_sets[lane_count]) - 1:\n lane_sets[lane_count][lane_offset].right_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset + 1].id.id\n\n # Add road/lanes to map and let road contain lanes\n mp.road.extend([road])\n for lane_set in lane_sets:\n for lane in lane_set:\n mp.road[0].section[0].lane_id.add().id = lane.id.id\n mp.lane.extend([lane])\n\n # Output map\n with open(map_file_name, \"w\") as f:\n f.write(mp.__str__())\n\n # Create default end_way_point using the farthest point of last central lane \n last_central_lane = lane_sets[-1][left_lanes]\n\n poi = POI()\n landmark = poi.landmark.add()\n landmark.name = \"default\"\n waypoint = landmark.waypoint.add()\n waypoint.id = last_central_lane.id.id\n waypoint.s = last_central_lane.length\n waypoint.pose.x = last_central_lane.central_curve.segment[0].line_segment.point[-1].x\n waypoint.pose.y = last_central_lane.central_curve.segment[0].line_segment.point[-1].y\n\n # Output default end_way_point\n with open(waypoint_file_name, \"w\") as f:\n f.write(poi.__str__())\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sin",
"numpy.cos"
]
] |
beiyuouo/fedhf | [
"0caa873a5db7494b0f9197848c34243fcb8c49f6"
] | [
"fedhf/api/dpm/laplace.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : fedhf\\api\\dpm\\laplace_noise.py\n# @Time : 2022-05-02 22:39:42\n# @Author : Bingjie Yan\n# @Email : [email protected]\n# @License : Apache License 2.0\n\nimport numpy as np\nimport torch\n\n\ndef laplace_noise(sensitivity, size, epsilon, **kwargs):\n \"\"\"\n Generate Laplace noise with the given sensitivity.\n :param sensitivity: the sensitivity of the privacy mechanism\n :param size: the size of the noise\n :param epsilon: the privacy parameter\n :param kwargs: other parameters\n :return: the generated noise\n \"\"\"\n noise_scale = sensitivity / epsilon\n return np.random.laplace(0, noise_scale, size)\n\n\ndef laplace_clip(model: torch.nn.Module, clip: float):\n \"\"\"\n Clip the model parameters.\n :param model: the model\n :param clip: the clipping bound\n :return: None\n \"\"\"\n for k, v in model.named_parameters():\n v.grad /= max(1, v.grad.norm(1) / clip)"
] | [
[
"numpy.random.laplace"
]
] |
jiahfong/alr | [
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a"
] | [
"docs/source/experiments/legacy/ssl_vs_bald_vs_ssal_basic/mnist/recycle/det_SSL/pseudo_label.py",
"docs/source/experiments/warm_start/mnist/restart/train.py"
] | [
"r\"\"\"\nvanilla pseudo-labeling implementation\n\"\"\"\nfrom collections import defaultdict\n\nfrom alr.utils import timeop, manual_seed\nfrom alr.data.datasets import Dataset\nfrom alr.data import UnlabelledDataset\nfrom alr.training import VanillaPLTrainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr import MCDropout\n\nimport pickle\nimport numpy as np\nimport torch\nimport torch.utils.data as torchdata\nfrom torch.nn import functional as F\nfrom pathlib import Path\n\n\nif __name__ == \"__main__\":\n manual_seed(42)\n kwargs = dict(num_workers=4, pin_memory=True)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n sizes = np.arange(20, 260, 10)\n N = len(sizes)\n # validation dataset size\n VAL_SIZE = 5_000\n # according to the paper:\n BATCH_SIZE = 32\n UNLABELLED_BATCH_SIZE = 256\n # at least prolong the epoch to have this many points (see RandomFixedLengthSampler)\n MIN_TRAIN_SIZE = 12_500\n # well, early stopping should kick-in before then.\n EPOCHS = 200\n REPEATS = 6\n\n # paths\n pl_metrics = Path(\"pl_metrics\")\n metrics = Path(\"metrics\")\n saved_models = Path(\"saved_models\")\n metrics.mkdir()\n saved_models.mkdir()\n log_every = 2\n\n accs = defaultdict(list)\n\n for r in range(1, REPEATS + 1):\n for i, n in enumerate(sizes, 1):\n train, test = Dataset.MNIST.get()\n train, pool = torchdata.random_split(train, (n, len(train) - n))\n pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))\n pool = UnlabelledDataset(pool, debug=True)\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n\n print(f\"=== Iteration {i} of {N} ({i/N:.2%}) ===\")\n print(f\"\\ttrain: {len(train)}; pool: {len(pool)}; test: {len(test)}\")\n\n if (i - 1) % log_every == 0 and r == 1:\n pl_log = str(pl_metrics / f\"dsize_{n}\")\n else:\n pl_log = None\n\n trainer = VanillaPLTrainer(\n model,\n labelled_loss=F.nll_loss,\n unlabelled_loss=F.nll_loss,\n optimiser=\"Adam\",\n patience=3,\n reload_best=True,\n track_pl_metrics=pl_log,\n device=device,\n )\n\n train_loader = torchdata.DataLoader(\n train,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n train, length=MIN_TRAIN_SIZE, shuffle=True\n ),\n **kwargs,\n )\n pool_loader = torchdata.DataLoader(\n pool,\n batch_size=UNLABELLED_BATCH_SIZE,\n shuffle=True,\n **kwargs,\n )\n val_loader = torchdata.DataLoader(\n val,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n\n with timeop() as t:\n history = trainer.fit(\n train_loader,\n pool_loader,\n val_loader,\n epochs=EPOCHS,\n )\n\n test_metrics = trainer.evaluate(test_loader)\n accs[n].append(test_metrics[\"acc\"])\n print(\n f\"\\t[train] loss, acc: ({history['stage2']['train_loss'][-1]}, {history['stage2']['train_acc'][-1]})\\n\"\n f\"\\t[test] loss, acc: ({test_metrics['loss']}, {test_metrics['acc']})\\n\"\n f\"\\ttime: {t}\"\n )\n\n if pl_log:\n torch.save(\n model.state_dict(),\n saved_models / f\"repeat_{r}_dsize_{n}_weights.pth\",\n )\n\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n }\n with open(metrics / f\"repeat_{r}_dsize_{n}_metrics.pkl\", \"wb\") as fp:\n pickle.dump(payload, fp)\n\n with open(\"accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n",
"r\"\"\"\nFrom previous experiments, we saw that ephemeral pseudo-labelling helped boost accuracy\ndespite starting with only 20 points. We could kick-start BALD with 85% accuracy with 24 iterations\nbut it seems like using 80% accuracy at 10 iterations is a good trade-off. It's harder to gain more\naccuracy as the number of iteration increases.\n\nThis experiment kick-starts BALD10 acquisition by warming the model to 80% accuracy (with 10 iterations\nof ephemeral pseudo-labelling). However, the acquisition loop will NOT run ephemeral P.L. as we've seen\na decrease in performance when doing so. There are two possibilities: (1) warm-starting the model\nhas caused it to lower its entropy on the pool dataset, hence causing it to actually perform worse.\n(2) warm-starting it actually helped! my bet is (unfortunately) on the former, given previous observations\n(i.e. ephemeral bald10 performs worse than bald10 -- but i'm hopeful, notwithstanding.).\n\"\"\"\nfrom collections import defaultdict\n\nfrom alr.utils import manual_seed, eval_fwd_exp, timeop\nfrom alr.acquisition import BALD\nfrom alr import MCDropout\nfrom alr.data.datasets import Dataset\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.data import UnlabelledDataset, DataManager\nfrom alr.training import Trainer\nfrom alr.training.repeated_acquisition_utils import (\n get_confident_indices,\n RelabelledDataset,\n)\n\nimport torch\nimport torch.utils.data as torchdata\nimport pickle\nfrom torch.nn import functional as F\nfrom pathlib import Path\n\n\ndef main(b, threshold, warm_start_iters, log_every):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n # --- constants ---\n BATCH_SIZE = 64\n EPOCHS = 200\n REPS = 6\n ITERS = 23\n # +1 because of the structure of our loop\n warm_start_iters += 1\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n\n # --- setup ---\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool, debug=True)\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=1024, **kwargs)\n dm = DataManager(train, pool, bald)\n val_loader = torchdata.DataLoader(\n val,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n\n warm_start_accs = []\n accs = defaultdict(list)\n template = f\"wsi={warm_start_iters}_b={b}_thresh={threshold}\"\n pl_metrics = Path(\"pl_metrics\") / template\n metrics = Path(\"metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n\n for r in range(1, REPS + 1):\n print(f\"- Repeat {r} of {REPS} -\")\n dm.reset()\n ws_accs_r = {}\n # store temporarily labelled points (will be union-ed with the training dataset)\n pseudo_labelled_points = None\n for i in range(1, warm_start_iters + 1):\n if pseudo_labelled_points is not None:\n full_train_dataset = torchdata.ConcatDataset(\n (dm.labelled, pseudo_labelled_points)\n )\n else:\n full_train_dataset = dm.labelled\n train_length = len(full_train_dataset)\n print(\n f\"=== Warm start iteration {i} of {warm_start_iters} ({i / warm_start_iters:.2%}) ===\"\n )\n print(\n f\"\\ttrain: {train_length}; \"\n f\"pool: {dm.n_unlabelled}; \"\n f\"val: {len(val)}; \"\n f\"test: {len(test)}\"\n )\n 
model.reset_weights()\n\n # -- stage 1: train --\n trainer = Trainer(\n model, F.nll_loss, \"Adam\", patience=3, reload_best=True, device=device\n )\n train_loader = torchdata.DataLoader(\n full_train_dataset,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n full_train_dataset, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n\n test_metrics = trainer.evaluate(test_loader)\n ws_accs_r[train_length] = test_metrics[\"acc\"]\n\n print(\n f\"\\t[test] loss, acc: ({test_metrics['loss']:.4f}, {test_metrics['acc']:.4f}); time: {t}\"\n )\n\n with open(\n metrics / f\"repeat_{r}_dsize_{train_length}_metrics.pkl\", \"wb\"\n ) as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n }\n pickle.dump(payload, fp)\n\n if (i - 1) % log_every == 0:\n torch.save(\n model.state_dict(),\n saved_models / f\"repeat_{r}_dsize_{train_length}_weights.pth\",\n )\n\n # skip if this is the last iteration\n if i == warm_start_iters:\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n continue\n\n # -- stage 2: acquire more data into the training set --\n\n # -- acquire using pseudo-labels --\n dm.unlabelled.debug = True\n idxs, plabs = get_confident_indices(\n model=model,\n dataset=dm.unlabelled,\n threshold=threshold,\n root=((pl_metrics / f\"repeat_{r}\") if r == 1 else None),\n step=i,\n device=device,\n **kwargs,\n )\n\n if idxs.shape[0]:\n truth = torchdata.Subset(dm.unlabelled, idxs)\n\n # replace true labels with pseudo-labels\n pseudo_labelled_points = RelabelledDataset(truth, plabs)\n assert len(pseudo_labelled_points) == idxs.shape[0]\n else:\n print(\n f\"\\tSelf-labelling didn't happen because none of the pseudo-labels are confident enough.\"\n )\n warm_start_accs.append(ws_accs_r)\n\n dm.unlabelled.debug = False\n\n print(\n f\"Warm-started with {warm_start_iters} iterations. Beginning AL acquisitions\"\n )\n\n for i in range(1, ITERS + 1):\n dm.acquire(b=b)\n print(f\"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===\")\n print(\n f\"\\ttrain: {dm.n_labelled}; val: {len(val)}; \"\n f\"pool: {dm.n_unlabelled}; test: {len(test)}\"\n )\n # model.reset_weights() # leverage p.l. from before, DON'T reset!\n trainer = Trainer(\n model,\n F.nll_loss,\n optimiser=\"Adam\",\n patience=3,\n reload_best=True,\n device=device,\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n test_metric = trainer.evaluate(test_loader)\n print(f\"\\t[test] acc: {test_metric['acc']}, time: {t}\")\n accs[dm.n_labelled].append(test_metric[\"acc\"])\n\n with open(f\"{template}_warm_start_accs.pkl\", \"wb\") as fp:\n pickle.dump(warm_start_accs, fp)\n\n with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n main(b=10, threshold=0.9, warm_start_iters=10, log_every=2)\n"
] | [
[
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
],
[
"torch.utils.data.Subset",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.utils.data.ConcatDataset"
]
] |
texifter/trust-defender | [
"08747df28adc3d2431a73087e06cb0647e8397d2"
] | [
"test_nnet.py"
] | [
"import argparse\nimport numpy\nimport pandas as pd\nimport os\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_json\nfrom ngram_classifier import NGramClassifier\nfrom sklearn.metrics import precision_recall_fscore_support\n\nCLASS_WEIGHTS = [\n (\"num_days\", 0.997821848), \n (\"statuses_per_day\", 1.065570851),\n (\"followers_per_day\", 1.021055002),\n (\"following_per_day\", 1.122703153),\n (\"desc_len_terms\", 1.171072307),\n (\"num_list_items\", 1.017727903),\n (\"num_hashtags\", 0.889418197),\n (\"url_count\", 1.018365516)\n]\n\ndef get_input_vector(row, classifier):\n '''\n (classifier): p_good\n (classifier): p_bot\n num_days\n statuses_per_day\n followers_per_day\n following_per_day\n desc_len_terms\n num_list_items\n num_hashtags\n url_count\n '''\n class_probs = classifier.classify_text(str(row[\"user_profile_description\"]))\n ret = [ class_probs[\"good\"], class_probs[\"bot\"]]\n for label, weight in CLASS_WEIGHTS:\n ret.append(float(row[label]) * weight)\n return ret\n\n\ndef get_training_output(row):\n class_label = str(row[\"class_value\"])\n return 0.0 if class_label == \"good\" else 1.0\n\n\ndef recall_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\ndef precision_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef f1_m(y_true, y_pred):\n precision = precision_m(y_true, y_pred)\n recall = recall_m(y_true, y_pred)\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", help=\"test input csv file\")\n parser.add_argument(\"-m\", \"--model\", help=\"ngram model file\")\n parser.add_argument(\"-n\", \"--nnetmodel\", help=\"NNet model file\")\n args = parser.parse_args()\n\n if not args.input:\n raise \"missing input file\"\n if not args.model:\n raise \"missing ngram model file\"\n if not args.nnetmodel:\n raise \"missing nnet model file\"\n\n classifier = NGramClassifier(model_path=args.model)\n\n with open(args.nnetmodel, 'r') as json_file:\n loaded_model_json = json_file.read()\n nnet = model_from_json(loaded_model_json)\n nnet.load_weights(f'{args.nnetmodel}.h5')\n nnet.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc',f1_m,precision_m, recall_m])\n \n df_test = pd.read_csv(args.input, keep_default_na=False)\n targets_x = []\n targets_y = []\n predictions = []\n for index, row in df_test.iterrows():\n input_vector = get_input_vector(row, classifier)\n targets_x.append(input_vector)\n targets_y.append(get_training_output(row))\n loss, accuracy, f1_score, precision, recall = nnet.evaluate(numpy.array(targets_x), numpy.array(targets_y), verbose=0)\n\n print(f'loss: {loss}, acc: {accuracy}, prec: {precision}, recall: {recall}, f1: {f1_score}')\n"
] | [
[
"pandas.read_csv",
"numpy.array"
]
] |
liuzuxin/metadrive | [
"850c207536531bc85179084acd7c30ab14a66111"
] | [
"metadrive/examples/profile_metadrive.py"
] | [
"import time\n\nimport numpy as np\n\nfrom metadrive import MetaDriveEnv\nfrom metadrive.utils import setup_logger\n\nif __name__ == '__main__':\n print(\"Start to profile the efficiency of MetaDrive with 1000 maps and ~8 vehicles!\")\n setup_logger(debug=False)\n env = MetaDriveEnv(dict(\n environment_num=1000,\n start_seed=1010,\n ))\n obs = env.reset()\n start = time.time()\n action = [0.0, 1.]\n total_steps = 10000\n vehicle_num = [len(env.engine.traffic_manager.vehicles)]\n for s in range(total_steps):\n o, r, d, i = env.step(action)\n if d:\n env.reset()\n vehicle_num.append(len(env.engine.traffic_manager.vehicles))\n if (s + 1) % 100 == 0:\n print(\n \"Finish {}/10000 simulation steps. Time elapse: {:.4f}. Average FPS: {:.4f}, Average number of \"\n \"vehicles: {:.4f}\".format(\n s + 1,\n time.time() - start, (s + 1) / (time.time() - start), np.mean(vehicle_num)\n )\n )\n print(\n \"Total Time Elapse: {:.3f}, average FPS: {:.3f}, average number of vehicles: {:.3f}.\".format(\n time.time() - start, total_steps / (time.time() - start), np.mean(vehicle_num)\n )\n )\n"
] | [
[
"numpy.mean"
]
] |
archiviral/machine-learning-assignments | [
"198d5a713344ac33fe479eed01c534a3ab12d78c"
] | [
"assignment_4/dtd.py"
] | [
"import argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom copy import deepcopy\n\nimport numpy as np\n\nCONTINOUS_COLUMNS = [0, 2, 3, 9, 10, 11]\nTTL = 30\n\n\nclass Node:\n def __init__(self, prediction, continuous=None, unqs=None, column=None, median=None):\n self.children = []\n self.column = column\n self.continuous = continuous\n self.unqs = unqs\n self.median = median\n self.prediction = prediction\n\n\ndef entropy(xd, y, continuous, label=None):\n indicesl = []\n median = None\n unqs = None\n\n if not continuous:\n unqs, counts = np.unique(xd, return_counts=True)\n entropy = 0\n\n for unq, count in zip(unqs, counts):\n indices = np.argwhere(xd == unq)\n indicesl.append(indices)\n ys = y[indices]\n cnts = np.unique(ys, return_counts=True)[1]\n probs = cnts / ys.shape[0]\n ent = np.sum(-1 * probs * np.log2(probs))\n entropy = entropy + ((count / xd.shape[0]) * ent) \n else:\n xd = xd.astype(int)\n median = np.median(xd)\n \n entropy = 0\n conds = [xd < median, xd >= median]\n for cond in conds:\n indices = np.argwhere(cond)\n indicesl.append(indices)\n ys = y[indices]\n cnts = np.unique(ys, return_counts=True)[1]\n probs = cnts / ys.shape[0]\n ent = np.sum(-1 * probs * np.log2(probs))\n entropy = entropy + ((ys.shape[0] / xd.shape[0]) * ent) \n \n # if label: print(label, entropy)\n return entropy, indicesl, median, unqs\n\n\ndef create_tree(x, y, labels):\n # print(x.shape[0], 'rows.')\n ents = []\n indicesll = []\n medians = []\n unqsl = []\n\n for i in range(x.shape[1]):\n ent, indicesl, median, unqs = entropy(x[:, i], y, continuous=i in CONTINOUS_COLUMNS, label=labels[i])\n ents.append(ent)\n indicesll.append(indicesl)\n medians.append(median)\n unqsl.append(unqs)\n\n minent = min(ents)\n vals, cnts = np.unique(y, return_counts=True)\n prediction = vals[np.argmax(cnts)]\n \n if not minent or len(list(filter(lambda x: x.shape[0] > 0, indicesl))) < 2:\n # print('Leaf node.')\n node = Node(prediction=prediction)\n return node\n\n column = ents.index(minent)\n indicesl = indicesll[column]\n median = medians[column]\n unqs = unqsl[column]\n\n # print('[*] Splitting by column', column, ':', labels[column])\n # print('[*] Number of branches :', len(indicesl))\n\n node = Node(prediction=prediction, column=column, continuous=column in CONTINOUS_COLUMNS, median=median, unqs=unqs)\n for indices in indicesl:\n indices = indices.flatten()\n child = create_tree(x[indices, :], y[indices, :], labels)\n node.children.append(child)\n \n if len(node.children) < 2:\n node.children = []\n node.column = None\n node.median = None\n \n return node\n\n\ndef height(tree):\n return 1 + max([height(child) for child in tree.children]) if tree.children else 1\n\n\ndef cnodes(tree):\n return 1 + sum([cnodes(child) for child in tree.children])\n\n\ndef __predict(tree, xr):\n if not tree.children: return tree.prediction\n\n if tree.continuous:\n if int(xr[tree.column]) < tree.median:\n return __predict(tree.children[0], xr)\n else:\n return __predict(tree.children[1], xr)\n else:\n try:\n return __predict(tree.children[list(tree.unqs).index(xr[tree.column])], xr)\n except ValueError:\n return tree.prediction\n\n\ndef predict(tree, x, y=None):\n preds = []\n accuracy = None\n\n for i in range(x.shape[0]):\n preds.append(__predict(tree, x[i, :]))\n preds = np.array(preds)\n\n if isinstance(y, np.ndarray):\n y = y.flatten().astype(np.uint8)\n accuracy = np.sum(preds == y) / y.shape[0]\n return preds, accuracy\n\n\n\ndef prune(tree, nb):\n copied = deepcopy(tree)\n count = 0\n stack = [copied]\n\n 
while True:\n node = stack.pop()\n if count == nb:\n # print('Node nb', nb, ', Removing', len(node.children), 'children.')\n node.children = []\n return copied\n for child in node.children:\n stack.append(child)\n count = count + 1\n\n\ndef optimize(tree, x, y, begin):\n global_best_tree = tree\n global_best_accr = predict(tree, x, y)[1]\n\n while True:\n start = time.time()\n best_tree = global_best_tree\n best_accr = global_best_accr\n print(height(global_best_tree), cnodes(global_best_tree), global_best_accr)\n\n for i in range(cnodes(global_best_tree)):\n if time.time() - begin > TTL:\n return best_tree\n\n pruned = prune(global_best_tree, i)\n # print(f'[*] Pruned node {i}. Height: {height(pruned)}. Nodes: {cnodes(pruned)}.')\n accr = predict(pruned, x, y)[1]\n if accr > best_accr:\n best_accr = accr\n best_tree = pruned\n print('[*] Iteration time:', time.time() - start)\n if best_accr > global_best_accr:\n global_best_accr = best_accr\n global_best_tree = best_tree\n else:\n return global_best_tree\n\n\n\ndef dt(args):\n begin = time.time()\n with open(args.trainfile) as f:\n train = np.loadtxt(f, delimiter=',', dtype=object)\n train = np.delete(train, 3, 1)\n x = train[1:, :-1]\n y = train[1:, -1:]\n y = y.astype(np.uint8)\n labels = train[0, :]\n\n tree = create_tree(x, y, labels)\n print(f'[*] Tree created. Height: {height(tree)}. Nodes: {cnodes(tree)}.')\n\n \n with open(args.validfile) as f:\n valid = np.loadtxt(f, delimiter=',', dtype=object)\n valid = np.delete(valid, 3, 1)\n x = valid[1:, :-1]\n y = valid[1:, -1:]\n\n optimized = optimize(tree, x, y, begin)\n print(f'[*] Optimized tree. Height: {height(optimized)}. Nodes: {cnodes(optimized)}.')\n\n preds, accuracy = predict(optimized, x, y)\n np.savetxt(args.validpred, preds, fmt='%i')\n print('[*] Accuracy on validation data:', accuracy)\n\n\n with open(args.testfile) as f:\n test = np.loadtxt(f, delimiter=',', dtype=object)\n test = np.delete(test, 3, 1)\n x = test[1:, :-1]\n if args.testlabels:\n with open(args.testlabels) as f:\n y = np.loadtxt(f, delimiter=',', dtype=int)\n preds, accuracy = predict(optimized, x, y)\n print('[*] Accuracy on test data:', accuracy)\n else:\n preds, accuracy = predict(optimized, x)\n np.savetxt(args.testpred, preds, fmt='%i')\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('trainfile', type=str)\n parser.add_argument('validfile', type=str)\n parser.add_argument('testfile', type=str)\n parser.add_argument('validpred', type=str)\n parser.add_argument('testpred', type=str)\n parser.add_argument('testlabels', nargs='?', default='', type=str)\n parser.set_defaults(func=dt)\n\n if len(sys.argv) < 2:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args()\n args.func(args)\n\n\n\nif __name__=='__main__':\n main()\n"
] | [
[
"numpy.sum",
"numpy.log2",
"numpy.argwhere",
"numpy.savetxt",
"numpy.median",
"numpy.argmax",
"numpy.delete",
"numpy.array",
"numpy.unique",
"numpy.loadtxt"
]
] |
pentagram-lang/pentagram | [
"5c4dc2fc516ec2844dc71ddb778ddadec036ce55"
] | [
"bootstrap/pentagram/interpret/block_test.py"
] | [
"from __future__ import annotations\n\nfrom numpy import int32\nfrom pentagram.interpret.block import interpret_block\nfrom pentagram.interpret.test import init_test_frame_stack\nfrom pentagram.machine import MachineExpressionStack\nfrom pentagram.machine import MachineFrameStack\nfrom pentagram.machine import MachineNumber\nfrom pentagram.syntax import SyntaxBlock\nfrom pentagram.syntax import SyntaxExpression\nfrom pentagram.syntax import SyntaxNumber\n\n\ndef test_interpret_block_enter() -> None:\n block = SyntaxBlock(\n [SyntaxExpression([SyntaxNumber(int32(4))])]\n )\n frame_stack = init_test_frame_stack(\n block, MachineExpressionStack([])\n )\n interpret_block(frame_stack)\n assert frame_stack == init_test_frame_stack(\n block,\n MachineExpressionStack([MachineNumber(int32(4))]),\n term_index=1,\n )\n\n\ndef test_interpret_block_exit() -> None:\n block = SyntaxBlock(\n [SyntaxExpression([SyntaxNumber(int32(4))])]\n )\n frame_stack = init_test_frame_stack(\n block, MachineExpressionStack([]), statement_index=1\n )\n interpret_block(frame_stack)\n assert frame_stack == MachineFrameStack([])\n"
] | [
[
"numpy.int32"
]
] |
cnll0075/Merlion | [
"37fb75ccb204d128fde8ad4230f7893da724cf7c"
] | [
"ts_datasets/ts_datasets/anomaly/smd.py"
] | [
"#\n# Copyright (c) 2021 salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n#\nimport os\nimport sys\nimport logging\nimport requests\nimport tarfile\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom ts_datasets.anomaly.base import TSADBaseDataset\n\n_logger = logging.getLogger(__name__)\n_logger.setLevel(logging.DEBUG)\n_handler = logging.StreamHandler(sys.stdout)\n_handler.setLevel(logging.DEBUG)\n_logger.addHandler(_handler)\n\n\nclass SMD(TSADBaseDataset):\n \"\"\"\n The Server Machine Dataset (SMD) is a new 5-week-long dataset from\n a large Internet company collected and made publicly available.\n It contains data from 28 server machines and each machine is monitored by 33 metrics.\n SMD is divided into training set and testing set of equal size.\n\n - source: https://github.com/NetManAIOps/OmniAnomaly\n \"\"\"\n\n filename = \"ServerMachineDataset\"\n url = \"https://www.dropbox.com/s/x53ph5cru62kv0f/ServerMachineDataset.tar.gz?dl=1\"\n valid_subsets = (\n [f\"machine-1-{i}\" for i in range(1, 9)]\n + [f\"machine-2-{i}\" for i in range(1, 10)]\n + [f\"machine-3-{i}\" for i in range(1, 12)]\n )\n\n def __init__(self, subset=\"all\", rootdir=None):\n super().__init__()\n if subset == \"all\":\n subset = self.valid_subsets\n elif type(subset) == str:\n assert subset in self.valid_subsets, f\"subset should be in {self.valid_subsets}, but got {subset}\"\n subset = [subset]\n\n if rootdir is None:\n fdir = os.path.dirname(os.path.abspath(__file__))\n merlion_root = os.path.abspath(os.path.join(fdir, \"..\", \"..\", \"..\"))\n rootdir = os.path.join(merlion_root, \"data\", \"smd\")\n\n # Download the SMD dataset if it doesn't exist\n download(_logger, rootdir, SMD.url, SMD.filename)\n for s in subset:\n # Load training/test datasets\n df, metadata = combine_train_test_datasets(\n *SMD._load_data(directory=os.path.join(rootdir, SMD.filename), sequence_name=s)\n )\n self.time_series.append(df)\n self.metadata.append(metadata)\n\n @staticmethod\n def _load_data(directory, sequence_name):\n with open(os.path.join(directory, \"test\", f\"{sequence_name}.txt\"), \"r\") as f:\n test_data = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n with open(os.path.join(directory, \"test_label\", f\"{sequence_name}.txt\"), \"r\") as f:\n test_labels = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n with open(os.path.join(directory, \"train\", f\"{sequence_name}.txt\"), \"r\") as f:\n train_data = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n return (pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int))\n\n\ndef combine_train_test_datasets(train_df, test_df, test_labels):\n train_df.columns = [str(c) for c in train_df.columns]\n test_df.columns = [str(c) for c in test_df.columns]\n df = pd.concat([train_df, test_df]).reset_index()\n if \"index\" in df:\n df.drop(columns=[\"index\"], inplace=True)\n df.index = pd.to_datetime(df.index * 60, unit=\"s\")\n df.index.rename(\"timestamp\", inplace=True)\n # There are no labels for training examples, so the training labels are set to 0 by default\n # The dataset is only for unsupervised time series anomaly detection\n metadata = pd.DataFrame(\n {\n \"trainval\": df.index < df.index[train_df.shape[0]],\n \"anomaly\": np.concatenate([np.zeros(train_df.shape[0], dtype=int), test_labels]),\n },\n index=df.index,\n )\n return df, metadata\n\n\ndef 
download(logger, datapath, url, filename):\n os.makedirs(datapath, exist_ok=True)\n compressed_file = os.path.join(datapath, f\"{filename}.tar.gz\")\n\n # Download the compressed dataset\n if not os.path.exists(compressed_file):\n logger.info(\"Downloading \" + url)\n with requests.get(url, stream=True) as r:\n with open(compressed_file, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.flush()\n\n # Uncompress the downloaded tar file\n if not os.path.exists(os.path.join(datapath, \"_SUCCESS\")):\n logger.info(f\"Uncompressing {compressed_file}\")\n tar = tarfile.open(compressed_file, \"r:gz\")\n tar.extractall(path=datapath)\n tar.close()\n Path(os.path.join(datapath, \"_SUCCESS\")).touch()\n"
] | [
[
"numpy.zeros",
"pandas.DataFrame",
"pandas.to_datetime",
"pandas.concat",
"numpy.genfromtxt"
]
] |
asa008/nhyai | [
"33be2078cf2835d85fedc901d343568e79a5941f"
] | [
"backend/api/ocr/text/keras_detect.py"
] | [
"\"\"\"\nYOLO_v3 Model Defined in Keras.\nReference: https://github.com/qqwweee/keras-yolo3.git\n\"\"\"\nfrom config import kerasTextModel,IMGSIZE,keras_anchors,class_names,GPU,GPUID\nfrom .keras_yolo3 import yolo_text,box_layer,K\n\nfrom apphelper.image import resize_im,letterbox_image\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\ngraph = tf.get_default_graph()##解决web.py 相关报错问题\n\nanchors = [float(x) for x in keras_anchors.split(',')]\nanchors = np.array(anchors).reshape(-1, 2)\nnum_anchors = len(anchors)\n\nnum_classes = len(class_names)\ntextModel = yolo_text(num_classes,anchors)\ntextModel.load_weights(kerasTextModel)\n\n\nsess = K.get_session()\nimage_shape = K.placeholder(shape=(2, ))##图像原尺寸:h,w\ninput_shape = K.placeholder(shape=(2, ))##图像resize尺寸:h,w\nbox_score = box_layer([*textModel.output,image_shape,input_shape],anchors, num_classes)\n\n\n\ndef text_detect(img,prob = 0.05):\n im = Image.fromarray(img)\n scale = IMGSIZE[0]\n w,h = im.size\n w_,h_ = resize_im(w,h, scale=scale, max_scale=2048)##短边固定为608,长边max_scale<4000\n #boxed_image,f = letterbox_image(im, (w_,h_))\n boxed_image = im.resize((w_,h_), Image.BICUBIC)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n imgShape = np.array([[h,w]])\n inputShape = np.array([[h_,w_]])\n \n \n global graph\n with graph.as_default():\n ##定义 graph变量 解决web.py 相关报错问题\n \"\"\"\n pred = textModel.predict_on_batch([image_data,imgShape,inputShape])\n box,scores = pred[:,:4],pred[:,-1]\n \n \"\"\"\n box,scores = sess.run(\n [box_score],\n feed_dict={\n textModel.input: image_data,\n input_shape: [h_, w_],\n image_shape: [h, w],\n K.learning_phase(): 0\n })[0]\n \n\n keep = np.where(scores>prob)\n box[:, 0:4][box[:, 0:4]<0] = 0\n box[:, 0][box[:, 0]>=w] = w-1\n box[:, 1][box[:, 1]>=h] = h-1\n box[:, 2][box[:, 2]>=w] = w-1\n box[:, 3][box[:, 3]>=h] = h-1\n box = box[keep[0]]\n scores = scores[keep[0]]\n return box,scores\n\n"
] | [
[
"numpy.array",
"numpy.where",
"numpy.expand_dims",
"tensorflow.get_default_graph"
]
] |
eyalnaor/DeepTemporalSR | [
"7d8c821431dec3a4c480550c61a6033fcac5e640"
] | [
"torch_resizer.py"
] | [
"'''\nCode courtesy of Ben Feinstein & Assaf Shocher\nPlease see their work:\nhttps://github.com/assafshocher/PyTorch-Resizer\nhttps://github.com/feinsteinben\n'''\nimport numpy as np\nimport torch\nfrom math import pi\nfrom torch import nn\n\n\nclass Resizer(nn.Module):\n def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True, device=None, dtype=None):\n super(Resizer, self).__init__()\n # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa\n scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)\n self.device = device\n # Choose interpolation method, each method has the matching kernel size\n method, kernel_width = {\n \"cubic\": (cubic, 4.0),\n \"lanczos2\": (lanczos2, 4.0),\n \"lanczos3\": (lanczos3, 6.0),\n \"box\": (box, 1.0),\n \"linear\": (linear, 2.0),\n None: (cubic, 6.0) # set default interpolation method as cubic\n }.get(kernel)\n\n # Antialiasing is only used when downscaling\n antialiasing *= (np.any(np.array(scale_factor) < 1))\n\n # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient\n sorted_dims = np.argsort(np.array(scale_factor))\n self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]\n\n # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction\n field_of_view_list = []\n weights_list = []\n for dim in self.sorted_dims:\n # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the\n # weights that multiply the values there to get its result.\n weights, field_of_view = self.contributions(in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing)\n\n # convert to torch tensor\n if dtype is not None:\n weights = torch.tensor(weights.T, dtype=dtype, device=device)\n else:\n weights = torch.tensor(weights.T, dtype=torch.float32, device=device)\n # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for\n # tmp_im[field_of_view.T], (bsxfun style)\n weights_list.append(nn.Parameter(torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]), requires_grad=False))\n field_of_view_list.append(nn.Parameter(torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long, device=device), requires_grad=False))\n\n self.field_of_view = nn.ParameterList(field_of_view_list)\n self.weights = nn.ParameterList(weights_list)\n self.in_shape = in_shape\n\n def forward(self, in_tensor):\n x = in_tensor\n\n # make sure input is in the correct size\n assert list(self.in_shape[1:]) == list(x.shape[1:]), 'wrong input shape: %s, expected %s' % (str(x.shape), str(self.in_shape))\n\n # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim\n for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):\n # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize\n x = torch.transpose(x, dim, 0)\n\n # This is a bit of a complicated multiplication: x[field_of_view] is a tensor of order image_dims+1.\n # for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim\n # only, this is why it only adds 1 dim to 5the shape). We then multiply, for each pixel, its set of positions with\n # the matching set of weights. 
we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:\n # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the\n # same number\n x = torch.sum(x[fov] * w, dim=0).to(self.device)\n\n # Finally we swap back the axes to the original order\n x = torch.transpose(x, dim, 0)\n\n return x\n\n def fix_scale_and_size(self, input_shape, output_shape, scale_factor):\n # First fixing the scale-factor (if given) to be standardized the function expects (a list of scale factors in the\n # same size as the number of input dimensions)\n if scale_factor is not None:\n # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.\n if np.isscalar(scale_factor) and len(input_shape) > 1:\n scale_factor = [scale_factor, scale_factor]\n\n # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales\n scale_factor = list(scale_factor)\n scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor\n\n # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size\n # to all the unspecified dimensions\n if output_shape is not None:\n output_shape = list(input_shape[len(output_shape):]) + list(np.uint(np.array(output_shape)))\n\n # Dealing with the case of non-give scale-factor, calculating according to output-shape. note that this is\n # sub-optimal, because there can be different scales to the same output-shape.\n if scale_factor is None:\n scale_factor = np.array(output_shape) / np.array(input_shape)\n\n # Dealing with missing output-shape. calculating according to scale-factor\n if output_shape is None:\n output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))\n\n return scale_factor, output_shape\n\n def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):\n # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied\n # such that each position from the field_of_view will be multiplied with a matching filter from the\n # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers\n # around it. This is only done for one dimension of the image.\n\n # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of\n # 1/sf. this means filtering is more 'low-pass filter'.\n fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing and scale < 1.0 else kernel\n kernel_width *= 1.0 / scale if antialiasing and scale < 1.0 else 1.0\n\n # These are the coordinates of the output image\n out_coordinates = np.arange(1, out_length+1)\n\n # since both scale-factor and output size can be provided simulatneously, perserving the center of the image requires shifting\n # the output coordinates. the deviation is because out_length doesn't necesary equal in_length*scale.\n # to keep the center we need to subtract half of this deivation so that we get equal margins for boths sides and center is preserved.\n shifted_out_coordinates = out_coordinates - (out_length - in_length*scale)/2\n\n # These are the matching positions of the output-coordinates on the input image coordinates.\n # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:\n # [1,2,3,4] -> [1,2]. 
Remember each pixel number is the middle of the pixel.\n # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to\n # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big\n # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).\n # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is\n # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:\n # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)\n match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)\n\n # This is the left boundary to start multiplying the filter from, it depends on the size of the filter\n left_boundary = np.floor(match_coordinates - kernel_width / 2)\n\n # Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers\n # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)\n expanded_kernel_width = np.ceil(kernel_width) + 2\n\n # Determine a set of field_of_view for each each output position, these are the pixels in the input image\n # that the pixel in the output image 'sees'. We get a matrix whos horizontal dim is the output pixels (big) and the\n # vertical dim is the pixels it 'sees' (kernel_size + 2)\n field_of_view = np.squeeze(np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1))\n\n # Assign weight to each pixel in the field of view. A matrix whos horizontal dim is the output pixels and the\n # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in\n # 'field_of_view')\n weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)\n\n # Normalize weights to sum up to 1. be careful from dividing by 0\n sum_weights = np.sum(weights, axis=1)\n sum_weights[sum_weights == 0] = 1.0\n weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)\n\n # We use this mirror structure as a trick for reflection padding at the boundaries\n mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))\n field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]\n\n # Get rid of weights and pixel positions that are of zero weight\n non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))\n weights = np.squeeze(weights[:, non_zero_out_pixels])\n field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])\n\n # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size\n return weights, field_of_view\n\n\n# These next functions are all interpolation methods. 
x is the distance from the left pixel center\n\n\ndef cubic(x):\n absx = np.abs(x)\n absx2 = absx ** 2\n absx3 = absx ** 3\n return ((1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +\n (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) & (absx <= 2)))\n\n\ndef lanczos2(x):\n return (((np.sin(pi*x) * np.sin(pi*x/2) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps))\n * (abs(x) < 2))\n\n\ndef box(x):\n return ((-0.5 <= x) & (x < 0.5)) * 1.0\n\n\ndef lanczos3(x):\n return (((np.sin(pi*x) * np.sin(pi*x/3) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps))\n * (abs(x) < 3))\n\n\ndef linear(x):\n return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))\n"
] | [
[
"torch.sum",
"numpy.sum",
"numpy.ceil",
"numpy.squeeze",
"numpy.any",
"torch.nn.ParameterList",
"numpy.floor",
"numpy.abs",
"torch.tensor",
"numpy.arange",
"numpy.mod",
"numpy.expand_dims",
"numpy.array",
"numpy.sin",
"numpy.finfo",
"numpy.isscalar",
"torch.transpose"
]
] |
robfalck/AoC2017 | [
"fa19f3fb42d979b60888a1954bea571c9d4ee735"
] | [
"day24/day24.py"
] | [
"from __future__ import print_function, division, absolute_import\n\nimport copy\nimport time\nimport numpy as np\nimport sys\n\n\nclass Bridge(object):\n\n def __init__(self, initial_components, available_components):\n self.components = list(initial_components)\n self.score = sum([sum(tup) for tup in self.components])\n self.available_components = available_components\n\n def next_required_number(self):\n if len(self.components) == 1:\n c = self.components[0]\n nrn = c[0] if c.index(0) == 1 else c[1]\n else:\n c1 = self.components[-1]\n c2 = self.components[-2]\n nrn = c1[0] if c1[1] in c2 else c1[1]\n return nrn\n\n def add_component(self, c):\n nrn = self.next_required_number()\n if nrn not in c:\n raise ValueError('Invalid connection, wrong port. Needed: {0} Got: {1}'.format(nrn, str(c)))\n if c not in self.available_components:\n raise ValueError('Component unavailable:', c)\n self.components.append(c)\n self.score += sum(c)\n self.available_components.remove(c)\n\n # def score(self):\n # return sum([sum(tup) for tup in self.components])\n\n def length(self):\n return len(self.components)\n\n def assemble_next(self):\n \"\"\"\n Find the next required number in the bridge. Return\n a *new* list of bridges each with a different valid\n component on the end, depending on the available components.\n\n Returns\n -------\n\n \"\"\"\n nrn = self.next_required_number()\n next_components = [c for c in self.available_components if nrn in c]\n new_bridges = []\n\n for nx in next_components:\n b = Bridge(initial_components=tuple(self.components),\n available_components=self.available_components.copy())\n b.add_component(nx)\n new_bridges.append(b)\n return new_bridges\n\n def __str__(self):\n s = '--'.join(['{0}/{1}'.format(*c) for c in self.components])\n return s\n\n\ndef solve(inp):\n\n components = [(int(line.split('/')[0]), int(line.split('/')[1])) for line in inp]\n\n starting_comps = [c for c in components if 0 in c]\n\n bridges = []\n\n for sc in starting_comps:\n bridges.append(Bridge((sc,), set(components)-set((sc,))))\n\n complete_bridges = []\n complete_bridges.extend(bridges)\n\n for i in range(1000):\n print('.', end='')\n sys.stdout.flush()\n\n new_bridges = []\n for b in bridges:\n new_bridges.extend(b.assemble_next())\n\n if not new_bridges:\n # Terminate once no new bridges can be built\n break\n\n bridges = new_bridges\n complete_bridges.extend(new_bridges)\n strongest_bridge = complete_bridges[np.argmax([b.score for b in complete_bridges])]\n\n print()\n print('Strongest bridge:')\n print(' ', str(strongest_bridge))\n print(' strength = ', strongest_bridge.score, 'length =', strongest_bridge.length())\n\n longest_length = np.max([b.length() for b in complete_bridges])\n\n longest_bridges = [b for b in bridges if b.length() == longest_length]\n\n strongest_longest_bridge = longest_bridges[np.argmax([b.score for b in longest_bridges])]\n\n print('Strongest longest bridge:')\n print(' ', str(strongest_longest_bridge))\n print(' strength = ', strongest_longest_bridge.score, 'length =', strongest_longest_bridge.length())\n\n\n\n\n\nif __name__ == '__main__':\n\n\n with open('test_input.txt', 'r') as f:\n puzzle_input = [line.strip() for line in f.readlines() if line]\n\n t0 = time.time()\n solve(puzzle_input)\n print('Time to solve test:', time.time()-t0, 'sec')\n\n with open('input.txt', 'r') as f:\n puzzle_input = [line.strip() for line in f.readlines() if line]\n\n t0 = time.time()\n solve(puzzle_input)\n print('Time to solve:', time.time()-t0, 'sec')\n"
] | [
[
"numpy.argmax"
]
] |
verypluming/transitivity | [
"46808ff20a2aed55a54be58c35427b630711d014"
] | [
"scripts/format_veridicality.py"
] | [
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport pandas as pd\nimport re\nimport copy\nimport os\nimport sys\nimport random\nimport argparse\n\n\ndef trans_label(label):\n if label == \"yes\":\n return \"entailment\"\n elif label == \"unk\":\n return \"neutral\"\n\ndef check_pn(sentence):\n prop_n = {\"ann\":\"Ann\", \"bob\":\"Bob\", \"chris\":\"Chris\", \"daniel\":\"Daniel\", \"elliot\":\"Elliot\", \"fred\":\"Fred\", \"greg\":\"Greg\", \"henry\":\"Henry\", \"tom\":\"Tom\", \"john\":\"John\"}\n for pn, npn in prop_n.items():\n if pn in sentence:\n tmp1 = re.compile(pn)\n tmp2 = re.compile(npn)\n sentence = re.sub(pn, npn, sentence)\n return sentence\n\ndef check_factive(pred):\n positive_clause_preds = ['realized', 'acknowledged', 'remembered', 'noted', 'found', 'noticed', 'learned', 'saw', 'revealed', 'discovered', 'understood', 'knew', 'admitted', 'recognized', 'observed']\n neutral_clause_preds = ['felt', 'claimed', 'doubted', 'hoped', 'predicted', 'implied', 'suspected', 'wished', 'thought', 'believed', 'heard', 'expected', 'estimated', 'assumed', 'argued']\n #positive_clause_preds = ['realized', 'knew', 'remembered']\n #neutral_clause_preds = ['hoped', 'felt', 'mentioned']\n\n if pred in positive_clause_preds:\n return \"f\"\n elif pred in neutral_clause_preds:\n return \"nf\"\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\nparser.add_argument(\"--input\", nargs='?', type=str, help=\"input file\")\nARGS = parser.parse_args()\nsentences = []\nfiles = glob.glob(ARGS.input+\"/data*.tsv\")\nfor file in files:\n with open(file, \"r\") as f:\n for line in f:\n if re.search(\"data_t_h\", file):\n s1, s2, depth, connect, label = line.split(\"\\t\")\n genre = \"ph.depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+label.strip()\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n elif re.search(\"data_ft_t\", file):\n s1, s2, depth, connect, th_label, label = line.split(\"\\t\")\n genre = \"fpp.depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+label.strip()\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n\ndf = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])\ntrain =pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])\ntrain['index'] = 
df.index\ntrain['promptID'] = df.index\ntrain['pairID'] = df.index\ntrain['gold_label'] = df[\"gold_label\"]\ntrain['genre'] = df[\"genre\"]\ntrain['sentence1'] = df[\"sentence1\"]\ntrain['sentence2'] = df[\"sentence2\"]\nfinal_train = train.sample(frac=1)\nfinal_train.to_csv(ARGS.input+\"/train.tsv\", sep=\"\\t\", index=False)\n\nsentences = []\nwith open(ARGS.input+\"/data_ft_h.tsv\", \"r\") as f:\n for line in f:\n s1, s2, depth, connect, th_label, label = line.split(\"\\t\")\n pred_label = check_factive(s1.split(\" \")[1])\n genre = \"depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+pred_label+\".\"+th_label\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n\ndf2 = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])\ntest = pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])\ntest['index'] = df2.index\ntest['promptID'] = df2.index\ntest['pairID'] = df2.index\ntest['gold_label'] = df2[\"gold_label\"]\ntest['genre'] = df2[\"genre\"]\ntest['sentence1'] = df2[\"sentence1\"]\ntest['sentence2'] = df2[\"sentence2\"]\nfinal_test = test.sample(frac=1)\nfinal_test.to_csv(ARGS.input+\"/dev_matched.tsv\", sep=\"\\t\", index=False)"
] | [
[
"pandas.DataFrame"
]
] |
jvparidon/sub2vec | [
"adb9e72b64dc6dbde3c2060ee0d3964ab623a149"
] | [
"subs2vec/norms.py"
] | [
"\"\"\"Predict lexical norms, either to evaluate word vectors, or to get norms for unnormed words.\"\"\"\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model\nimport sklearn.model_selection\nimport sklearn.preprocessing\nimport sklearn.utils\nimport argparse\nimport os\nfrom .vecs import Vectors\nfrom .utensils import log_timer\nimport logging\nlogging.basicConfig(format='[{levelname}] {message}', style='{', level=logging.INFO)\npath = os.path.dirname(__file__)\n\n\n@log_timer\ndef evaluate_norms(lang, vecs_fname, alpha=1.0):\n \"\"\"Predict lexical norms to evaluate a set of word vectors in a given language.\n \n Writes scores to tab-separated text file but also returns them.\n\n :param lang: language to evaluate word vectors in (uses two-letter ISO codes)\n :param vecs_fname: word vectors to evaluate\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n :return: pandas DataFrame containing the norms results\n \"\"\"\n norms_path = os.path.join(path, 'datasets', 'norms')\n if not os.path.exists('results'):\n os.mkdir('results')\n results_path = os.path.join('results', 'norms')\n if not os.path.exists(results_path):\n os.mkdir(results_path)\n logging.info(f'evaluating lexical norm prediction with {vecs_fname}')\n vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)\n scores = []\n for norms_fname in os.listdir(norms_path):\n if norms_fname.startswith(lang):\n logging.info(f'predicting norms from {norms_fname}')\n norms = pd.read_csv(os.path.join(norms_path, norms_fname), sep='\\t', comment='#')\n norms = norms.set_index('word')\n score = predict_norms(vectors, norms, alpha)['scores']\n score['source'] = norms_fname\n scores.append(score)\n scores_fname = os.path.split(vecs_fname)[1].replace('.vec', '.tsv')\n if len(scores) > 0:\n scores = pd.concat(scores)\n scores.to_csv(os.path.join(results_path, scores_fname), sep='\\t', index=False)\n return scores\n\n\n@log_timer\ndef predict_norms(vectors, norms, alpha=1.0):\n \"\"\"Predict lexical norms and return score.\n\n :param vectors: Vectors object containing word vectors\n :param norms: pandas DataFrame of lexical norms\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n :return: dict containing scores and predictions in separate pandas DataFrames\n \"\"\"\n vecs_df = vectors.as_df()\n cols = norms.columns.values\n df = norms.join(vecs_df, how='inner')\n # compensate for missing ys somehow\n total = len(norms)\n missing = len(norms) - len(df)\n penalty = (total - missing) / total\n logging.info(f'missing vectors for {missing} out of {total} words')\n df = sklearn.utils.shuffle(df) # shuffle is important for unbiased results on ordered datasets!\n\n model = sklearn.linear_model.Ridge(alpha=alpha) # use ridge regression models\n cv = sklearn.model_selection.RepeatedKFold(n_splits=5, n_repeats=10)\n\n # compute crossvalidated prediction scores\n scores = []\n for col in cols:\n # set dependent variable and calculate 10-fold mean fit/predict scores\n df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is issued\n df_subset[col] = df[col]\n df_subset = df_subset.dropna() # drop NaNs for this specific y\n x = df_subset[vecs_df.columns.values]\n y = df_subset[col]\n cv_scores = sklearn.model_selection.cross_val_score(model, x, y, cv=cv)\n median_score = np.median(cv_scores)\n penalized_score = median_score * penalty\n scores.append({\n 'norm': col,\n 'adjusted r': np.sqrt(penalized_score), # take 
square root of explained variance to get Pearson r\n 'adjusted r-squared': penalized_score,\n 'r-squared': median_score,\n 'r': np.sqrt(median_score),\n })\n\n # predict (extend norms)\n x_full = df[vecs_df.columns.values]\n predictions = df.loc[:, cols] # use .loc[] so copy is created and no setting with copy warning is raised by pandas\n for col in cols:\n # set dependent variable and fit, but predict for whole x (so including unobserved y)\n df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is raised\n df_subset[col] = df[col]\n df_subset = df_subset.dropna() # drop NaNs for this specific y\n x = df_subset[vecs_df.columns.values]\n y = df_subset[col]\n model.fit(x, y)\n predictions[f'{col} predicted'] = model.predict(x_full)\n\n return {'scores': pd.DataFrame(scores), 'predictions': predictions}\n\n\ndef extend_norms(vecs_fname, norms_fname, alpha=1.0):\n \"\"\"Extend lexical norms to unobserved words, using word vectors.\n\n Writes predictions to tab-separated text file.\n\n :param vecs_fname: file containing word vectors to use for prediction.\n :param norms_fname: file containing norms in tab-separated columns, first column should contain words,\n first line should contain column names, unobserved cells should be left empty\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n \"\"\"\n logging.info(f'extending lexical norms with {vecs_fname}')\n vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)\n norms = pd.read_csv(norms_fname, sep='\\t', comment='#')\n norms = norms.set_index('word')\n results = predict_norms(vectors, norms, alpha)\n base_fname = '.'.join(norms_fname.split('.')[:-1])\n results['scores'].to_csv(f'{base_fname}.scores.tsv', sep='\\t', index=False)\n results['predictions'].to_csv(f'{base_fname}.predictions.tsv', sep='\\t', index=True)\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='perform crossvalidated penalized regression of lexical norms using word vectors as predictors')\n argparser.add_argument('lang', help='language to predict norms for (uses two-letter ISO language codes)')\n argparser.add_argument('vecs_fname', help='vectors to evaluate (or use for lexical norm extension')\n argparser.add_argument('--extend_norms', help='file containing lexical norms to extend')\n argparser.add_argument('--alpha', type=float, default=1.0, help='regularization strength, default 1.0, set higher for small datasets')\n args = argparser.parse_args()\n\n if args.extend_norms:\n extend_norms(vecs_fname=args.vecs_fname, norms_fname=args.extend_norms, alpha=args.alpha)\n else:\n print(evaluate_norms(lang=args.lang, vecs_fname=args.vecs_fname, alpha=args.alpha))\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.median",
"pandas.concat",
"numpy.sqrt"
]
] |
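A note on the norms file above: the core of `evaluate_norms`/`predict_norms` is scoring word vectors by how well a cross-validated ridge regression predicts each lexical norm. Below is a minimal, self-contained sketch of that scoring loop, assuming synthetic 300-dimensional vectors and a made-up `valence` norm in place of the repo's `Vectors` loader and norm files:

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import RepeatedKFold, cross_val_score

# Synthetic stand-ins: 300-dimensional "word vectors" and one lexical norm.
rng = np.random.default_rng(0)
vectors = pd.DataFrame(rng.normal(size=(500, 300)),
                       index=[f"word{i}" for i in range(500)])
norms = pd.DataFrame({"valence": vectors.iloc[:, 0] * 2
                      + rng.normal(scale=0.5, size=500)})

# Join norms to vectors so every row has both predictors and a target.
df = norms.join(vectors, how="inner")

model = Ridge(alpha=1.0)                       # penalized linear regression
cv = RepeatedKFold(n_splits=5, n_repeats=10)   # 5-fold CV repeated 10 times

x = df[vectors.columns]
y = df["valence"]
scores = cross_val_score(model, x, y, cv=cv)   # R^2 per fold by default

print("median r-squared:", np.median(scores))
print("median r:", np.sqrt(np.median(scores)))
```

The repeated k-fold splitter is the important detail: shuffled, repeated splits give a more stable median R² on small norm datasets than a single 5-fold pass, which is why the original also shuffles before fitting.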
IntelligentSensor/PHMRepository | [
"8684c7851970293d607d18c580cec7edbf72ad17"
] | [
"Prognostics/dl-models.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport random\nimport numpy as np\nimport seaborn as sns\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom preprocess import preprocess\n\n\nimport keras as K\nimport tensorflow as tf\nfrom keras.regularizers import l2\nfrom keras.utils import plot_model\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.callbacks import TensorBoard\nfrom keras.models import model_from_json\nfrom keras.layers import Input, Dense, Flatten, Activation\nfrom keras.layers import Conv1D, Dropout, BatchNormalization, MaxPooling1D\n\nfrom keras.layers import LSTM, Bidirectional\nfrom keras.layers.core import Flatten, Dense, Dropout\n\npy_ver = sys.version\nk_ver = K.__version__\ntf_ver = tf.__version__\n\nK.backend.clear_session()\n\nprint(\"Using Python version \" + str(py_ver))\nprint(\"Using Keras version \" + str(k_ver))\nprint(\"Using TensorFlow version \" + str(tf_ver))\n\nClass_dict={0:'正常', 1:'溶液地未连接', 2:'流通池接地', 3:'电缆线未连接', 4:'球泡破裂', 5:'支架损坏',\n 6:'电极污染', 7:'电解液缺失', 8:'水样波动'}\n\n# 训练参数\nbatch_size = 10\nepochs = 30\nnum_classes = 9\nlength = 2048\nBatchNorm = True # 是否批量归一化\nnumber = 200 # 每类样本的数量\nnormal = True # 是否标准化\nrate = [0.7,0.2,0.1] # 测试集验证集划分比例\n\npath = '/Users/tung/Python/WorkProject/PHMresearch/WDCNN&LR_FaultDiagnosis/'\npreprocess = preprocess()\n\nx_train, y_train, x_valid, y_valid, x_test, y_test = preprocess.prepro(d_path=path+'data/0HP',length=length,\n number=number,\n normal=normal,\n rate=rate,\n enc=True, enc_step=340)\n\n# 输入卷积的时候还需要修改一下,增加通道数目\nx_train, x_valid, x_test = x_train[:,:,np.newaxis], x_valid[:,:,np.newaxis], x_test[:,:,np.newaxis]\n\n# 输入数据的维度\ninput_shape =x_train.shape[1:]\n\nprint('训练样本维度:', x_train.shape)\nprint(x_train.shape[0], '训练样本个数')\nprint('验证样本的维度', x_valid.shape)\nprint(x_valid.shape[0], '验证样本个数')\nprint('测试样本的维度', x_test.shape)\nprint(x_test.shape[0], '测试样本个数')\n\ndef get_label(row):\n for c in range(len(row)):\n if row[c]==1:\n return c\n\ndef decode(arr):\n temp = np.zeros(len(arr))\n for i in range(len(arr)):\n temp[i] = get_label(arr[i])\n return temp\ny_test_decode = decode(y_test)\ny_train_decode = decode(y_train)\n\ndef auc(y_true, y_pred):\n auc = tf.metrics.auc(y_true, y_pred)[1]\n K.get_session().run(tf.local_variables_initializer())\n return auc\n\n'WDCNN'\n# 自定义卷积层wdcnn\ndef wdcnn(filters, kernerl_size, strides, conv_padding, pool_padding, pool_size, BatchNormal):\n \"\"\"wdcnn层神经元\n \n :param filters: 卷积核的数目,整数\n :param kernerl_size: 卷积核的尺寸,整数\n :param strides: 步长,整数\n :param conv_padding: 'same','valid'\n :param pool_padding: 'same','valid'\n :param pool_size: 池化层核尺寸,整数\n :param BatchNormal: 是否Batchnormal,布尔值\n :return: model\n \"\"\"\n model.add(Conv1D(filters=filters, kernel_size=kernerl_size, strides=strides,\n padding=conv_padding, kernel_regularizer=l2(1e-4)))\n if BatchNormal:\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))\n return model\n\n# 实例化序贯模型\nmodel = Sequential()\n# 搭建输入层,第一层卷积。因为要指定input_shape,所以单独放出来\nmodel.add(Conv1D(filters=16, kernel_size=64, strides=16, padding='same',kernel_regularizer=l2(1e-4), input_shape=input_shape))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling1D(pool_size=2))\n\n#0-1之间均匀分布的Dropout\n# model.add(Dropout( np.round(random.uniform(0,1), 2) ))\n\n# 第二层卷积\nmodel = wdcnn(filters=32, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, 
BatchNormal=BatchNorm)\n# 第三层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 第四层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 第五层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='valid',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 从卷积到全连接需要展平\nmodel.add(Flatten())\n\n# 添加全连接层\nmodel.add(Dense(units=90, activation='relu', kernel_regularizer=l2(1e-4)))\n# 增加输出层\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\nmodel.summary()\n\n# 编译模型 评价函数和损失函数相似,不过评价函数的结果不会用于训练过程中\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy',\n metrics=['accuracy']) #metrics=[auc]\n\nstart = datetime.now()\n\n# TensorBoard调用查看一下训练情况\ntb_cb = TensorBoard(log_dir='logs')\n\n# 开始模型训练\nhistory = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_valid, y_valid), shuffle=True,\n callbacks=[tb_cb])\n\nprint(\"This took \", datetime.now() - start)\n\n#变dropout率\n#BN与训练速度和识别率\n#样本量与识别率及标准差的关系\n#对输入数据添加高斯白噪声\ndef wgn(x, snr):\n snr = 10**(snr/10.0)\n xpower = np.sum(x**2)/len(x)\n npower = xpower / snr\n return np.random.randn(len(x)) * np.sqrt(npower)\n\ntest = x_train[0]\ntestwgn = wgn(test, 10).reshape((2048, 1)) #-4dB~10dB\ntemp = test + testwgn\n\n#第一层卷积核大小与抗噪\n#feature map特征可分性\n#保存模型\nmodel_path = path + 'models/wdcnn.h5'\nmodel.save(model_path)\ndel model\n\n# 模型包含一个自定义 wdcnn 类的实例\nmodel = load_model(path+'models/wdcnn.h5', custom_objects={'wdcnn': wdcnn})\nmodel.summary()\n#fine-tune\n\n#evaluation\nscore = model.evaluate(x=x_test, y=y_test, verbose=0)\nprint(\"测试集上的损失:\", score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/wdcnn.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 2048, 1))\npredicted = model.predict(unknown)\nprint(\"Using model to predict fault for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n\n'LSTM'\nx_train = x_train.reshape((x_train.shape[0], 16, 128)) #time_step、input_dim\nx_valid = x_valid.reshape((x_valid.shape[0], 16, 128))\nx_test = x_test.reshape((x_test.shape[0], 16, 128))\n\nmodel = Sequential()\n\n#隐藏层设置为10, input_shape(time_step、input_dim) stateful=True使用状态RNN\nmodel.add(LSTM(units=9, input_shape=(x_train.shape[1], x_train.shape[2])))\nmodel.add(BatchNormalization())\n\n#全连接层,输出单个类,units为num_classes\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\n\n#告诉模型输入的格式\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\n\n# #重置网络中所有层的状态\n# model.reset_states()\n\n# #重置指定层的状态\n# model.layers[0].reset_states()\nmodel.summary()\n\n#损失函数为交叉熵,优化器为Adam,学习率为0.001\nmodel.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['acc'])\n\nstart = datetime.now()\nhistory =model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_valid, y_valid))#训练模型并进行测试\n\nprint(\"This took \", datetime.now() - start)\n\n#保存模型\nmodel_path = path+'models/LSTM.h5'\nmodel.save(model_path)\ndel model\n\nmodel = load_model(path+'models/LSTM.h5')\nmodel.summary()\n\n#evaluation\nscore = history.model.evaluate(x=x_test, y=y_test, 
verbose=0)\nprint(\"测试集上的损失:\", score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/LSTM.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 16, 128))\npredicted = model.predict(unknown)\nprint(\"Using model to predict species for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n\n'biLSTM'\nmodel = Sequential()\n#隐藏层设置为10, input_shape元组第二个参数指\nmodel.add(Bidirectional(LSTM(units=9, input_shape=(x_train.shape[1], x_train.shape[2])))) # activation='tanh'\nmodel.add(BatchNormalization())\n\n#全连接层,输出单个类,units为num_classes\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\n\n#告诉模型输入的格式\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\nmodel.summary()\n\n#损失函数为交叉熵,优化器为Adam,学习率为0.001\nmodel.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['acc'])\n\nstart = datetime.now()\nhistory =model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_valid, y_valid))#训练模型并进行测试\n\nprint(\"This took \", datetime.now() - start)\n\n#嵌套网络保存\nmodel.save_weights(path+'models/biLSTM.h5')\nmodel.load_weights(path+'models/biLSTM.h5',by_name=True)\njson_string = model.to_json()\nmodel=model_from_json(json_string)\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\nmodel.summary()\n\n#evaluation\nscore = history.model.evaluate(x=x_test, y=y_test, verbose=0)\nprint(\"测试集上的损失:\",score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/biLSTM.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 16, 128))\npredicted = model.predict(unknown)\nprint(\"Using model to predict species for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n"
] | [
[
"numpy.sum",
"numpy.argmax",
"numpy.sqrt",
"tensorflow.metrics.auc",
"tensorflow.local_variables_initializer"
]
] |
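The `dl-models.py` entry above builds a WDCNN: one wide first convolution (kernel 64, stride 16) over the raw 1-D signal, followed by narrow Conv1D/BatchNorm/ReLU/MaxPool blocks and a softmax head. Here is a stripped-down sketch of that architecture written against `tensorflow.keras` rather than the standalone Keras/TF1 stack the script uses, with the final block's 'valid' padding simplified to 'same':

```python
from tensorflow.keras import layers, models, regularizers

def conv_block(x, filters, kernel_size=3, pool=2):
    """One narrow WDCNN block: Conv1D -> BatchNorm -> ReLU -> MaxPool."""
    x = layers.Conv1D(filters, kernel_size, padding="same",
                      kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    return layers.MaxPooling1D(pool)(x)

def build_wdcnn(input_length=2048, num_classes=9):
    inputs = layers.Input(shape=(input_length, 1))
    # Wide first kernel: 64-wide filters with stride 16 act as a learned
    # band-pass front end while aggressively downsampling the signal.
    x = layers.Conv1D(16, 64, strides=16, padding="same",
                      kernel_regularizer=regularizers.l2(1e-4))(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.MaxPooling1D(2)(x)
    for filters in (32, 64, 64, 64):
        x = conv_block(x, filters)
    x = layers.Flatten()(x)
    x = layers.Dense(90, activation="relu")(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    return models.Model(inputs, outputs)

model = build_wdcnn()
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```

The wide, strided first kernel is the design choice that gives WDCNN its name: it reduces the 2048-sample window early so the remaining stack of small kernels stays cheap.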
xccheng/mars | [
"8146d1b7d3f3bc2a652c414a336a2f884a06a108"
] | [
"mars/dataframe/groupby/transform.py"
] | [
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nfrom ... import opcodes\nfrom ...core import OutputType\nfrom ...custom_log import redirect_custom_log\nfrom ...serialize import BoolField, TupleField, DictField, AnyField, StringField\nfrom ...utils import enter_current_session\nfrom ..operands import DataFrameOperandMixin, DataFrameOperand\nfrom ..utils import build_empty_df, build_empty_series, parse_index\n\n\nclass GroupByTransform(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.TRANSFORM\n _op_module_ = 'dataframe.groupby'\n\n _func = AnyField('func')\n _args = TupleField('args')\n _kwds = DictField('kwds')\n\n _call_agg = BoolField('call_agg')\n\n # for chunk\n _tileable_op_key = StringField('tileable_op_key')\n\n def __init__(self, func=None, args=None, kwds=None, call_agg=None, output_types=None,\n tileable_op_key=None, **kw):\n super().__init__(_func=func, _args=args, _kwds=kwds, _call_agg=call_agg,\n _output_types=output_types, _tileable_op_key=tileable_op_key, **kw)\n\n @property\n def func(self):\n return self._func\n\n @property\n def args(self):\n return getattr(self, '_args', None) or ()\n\n @property\n def kwds(self):\n return getattr(self, '_kwds', None) or dict()\n\n @property\n def call_agg(self):\n return self._call_agg\n\n @property\n def tileable_op_key(self):\n return self._tileable_op_key\n\n def _infer_df_func_returns(self, in_groupby, dtypes, index):\n index_value, output_types, new_dtypes = None, None, None\n\n output_types = [OutputType.dataframe] \\\n if in_groupby.op.output_types[0] == OutputType.dataframe_groupby else [OutputType.series]\n\n try:\n empty_groupby = in_groupby.op.build_mock_groupby()\n with np.errstate(all='ignore'):\n if self.call_agg:\n infer_df = empty_groupby.agg(self.func, *self.args, **self.kwds)\n else:\n infer_df = empty_groupby.transform(self.func, *self.args, **self.kwds)\n\n # todo return proper index when sort=True is implemented\n index_value = parse_index(None, in_groupby.key, self.func)\n\n if isinstance(infer_df, pd.DataFrame):\n output_types = [OutputType.dataframe]\n new_dtypes = new_dtypes or infer_df.dtypes\n else:\n output_types = [OutputType.series]\n new_dtypes = new_dtypes or (infer_df.name, infer_df.dtype)\n except: # noqa: E722 # nosec\n pass\n\n self.output_types = output_types if not self.output_types else self.output_types\n dtypes = new_dtypes if dtypes is None else dtypes\n index_value = index_value if index is None else parse_index(index)\n return dtypes, index_value\n\n def __call__(self, groupby, dtypes=None, index=None):\n in_df = groupby.inputs[0]\n\n dtypes, index_value = self._infer_df_func_returns(groupby, dtypes, index)\n for arg, desc in zip((self.output_types, dtypes, index_value),\n ('output_types', 'dtypes', 'index')):\n if arg is None:\n raise TypeError(f'Cannot determine {desc} by calculating with enumerate data, '\n 'please specify it as arguments')\n\n if self.output_types[0] == 
OutputType.dataframe:\n new_shape = (np.nan if self.call_agg else in_df.shape[0], len(dtypes))\n return self.new_dataframe([groupby], shape=new_shape, dtypes=dtypes, index_value=index_value,\n columns_value=parse_index(dtypes.index, store_data=True))\n else:\n name, dtype = dtypes\n new_shape = (np.nan,) if self.call_agg else groupby.shape\n return self.new_series([groupby], name=name, shape=new_shape, dtype=dtype,\n index_value=index_value)\n\n @classmethod\n def tile(cls, op):\n in_groupby = op.inputs[0]\n out_df = op.outputs[0]\n\n chunks = []\n for c in in_groupby.chunks:\n inp_chunks = [c]\n\n new_op = op.copy().reset_key()\n new_op._tileable_op_key = op.key\n if op.output_types[0] == OutputType.dataframe:\n new_index = c.index if c.ndim == 2 else c.index + (0,)\n chunks.append(new_op.new_chunk(\n inp_chunks, index=new_index, shape=(np.nan, len(out_df.dtypes)), dtypes=out_df.dtypes,\n columns_value=out_df.columns_value, index_value=out_df.index_value))\n else:\n chunks.append(new_op.new_chunk(\n inp_chunks, name=out_df.name, index=(c.index[0],), shape=(np.nan,), dtype=out_df.dtype,\n index_value=out_df.index_value))\n\n new_op = op.copy()\n kw = out_df.params.copy()\n kw['chunks'] = chunks\n if op.output_types[0] == OutputType.dataframe:\n kw['nsplits'] = ((np.nan,) * len(chunks), (len(out_df.dtypes),))\n else:\n kw['nsplits'] = ((np.nan,) * len(chunks),)\n return new_op.new_tileables([in_groupby], **kw)\n\n @classmethod\n @redirect_custom_log\n @enter_current_session\n def execute(cls, ctx, op):\n in_data = ctx[op.inputs[0].key]\n out_chunk = op.outputs[0]\n\n if not in_data:\n if op.output_types[0] == OutputType.dataframe:\n ctx[op.outputs[0].key] = build_empty_df(out_chunk.dtypes)\n else:\n ctx[op.outputs[0].key] = build_empty_series(out_chunk.dtype)\n return\n\n if op.call_agg:\n result = in_data.agg(op.func, *op.args, **op.kwds)\n else:\n result = in_data.transform(op.func, *op.args, **op.kwds)\n\n if result.ndim == 2:\n result = result.astype(op.outputs[0].dtypes, copy=False)\n else:\n result = result.astype(op.outputs[0].dtype, copy=False)\n ctx[op.outputs[0].key] = result\n\n\ndef groupby_transform(groupby, func, *args, dtypes=None, index=None, output_types=None, **kwargs):\n # todo this can be done with sort_index implemented\n if not groupby.op.groupby_params.get('as_index', True):\n raise NotImplementedError('transform when set_index == False is not supported')\n\n call_agg = kwargs.pop('_call_agg', False)\n if not call_agg and isinstance(func, (dict, list)):\n raise TypeError(f'Does not support transform with {type(func)}')\n\n op = GroupByTransform(func=func, args=args, kwds=kwargs, output_types=output_types,\n call_agg=call_agg)\n return op(groupby, dtypes=dtypes, index=index)\n"
] | [
[
"numpy.errstate"
]
] |
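The Mars `GroupByTransform` operand above cannot know the output schema of a user-supplied `func` ahead of time, so it infers dtypes by running the function on a mock groupby and inspecting the result. A rough analogue of that inference trick in plain pandas, probing on a tiny slice instead of Mars' metadata-built mock (the function and column names are illustrative):

```python
import numpy as np
import pandas as pd

def infer_transform_meta(df, by, func):
    """Apply `func` to a tiny two-row probe with the same schema, then inspect
    the result to learn whether the transform yields a DataFrame or a Series
    and which dtypes it produces."""
    probe = df.head(2)
    with np.errstate(all="ignore"):      # silence warnings from tiny groups
        result = probe.groupby(by).transform(func)
    if isinstance(result, pd.DataFrame):
        return "dataframe", result.dtypes
    return "series", (result.name, result.dtype)

df = pd.DataFrame({"key": ["a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 4.0]})
print(infer_transform_meta(df, "key", lambda s: s - s.mean()))
```

When this probe fails, the operand falls back to caller-supplied `dtypes`/`index`, which is why `__call__` raises a `TypeError` asking for them explicitly if nothing could be inferred.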
AyrtonB/ElexonDataPortal | [
"939c811f85dff15d0f7eb164fd1982ba0307192e"
] | [
"ElexonDataPortal/dev/orchestrator.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).\n\n__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',\n 'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',\n 'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',\n 'query_orchestrator']\n\n# Cell\nimport pandas as pd\nfrom tqdm import tqdm\nfrom warnings import warn\nfrom requests.models import Response\n\nfrom . import utils, raw\n\n# Cell\ndef retry_request(raw, method, kwargs, n_attempts=3):\n attempts = 0\n success = False\n\n while (attempts < n_attempts) and (success == False):\n try:\n r = getattr(raw, method)(**kwargs)\n utils.check_status(r)\n success = True\n except Exception as e:\n attempts += 1\n if attempts == n_attempts:\n raise e\n\n return r\n\ndef if_possible_parse_local_datetime(df):\n dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']\n\n dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]\n sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]\n\n if len(dt_cols)==1 and len(sp_cols)==1:\n df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])\n\n return df\n\ndef SP_and_date_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)\n date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]\n\n for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):\n kwargs.update({\n kwargs_map['date']: datetime.strftime('%Y-%m-%d'),\n kwargs_map['SP']: SP,\n })\n\n missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_SP = utils.parse_xml_response(r)\n df = pd.concat([df, df_SP])\n\n df = utils.expand_cols(df)\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef handle_capping(\n r: Response,\n df: pd.DataFrame,\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n end_date: str,\n request_type: str,\n **kwargs\n):\n capping_applied = utils.check_capping(r)\n assert capping_applied != None, 'No information on whether or not capping limits had been breached could be found in the response metadata'\n\n if capping_applied == True: # only subset of date range returned\n dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']\n dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]\n\n if len(dt_cols) == 1:\n start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')\n if 'start_time' in kwargs.keys():\n kwargs['start_time'] = '00:00'\n\n if pd.to_datetime(start_date) >= 
pd.to_datetime(end_date):\n warnings.warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\\nThe `start_date` will be set one day earlier than the `end_date`.')\n start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')\n\n warn(f'Response was capped, request is rerunning for missing data from {start_date}')\n df_rerun = date_range_request(\n method=method,\n kwargs_map=kwargs_map,\n func_params=func_params,\n api_key=api_key,\n start_date=start_date,\n end_date=end_date,\n request_type=request_type,\n **kwargs\n )\n\n df = pd.concat([df, df_rerun])\n df = df.drop_duplicates()\n\n else:\n warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')\n\n return df\n\ndef date_range_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n request_type: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n for kwarg in ['start_time', 'end_time']:\n if kwarg not in kwargs_map.keys():\n kwargs_map[kwarg] = kwarg\n\n kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')\n kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')\n\n if 'SP' in kwargs_map.keys():\n kwargs[kwargs_map['SP']] = '*'\n func_params.remove('SP')\n func_params += [kwargs_map['SP']]\n\n missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n if request_type == 'date_range':\n kwargs.pop(kwargs_map['start_time'])\n kwargs.pop(kwargs_map['end_time'])\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df = utils.parse_xml_response(r)\n df = if_possible_parse_local_datetime(df)\n\n # Handling capping\n df = handle_capping(\n r,\n df,\n method=method,\n kwargs_map=kwargs_map,\n func_params=func_params,\n api_key=api_key,\n end_date=end_date,\n request_type=request_type,\n **kwargs\n )\n\n return df\n\n# Cell\ndef year_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n start_year = int(pd.to_datetime(start_date).strftime('%Y'))\n end_year = int(pd.to_datetime(end_date).strftime('%Y'))\n\n for year in tqdm(range(start_year, end_year+1), desc=stream):\n kwargs.update({kwargs_map['year']: year})\n\n missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef construct_year_month_pairs(start_date, end_date):\n dt_rng 
= pd.date_range(start_date, end_date, freq='M')\n\n if len(dt_rng) == 0:\n year_month_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %b').split(' '))]\n else:\n year_month_pairs = [tuple(dt.strftime('%Y %b').split(' ')) for dt in dt_rng]\n\n year_month_pairs = [(int(year), week.upper()) for year, week in year_month_pairs]\n\n return year_month_pairs\n\ndef year_and_month_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n year_month_pairs = construct_year_month_pairs(start_date, end_date)\n\n for year, month in tqdm(year_month_pairs, desc=stream):\n kwargs.update({\n kwargs_map['year']: year,\n kwargs_map['month']: month\n })\n\n missing_kwargs = list(set(func_params) - set(['year', 'month'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef clean_year_week(year, week):\n year = int(year)\n\n if week == '00':\n year = int(year) - 1\n week = 52\n\n else:\n year = int(year)\n week = int(week.strip('0'))\n\n return year, week\n\ndef construct_year_week_pairs(start_date, end_date):\n dt_rng = pd.date_range(start_date, end_date, freq='W')\n\n if len(dt_rng) == 0:\n year_week_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %W').split(' '))]\n else:\n year_week_pairs = [tuple(dt.strftime('%Y %W').split(' ')) for dt in dt_rng]\n\n year_week_pairs = [clean_year_week(year, week) for year, week in year_week_pairs]\n\n return year_week_pairs\n\ndef year_and_week_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n year_week_pairs = construct_year_week_pairs(start_date, end_date)\n\n for year, week in tqdm(year_week_pairs, desc=stream):\n kwargs.update({\n kwargs_map['year']: year,\n kwargs_map['week']: week\n })\n\n missing_kwargs = list(set(func_params) - set(['year', 'week'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef non_temporal_request(\n method: str,\n api_key: str,\n n_attempts: int=3,\n **kwargs\n):\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df = utils.parse_xml_response(r)\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef query_orchestrator(\n method: str,\n api_key: str,\n request_type: str,\n kwargs_map: dict=None,\n func_params: list=None,\n start_date: 
str=None,\n end_date: str=None,\n n_attempts: int=3,\n **kwargs\n):\n if request_type not in ['non_temporal']:\n kwargs.update({\n 'kwargs_map': kwargs_map,\n 'func_params': func_params,\n 'start_date': start_date,\n 'end_date': end_date,\n })\n\n if request_type in ['date_range', 'date_time_range']:\n kwargs.update({\n 'request_type': request_type,\n })\n\n request_type_to_func = {\n 'SP_and_date': SP_and_date_request,\n 'date_range': date_range_request,\n 'date_time_range': date_range_request,\n 'year': year_request,\n 'year_and_month': year_and_month_request,\n 'year_and_week': year_and_week_request,\n 'non_temporal': non_temporal_request\n }\n\n assert request_type in request_type_to_func.keys(), f\"{request_type} must be one of: {', '.join(request_type_to_func.keys())}\"\n request_func = request_type_to_func[request_type]\n\n df = request_func(\n method=method,\n api_key=api_key,\n n_attempts=n_attempts,\n **kwargs\n )\n\n df = df.reset_index(drop=True)\n\n return df"
] | [
[
"pandas.date_range",
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.to_datetime",
"pandas.concat"
]
] |
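Two patterns carry most of the orchestrator above: every raw API call goes through `retry_request`, and capped responses trigger a follow-up request starting from the last returned date. Below is a minimal sketch of just the retry helper, with a generic callable standing in for the `raw` module's request methods (the back-off delay is an added assumption, not something the original does):

```python
import time

def retry_request(request_func, kwargs, n_attempts=3, backoff_s=0.5):
    """Call `request_func(**kwargs)` up to `n_attempts` times, re-raising the
    exception only once every attempt has failed."""
    for attempt in range(1, n_attempts + 1):
        try:
            return request_func(**kwargs)
        except Exception:
            if attempt == n_attempts:
                raise
            time.sleep(backoff_s * attempt)   # simple linear back-off

# Usage with a flaky stand-in function (assumed, not part of the library):
calls = {"n": 0}
def flaky(x):
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("transient failure")
    return x * 2

print(retry_request(flaky, {"x": 21}))   # succeeds on the third attempt
```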
sx14/hierarchical-relationship | [
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a"
] | [
"open_relation/infer/tree_infer2.py"
] | [
"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\n\n\n\n\n\ndef cal_rank_scores(label_num):\n # rank scores [1 - 10]\n # s = a(x - b)^2 + c\n # if rank is 0, score is 10\n # b = num-1\n s_min = 1.0\n s_max = 10.0\n b = label_num - 1\n c = s_min\n a = (s_max - c) / b ** 2\n rank_scores = [0] * label_num\n for r in range(label_num):\n rank_scores[r] = a*(r-b)**2 + c\n return rank_scores\n\n\ndef cal_rank_scores1(n_item):\n s_max = 10\n ranks = np.arange(1, n_item+1).astype(np.float)\n\n s = (np.cos(ranks / n_item * np.pi) + 1) * (s_max * 1.0 / 2)\n return s\n\n\nclass TreeNode:\n def __init__(self, name, index):\n self._rank = -1\n self._name = name\n self._index = index\n self._parents = []\n self._children = []\n\n def __str__(self):\n return '%s[%d]' % (self._name, self._rank)\n\n def add_children(self, child):\n self._children.append(child)\n\n def children(self):\n return self._children\n\n def append_parent(self, parent):\n self._parents.append(parent)\n\n def set_rank(self, rank):\n self._rank = rank\n\n def rank(self):\n return self._rank\n\n def index(self):\n return self._index\n\n def name(self):\n return self._name\n\n\ndef construct_tree(label_hier, ranked_inds):\n ind2node = dict()\n for label in label_hier.get_all_labels():\n hnode = label_hier.get_node_by_name(label)\n tnode = TreeNode(label, hnode.index())\n ind2node[hnode.index()] = tnode\n\n for label in label_hier.get_all_labels():\n hnode = label_hier.get_node_by_name(label)\n tnode = ind2node[hnode.index()]\n hypers = hnode.hypers()\n for hyper in hypers:\n pnode = ind2node[hyper.index()]\n pnode.add_children(tnode)\n tnode.append_parent(pnode)\n\n for r, ind in enumerate(ranked_inds):\n rank = r + 1 # 1 based\n tnode = ind2node[ind]\n tnode.set_rank(rank)\n\n return ind2node\n\n\ndef top_down(tree, label_hier):\n def choose_child(children, parent_rank):\n choice = None\n if len(children) == 1:\n choice = children[0]\n elif len(children) > 1:\n ranked_children = sorted(children, key=lambda c: c.rank())\n r1 = ranked_children[0].rank()\n r2 = ranked_children[1].rank()\n if (r2 - r1) > r1:\n # r1 is confident, and doesn't confuse with r2\n choice = ranked_children[0]\n return choice\n\n # root as default\n root_ind = label_hier.root().index()\n tnode = tree[root_ind]\n while tnode:\n choice = tnode\n tnode = choose_child(tnode.children(), 0)\n return [choice.index(), choice.rank()]\n\n\ndef bottom_up(tree, label_hier, top2_raw, thr):\n node1 = label_hier.get_node_by_index(top2_raw[0][0])\n node2 = label_hier.get_node_by_index(top2_raw[1][0])\n n1_path = node1.trans_hyper_inds()\n n2_path = node2.trans_hyper_inds()\n min_plength = min(len(n1_path), len(n2_path))\n common_path = set(n1_path) & set(n2_path)\n if len(common_path) * 1.0 / min_plength >= thr:\n pred_ind = max(common_path)\n return [pred_ind, tree[pred_ind].rank()]\n else:\n return top2_raw[0]\n\n\ndef my_infer(label_hier, scores, target):\n obj_thr = {'b_u': 0.75,\n 't_d': 0.5,\n 'min_dis': label_hier.label_sum() / 7,\n 'half': label_hier.label_sum() / 3}\n pre_thr = {'b_u': 0.6,\n 't_d': 0.4,\n 'min_dis': 3,\n 'half': 10}\n thr = {'obj': obj_thr,\n 'pre': pre_thr}\n\n threshold = thr[target]\n\n # label_ind 2 rank\n ranked_inds = np.argsort(scores).tolist()\n ranked_inds.reverse() # descending\n\n # top2 raw label as default predictions\n raw_top2 = []\n for r, ind in enumerate(ranked_inds):\n node = label_hier.get_node_by_index(ind)\n if node.is_raw() and len(raw_top2) < 2:\n raw_top2.append([ind, r+1])\n\n # confident part\n half_rank = 
threshold['half']\n if raw_top2[0][1] < half_rank and (raw_top2[1][1] - raw_top2[0][1]) > threshold['min_dis']:\n cands = raw_top2\n elif raw_top2[0][1] < half_rank and (raw_top2[1][1] - raw_top2[0][1]) <= threshold['min_dis']:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [bottom_up(ind2node, label_hier, raw_top2, threshold['b_u']), raw_top2[0]]\n elif raw_top2[0][1] >= half_rank and (raw_top2[1][1] - raw_top2[0][1]) <= threshold['min_dis']:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [bottom_up(ind2node, label_hier, raw_top2, threshold['t_d']), raw_top2[0]]\n if cands[0][0] == raw_top2[0][0]:\n cands = [top_down(ind2node, label_hier), raw_top2[0]]\n else:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [top_down(ind2node, label_hier), raw_top2[0]]\n return cands\n\n"
] | [
[
"numpy.arange",
"numpy.argsort",
"numpy.cos"
]
] |
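The `my_infer` routine above softens top-1 errors by backing off to a shared ancestor in the label hierarchy when the top two raw labels are ranked too closely. A toy sketch of that bottom-up back-off using root-to-leaf name paths instead of the repo's index-based `trans_hyper_inds` (the hierarchy and threshold here are invented for illustration):

```python
def deepest_common_ancestor(path_a, path_b, overlap_thr=0.5):
    """Back off to the deepest shared hypernym if the two candidate labels
    share enough of their root-to-leaf paths; otherwise keep the top label."""
    common = set(path_a) & set(path_b)
    min_len = min(len(path_a), len(path_b))
    if len(common) / min_len >= overlap_thr:
        # Paths are ordered root -> leaf, so the last shared node is deepest.
        return max(common, key=path_a.index)
    return path_a[-1]

# Toy hierarchy: entity -> animal -> {dog, cat}
path_dog = ["entity", "animal", "dog"]
path_cat = ["entity", "animal", "cat"]
print(deepest_common_ancestor(path_dog, path_cat))   # -> "animal"
```

The original makes the same decision on node indices (which are topologically ordered), and only takes this branch when the rank gap between the two raw candidates is below its `min_dis` threshold.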
honpui/RFCN | [
"c3e24ea9a143e6ba31698dc6031f6681517eaaff"
] | [
"main.py"
] | [
"\"\"\"\nRFCN\n\"\"\"\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision\nimport torch.nn.functional as functional\n\nfrom dataset import SBDClassSeg, MyTestData\nfrom transform import Colorize\nfrom criterion import CrossEntropyLoss2d\nfrom model import RFCN, FCN8s\nfrom myfunc import imsave, tensor2image\nimport MR\n\nimport visdom\nimport numpy as np\nimport argparse\nimport os\nimport gc\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--phase', type=str, default='train', help='train or test')\nparser.add_argument('--param', type=str, default=None, help='path to pre-trained parameters')\nparser.add_argument('--data', type=str, default='./train', help='path to input data')\nparser.add_argument('--out', type=str, default='./out', help='path to output data')\nopt = parser.parse_args()\n\nopt.phase = 'train'\nopt.data = '/media/xyz/Files/data/datasets'\nopt.out = '/media/xyz/Files/data/models/torch/RFCN_pretrain'\nopt.param = '/media/xyz/Files/data/models/torch/RFCN_pretrain/RFCN-epoch-4-step-11354.pth'\n\nprint(opt)\n\nvis = visdom.Visdom()\nwin0 = vis.image(torch.zeros(3, 100, 100))\nwin1 = vis.image(torch.zeros(3, 100, 100))\nwin2 = vis.image(torch.zeros(3, 100, 100))\nwin22 = vis.image(torch.zeros(3, 100, 100))\nwin3 = vis.image(torch.zeros(3, 100, 100))\ncolor_transform = Colorize()\n\"\"\"parameters\"\"\"\niterNum = 30\n\n\"\"\"data loader\"\"\"\n# dataRoot = '/media/xyz/Files/data/datasets'\n# checkRoot = '/media/xyz/Files/fcn8s-deconv'\ndataRoot = opt.data\nif not os.path.exists(opt.out):\n os.mkdir(opt.out)\nif opt.phase == 'train':\n checkRoot = opt.out\n loader = torch.utils.data.DataLoader(\n SBDClassSeg(dataRoot, split='train', transform=True),\n batch_size=1, shuffle=True, num_workers=4, pin_memory=True)\nelse:\n outputRoot = opt.out\n loader = torch.utils.data.DataLoader(\n MyTestData(dataRoot, transform=True),\n batch_size=1, shuffle=True, num_workers=4, pin_memory=True)\n\n\"\"\"nets\"\"\"\nmodel = RFCN()\nif opt.param is None:\n vgg16 = torchvision.models.vgg16(pretrained=True)\n model.copy_params_from_vgg16(vgg16, copy_fc8=False, init_upscore=True)\nelse:\n model.load_state_dict(torch.load(opt.param))\n\ncriterion = CrossEntropyLoss2d()\noptimizer = torch.optim.Adam(model.parameters(), 0.0001, betas=(0.5, 0.999))\n\nmodel = model.cuda()\n\nmr_sal = MR.MR_saliency()\nif opt.phase == 'train':\n \"\"\"train\"\"\"\n for it in range(iterNum):\n epoch_loss = []\n for ib, data in enumerate(loader):\n # prior map\n _img = tensor2image(data[0][0])\n pmap = mr_sal.saliency(_img).astype(float) / 255.0\n pmap = 1.0 - pmap\n pmap = torch.unsqueeze(torch.FloatTensor(pmap), 0)\n pmap = torch.unsqueeze(pmap, 0)\n pmap = Variable(pmap).cuda()\n img = Variable(data[0]).cuda()\n\n # segmentation gt and bg&fg gt\n targets_S = Variable(data[1]).cuda()\n targets_G = torch.LongTensor(1, targets_S.size()[-2], targets_S.size()[-1]).fill_(0)\n targets_G[0][data[1] == 0] == 1\n targets_G = Variable(targets_G).cuda()\n\n model.zero_grad()\n loss = 0\n for ir in range(3):\n outputs = model(torch.cat((img, pmap.detach()), 1)) # detach or not?\n loss_S = criterion(outputs[:, :21, :, :], targets_S)\n loss_G = criterion(outputs[:, -2:, :, :], targets_G)\n _loss = loss_G + loss_S\n _loss.backward()\n loss += _loss.data[0]\n\n # update prior map\n del pmap\n gc.collect()\n pmap = functional.sigmoid(outputs[:, -1, :, :])\n pmap = torch.unsqueeze(pmap, 0)\n\n # visulize\n image = img[0].data.cpu()\n image[0] = image[0] + 122.67891434\n 
image[1] = image[1] + 116.66876762\n image[2] = image[2] + 104.00698793\n title = 'input (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(image, win=win1, env='fcn', opts=dict(title=title))\n title = 'output_c (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(color_transform(outputs[0, :21].cpu().max(0)[1].data),\n win=win2, env='fcn', opts=dict(title=title))\n title = 'output_l (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n bb = functional.sigmoid(outputs[0, -1:].cpu().data)\n vis.image(bb.repeat(3, 1, 1),\n win=win22, env='fcn', opts=dict(title=title))\n title = 'target (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(color_transform(targets_S.cpu().data),\n win=win3, env='fcn', opts=dict(title=title))\n\n del outputs\n gc.collect()\n\n # update the net\n optimizer.step()\n\n # show loss plot in this batch\n epoch_loss.append(loss)\n average = sum(epoch_loss) / len(epoch_loss)\n print('loss: %.4f (epoch: %d, step: %d)' % (loss, it, ib))\n epoch_loss.append(average)\n x = np.arange(1, len(epoch_loss) + 1, 1)\n title = 'loss'\n vis.line(np.array(epoch_loss), x, env='fcn', win=win0,\n opts=dict(title=title))\n\n del img, targets_S, targets_G\n gc.collect()\n\n # save parameters in each iteration\n filename = ('%s/RFCN-epoch-%d-step-%d.pth' \\\n % (checkRoot, it, ib))\n torch.save(model.state_dict(), filename)\n print('save: (epoch: %d, step: %d)' % (it, ib))\nelse:\n for ib, data in enumerate(loader):\n print('testing batch %d' % ib)\n inputs = Variable(data[0]).cuda()\n outputs = model(inputs)\n hhh = color_transform(outputs[0].cpu().max(0)[1].data)\n imsave(os.path.join(outputRoot, data[1][0] + '.png'), hhh)\n"
] | [
[
"torch.unsqueeze",
"torch.FloatTensor",
"torch.nn.functional.sigmoid",
"torch.load",
"torch.autograd.Variable",
"torch.zeros",
"numpy.array"
]
] |
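The RFCN training script above is recurrent: each pass concatenates the image with a prior saliency map, predicts 21 segmentation channels plus a background/foreground pair, and feeds the sigmoid of the foreground channel back in as the next prior. A shape-level sketch of that loop with a dummy one-layer network standing in for the real model (channel counts follow the script; everything else is assumed):

```python
import torch
import torch.nn as nn

class DummyRFCN(nn.Module):
    """Stand-in network: 4 input channels (RGB + prior map), 23 output
    channels (21 semantic classes + background/foreground pair)."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(4, 23, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

model = DummyRFCN()
img = torch.randn(1, 3, 64, 64)
prior = torch.zeros(1, 1, 64, 64)           # initial prior map

for step in range(3):                       # recurrent refinement passes
    out = model(torch.cat((img, prior.detach()), dim=1))
    seg_logits = out[:, :21]                # semantic segmentation logits
    fg_logits = out[:, -1:]                 # foreground channel
    prior = torch.sigmoid(fg_logits)        # becomes next step's prior
    print(step, seg_logits.shape, prior.shape)
```

Detaching the prior before concatenation (as the original does) keeps gradients from flowing through earlier refinement passes, so each pass is trained against the current prior rather than through the whole unrolled chain.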
ricklupton/cued_datalogger | [
"dde38d04819782922e757f1eed8e5eb44cbe4f84"
] | [
"cued_datalogger/analysis/sonogram.py"
] | [
"import sys,traceback\n\nfrom cued_datalogger.api.numpy_extensions import to_dB\nfrom cued_datalogger.api.pyqt_extensions import BaseNControl, MatplotlibCanvas\nfrom cued_datalogger.api.pyqtgraph_extensions import ColorMapPlotWidget\nfrom cued_datalogger.api.toolbox import Toolbox\n\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QSlider, QPushButton, QLabel, QSpinBox, QHBoxLayout, QGridLayout\n\nimport numpy as np\n\nimport scipy.signal\n\n\nclass MatplotlibSonogramContourWidget(MatplotlibCanvas):\n \"\"\"A MatplotlibCanvas widget displaying the Sonogram contour plot.\"\"\"\n\n def __init__(self, sonogram_toolbox=None,\n channel=None,\n contour_spacing_dB=None,\n num_contours=None):\n self.sonogram_toolbox = sonogram_toolbox\n self.channel = channel\n self.contour_spacing_dB = contour_spacing_dB\n self.num_contours = num_contours\n\n #self.sonogram_toolbox.num_contours_slider.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.num_contours_spinbox.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.contour_spacing_slider.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.contour_spacing_spinbox.valueChanged.connect(self.update_plot)\n\n MatplotlibCanvas.__init__(self, \"Sonogram: Contour Plot\")\n\n self.update_plot()\n\n def update_plot(self):\n \"\"\"Redraw the sonogram on the canvas.\"\"\"\n if self.channel is not None:\n self.F_bins, self.T_bins = np.meshgrid(self.channel.data(\"sonogram_frequency\"),\n self.channel.data(\"sonogram_time\"))\n\n self.axes.clear()\n\n self.update_contour_sequence()\n\n self.axes.contour(self.F_bins, self.T_bins,\n to_dB(np.abs(self.channel.data(\"sonogram\"))),\n self.contour_sequence)\n\n self.axes.set_xlabel('Freq (Hz)')\n self.axes.set_ylabel('Time (s)')\n\n self.axes.set_xlim(self.channel.data(\"sonogram_frequency\").min(),\n self.channel.data(\"sonogram_frequency\").max())\n self.axes.set_ylim(self.channel.data(\"sonogram_time\").min(),\n self.channel.data(\"sonogram_time\").max())\n\n self.draw()\n\n def update_contour_sequence(self):\n \"\"\"Update the array which says where to plot contours, how many etc.\"\"\"\n if self.channel is not None:\n # Create a vector with the right spacing from min to max value\n self.contour_sequence = np.arange(to_dB(np.abs(self.channel.data(\"sonogram\"))).min(),\n to_dB(np.abs(self.channel.data(\"sonogram\"))).max(),\n self.contour_spacing_dB)\n # Take the appropriate number of contours\n self.contour_sequence = self.contour_sequence[-self.num_contours:]\n\n def update_contour_spacing(self, value):\n \"\"\"Slot for updating the plot when the contour spacing is changed.\"\"\"\n self.contour_spacing_dB = value\n self.update_plot()\n\n def update_num_contours(self, value):\n \"\"\"Slot for updating the plot when the number of contours is changed.\"\"\"\n self.num_contours = value\n self.update_plot()\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted.\"\"\"\n # If no channel list is given\n if not selected_channels:\n self.channel = None\n else:\n self.channel = selected_channels[0]\n self.update_plot()\n\n\nclass SonogramDisplayWidget(ColorMapPlotWidget):\n \"\"\"\n The SonogramDisplayWidget is the main display widget for everything in\n the sonogram domain.\n \"\"\"\n def __init__(self, parent=None,\n window_width=256,\n window_overlap_fraction=8,\n contour_spacing_dB=5,\n num_contours=5):\n\n super().__init__(parent)\n self.parent = parent\n\n self.channels = []\n\n 
self.window_width = window_width\n self.window_overlap_fraction = window_overlap_fraction\n self.contour_spacing_dB = contour_spacing_dB\n self.num_contours = num_contours\n\n self.PlotWidget.setLabel('bottom', \"Frequency\", \"Hz\")\n self.PlotWidget.setLabel('left', \"Time\", \"s\")\n\n self.show()\n\n def update_window_width(self, value):\n \"\"\"Slot for updating the plot when the window width is changed.\"\"\"\n self.window_width = value\n self.update_plot()\n\n def update_window_overlap_fraction(self, value):\n \"\"\"Slot for updating the plot when the window overlap fraction is changed.\"\"\"\n self.window_overlap_fraction = value\n self.update_plot()\n\n def update_contour_spacing(self, value):\n \"\"\"Slot for updating the plot when the contour spacing is changed.\"\"\"\n self.contour_spacing_dB = value\n self.update_plot()\n\n def update_num_contours(self, value):\n \"\"\"Slot for updating the plot when the number of contours is changed.\"\"\"\n self.num_contours = value\n self.update_plot()\n\n def calculate_sonogram(self):\n \"\"\"Calculate the sonogram, and store the values in the channel\n (including autogenerated datasets). Sonogram data is in complex form.\"\"\"\n for channel in self.channels:\n if channel.is_dataset(\"time_series\"):\n (frequencies,\n times,\n spectrum) = scipy.signal.spectrogram(channel.data(\"time_series\"),\n channel.metadata(\"sample_rate\"),\n window=scipy.signal.get_window('hann', self.window_width),\n nperseg=self.window_width,\n noverlap=self.window_width // self.window_overlap_fraction,\n return_onesided=False,\n mode = 'complex')\n # SciPy's spectrogram gives the FT transposed, so we need to transpose it back\n spectrum = spectrum.transpose()\n # Scipy calculates all the conjugate spectra/frequencies as well -\n # we only want the positive ones\n frequencies = np.abs(frequencies[:frequencies.size // 2 + 1])\n spectrum = spectrum[:, :spectrum.shape[1] // 2 + 1]\n\n channel.add_dataset(\"sonogram_frequency\", data=frequencies, units=\"Hz\")\n channel.add_dataset(\"sonogram_omega\", data=frequencies*2*np.pi, units=\"rad\")\n channel.add_dataset(\"sonogram_time\", data=times, units=\"s\")\n\n channel.add_dataset(\"sonogram\", data=spectrum, units=None)\n channel.add_dataset(\"sonogram_phase\", data=np.angle(spectrum), units='rad')\n channel.add_dataset(\"sonogram_step\", data=self.window_width // self.window_overlap_fraction, units=None)\n\n def update_plot(self):\n \"\"\"Clear the canvas and replot.\"\"\"\n self.clear()\n if self.channels is not None:\n for channel in self.channels:\n if not channel.is_dataset(\"sonogram\"):\n self.calculate_sonogram()\n self.plot_colormap(channel.data(\"sonogram_frequency\"),\n channel.data(\"sonogram_time\"),\n to_dB(np.abs(channel.data(\"sonogram\"))),\n num_contours=self.num_contours,\n contour_spacing_dB=self.contour_spacing_dB)\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted.\"\"\"\n self.channels = []\n\n if selected_channels:\n self.channels = selected_channels\n\n self.update_plot()\n\n\nclass SonogramToolbox(Toolbox):\n \"\"\"Toolbox containing Sonogram controls.\"\"\"\n\n sig_window_width_changed = pyqtSignal(int)\n sig_window_overlap_fraction_changed = pyqtSignal(int)\n sig_num_contours_changed = pyqtSignal(int)\n sig_contour_spacing_changed = pyqtSignal(int)\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.parent = parent\n\n self.window_width = 256\n self.window_overlap_fraction = 8\n self.num_contours = 5\n 
self.contour_spacing_dB = 5\n\n self.init_ui()\n\n\n def init_ui(self):\n #------------Window width controls------------\n self.window_width_label = QLabel(self)\n self.window_width_label.setText(\"Window width\")\n # Create control\n self.window_width_control = BaseNControl(Qt.Vertical, self)\n self.window_width_control.set_power_range(0, 10)\n self.window_width_control.set_value(self.window_width)\n self.window_width_control.valueChanged.connect(self.sig_window_width_changed)\n\n #------------Window increment controls------------\n self.window_overlap_fraction_label = QLabel(self)\n self.window_overlap_fraction_label.setText(\"Window overlap fraction\")\n # Create control\n self.window_overlap_fraction_control = BaseNControl(Qt.Vertical, self)\n self.window_overlap_fraction_control.set_power_range(0, 6)\n self.window_overlap_fraction_control.set_value(self.window_overlap_fraction)\n self.window_overlap_fraction_control.valueChanged.connect(self.sig_window_overlap_fraction_changed.emit)\n\n #------------Contour spacing controls------------\n self.contour_spacing_label = QLabel(self)\n self.contour_spacing_label.setText(\"Contour spacing\")\n # Create spinbox\n self.contour_spacing_spinbox = QSpinBox(self)\n self.contour_spacing_spinbox.setObjectName(\"contour_spacing_spinbox\")\n self.contour_spacing_spinbox.setRange(1, 12)\n # Create slider\n self.contour_spacing_slider = QSlider(Qt.Vertical, self)\n self.contour_spacing_slider.setObjectName(\"contour_spacing_slider\")\n self.contour_spacing_slider.setRange(1, 12)\n # Connect spinbox and slider together\n self.contour_spacing_spinbox.valueChanged.connect(self.contour_spacing_slider.setValue)\n self.contour_spacing_slider.valueChanged.connect(self.contour_spacing_spinbox.setValue)\n # Set values\n self.contour_spacing_spinbox.setValue(self.contour_spacing_dB)\n self.contour_spacing_slider.setValue(self.contour_spacing_dB)\n # Update screen on change\n self.contour_spacing_slider.valueChanged.connect(self.sig_contour_spacing_changed.emit)\n self.contour_spacing_spinbox.valueChanged.connect(self.sig_contour_spacing_changed.emit)\n\n #------------Num contours controls------------\n self.num_contours_label = QLabel(self)\n self.num_contours_label.setText(\"Num contours\")\n # Create spinbox\n self.num_contours_spinbox = QSpinBox(self)\n self.num_contours_spinbox.setObjectName(\"num_contours_spinbox\")\n self.num_contours_spinbox.setRange(1, 12)\n # Create slider\n self.num_contours_slider = QSlider(Qt.Vertical, self)\n self.num_contours_slider.setObjectName(\"num_contours_slider\")\n self.num_contours_slider.setRange(1, 12)\n # Connect spinbox and slider together\n self.num_contours_spinbox.valueChanged.connect(self.num_contours_slider.setValue)\n self.num_contours_slider.valueChanged.connect(self.num_contours_spinbox.setValue)\n # Set values\n self.num_contours_spinbox.setValue(self.num_contours)\n self.num_contours_slider.setValue(self.num_contours)\n # Update screen on change\n self.num_contours_slider.valueChanged.connect(self.sig_num_contours_changed.emit)\n self.num_contours_spinbox.valueChanged.connect(self.sig_num_contours_changed.emit)\n\n #------------Matplotlib window controls---------\n # Create button\n self.convert_to_contour_btn = QPushButton(\"Show as contour plot\", self)\n self.convert_to_contour_btn.resize(self.convert_to_contour_btn.sizeHint())\n self.convert_to_contour_btn.clicked.connect(self.open_contour_plot)\n\n #------------Layout------------\n # Sonogram controls:\n self.sonogram_controls_tab = QWidget(self)\n\n 
sonogram_controls_layout = QGridLayout()\n sonogram_controls_layout.addWidget(self.window_width_label, 0, 0)\n sonogram_controls_layout.addWidget(self.window_width_control, 1, 0)\n sonogram_controls_layout.addWidget(self.window_overlap_fraction_label, 0, 1)\n sonogram_controls_layout.addWidget(self.window_overlap_fraction_control, 1, 1)\n\n self.sonogram_controls_tab.setLayout(sonogram_controls_layout)\n\n # Plot controls:\n self.plot_controls_tab = QWidget(self)\n\n plot_controls_layout = QGridLayout()\n plot_controls_layout.addWidget(self.contour_spacing_label, 1, 0)\n plot_controls_layout.addWidget(self.contour_spacing_spinbox, 2, 0)\n plot_controls_layout.addWidget(self.contour_spacing_slider, 3, 0)\n plot_controls_layout.addWidget(self.num_contours_label, 1, 1)\n plot_controls_layout.addWidget(self.num_contours_spinbox, 2, 1)\n plot_controls_layout.addWidget(self.num_contours_slider, 3, 1)\n\n self.plot_controls_tab.setLayout(plot_controls_layout)\n\n # Export:\n self.export_tab = QWidget(self)\n\n export_layout = QGridLayout()\n export_layout.addWidget(self.convert_to_contour_btn, 0, 0)\n export_layout.setRowStretch(1,1)\n\n self.export_tab.setLayout(export_layout)\n\n #-------------Add tabs-----------------\n self.addTab(self.plot_controls_tab, \"Plot Controls\")\n self.addTab(self.sonogram_controls_tab, \"Sonogram Controls\")\n self.addTab(self.export_tab, \"Export\")\n\n def open_contour_plot(self):\n if hasattr(self, 'contour_plot'):\n self.contour_plot.close()\n delattr(self, 'contour_plot')\n else:\n self.contour_plot = MatplotlibSonogramContourWidget(channel=self.channel,\n contour_spacing_dB=self.contour_spacing_dB,\n num_contours=self.num_contours)\n self.sig_contour_spacing_changed.connect(self.contour_plot.update_contour_spacing)\n self.sig_num_contours_changed.connect(self.contour_plot.update_num_contours)\n self.contour_plot.show()\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted\"\"\"\n # If no channel list is given\n if not selected_channels:\n self.channel = None\n else:\n self.channel = selected_channels[0]\n print(\"Sonogram channel:\" + self.channel.name)\n\n\n if hasattr(self, 'contour_plot'):\n self.contour_plot.set_selected_channels(selected_channels)\n\ndef func_1(t, w, x, A=4e3):\n \"\"\"A simple decaying sine wave function.\"\"\"\n return A * np.exp((1j*w - x)*t)\n\n\ndef function_generator(t):\n \"\"\"A simple function generator with echoes.\"\"\"\n f1 = func_1(t, 2000*2*np.pi, 2)\n f2 = func_1(t, 500*2*np.pi, 1)\n # Create an echo of one of the functions\n f1[f1.size//2:] += f1[:f1.size//2]\n result = f1 + f2\n return result\n\n\nif __name__ == '__main__':\n duration = 10.0\n t = np.arange(0.0, duration, 1/4096)\n sig = function_generator(t)\n\n app = 0\n\n app = QApplication(sys.argv)\n\n w = QWidget()\n\n hbox = QHBoxLayout()\n w.setLayout(hbox)\n\n toolbox = SonogramToolbox(w)\n displaywidget = SonogramDisplayWidget()\n\n from cued_datalogger.api.channel import Channel\n displaywidget.channel = Channel()\n displaywidget.channel.add_dataset(\"time_series\", data=sig)\n displaywidget.channel.add_dataset(\"time\", data=t)\n\n displaywidget.update_plot()\n\n hbox.addWidget(toolbox)\n hbox.addWidget(displaywidget)\n\n toolbox.contour_spacing_slider.valueChanged.connect(displaywidget.update_contour_spacing)\n toolbox.contour_spacing_spinbox.valueChanged.connect(displaywidget.update_contour_spacing)\n\n toolbox.num_contours_slider.valueChanged.connect(displaywidget.update_num_contours)\n 
toolbox.num_contours_spinbox.valueChanged.connect(displaywidget.update_num_contours)\n\n toolbox.window_overlap_fraction_control.valueChanged.connect(displaywidget.update_window_overlap_fraction)\n\n toolbox.window_width_control.valueChanged.connect(displaywidget.update_window_width)\n\n w.show()\n\n sys.exit(app.exec_())\n\n"
] | [
[
"numpy.arange",
"numpy.abs",
"numpy.angle",
"numpy.exp"
]
] |
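`SonogramDisplayWidget.calculate_sonogram` above leans on `scipy.signal.spectrogram` in complex mode, transposes the result, and keeps only the positive-frequency half before contouring the magnitude in dB. The same calculation stripped of the Qt plumbing, with a synthetic decaying tone as input (sample rate and window settings mirror the widget's defaults):

```python
import numpy as np
import scipy.signal

fs = 4096                                   # sample rate in Hz (assumed)
t = np.arange(0.0, 2.0, 1 / fs)
sig = np.exp((1j * 2 * np.pi * 500 - 1.0) * t).real   # decaying 500 Hz tone

window_width = 256
overlap_fraction = 8

freqs, times, spectrum = scipy.signal.spectrogram(
    sig, fs,
    window=scipy.signal.get_window("hann", window_width),
    nperseg=window_width,
    noverlap=window_width // overlap_fraction,
    return_onesided=False,
    mode="complex",
)

spectrum = spectrum.transpose()                       # (time, frequency)
freqs = np.abs(freqs[: freqs.size // 2 + 1])          # keep positive half
spectrum = spectrum[:, : spectrum.shape[1] // 2 + 1]

magnitude_dB = 20 * np.log10(np.abs(spectrum) + 1e-12)
print(freqs.shape, times.shape, magnitude_dB.shape)
```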
zhaojing1995/One-shot_ReID | [
"a109a1aee5ad1036b20ba0779af565c09506469a"
] | [
"tools.py"
] | [
"import numpy as np\n\n\n\nif __name__==\"__main__\":\n b = np.load(\"logs/l_feas/test1.npy\")\n print(b)"
] | [
[
"numpy.load"
]
] |
RyanJDick/halite_rl | [
"e6309a24d3d613171ceb6522ddf07fece3815e62"
] | [
"halite_rl/ppo/sample.py"
] | [
"import numpy as np\n\nimport torch\n\nfrom halite_rl.utils import SubProcessWrapper\n\n\nclass EpisodeData():\n def __init__(self):\n self.observations = [] # Observations (states).\n self.actions = [] # Selected actions.\n self.act_log_probs = [] # Log probability of selected action.\n self.value_preds = [] # Value predictions given observation (from critic network).\n self.rewards = [] # Rewards obtained in each step.\n self.step_info = [] # Additional details about the step for logging purposes.\n\ndef sample_batch(models, env_constructor, device, config):\n \"\"\"Sample a batch of environment rollouts.\n\n Parameters:\n -----------\n models : dict[str: nn.Module]\n Dict mapping player_ids to actor-critic NN models.\n config : dict\n Config settings.\n\n Returns:\n --------\n TODO\n\n \"\"\"\n\n # Initialize envs.\n envs = [SubProcessWrapper(env_constructor) for _ in range(config[\"SAMPLE_PARALLEL_ENVS\"])]\n\n player_ids = list(models.keys())\n\n # EpisodeData for in-progress episodes.\n # ep_datas[i][p_id] references the EpisodeData for player p_id in the i'th env.\n ep_datas = [{p_id: None for p_id in player_ids} for _ in envs]\n\n # actions[i][p_id] references the action for player p_id in the i'th env.\n actions = [{p_id: None for p_id in player_ids} for _ in envs]\n\n num_steps = {p_id: 0 for p_id in player_ids}\n\n # final_ep_datas[p_id][i] references the EpisodeData for the i'th episode collected for player p_id.\n final_ep_datas = {p_id: [] for p_id in player_ids}\n\n # While at least one player is below SAMPLE_MIN_NUM_STEPS.\n while np.any(np.array([n for n in num_steps.values()]) < config[\"SAMPLE_MIN_NUM_STEPS\"]):\n # 1. Step all envs asynchronously.\n\n # Keep a record of which envs were 'reset' and which were 'stepped' so that we\n # know what return values to expect when we receive the results asynchronously.\n env_was_reset = []\n for i_env, env in enumerate(envs):\n if not env.call_sync(\"is_in_progress\"):\n env_was_reset.append(True)\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n # If this is not the very first iteration, then save the episode.\n if ep_data is not None:\n # Drop the last observation, as we never acted on it.\n ep_data.observations = ep_data.observations[:len(ep_data.rewards)]\n final_ep_datas[p_id].append(ep_data)\n num_steps[p_id] += len(ep_data.rewards)\n ep_datas[i_env] = {p_id: EpisodeData() for p_id in player_ids}\n env.call_async(\"reset\")\n else:\n env_was_reset.append(False)\n actions = {p_id: ep_datas[i_env][p_id].actions[-1] for p_id in player_ids}\n env.call_async(\"step\", actions)\n\n # 2. Receive results from async env steps.\n\n for i_env, env in enumerate(envs):\n if env_was_reset[i_env]:\n obs = env.get_result()\n for p_id in player_ids:\n ep_datas[i_env][p_id].observations.append(obs[p_id])\n else:\n obs, rewards, dones, step_infos = env.get_result()\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n ep_data.observations.append(obs[p_id])\n ep_data.rewards.append(rewards[p_id])\n # step_infos entry should already exist for this step.\n ep_data.step_info[-1].update(step_infos[p_id])\n\n # 3. 
Sample actions.\n\n player_id_to_state_batch = {p_id: [] for p_id in player_ids}\n for i_env, env in enumerate(envs):\n for p_id in player_ids:\n player_id_to_state_batch[p_id].append(ep_datas[i_env][p_id].observations[-1])\n\n for p_id in player_ids:\n model = models[p_id]\n with torch.no_grad():\n state_batch = np.array(player_id_to_state_batch[p_id])\n state_batch = torch.Tensor(state_batch)\n state_batch = state_batch.to(device)\n ship_act_logits, shipyard_act_logits, value_preds = model(state_batch)\n\n ship_action_dist, shipyard_action_dist = model.get_action_distribution(\n ship_act_logits, shipyard_act_logits, state_batch)\n\n ship_action = ship_action_dist.sample()\n shipyard_action = shipyard_action_dist.sample()\n ship_act_entropy = ship_action_dist.entropy()\n shipyard_act_entropy = shipyard_action_dist.entropy()\n\n action_log_prob = model.action_log_prob(\n ship_action_dist,\n shipyard_action_dist,\n ship_action,\n shipyard_action,\n )\n\n ship_action = ship_action.cpu().detach().numpy()\n shipyard_action = shipyard_action.cpu().detach().numpy()\n action_log_prob = action_log_prob.cpu().detach().numpy()\n value_preds = value_preds.cpu().detach().numpy()\n ship_act_entropy = ship_act_entropy.cpu().detach().numpy()\n shipyard_act_entropy = shipyard_act_entropy.cpu().detach().numpy()\n\n for i_env, env in enumerate(envs):\n if env.call_sync(\"is_in_progress\"):\n ep_data = ep_datas[i_env][p_id]\n ep_data.actions.append((\n ship_action[i_env, ...],\n shipyard_action[i_env, ...],\n ))\n ep_data.act_log_probs.append(action_log_prob[i_env])\n ep_data.value_preds.append(value_preds[i_env])\n # Create step_info entry with info for step that hasn't happend (in env) yet.\n ep_data.step_info.append(\n {\n \"ship_action_dist_entropy\": ship_act_entropy[i_env],\n \"shipyard_action_dist_entropy\": shipyard_act_entropy[i_env],\n }\n )\n\n # Close all envs\n for e in envs:\n e.close()\n\n return final_ep_datas\n"
] | [
[
"numpy.array",
"torch.no_grad",
"torch.Tensor"
]
] |
sethaxen/arviz | [
"422c00b3cc24f3983bea283396bff0195374dcc3"
] | [
"arviz/plots/compareplot.py"
] | [
"\"\"\"Summary plot for model comparison.\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom .plot_utils import _scale_fig_size\r\n\r\n\r\ndef plot_compare(\r\n comp_df,\r\n insample_dev=True,\r\n plot_standard_error=True,\r\n plot_ic_diff=True,\r\n order_by_rank=True,\r\n figsize=None,\r\n textsize=None,\r\n plot_kwargs=None,\r\n ax=None,\r\n):\r\n \"\"\"\r\n Summary plot for model comparison.\r\n\r\n This plot is in the style of the one used in the book Statistical Rethinking (Chapter 6)\r\n by Richard McElreath.\r\n\r\n Notes\r\n -----\r\n Defaults to comparing Widely Accepted Information Criterion (WAIC) if present in comp_df column,\r\n otherwise compares Leave-one-out (loo)\r\n\r\n\r\n Parameters\r\n ----------\r\n comp_df : pd.DataFrame\r\n Result of the `az.compare()` method\r\n insample_dev : bool, optional\r\n Plot in-sample deviance, that is the value of the information criteria without the\r\n penalization given by the effective number of parameters (pIC). Defaults to True\r\n plot_standard_error : bool, optional\r\n Plot the standard error of the information criteria estimate. Defaults to True\r\n plot_ic_diff : bool, optional\r\n Plot standard error of the difference in information criteria between each model\r\n and the top-ranked model. Defaults to True\r\n order_by_rank : bool\r\n If True (default) ensure the best model is used as reference.\r\n figsize : tuple, optional\r\n If None, size is (6, num of models) inches\r\n textsize: float\r\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\r\n on figsize.\r\n plot_kwargs : dict, optional\r\n Optional arguments for plot elements. Currently accepts 'color_ic',\r\n 'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',\r\n 'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'\r\n ax : axes, optional\r\n Matplotlib axes\r\n\r\n Returns\r\n -------\r\n ax : matplotlib axes\r\n\r\n\r\n Examples\r\n --------\r\n Show default compare plot\r\n\r\n .. plot::\r\n :context: close-figs\r\n\r\n >>> import arviz as az\r\n >>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'),\r\n >>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')})\r\n >>> az.plot_compare(model_compare)\r\n\r\n Plot standard error and information criteria difference only\r\n\r\n .. 
plot::\r\n :context: close-figs\r\n\r\n >>> az.plot_compare(model_compare, insample_dev=False)\r\n\r\n \"\"\"\r\n if figsize is None:\r\n figsize = (6, len(comp_df))\r\n\r\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\r\n\r\n if ax is None:\r\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\r\n\r\n if plot_kwargs is None:\r\n plot_kwargs = {}\r\n\r\n yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1, retstep=True)\r\n yticks_pos[1::2] = yticks_pos[1::2] + step / 2\r\n\r\n yticks_labels = [\"\"] * len(yticks_pos)\r\n\r\n _information_criterion = [\"waic\", \"loo\"]\r\n column_index = [c.lower() for c in comp_df.columns]\r\n for information_criterion in _information_criterion:\r\n if information_criterion in column_index:\r\n break\r\n else:\r\n raise ValueError(\r\n \"comp_df must contain one of the following\"\r\n \" information criterion: {}\".format(_information_criterion)\r\n )\r\n\r\n if order_by_rank:\r\n comp_df.sort_values(by=\"rank\", inplace=True)\r\n\r\n if plot_ic_diff:\r\n yticks_labels[0] = comp_df.index[0]\r\n yticks_labels[2::2] = comp_df.index[1:]\r\n ax.set_yticks(yticks_pos)\r\n ax.errorbar(\r\n x=comp_df[information_criterion].iloc[1:],\r\n y=yticks_pos[1::2],\r\n xerr=comp_df.dse[1:],\r\n color=plot_kwargs.get(\"color_dse\", \"grey\"),\r\n fmt=plot_kwargs.get(\"marker_dse\", \"^\"),\r\n mew=linewidth,\r\n elinewidth=linewidth,\r\n )\r\n\r\n else:\r\n yticks_labels = comp_df.index\r\n ax.set_yticks(yticks_pos[::2])\r\n\r\n if plot_standard_error:\r\n ax.errorbar(\r\n x=comp_df[information_criterion],\r\n y=yticks_pos[::2],\r\n xerr=comp_df.se,\r\n color=plot_kwargs.get(\"color_ic\", \"k\"),\r\n fmt=plot_kwargs.get(\"marker_ic\", \"o\"),\r\n mfc=\"None\",\r\n mew=linewidth,\r\n lw=linewidth,\r\n )\r\n else:\r\n ax.plot(\r\n comp_df[information_criterion],\r\n yticks_pos[::2],\r\n color=plot_kwargs.get(\"color_ic\", \"k\"),\r\n marker=plot_kwargs.get(\"marker_ic\", \"o\"),\r\n mfc=\"None\",\r\n mew=linewidth,\r\n lw=0,\r\n )\r\n\r\n if insample_dev:\r\n ax.plot(\r\n comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\r\n yticks_pos[::2],\r\n color=plot_kwargs.get(\"color_insample_dev\", \"k\"),\r\n marker=plot_kwargs.get(\"marker_insample_dev\", \"o\"),\r\n mew=linewidth,\r\n lw=0,\r\n )\r\n\r\n ax.axvline(\r\n comp_df[information_criterion].iloc[0],\r\n ls=plot_kwargs.get(\"ls_min_ic\", \"--\"),\r\n color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\r\n lw=linewidth,\r\n )\r\n\r\n scale_col = information_criterion + \"_scale\"\r\n if scale_col in comp_df:\r\n scale = comp_df[scale_col].iloc[0].capitalize()\r\n else:\r\n scale = \"Deviance\"\r\n ax.set_xlabel(scale, fontsize=ax_labelsize)\r\n ax.set_yticklabels(yticks_labels)\r\n ax.set_ylim(-1 + step, 0 - step)\r\n ax.tick_params(labelsize=xt_labelsize)\r\n\r\n return ax\r\n"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplots"
]
] |
rox38431/EyeJaundice | [
"ee5939d203013cd522fbacdfcb75970bd696c962"
] | [
"interpretability/guided_back_propagation.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 2019/8/4 上午9:45\r\n\r\n@author: mick.yi\r\n\r\n\"\"\"\r\nimport torch\r\nfrom torch import nn\r\nimport numpy as np\r\n\r\n\r\nclass GuidedBackPropagation(object):\r\n\r\n def __init__(self, net):\r\n self.net = net\r\n for (name, module) in self.net.named_modules():\r\n if isinstance(module, nn.ReLU):\r\n module.register_backward_hook(self.backward_hook)\r\n self.net.eval()\r\n\r\n @classmethod\r\n def backward_hook(cls, module, grad_in, grad_out):\r\n \"\"\"\r\n\r\n :param module:\r\n :param grad_in: tuple,长度为1\r\n :param grad_out: tuple,长度为1\r\n :return: tuple(new_grad_in,)\r\n \"\"\"\r\n return torch.clamp(grad_in[0], min=0.0),\r\n\r\n def __call__(self, inputs, index=None):\r\n \"\"\"\r\n\r\n :param inputs: [1,3,H,W]\r\n :param index: class_id\r\n :return:\r\n \"\"\"\r\n self.net.zero_grad()\r\n output = self.net(inputs) # [1,num_classes]\r\n if index is None:\r\n index = np.argmax(output.cpu().data.numpy())\r\n target = output[0][index]\r\n\r\n target.backward()\r\n\r\n return inputs.grad[0] # [3,H,W]\r\n"
] | [
[
"torch.clamp"
]
] |
angiemsu/netharn | [
"728cb40aad299baf62c689430d07b29c67d8cf21",
"728cb40aad299baf62c689430d07b29c67d8cf21"
] | [
"netharn/util/nms/torch_nms.py",
"netharn/util/nms/nms_core.py"
] | [
"import torch\nimport numpy as np\n\n\ndef torch_nms(tlbr, scores, classes=None, thresh=.5, bias=0, fast=False):\n \"\"\"\n Non maximum suppression implemented with pytorch tensors\n\n CURRENTLY NOT WORKING\n\n Args:\n tlbr (Tensor): Bounding boxes of one image in the format (tlbr)\n scores (Tensor): Scores of each box\n classes (Tensor, optional): the classes of each box. If specified nms is applied to each class separately.\n thresh (float): iou threshold\n\n Returns:\n ByteTensor: keep: boolean array indicating which boxes were not pruned.\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import torch\n >>> import numpy as np\n >>> tlbr = torch.FloatTensor(np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> [100, 100, 130, 130],\n >>> [100, 100, 130, 130],\n >>> [100, 100, 130, 130],\n >>> ], dtype=np.float32))\n >>> scores = torch.FloatTensor(np.array([.1, .5, .9, .1, .3, .5, .4]))\n >>> classes = torch.FloatTensor(np.array([0, 0, 0, 0, 0, 0]))\n >>> thresh = .5\n >>> keep = torch_nms(tlbr, scores, classes, thresh)\n >>> bboxes[keep]\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import torch\n >>> import numpy as np\n >>> # Test to check that conflicts are correctly resolved\n >>> tlbr = torch.FloatTensor(np.array([\n >>> [100, 100, 150, 101],\n >>> [120, 100, 180, 101],\n >>> [150, 100, 200, 101],\n >>> ], dtype=np.float32))\n >>> scores = torch.FloatTensor(np.linspace(.8, .9, len(tlbr)))\n >>> classes = None\n >>> thresh = .3\n >>> keep = torch_nms(tlbr, scores, classes, thresh, fast=False)\n >>> bboxes[keep]\n \"\"\"\n if tlbr.numel() == 0:\n return []\n\n # Sort coordinates by descending score\n ordered_scores, order = scores.sort(0, descending=True)\n\n from netharn import util\n boxes = util.Boxes(tlbr[order], 'tlbr')\n ious = boxes.ious(boxes, bias=bias)\n\n # if False:\n # x1, y1, x2, y2 = tlbr[order].split(1, 1)\n\n # # Compute dx and dy between each pair of boxes (these mat contain every pair twice...)\n # dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp_(min=0)\n # dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp_(min=0)\n\n # # Compute iou\n # intersections = dx * dy\n # areas = (x2 - x1) * (y2 - y1)\n # unions = (areas + areas.t()) - intersections\n # ious = intersections / unions\n\n # Filter based on iou (and class)\n conflicting = (ious > thresh).triu(1)\n\n if classes is not None:\n ordered_classes = classes[order]\n same_class = (ordered_classes.unsqueeze(0) == ordered_classes.unsqueeze(1))\n conflicting = (conflicting & same_class)\n # Now we have a 2D matrix where conflicting[i, j] indicates if box[i]\n # conflicts with box[j]. For each box[i] we want to only keep the first\n # one that does not conflict with any other box[j].\n\n # Find out how many conflicts each ordered box has with other boxes that\n # have higher scores than it does. In other words...\n # n_conflicts[i] is the number of conflicts box[i] has with other boxes\n # that have a **higher score** than box[i] does. We will definately\n # keep any box where n_conflicts is 0, but we need to postprocess because\n # we might actually keep some boxes currently marked as conflicted.\n n_conflicts = conflicting.sum(0).byte()\n\n if not fast:\n # It is not enought to simply use all places where there are no\n # conflicts. Say we have boxes A, B, and C, where A conflicts with B,\n # B conflicts with C but A does not conflict with C. The fact that we\n # use A should mean that C is not longer conflicted.\n\n if True:\n # Marginally faster. 
best=618.2 us\n ordered_keep = np.zeros(len(conflicting), dtype=np.uint8)\n supress = np.zeros(len(conflicting), dtype=np.bool)\n for i, row in enumerate(conflicting.cpu().numpy() > 0):\n if not supress[i]:\n ordered_keep[i] = 1\n supress[row] = 1\n ordered_keep = torch.ByteTensor(ordered_keep).to(tlbr.device)\n else:\n # Marginally slower: best=1.382 ms,\n n_conflicts_post = n_conflicts.cpu()\n conflicting = conflicting.cpu()\n\n keep_len = len(n_conflicts_post) - 1\n for i in range(1, keep_len):\n if n_conflicts_post[i] > 0:\n n_conflicts_post -= conflicting[i]\n\n n_conflicts = n_conflicts_post.to(n_conflicts.device)\n ordered_keep = (n_conflicts == 0)\n else:\n # Now we can simply keep any box that has no conflicts.\n ordered_keep = (n_conflicts == 0)\n\n # Unsort, so keep is aligned with input boxes\n keep = ordered_keep.new(*ordered_keep.size())\n keep.scatter_(0, order, ordered_keep)\n return keep\n\n\ndef test_class_torch():\n import numpy as np\n import torch\n import netharn as nh\n import ubelt as ub\n # from netharn.util.nms.torch_nms import torch_nms\n # from netharn.util import non_max_supression\n\n thresh = .5\n\n num = 500\n rng = nh.util.ensure_rng(0)\n cpu_boxes = nh.util.Boxes.random(num, scale=400.0, rng=rng, format='tlbr', tensor=True)\n cpu_tlbr = cpu_boxes.to_tlbr().data\n # cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))\n # make all scores unique to ensure comparability\n cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))\n cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))\n\n tlbr = cpu_boxes.to_tlbr().data.to('cuda')\n scores = cpu_scores.to('cuda')\n classes = cpu_cls.to('cuda')\n\n keep1 = []\n for idxs in ub.group_items(range(len(classes)), classes.cpu().numpy()).values():\n # cls_tlbr = tlbr.take(idxs, axis=0)\n # cls_scores = scores.take(idxs, axis=0)\n cls_tlbr = tlbr[idxs]\n cls_scores = scores[idxs]\n cls_keep = torch_nms(cls_tlbr, cls_scores, thresh=thresh, bias=0)\n keep1.extend(list(ub.compress(idxs, cls_keep.cpu().numpy())))\n keep1 = sorted(keep1)\n\n keep_ = torch_nms(tlbr, scores, classes=classes, thresh=thresh, bias=0)\n keep2 = np.where(keep_.cpu().numpy())[0].tolist()\n\n keep3 = nh.util.non_max_supression(tlbr.cpu().numpy(),\n scores.cpu().numpy(),\n classes=classes.cpu().numpy(),\n thresh=thresh, bias=0, impl='gpu')\n\n print(len(keep1))\n print(len(keep2))\n print(len(keep3))\n\n print(set(keep1) - set(keep2))\n print(set(keep2) - set(keep1))\n\n\ndef _benchmark():\n \"\"\"\n python -m netharn.util.nms.torch_nms _benchmark --show\n\n SeeAlso:\n PJR Darknet NonMax supression\n https://github.com/pjreddie/darknet/blob/master/src/box.c\n\n Lightnet NMS\n https://gitlab.com/EAVISE/lightnet/blob/master/lightnet/data/transform/_postprocess.py#L116\n\n \"\"\"\n import torch\n import numpy as np\n import netharn as nh\n from netharn.util.nms.torch_nms import torch_nms\n from netharn.util import non_max_supression\n import ubelt as ub\n import itertools as it\n\n N = 100\n bestof = 10\n\n ydata = ub.ddict(list)\n # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, 2000]\n\n # max number of boxes yolo will spit out at a time\n max_boxes = 19 * 19 * 5\n\n xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, max_boxes]\n # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500]\n xdata = [10, 100, 500]\n\n rng = nh.util.ensure_rng(0)\n\n thresh = 0.5\n\n for num in xdata:\n print('\\n\\n---- number of boxes = {} ----\\n'.format(num))\n\n outputs = {}\n\n # Build random test boxes and scores\n 
cpu_boxes = nh.util.Boxes.random(num, scale=10.0, rng=rng, format='tlbr', tensor=True)\n cpu_tlbr = cpu_boxes.to_tlbr().data\n # cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))\n # make all scores unique to ensure comparability\n cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))\n cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))\n\n # Format boxes in lightnet format\n cpu_ln_boxes = torch.cat([cpu_boxes.to_cxywh().data, cpu_scores[:, None], cpu_cls.float()[:, None]], dim=-1)\n\n # Move boxes to numpy\n np_tlbr = cpu_tlbr.numpy()\n np_scores = cpu_scores.numpy()\n np_cls = cpu_cls.numpy() # NOQA\n\n gpu = torch.device('cuda', 0)\n\n measure_gpu = torch.cuda.is_available()\n measure_cpu = False or not torch.cuda.is_available()\n\n def _ln_output_to_keep(ln_output, ln_boxes):\n keep = []\n for row in ln_output:\n # Find the index that we kept\n idxs = np.where(np.all(np.isclose(ln_boxes, row), axis=1))[0]\n assert len(idxs) == 1\n keep.append(idxs[0])\n assert np.all(np.isclose(ln_boxes[keep], ln_output))\n return keep\n\n if measure_gpu:\n # Move boxes to the GPU\n gpu_tlbr = cpu_tlbr.to(gpu)\n gpu_scores = cpu_scores.to(gpu)\n gpu_cls = cpu_cls.to(gpu) # NOQA\n gpu_ln_boxes = cpu_ln_boxes.to(gpu)\n\n t1 = ub.Timerit(N, bestof=bestof, label='torch(gpu)')\n for timer in t1:\n with timer:\n keep = torch_nms(gpu_tlbr, gpu_scores, thresh=thresh)\n torch.cuda.synchronize()\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = np.where(keep.cpu().numpy())[0]\n\n t1 = ub.Timerit(N, bestof=bestof, label='cython(gpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='gpu')\n torch.cuda.synchronize()\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n from lightnet.data.transform._postprocess import NonMaxSupression\n t1 = ub.Timerit(N, bestof=bestof, label='lightnet-slow(gpu)')\n for timer in t1:\n with timer:\n ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=False)\n torch.cuda.synchronize()\n # convert lightnet NMS output to keep for consistency\n keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n if False:\n t1 = ub.Timerit(N, bestof=bestof, label='lightnet-fast(gpu)')\n for timer in t1:\n with timer:\n ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=True)\n torch.cuda.synchronize()\n # convert lightnet NMS output to keep for consistency\n keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n if measure_cpu:\n t1 = ub.Timerit(N, bestof=bestof, label='torch(cpu)')\n for timer in t1:\n with timer:\n keep = torch_nms(cpu_tlbr, cpu_scores, thresh=thresh)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = np.where(keep.cpu().numpy())[0]\n\n if True:\n t1 = ub.Timerit(N, bestof=bestof, label='cython(cpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='cpu')\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n t1 = ub.Timerit(N, bestof=bestof, label='numpy(cpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='py')\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n # Check that all kept boxes do not have more than `threshold` ious\n for key, idxs in outputs.items():\n ious = nh.util.box_ious(np_tlbr[idxs], np_tlbr[idxs])\n 
max_iou = (np.tril(ious) - np.eye(len(ious))).max()\n if max_iou > thresh:\n print('{} produced a bad result with max_iou={}'.format(key, max_iou))\n\n # Check result consistency:\n print('\\nResult stats:')\n for key in sorted(outputs.keys()):\n print(' * {:<20}: num={}'.format(key, len(outputs[key])))\n\n print('\\nResult overlaps (method1, method2: jaccard):')\n datas = []\n for k1, k2 in it.combinations(sorted(outputs.keys()), 2):\n idxs1 = set(outputs[k1])\n idxs2 = set(outputs[k2])\n jaccard = len(idxs1 & idxs2) / len(idxs1 | idxs2)\n datas.append((k1, k2, jaccard))\n datas = sorted(datas, key=lambda x: -x[2])\n for k1, k2, jaccard in datas:\n print(' * {:<20}, {:<20}: {:0.4f}'.format(k1, k2, jaccard))\n\n nh.util.mplutil.autompl()\n nh.util.mplutil.multi_plot(xdata, ydata, xlabel='num boxes', ylabel='seconds')\n nh.util.show_if_requested()\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m netharn.util.nms.torch_nms all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n",
"import torch\nimport numpy as np\nimport ubelt as ub\nfrom netharn.util.nms import py_nms\nfrom netharn.util import profiler\nfrom netharn.util.nms import torch_nms\nimport warnings\n\n_impls = {}\n_impls['py'] = py_nms.py_nms\n_impls['torch'] = torch_nms.torch_nms\n_automode = 'py'\ntry:\n from netharn.util.nms import cpu_nms\n _impls['cpu'] = cpu_nms.cpu_nms\n _automode = 'cpu'\nexcept Exception:\n warnings.warn('cpu_nms is not available')\ntry:\n if torch.cuda.is_available():\n from netharn.util.nms import gpu_nms\n _impls['gpu'] = gpu_nms.gpu_nms\n _automode = 'gpu'\nexcept Exception:\n warnings.warn('gpu_nms is not available')\n\n\[email protected]\ndef non_max_supression(tlbr, scores, thresh, bias=0.0, classes=None,\n impl='auto'):\n \"\"\"\n Non-Maximum Suppression\n\n Args:\n tlbr (ndarray): Nx4 boxes in tlbr format\n scores (ndarray): score for each bbox\n thresh (float): iou threshold\n bias (float): bias for iou computation either 0 or 1\n (hint: choosing 1 is wrong computer vision community)\n classes (ndarray or None): integer classes. If specified NMS is done\n on a perclass basis.\n impl (str): implementation can be auto, python, cpu, or gpu\n\n\n CommandLine:\n python ~/code/netharn/netharn/util/nms/nms_core.py nms\n python ~/code/netharn/netharn/util/nms/nms_core.py nms:0\n python ~/code/netharn/netharn/util/nms/nms_core.py nms:1\n\n References:\n https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx\n https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n https://github.com/bharatsingh430/soft-nms/blob/master/lib/nms/cpu_nms.pyx <- TODO\n\n Example:\n >>> dets = np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> ], dtype=np.float32)\n >>> scores = np.array([.1, .5, .9, .1])\n >>> thresh = .5\n >>> keep = non_max_supression(dets, scores, thresh, impl='py')\n >>> print('keep = {!r}'.format(keep))\n keep = [2, 1, 3]\n\n Example:\n >>> import ubelt as ub\n >>> dets = np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> [100, 100, 150, 101],\n >>> [120, 100, 180, 101],\n >>> [150, 100, 200, 101],\n >>> ], dtype=np.float32)\n >>> scores = np.linspace(0, 1, len(dets))\n >>> thresh = .2\n >>> solutions = {}\n >>> for impl in _impls:\n >>> solutions[impl] = sorted(non_max_supression(dets, scores, thresh, impl=impl))\n >>> print('solutions = {}'.format(ub.repr2(solutions, nl=1)))\n >>> assert ub.allsame(solutions.values())\n \"\"\"\n if tlbr.shape[0] == 0:\n return []\n\n if impl == 'auto':\n impl = _automode\n\n if classes is not None:\n keep = []\n for idxs in ub.group_items(range(len(classes)), classes).values():\n # cls_tlbr = tlbr.take(idxs, axis=0)\n # cls_scores = scores.take(idxs, axis=0)\n cls_tlbr = tlbr[idxs]\n cls_scores = scores[idxs]\n cls_keep = non_max_supression(cls_tlbr, cls_scores, thresh=thresh,\n bias=bias, impl=impl)\n keep.extend(list(ub.take(idxs, cls_keep)))\n return keep\n else:\n if impl == 'py':\n keep = py_nms.py_nms(tlbr, scores, thresh, bias=float(bias))\n elif impl == 'torch':\n was_tensor = torch.is_tensor(tlbr)\n if not was_tensor:\n tlbr = torch.Tensor(tlbr)\n scores = torch.Tensor(scores)\n flags = torch_nms.torch_nms(tlbr, scores, thresh=thresh,\n bias=float(bias))\n keep = np.where(flags.cpu().numpy())[0]\n else:\n # TODO: it would be nice to be able to pass torch tensors here\n nms = _impls[impl]\n tlbr = tlbr.astype(np.float32)\n scores = 
scores.astype(np.float32)\n # dets = np.hstack((tlbr, scores[:, None])).astype(np.float32)\n if impl == 'gpu':\n # HACK: we should parameterize which device is used\n device = torch.cuda.current_device()\n keep = nms(tlbr, scores, thresh, bias=float(bias), device_id=device)\n else:\n keep = nms(tlbr, scores, thresh, bias=float(bias))\n return keep\n\n\n# TODO: soft nms\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m netharn.util.nms.nms_core all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n"
] | [
[
"torch.cuda.synchronize",
"numpy.isclose",
"torch.ByteTensor",
"torch.cuda.is_available",
"torch.device",
"numpy.tril"
],
[
"torch.cuda.is_available",
"torch.is_tensor",
"torch.cuda.current_device",
"torch.Tensor"
]
] |
eino/pyvista | [
"b9c4e67d43491958f70b04cd2664965b938910ba"
] | [
"examples/00-load/create-explicit-structured-grid.py"
] | [
"\"\"\"\n.. _ref_create_explicit_structured_grid:\n\nCreating an Explicit Structured Grid\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCreate an explicit structured grid from NumPy arrays.\n\nNote this feature is only available for ``vtk>=9``.\n\n\"\"\"\n\nimport numpy as np\n\nimport pyvista as pv\n\nni, nj, nk = 4, 5, 6\nsi, sj, sk = 20, 10, 1\n\nxcorn = np.arange(0, (ni + 1) * si, si)\nxcorn = np.repeat(xcorn, 2)\nxcorn = xcorn[1:-1]\nxcorn = np.tile(xcorn, 4 * nj * nk)\n\nycorn = np.arange(0, (nj + 1) * sj, sj)\nycorn = np.repeat(ycorn, 2)\nycorn = ycorn[1:-1]\nycorn = np.tile(ycorn, (2 * ni, 2 * nk))\nycorn = np.transpose(ycorn)\nycorn = ycorn.flatten()\n\nzcorn = np.arange(0, (nk + 1) * sk, sk)\nzcorn = np.repeat(zcorn, 2)\nzcorn = zcorn[1:-1]\nzcorn = np.repeat(zcorn, (4 * ni * nj))\n\ncorners = np.stack((xcorn, ycorn, zcorn))\ncorners = corners.transpose()\n\nif pv._vtk.VTK9:\n dims = np.asarray((ni, nj, nk)) + 1\n grid = pv.ExplicitStructuredGrid(dims, corners)\n grid = grid.compute_connectivity()\n grid.plot(show_edges=True)\n"
] | [
[
"numpy.tile",
"numpy.transpose",
"numpy.repeat",
"numpy.asarray",
"numpy.arange",
"numpy.stack"
]
] |
TiKeil/Trust-region-TSRBLOD-code | [
"70fb396aa07b57028771e3e6e424ab3d1ace10f0"
] | [
"scripts/plot_mu_d.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n#\n# ~~~\n# This file is part of the paper:\n#\n# \"A relaxed localized trust-region reduced basis approach for\n# optimization of multiscale problems\"\n#\n# by: Tim Keil and Mario Ohlberger\n#\n# https://github.com/TiKeil/Trust-region-TSRBLOD-code\n#\n# Copyright 2019-2022 all developers. All rights reserved.\n# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n# Authors:\n# Tim Keil (2022)\n# ~~~\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom pymor.core.logger import set_log_levels\nfrom pymor.core.defaults import set_defaults\nfrom pymor.core.cache import disable_caching\nfrom pdeopt.tools import print_iterations_and_walltime\nset_log_levels({'pymor': 'ERROR',\n 'notebook': 'INFO'})\n\ndef prepare_kernels():\n set_log_levels({'pymor': 'WARN'})\n set_defaults({\"pymor.algorithms.gram_schmidt.gram_schmidt.rtol\": 1e-4})# <-- very important for the estimator\n set_defaults({\"pymor.algorithms.gram_schmidt.gram_schmidt.check\": False})\n disable_caching()\n\nuse_pool = True\nif use_pool:\n from pymor.parallel.mpi import MPIPool\n pool = MPIPool()\n # store_in_tmp = '/scratch/tmp/t_keil02/lrblod/tmp'\n store_in_tmp = 'tmp'\nelse:\n from pymor.parallel.dummy import DummyPool\n pool = DummyPool()\n store_in_tmp = False\npool.apply(prepare_kernels)\nprint_on_ranks = True\n\n'''\n Variables for the experiment and discretization\n'''\n\ncoarse_elements = 20\nn = 1200\ndiameter = np.sqrt(2)/n\n\ntwo_scale_estimator_for_RBLOD = False\nsave_correctors = False\n\nuse_FEM = True\n#use_FEM = False\nuse_fine_mesh = True\n#use_fine_mesh = False\n\n# skip_estimator = False\nskip_estimator = True\n\nadd_error_residual = True\n# add_error_residual = False\n\nfrom pdeopt.problems import large_thermal_block\nfrom pdeopt.discretizer import discretize_quadratic_NCD_pdeopt_stationary_cg\nfrom pdeopt.discretize_gridlod import (discretize_gridlod, discretize_quadratic_pdeopt_with_gridlod)\n\nhigh_conductivity, low_conductivity, min_diffusivity, rhs_value = 4., 1.2, 1., 10.\nfirst_factor, second_factor = 4, 8\n\nprint(f'\\nVARIABLES: \\n'\n f'Coarse elements: {coarse_elements} x {coarse_elements}\\n'\n f'Fine elements: {n} x {n}\\n'\n f'high_c/low_c/min_c: {high_conductivity}/{low_conductivity}/{min_diffusivity}\\n'\n f'rhs/f_1/f_2: {rhs_value}/{first_factor}/{second_factor}\\n')\n\nglobal_problem, world, local_problem_constructer, f, aFines, f_fine = \\\n large_thermal_block(diameter, coarse_elements, blocks=(4, 4), plot=False, return_fine=use_FEM,\n high_conductivity=high_conductivity, low_conductivity=low_conductivity, rhs_value=rhs_value,\n first_factor=first_factor, second_factor=second_factor, min_diffusivity=min_diffusivity)\ndomain_of_interest = None\n\nproblem = global_problem\n\nmu_d = global_problem.parameter_space.sample_randomly(1, seed=23)[0]\nmu_d_array = mu_d.to_numpy()\n\nfor i in [3,4,6,7,8,9,11,14]:\n mu_d_array[i] = high_conductivity\nfor i in [3,4,5,6]:\n mu_d_array[i+25] = low_conductivity\n\nmu_d = mu_d.parameters.parse(mu_d_array)\nnorm_mu_d = np.linalg.norm(mu_d_array)\n# mu_d = None\n\n'''\n Some plotting\n'''\n\n#### plotting\nfrom pdeopt.gridlod_model import construct_aFine_from_mu\nfrom perturbations_for_2d_data import visualize\n\nvis_mu_block_1_array = mu_d_array.copy()\nvis_mu_block_2_array = mu_d_array.copy()\nfor i in range(0,len(mu_d_array),2):\n vis_mu_block_1_array[i] = 0\n vis_mu_block_2_array[i+1] = 0\nvis_mu_block_1 = 
mu_d.parameters.parse(vis_mu_block_1_array)\nvis_mu_block_2 = mu_d.parameters.parse(vis_mu_block_2_array)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, mu_d)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, vis_mu_block_1)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, vis_mu_block_2)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.show()\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.sqrt",
"numpy.linalg.norm"
]
] |
liang324/wrs | [
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa"
] | [
"basis/trimesh_new/resources/helpers/id_helper.py"
] | [
"\"\"\"\nfeatures.py\n---------------\n\nIn trimesh.comparison, we arbitrarily threshold identifier values\nat a certain number of significant figures.\n\nThis file permutates meshes around and observes how their identifier,\nwhich is supposed to be pretty invariant to translation and tessellation\nchanges. We use this to generate the arbitrary sigfig thresholds.\n\"\"\"\n\nimport numpy as np\nimport trimesh\n\nimport time\nimport json\nimport os\n\nimport collections\nimport logging\n\nTOL_ZERO = 1e-12\n\n\ndef permutations(mesh,\n function=lambda x: x.identifier,\n displacement_max=1e-8,\n count=1000,\n subdivisions=2,\n cutoff=3600):\n \"\"\"\n Permutate a mesh, record the maximum it deviates from the original mesh\n and the resulting value of an identifier function.\n\n Parameters\n ----------\n mesh: Trimesh object\n function: function which takes a single mesh as an argument\n and returns an (n,) float vector\n subdivisions: the maximum number of times to subdivide the mesh\n count: int, number of times to permutate each subdivision step\n\n Returns\n -----------\n identifiers: numpy array of identifiers\n \"\"\"\n\n identifiers = []\n start = time.time()\n\n # do subdivisions\n divided = [mesh.copy()]\n for j in range(subdivisions - 1):\n divided.append(divided[-1].copy().subdivide())\n\n for i, displacement in enumerate(np.linspace(0.0,\n displacement_max / mesh.scale,\n count)):\n # get one of the subdivided meshes\n current = np.random.choice(divided).copy()\n\n if i > (count / 10):\n # run first bunch without tessellation permutation\n current = current.permutate.tessellation()\n # after the first few displace it a lot\n\n transformed = trimesh.permutate.transform(current)\n # noisy = trimesh.permutate.noise(transformed, displacement)\n\n identifier = function(transformed)\n identifiers.append(identifier)\n\n if (time.time() - start) > cutoff:\n print('bailing for time:{} count:{}'.format(time.time() - start,\n i))\n return np.array(identifiers)\n\n return np.array(identifiers)\n\n\ndef get_meshes(path='../../../models', cutoff=None):\n \"\"\"\n Get a list of single- body meshes to test identifiers on.\n\n Parameters\n ------------\n path: str, location of models\n cutoff: int, number of meshes to stop loading at\n\n Returns\n ------------\n meshes: (n,) list of Trimesh objects\n \"\"\"\n\n bodies = collections.deque()\n for file_name in os.listdir(path):\n try:\n mesh = trimesh.load(os.path.join(path, file_name))\n split = mesh.split()\n bodies.extend(split)\n if len(split) > 1:\n bodies.append(mesh)\n except BaseException:\n continue\n\n if cutoff is not None and len(bodies) > cutoff:\n return np.array(bodies)\n\n for i in range(100):\n cylinder = trimesh.creation.cylinder(\n radius=np.random.random() * 100,\n height=np.random.random() * 1000,\n sections=int(np.clip(np.random.random() * 720,\n 20,\n 720)))\n\n capsule = trimesh.creation.capsule(\n radius=np.random.random() * 100,\n height=np.random.random() * 1000,\n count=np.clip(np.random.random(2) * 720,\n 20,\n 720).astype(int))\n bodies.append(cylinder)\n bodies.append(capsule)\n for i in range(10):\n bodies.append(trimesh.creation.random_soup(\n int(np.clip(np.random.random() * 1000,\n 20,\n 1000))))\n bodies.append(trimesh.creation.icosphere())\n bodies.append(trimesh.creation.uv_sphere())\n bodies.append(trimesh.creation.icosahedron())\n\n return np.array(bodies)\n\n\ndef data_stats(data):\n data = np.asanyarray(data, dtype=np.float64)\n\n # mean identifier\n mean = data.mean(axis=0)\n # thresholdable percentile\n 
percent = np.abs(mean - np.abs(np.percentile(data, 99.999, axis=0)))\n\n return mean, percent\n\n\nif __name__ == '__main__':\n trimesh.util.attach_to_log(level=logging.INFO)\n\n meshes = get_meshes()\n\n print('loaded meshes!')\n\n # we want the whole thing to last less than\n hours = 5\n cutoff = (hours * 3600) / len(meshes)\n cutoff = 30\n result = []\n running = []\n\n for i, m in enumerate(meshes):\n\n # calculate permutations\n identifier = permutations(m,\n count=1000,\n cutoff=cutoff)\n # get data\n mean, percent = data_stats(identifier)\n\n nz = np.logical_and(np.abs(mean) > TOL_ZERO,\n np.abs(percent) > TOL_ZERO)\n\n r = np.ones_like(mean) * 10\n r[nz] = np.round(np.log10(np.abs(mean[nz] / percent[nz]))) - 1\n\n running.append(r)\n result.append({'mean': mean.tolist(),\n 'percent': percent.tolist()})\n\n print('\\n\\n{}/{}'.format(i, len(meshes) - 1))\n print('mean', mean)\n print('percent', percent)\n print('oom', mean / percent)\n print('curun', running[-1])\n print('minrun', np.min(running, axis=0))\n print('meanrun', np.mean(running, axis=0))\n\n # every loop dump everything\n # thrash- ey for sure but intermediate results are great\n name_out = 'res.json'\n with open(name_out, 'w') as file_obj:\n json.dump(result,\n file_obj,\n indent=4)\n"
] | [
[
"numpy.ones_like",
"numpy.random.choice",
"numpy.asanyarray",
"numpy.abs",
"numpy.random.random",
"numpy.min",
"numpy.array",
"numpy.linspace",
"numpy.percentile",
"numpy.mean"
]
] |
mingcv/Bread | [
"20dedfe2105b08ce8499b216c3c2bfd3699af17f"
] | [
"train_NFM.py"
] | [
"import argparse\nimport datetime\nimport os\nimport traceback\n\nimport kornia\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom tqdm.autonotebook import tqdm\n\nimport models\nfrom datasets import LowLightDataset, LowLightFDataset\nfrom models import PSNR, SSIM, CosineLR\nfrom tools import SingleSummaryWriter\nfrom tools import saver, mutils\n\n\ndef get_args():\n parser = argparse.ArgumentParser('Breaking Downing the Darkness')\n parser.add_argument('--num_gpus', type=int, default=1, help='number of gpus being used')\n parser.add_argument('--num_workers', type=int, default=12, help='num_workers of dataloader')\n parser.add_argument('--batch_size', type=int, default=1, help='The number of images per batch among all devices')\n parser.add_argument('-m1', '--model1', type=str, default='INet',\n help='Model1 Name')\n parser.add_argument('-m2', '--model2', type=str, default='NSNet',\n help='Model2 Name')\n parser.add_argument('-m3', '--model3', type=str, default='NSNet',\n help='Model3 Name')\n\n parser.add_argument('-m1w', '--model1_weight', type=str, default=None,\n help='Model Name')\n parser.add_argument('-m2w', '--model2_weight', type=str, default=None,\n help='Model Name')\n\n parser.add_argument('--comment', type=str, default='default',\n help='Project comment')\n parser.add_argument('--graph', action='store_true')\n parser.add_argument('--no_sche', action='store_true')\n parser.add_argument('--sampling', action='store_true')\n\n parser.add_argument('--slope', type=float, default=2.)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--optim', type=str, default='adam', help='select optimizer for training, '\n 'suggest using \\'admaw\\' until the'\n ' very final stage then switch to \\'sgd\\'')\n parser.add_argument('--num_epochs', type=int, default=500)\n parser.add_argument('--val_interval', type=int, default=1, help='Number of epoches between valing phases')\n parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')\n parser.add_argument('--data_path', type=str, default='./data/LOL',\n help='the root folder of dataset')\n parser.add_argument('--log_path', type=str, default='logs/')\n parser.add_argument('--saved_path', type=str, default='logs/')\n args = parser.parse_args()\n return args\n\n\nclass ModelNSNet(nn.Module):\n def __init__(self, model1, model2, model3):\n super().__init__()\n self.texture_loss = models.SSIML1Loss(channels=1)\n self.model_ianet = model1(in_channels=1, out_channels=1)\n self.model_nsnet = model2(in_channels=2, out_channels=1)\n self.model_fusenet = model3(in_channels=3, out_channels=1)\n\n assert opt.model1_weight is not None\n self.load_weight(self.model_ianet, opt.model1_weight)\n self.load_weight(self.model_nsnet, opt.model2_weight)\n self.model_ianet.eval()\n self.model_nsnet.eval()\n self.eps = 1e-2\n\n def load_weight(self, model, weight_pth):\n state_dict = torch.load(weight_pth)\n ret = model.load_state_dict(state_dict, strict=True)\n print(ret)\n\n def noise_syn(self, illumi, strength):\n return torch.exp(-illumi) * strength\n\n def forward(self, image, image_gt, training=True):\n texture_nss = []\n with torch.no_grad():\n if training:\n image = image.squeeze(0)\n image_gt = image_gt.repeat(8, 1, 1, 1)\n\n texture_in, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image), 1, dim=1)\n texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image_gt), 1, dim=1)\n\n texture_in_down = 
F.interpolate(texture_in, scale_factor=0.5, mode='bicubic', align_corners=True)\n illumi = self.model_ianet(texture_in_down)\n illumi = F.interpolate(illumi, scale_factor=2, mode='bicubic', align_corners=True)\n noisy_gt = texture_in / torch.clamp_min(illumi, self.eps)\n\n for strength in [0, 0.05, 0.1]:\n illumi = torch.clamp(illumi, 0., 1.)\n attention = self.noise_syn(illumi, strength=strength)\n texture_res = self.model_nsnet(torch.cat([noisy_gt, attention], dim=1))\n texture_ns = noisy_gt + texture_res\n texture_nss.append(texture_ns)\n\n texture_nss = torch.cat(texture_nss, dim=1).detach()\n\n texture_fuse = self.model_fusenet(texture_nss)\n restor_loss = self.texture_loss(texture_fuse, texture_gt)\n psnr = PSNR(texture_fuse, texture_gt)\n ssim = SSIM(texture_fuse, texture_gt).item()\n return noisy_gt, texture_nss, texture_fuse, texture_res, illumi, restor_loss, psnr, ssim\n\n\ndef train(opt):\n if torch.cuda.is_available():\n torch.cuda.manual_seed(42)\n else:\n torch.manual_seed(42)\n\n timestamp = mutils.get_formatted_time()\n opt.saved_path = opt.saved_path + f'/{opt.comment}/{timestamp}'\n opt.log_path = opt.log_path + f'/{opt.comment}/{timestamp}/tensorboard/'\n os.makedirs(opt.log_path, exist_ok=True)\n os.makedirs(opt.saved_path, exist_ok=True)\n\n training_params = {'batch_size': opt.batch_size,\n 'shuffle': True,\n 'drop_last': True,\n 'num_workers': opt.num_workers}\n\n val_params = {'batch_size': 1,\n 'shuffle': False,\n 'drop_last': True,\n 'num_workers': opt.num_workers}\n\n training_set = LowLightFDataset(os.path.join(opt.data_path, 'train'), image_split='images_aug')\n training_generator = DataLoader(training_set, **training_params)\n\n val_set = LowLightDataset(os.path.join(opt.data_path, 'eval'))\n val_generator = DataLoader(val_set, **val_params)\n\n model1 = getattr(models, opt.model1)\n model2 = getattr(models, opt.model2)\n model3 = getattr(models, opt.model3)\n writer = SingleSummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")}/')\n\n model = ModelNSNet(model1, model2, model3)\n print(model)\n\n if opt.num_gpus > 0:\n model = model.cuda()\n if opt.num_gpus > 1:\n model = nn.DataParallel(model)\n\n if opt.optim == 'adam':\n optimizer = torch.optim.Adam(model.model_fusenet.parameters(), opt.lr)\n else:\n optimizer = torch.optim.SGD(model.model_fusenet.parameters(), opt.lr, momentum=0.9, nesterov=True)\n\n scheduler = CosineLR(optimizer, opt.lr, opt.num_epochs)\n epoch = 0\n step = 0\n model.model_fusenet.train()\n\n num_iter_per_epoch = len(training_generator)\n\n try:\n for epoch in range(opt.num_epochs):\n last_epoch = step // num_iter_per_epoch\n if epoch < last_epoch:\n continue\n\n epoch_loss = []\n progress_bar = tqdm(training_generator)\n\n saver.base_url = os.path.join(opt.saved_path, 'results', '%03d' % epoch)\n if not opt.sampling:\n for iter, (data, target, name) in enumerate(progress_bar):\n if iter < step - last_epoch * num_iter_per_epoch:\n progress_bar.update()\n continue\n try:\n if opt.num_gpus == 1:\n data = data.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n\n noisy_gt, texture_nss, texture_fuse, texture_res, \\\n illumi, restor_loss, psnr, ssim = model(data, target, training=True)\n\n loss = restor_loss\n loss.backward()\n optimizer.step()\n\n epoch_loss.append(float(loss))\n\n progress_bar.set_description(\n 'Step: {}. Epoch: {}/{}. Iteration: {}/{}. 
restor_loss: {:.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(\n step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, restor_loss.item(), psnr,\n ssim))\n writer.add_scalar('Loss/train', loss, step)\n writer.add_scalar('PSNR/train', psnr, step)\n writer.add_scalar('SSIM/train', ssim, step)\n\n # log learning_rate\n current_lr = optimizer.param_groups[0]['lr']\n writer.add_scalar('learning_rate', current_lr, step)\n\n step += 1\n\n except Exception as e:\n print('[Error]', traceback.format_exc())\n print(e)\n continue\n\n if not opt.no_sche:\n scheduler.step()\n\n if epoch % opt.val_interval == 0:\n model.model_fusenet.eval()\n loss_ls = []\n psnrs = []\n ssims = []\n\n for iter, (data, target, name) in enumerate(val_generator):\n with torch.no_grad():\n if opt.num_gpus == 1:\n data = data.cuda()\n target = target.cuda()\n\n noisy_gt, texture_nss, texture_fuse, texture_res, \\\n illumi, restor_loss, psnr, ssim = model(data, target, training=False)\n texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(target), 1, dim=1)\n\n saver.save_image(noisy_gt, name=os.path.splitext(name[0])[0] + '_in')\n saver.save_image(texture_nss.transpose(0, 1), name=os.path.splitext(name[0])[0] + '_ns')\n saver.save_image(texture_fuse, name=os.path.splitext(name[0])[0] + '_fuse')\n saver.save_image(texture_res, name=os.path.splitext(name[0])[0] + '_res')\n saver.save_image(illumi, name=os.path.splitext(name[0])[0] + '_ill')\n saver.save_image(target, name=os.path.splitext(name[0])[0] + '_gt')\n\n loss = restor_loss\n loss_ls.append(loss.item())\n psnrs.append(psnr)\n ssims.append(ssim)\n\n loss = np.mean(np.array(loss_ls))\n psnr = np.mean(np.array(psnrs))\n ssim = np.mean(np.array(ssims))\n\n print(\n 'Val. Epoch: {}/{}. Loss: {:1.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(\n epoch, opt.num_epochs, loss, psnr, ssim))\n writer.add_scalar('Loss/val', loss, step)\n writer.add_scalar('PSNR/val', psnr, step)\n writer.add_scalar('SSIM/val', ssim, step)\n\n save_checkpoint(model, f'{opt.model3}_{\"%03d\" % epoch}_{psnr}_{ssim}_{step}.pth')\n\n model.model_fusenet.train()\n if opt.sampling:\n exit(0)\n except KeyboardInterrupt:\n save_checkpoint(model, f'{opt.model3}_{epoch}_{step}_keyboardInterrupt.pth')\n writer.close()\n writer.close()\n\n\ndef save_checkpoint(model, name):\n if isinstance(model, nn.DataParallel):\n torch.save(model.module.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))\n else:\n torch.save(model.model_fdnet.state_dict(), os.path.join(opt.saved_path, name))\n\n\nif __name__ == '__main__':\n opt = get_args()\n train(opt)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"torch.clamp",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.no_grad",
"torch.exp",
"torch.cuda.is_available",
"numpy.array",
"torch.nn.DataParallel",
"torch.clamp_min",
"torch.cat",
"torch.nn.functional.interpolate"
]
] |
NunoEdgarGFlowHub/agents-1 | [
"c62215debda5bf5d89723f4112f1e3e2f063cd52"
] | [
"tf_agents/bandits/policies/policy_utilities.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for bandit policies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.utils import common\n\n\nclass InfoFields(object):\n \"\"\"Strings which can be used in the policy info fields.\"\"\"\n # Mean of predicted rewards (per arm).\n PREDICTED_REWARDS_MEAN = 'predicted_rewards_mean'\n # Samples of predicted rewards (per arm).\n PREDICTED_REWARDS_SAMPLED = 'predicted_rewards_sampled'\n # Type of bandit policy (see enumerations in `BanditPolicyType`).\n BANDIT_POLICY_TYPE = 'bandit_policy_type'\n # Used to store the chosen action for a per-arm model.\n CHOSEN_ARM_FEATURES = 'chosen_arm_features'\n\n\nPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name\n 'PolicyInfo',\n (policy_step.CommonFields.LOG_PROBABILITY,\n InfoFields.PREDICTED_REWARDS_MEAN,\n InfoFields.PREDICTED_REWARDS_SAMPLED,\n InfoFields.BANDIT_POLICY_TYPE))\n# Set default empty tuple for all fields.\nPolicyInfo.__new__.__defaults__ = ((),) * len(PolicyInfo._fields)\n\n\nPerArmPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name\n 'PerArmPolicyInfo',\n (policy_step.CommonFields.LOG_PROBABILITY,\n InfoFields.PREDICTED_REWARDS_MEAN,\n InfoFields.PREDICTED_REWARDS_SAMPLED,\n InfoFields.BANDIT_POLICY_TYPE,\n InfoFields.CHOSEN_ARM_FEATURES))\n# Set default empty tuple for all fields.\nPerArmPolicyInfo.__new__.__defaults__ = ((),) * len(PerArmPolicyInfo._fields)\n\n\nclass BanditPolicyType(object):\n \"\"\"Enumeration of bandit policy types.\"\"\"\n # No bandit policy type specified.\n UNKNOWN = 0\n # Greedy decision made by bandit agent.\n GREEDY = 1\n # Random decision for exploration made by epsilon-greedy agent sampled from\n # uniform distribution over actions.\n UNIFORM = 2\n\n\ndef create_bandit_policy_type_tensor_spec(shape):\n \"\"\"Create tensor spec for bandit policy type.\"\"\"\n return tensor_spec.BoundedTensorSpec(\n shape=shape, dtype=tf.int32,\n minimum=BanditPolicyType.UNKNOWN, maximum=BanditPolicyType.UNIFORM)\n\n\[email protected]\ndef masked_argmax(input_tensor, mask, output_type=tf.int32):\n \"\"\"Computes the argmax where the allowed elements are given by a mask.\n\n Args:\n input_tensor: Rank-2 Tensor of floats.\n mask: 0-1 valued Tensor of the same shape as input.\n output_type: Integer type of the output.\n\n Returns:\n A Tensor of rank 1 and type `output_type`, with the masked argmax of every\n row of `input_tensor`.\n \"\"\"\n input_tensor.shape.assert_is_compatible_with(mask.shape)\n neg_inf = tf.constant(-float('Inf'), input_tensor.dtype)\n tf.compat.v1.assert_equal(\n tf.reduce_max(mask, axis=1), tf.constant(1, dtype=mask.dtype))\n modified_input = tf.compat.v2.where(\n 
tf.cast(mask, tf.bool), input_tensor, neg_inf)\n return tf.argmax(modified_input, axis=-1, output_type=output_type)\n\n\ndef has_bandit_policy_type(info, check_for_tensor=False):\n \"\"\"Check if policy info has `bandit_policy_type` field/tensor.\"\"\"\n if info in ((), None):\n return False\n fields = getattr(info, '_fields', None)\n has_field = fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields\n if has_field and check_for_tensor:\n return isinstance(info.bandit_policy_type, tf.Tensor)\n else:\n return has_field\n\n\ndef set_bandit_policy_type(info, bandit_policy_type):\n \"\"\"Sets the InfoFields.BANDIT_POLICY_TYPE on info to bandit_policy_type.\n\n If policy `info` does not support InfoFields.BANDIT_POLICY_TYPE, this method\n returns `info` as-is (without any modification).\n\n Args:\n info: Policy info on which to set bandit policy type.\n bandit_policy_type: Tensor containing BanditPolicyType enums or TensorSpec\n from `create_bandit_policy_type_tensor_spec()`.\n\n Returns:\n Policy info with modified field (if possible).\n \"\"\"\n if info in ((), None):\n return PolicyInfo(bandit_policy_type=bandit_policy_type)\n fields = getattr(info, '_fields', None)\n if fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields:\n return info._replace(bandit_policy_type=bandit_policy_type)\n try:\n info[InfoFields.BANDIT_POLICY_TYPE] = bandit_policy_type\n except TypeError:\n pass\n return info\n\n\[email protected]\ndef bandit_policy_uniform_mask(values, mask):\n \"\"\"Set bandit policy type tensor to BanditPolicyType.UNIFORM based on mask.\n\n Set bandit policy type `values` to BanditPolicyType.UNIFORM; returns tensor\n where output[i] is BanditPolicyType.UNIFORM if mask[i] is True, otherwise it\n is left as values[i].\n\n Args:\n values: Tensor containing `BanditPolicyType` enumerations.\n mask: Tensor of the same shape as `values` with boolean flags indicating\n values to set to `BanditPolicyType.UNIFORM`.\n\n Returns:\n Tensor containing `BanditPolicyType` enumerations with masked values.\n \"\"\"\n tf.compat.v1.assert_equal(tf.shape(mask), tf.shape(values))\n return tf.where(\n mask, tf.fill(tf.shape(values), BanditPolicyType.UNIFORM), values)\n"
] | [
[
"tensorflow.shape",
"tensorflow.reduce_max",
"tensorflow.cast",
"tensorflow.argmax",
"tensorflow.constant"
]
] |
hchyun6086/auto-editor | [
"beef008763bcaad00b83d5b506f436e6edc8963e"
] | [
"auto_editor/audiotsm2/base/analysis_synthesis.py"
] | [
"'''audiotsm2/base/analysis_synthesis.py'''\n\nimport numpy as np\n\nfrom auto_editor.audiotsm2.utils import (windows, CBuffer, NormalizeBuffer)\nfrom .tsm import TSM\n\nEPSILON = 0.0001\n\n\nclass AnalysisSynthesisTSM(TSM):\n def __init__(self, converter, channels, frame_length, analysis_hop, synthesis_hop,\n analysis_window, synthesis_window, delta_before=0, delta_after=0):\n self._converter = converter\n\n self._channels = channels\n self._frame_length = frame_length\n self._analysis_hop = analysis_hop\n self._synthesis_hop = synthesis_hop\n\n self._analysis_window = analysis_window\n self._synthesis_window = synthesis_window\n\n self._delta_before = delta_before\n self._delta_after = delta_after\n\n # When the analysis hop is larger than the frame length, some samples\n # from the input need to be skipped. self._skip_input_samples tracks\n # how many samples should be skipped before reading the analysis frame.\n self._skip_input_samples = 0\n\n # This attribute is used to start the output signal in the middle of a\n # frame, which should be the peek of the window function\n self._skip_output_samples = 0\n\n # Compute the normalize window\n self._normalize_window = windows.product(self._analysis_window,\n self._synthesis_window)\n\n if(self._normalize_window is None):\n self._normalize_window = np.ones(self._frame_length)\n\n # Initialize the buffers\n delta = self._delta_before + self._delta_after\n self._in_buffer = CBuffer(self._channels, self._frame_length + delta)\n self._analysis_frame = np.empty(\n (self._channels, self._frame_length + delta))\n self._out_buffer = CBuffer(self._channels, self._frame_length)\n self._normalize_buffer = NormalizeBuffer(self._frame_length)\n\n self.clear()\n\n def clear(self):\n # Clear the buffers\n self._in_buffer.remove(self._in_buffer.length)\n self._out_buffer.remove(self._out_buffer.length)\n self._out_buffer.right_pad(self._frame_length)\n self._normalize_buffer.remove(self._normalize_buffer.length)\n\n # Left pad the input with half a frame of zeros, and ignore that half\n # frame in the output. 
This makes the output signal start in the middle\n # of a frame, which should be the peak of the window function.\n self._in_buffer.write(np.zeros(\n (self._channels, self._delta_before + self._frame_length // 2)))\n self._skip_output_samples = self._frame_length // 2\n\n self._converter.clear()\n\n def flush_to(self, writer):\n if(self._in_buffer.remaining_length == 0):\n raise RuntimeError(\n \"There is still data to process in the input buffer, flush_to method \"\n \"should only be called when write_to returns True.\"\n )\n\n n = self._out_buffer.write_to(writer)\n if(self._out_buffer.ready == 0):\n # The output buffer is empty\n self.clear()\n return n, True\n\n return n, False\n\n def get_max_output_length(self, input_length):\n input_length -= self._skip_input_samples\n if(input_length <= 0):\n return 0\n\n n_frames = input_length // self._analysis_hop + 1\n return n_frames * self._synthesis_hop\n\n def _process_frame(self):\n \"\"\"Read an analysis frame from the input buffer, process it, and write\n the result to the output buffer.\"\"\"\n # Generate the analysis frame and discard the input samples that will\n # not be needed anymore\n self._in_buffer.peek(self._analysis_frame)\n self._in_buffer.remove(self._analysis_hop)\n\n # Apply the analysis window\n windows.apply(self._analysis_frame, self._analysis_window)\n\n # Convert the analysis frame into a synthesis frame\n synthesis_frame = self._converter.convert_frame(self._analysis_frame)\n\n # Apply the synthesis window\n windows.apply(synthesis_frame, self._synthesis_window)\n\n # Overlap and add the synthesis frame in the output buffer\n self._out_buffer.add(synthesis_frame)\n\n # The overlap and add step changes the volume of the signal. The\n # normalize_buffer is used to keep track of \"how much of the input\n # signal was added\" to each part of the output buffer, allowing to\n # normalize it.\n self._normalize_buffer.add(self._normalize_window)\n\n # Normalize the samples that are ready to be written to the output\n normalize = self._normalize_buffer.to_array(end=self._synthesis_hop)\n normalize[normalize < EPSILON] = 1\n self._out_buffer.divide(normalize)\n self._out_buffer.set_ready(self._synthesis_hop)\n self._normalize_buffer.remove(self._synthesis_hop)\n\n def read_from(self, reader):\n n = reader.skip(self._skip_input_samples)\n self._skip_input_samples -= n\n if(self._skip_input_samples > 0):\n return n\n\n n += self._in_buffer.read_from(reader)\n\n if(self._in_buffer.remaining_length == 0 and\n self._out_buffer.remaining_length >= self._synthesis_hop):\n # The input buffer has enough data to process, and there is enough\n # space in the output buffer to store the output\n self._process_frame()\n\n # Skip output samples if necessary\n skipped = self._out_buffer.remove(self._skip_output_samples)\n self._out_buffer.right_pad(skipped)\n self._skip_output_samples -= skipped\n\n # Set the number of input samples to be skipped\n self._skip_input_samples = self._analysis_hop - self._frame_length\n if self._skip_input_samples < 0:\n self._skip_input_samples = 0\n\n return n\n\n def set_speed(self, speed):\n self._analysis_hop = int(self._synthesis_hop * speed)\n self._converter.set_analysis_hop(self._analysis_hop)\n\n def write_to(self, writer):\n n = self._out_buffer.write_to(writer)\n self._out_buffer.right_pad(n)\n\n if(self._in_buffer.remaining_length > 0 and self._out_buffer.ready == 0):\n # There is not enough data to process in the input buffer, and the\n # output buffer is empty\n return n, True\n\n return n, False\n"
] | [
[
"numpy.ones",
"numpy.empty",
"numpy.zeros"
]
] |
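The `_process_frame` method in the snippet above overlap-adds windowed synthesis frames and then divides by the accumulated window energy tracked in `_normalize_buffer`. A minimal NumPy sketch of that normalization idea, with made-up frame and hop sizes and plain arrays standing in for the buffer objects the class uses:

```python
import numpy as np

frame_length, synthesis_hop, epsilon = 8, 4, 1e-6
window = np.hanning(frame_length)

n_frames = 4
out = np.zeros(frame_length + (n_frames - 1) * synthesis_hop)
norm = np.zeros_like(out)

for i in range(n_frames):
    frame = window * np.ones(frame_length)      # stand-in for a windowed synthesis frame
    start = i * synthesis_hop
    out[start:start + frame_length] += frame    # overlap-and-add into the output buffer
    norm[start:start + frame_length] += window  # track "how much input was added" here

norm[norm < epsilon] = 1  # avoid amplifying near-silent edges, as in _process_frame
out /= norm               # undo the volume change introduced by overlap-add
```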
mastratton3/great_expectations | [
"151970d776c942bfc23cdd90c7ed00b57a34559d"
] | [
"great_expectations/dataset/pandas_dataset.py"
] | [
"from __future__ import division\n\nimport inspect\nimport json\nimport re\nfrom datetime import datetime\nfrom functools import wraps\nimport jsonschema\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom dateutil.parser import parse\nfrom scipy import stats\nfrom six import PY3, integer_types, string_types\nfrom numbers import Number\n\nfrom .dataset import Dataset\nfrom great_expectations.data_asset.util import DocInherit, parse_result_format\nfrom great_expectations.dataset.util import \\\n is_valid_partition_object, is_valid_categorical_partition_object, is_valid_continuous_partition_object, \\\n _scipy_distribution_positional_args_from_dict, validate_distribution_parameters\n\n\nclass MetaPandasDataset(Dataset):\n \"\"\"MetaPandasDataset is a thin layer between Dataset and PandasDataset.\n\n This two-layer inheritance is required to make @classmethod decorators work.\n\n Practically speaking, that means that MetaPandasDataset implements \\\n expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \\\n and PandasDataset implements the expectation methods themselves.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MetaPandasDataset, self).__init__(*args, **kwargs)\n\n @classmethod\n def column_map_expectation(cls, func):\n \"\"\"Constructs an expectation using column-map semantics.\n\n\n The MetaPandasDataset implementation replaces the \"column\" parameter supplied by the user with a pandas Series\n object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation\n logic while preserving the standard Dataset signature and expected behavior.\n\n See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \\\n for full documentation of this function.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n result_format = parse_result_format(result_format)\n\n # FIXME temporary fix for missing/ignored value\n ignore_values = [None, np.nan]\n if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:\n ignore_values = []\n # Counting the number of unexpected values can be expensive when there is a large\n # number of np.nan values.\n # This only happens on expect_column_values_to_not_be_null expectations.\n # Since there is no reason to look for most common unexpected values in this case,\n # we will instruct the result formatting method to skip this step.\n result_format['partial_unexpected_count'] = 0 \n\n series = self[column]\n\n # FIXME rename to mapped_ignore_values?\n if len(ignore_values) == 0:\n boolean_mapped_null_values = np.array(\n [False for value in series])\n else:\n boolean_mapped_null_values = np.array([True if (value in ignore_values) or (pd.isnull(value)) else False\n for value in series])\n\n element_count = int(len(series))\n\n # FIXME rename nonnull to non_ignored?\n nonnull_values = series[boolean_mapped_null_values == False]\n nonnull_count = int((boolean_mapped_null_values == False).sum())\n\n boolean_mapped_success_values = func(\n self, nonnull_values, *args, **kwargs)\n success_count = np.count_nonzero(boolean_mapped_success_values)\n\n unexpected_list = list(\n 
nonnull_values[boolean_mapped_success_values == False])\n unexpected_index_list = list(\n nonnull_values[boolean_mapped_success_values == False].index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list, unexpected_index_list\n )\n\n # FIXME Temp fix for result format\n if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:\n del return_obj['result']['unexpected_percent_nonmissing']\n try:\n del return_obj['result']['partial_unexpected_counts']\n del return_obj['result']['partial_unexpected_list']\n except KeyError:\n pass\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n\n return inner_wrapper\n\n @classmethod\n def column_pair_map_expectation(cls, func):\n \"\"\"\n The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating\n truthiness of some condition on a per row basis across a pair of columns.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column_A, column_B, mostly=None, ignore_row_if=\"both_values_are_missing\", result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n series_A = self[column_A]\n series_B = self[column_B]\n\n if ignore_row_if == \"both_values_are_missing\":\n boolean_mapped_null_values = series_A.isnull() & series_B.isnull()\n elif ignore_row_if == \"either_value_is_missing\":\n boolean_mapped_null_values = series_A.isnull() | series_B.isnull()\n elif ignore_row_if == \"never\":\n boolean_mapped_null_values = series_A.map(lambda x: False)\n else:\n raise ValueError(\n \"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n assert len(series_A) == len(\n series_B), \"Series A and B must be the same length\"\n\n # This next bit only works if series_A and _B are the same length\n element_count = int(len(series_A))\n nonnull_count = (boolean_mapped_null_values == False).sum()\n\n nonnull_values_A = series_A[boolean_mapped_null_values == False]\n nonnull_values_B = series_B[boolean_mapped_null_values == False]\n nonnull_values = [value_pair for value_pair in zip(\n list(nonnull_values_A),\n list(nonnull_values_B)\n )]\n\n boolean_mapped_success_values = func(\n self, nonnull_values_A, nonnull_values_B, *args, **kwargs)\n success_count = boolean_mapped_success_values.sum()\n\n unexpected_list = [value_pair for value_pair in zip(\n list(series_A[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)]),\n list(series_B[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)])\n )]\n unexpected_index_list = list(series_A[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)].index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list, unexpected_index_list\n )\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n return inner_wrapper\n\n @classmethod\n def 
multicolumn_map_expectation(cls, func):\n \"\"\"\n The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of\n evaluating truthiness of some condition on a per row basis across a set of columns.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column_list, mostly=None, ignore_row_if=\"all_values_are_missing\",\n result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n test_df = self[column_list]\n\n if ignore_row_if == \"all_values_are_missing\":\n boolean_mapped_skip_values = test_df.isnull().all(axis=1)\n elif ignore_row_if == \"any_value_is_missing\":\n boolean_mapped_skip_values = test_df.isnull().any(axis=1)\n elif ignore_row_if == \"never\":\n boolean_mapped_skip_values = pd.Series([False] * len(test_df))\n else:\n raise ValueError(\n \"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n boolean_mapped_success_values = func(\n self, test_df[boolean_mapped_skip_values == False], *args, **kwargs)\n success_count = boolean_mapped_success_values.sum()\n nonnull_count = (~boolean_mapped_skip_values).sum()\n element_count = len(test_df)\n\n unexpected_list = test_df[(boolean_mapped_skip_values == False) & (boolean_mapped_success_values == False)]\n unexpected_index_list = list(unexpected_list.index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list.to_dict(orient='records'), unexpected_index_list\n )\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n return inner_wrapper\n\n\nclass PandasDataset(MetaPandasDataset, pd.DataFrame):\n \"\"\"\n PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.\n\n For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`\n\n Notes:\n 1. Samples and Subsets of PandaDataSet have ALL the expectations of the original \\\n data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \\\n property on the original data frame.\n 2. Concatenations, joins, and merges of PandaDataSets contain NO expectations (since no autoinspection\n is performed by default).\n \"\"\"\n\n # this is necessary to subclass pandas in a proper way.\n # NOTE: specifying added properties in this way means that they will NOT be carried over when\n # the dataframe is manipulated, which we might want. To specify properties that are carried over\n # to manipulation results, we would just use `_metadata = ['row_count', ...]` here. 
The most likely\n # case is that we want the former, but also want to re-initialize these values to None so we don't\n # get an attribute error when trying to access them (I think this could be done in __finalize__?)\n _internal_names = pd.DataFrame._internal_names + [\n 'caching',\n ]\n _internal_names_set = set(_internal_names)\n\n # We may want to expand or alter support for subclassing dataframes in the future:\n # See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas\n\n @property\n def _constructor(self):\n return self.__class__\n\n def __finalize__(self, other, method=None, **kwargs):\n if isinstance(other, PandasDataset):\n self._initialize_expectations(other.get_expectations_config(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_include_configs_kwargs=False,\n discard_catch_exceptions_kwargs=False))\n # If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)\n # then it may not have discard_subset_failing_expectations set. Default to self value\n self.discard_subset_failing_expectations = getattr(other, \"discard_subset_failing_expectations\",\n self.discard_subset_failing_expectations)\n if self.discard_subset_failing_expectations:\n self.discard_failing_expectations()\n super(PandasDataset, self).__finalize__(other, method, **kwargs)\n return self\n\n def __init__(self, *args, **kwargs):\n super(PandasDataset, self).__init__(*args, **kwargs)\n self.discard_subset_failing_expectations = kwargs.get(\n 'discard_subset_failing_expectations', False)\n\n def get_row_count(self):\n return self.shape[0]\n\n def get_table_columns(self):\n return list(self.columns)\n\n def get_column_sum(self, column):\n return self[column].sum()\n\n def get_column_max(self, column, parse_strings_as_datetimes=False):\n temp_column = self[column].dropna()\n if parse_strings_as_datetimes:\n temp_column = temp_column.map(parse)\n return temp_column.max()\n\n def get_column_min(self, column, parse_strings_as_datetimes=False):\n temp_column = self[column].dropna()\n if parse_strings_as_datetimes:\n temp_column = temp_column.map(parse)\n return temp_column.min()\n\n def get_column_mean(self, column):\n return self[column].mean()\n\n def get_column_nonnull_count(self, column):\n series = self[column]\n null_indexes = series.isnull()\n nonnull_values = series[null_indexes == False]\n return len(nonnull_values)\n\n def get_column_value_counts(self, column):\n return self[column].value_counts()\n\n def get_column_unique_count(self, column):\n return self.get_column_value_counts(column).shape[0]\n\n def get_column_modes(self, column):\n return list(self[column].mode().values)\n\n def get_column_median(self, column):\n return self[column].median()\n\n def get_column_stdev(self, column):\n return self[column].std()\n\n def get_column_hist(self, column, bins):\n hist, bin_edges = np.histogram(self[column], bins, density=False)\n return list(hist)\n\n def get_column_count_in_range(self, column, min_val=None, max_val=None, min_strictly=False, max_strictly=True):\n # TODO this logic could probably go in the non-underscore version if we want to cache\n if min_val is None and max_val is None:\n raise ValueError('Must specify either min or max value')\n if min_val is not None and max_val is not None and min_val > max_val:\n raise ValueError('Min value must be <= to max value')\n\n result = self[column]\n if min_val is not None:\n if min_strictly:\n result = result[result > min_val]\n else:\n result = 
result[result >= min_val]\n if max_val is not None:\n if max_strictly:\n result = result[result < max_val]\n else:\n result = result[result <= max_val]\n return len(result)\n\n\n ### Expectation methods ###\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_unique(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n return ~column.duplicated(keep=False)\n\n # @Dataset.expectation(['column', 'mostly', 'result_format'])\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_be_null(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None, include_nulls=True):\n\n return ~column.isnull()\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_null(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n return column.isnull()\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_of_type(self, column, type_,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n # Target Datasource {numpy, python} was removed in favor of a simpler type mapping\n type_map = {\n \"null\": [type(None), np.nan],\n \"boolean\": [bool, np.bool_],\n \"int\": [int, np.int64] + list(integer_types),\n \"long\": [int, np.longdouble] + list(integer_types),\n \"float\": [float, np.float_],\n \"double\": [float, np.longdouble],\n \"bytes\": [bytes, np.bytes_],\n \"string\": [string_types, np.string_]\n }\n\n target_type = type_map[type_]\n\n return column.map(lambda x: isinstance(x, tuple(target_type)))\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_in_type_list(self, column, type_list,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n # Target Datasource {numpy, python} was removed in favor of a simpler type mapping\n type_map = {\n \"null\": [type(None), np.nan],\n \"boolean\": [bool, np.bool_],\n \"int\": [int, np.int64] + list(integer_types),\n \"long\": [int, np.longdouble] + list(integer_types),\n \"float\": [float, np.float_],\n \"double\": [float, np.longdouble],\n \"bytes\": [bytes, np.bytes_],\n \"string\": [string_types, np.string_]\n }\n\n # Build one type list with each specified type list from type_map\n target_type_list = list()\n for type_ in type_list:\n target_type_list += type_map[type_]\n\n return column.map(lambda x: isinstance(x, tuple(target_type_list)))\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_in_set(self, column, value_set,\n mostly=None,\n parse_strings_as_datetimes=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n parsed_value_set = self._parse_value_set(value_set)\n else:\n parsed_value_set = value_set\n\n return column.isin(parsed_value_set)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_be_in_set(self, column, value_set,\n mostly=None,\n parse_strings_as_datetimes=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n parsed_value_set = self._parse_value_set(value_set)\n else:\n parsed_value_set = value_set\n\n return ~column.isin(parsed_value_set)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def 
expect_column_values_to_be_between(self,\n column,\n min_value=None, max_value=None,\n parse_strings_as_datetimes=None,\n output_strftime_format=None,\n allow_cross_type_comparisons=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n if min_value is None and max_value is None:\n raise ValueError(\"min_value and max_value cannot both be None\")\n\n if parse_strings_as_datetimes:\n if min_value:\n min_value = parse(min_value)\n\n if max_value:\n max_value = parse(max_value)\n\n temp_column = column.map(parse)\n\n else:\n temp_column = column\n\n if min_value is not None and max_value is not None and min_value > max_value:\n raise ValueError(\"min_value cannot be greater than max_value\")\n\n def is_between(val):\n # TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints).\n # Ensure types can be compared since some types in Python 3 cannot be logically compared.\n # print type(val), type(min_value), type(max_value), val, min_value, max_value\n\n if type(val) == None:\n return False\n else:\n if min_value is not None and max_value is not None:\n if allow_cross_type_comparisons:\n try:\n return (min_value <= val) and (val <= max_value)\n except TypeError:\n return False\n\n else:\n if (isinstance(val, string_types) != isinstance(min_value, string_types)) or (isinstance(val, string_types) != isinstance(max_value, string_types)):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return (min_value <= val) and (val <= max_value)\n\n elif min_value is None and max_value is not None:\n if allow_cross_type_comparisons:\n try:\n return val <= max_value\n except TypeError:\n return False\n\n else:\n if isinstance(val, string_types) != isinstance(max_value, string_types):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return val <= max_value\n\n elif min_value is not None and max_value is None:\n if allow_cross_type_comparisons:\n try:\n return min_value <= val\n except TypeError:\n return False\n\n else:\n if isinstance(val, string_types) != isinstance(min_value, string_types):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return min_value <= val\n\n else:\n return False\n\n return temp_column.map(is_between)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_increasing(self, column, strictly=None, parse_strings_as_datetimes=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n temp_column = column.map(parse)\n\n col_diff = temp_column.diff()\n\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[0] = pd.Timedelta(1)\n\n if strictly:\n return col_diff > pd.Timedelta(0)\n else:\n return col_diff >= pd.Timedelta(0)\n\n else:\n col_diff = column.diff()\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[col_diff.isnull()] = 1\n\n if strictly:\n return col_diff > 0\n else:\n return col_diff >= 0\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_decreasing(self, column, strictly=None, parse_strings_as_datetimes=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n temp_column = column.map(parse)\n\n 
col_diff = temp_column.diff()\n\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[0] = pd.Timedelta(-1)\n\n if strictly:\n return col_diff < pd.Timedelta(0)\n else:\n return col_diff <= pd.Timedelta(0)\n\n else:\n col_diff = column.diff()\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[col_diff.isnull()] = -1\n\n if strictly:\n return col_diff < 0\n else:\n return col_diff <= 0\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_value_lengths_to_be_between(self, column, min_value=None, max_value=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n if min_value is None and max_value is None:\n raise ValueError(\"min_value and max_value cannot both be None\")\n\n # Assert that min_value and max_value are integers\n try:\n if min_value is not None and not float(min_value).is_integer():\n raise ValueError(\"min_value and max_value must be integers\")\n\n if max_value is not None and not float(max_value).is_integer():\n raise ValueError(\"min_value and max_value must be integers\")\n\n except ValueError:\n raise ValueError(\"min_value and max_value must be integers\")\n\n column_lengths = column.astype(str).str.len()\n\n if min_value is not None and max_value is not None:\n return column_lengths.between(min_value, max_value)\n\n elif min_value is None and max_value is not None:\n return column_lengths <= max_value\n\n elif min_value is not None and max_value is None:\n return column_lengths >= min_value\n\n else:\n return False\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_value_lengths_to_equal(self, column, value,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return column.str.len() == value\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_regex(self, column, regex,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return column.astype(str).str.contains(regex)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_match_regex(self, column, regex,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return ~column.astype(str).str.contains(regex)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_regex_list(self, column, regex_list, match_on=\"any\",\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n regex_matches = []\n for regex in regex_list:\n regex_matches.append(column.astype(str).str.contains(regex))\n regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)\n\n if match_on == \"any\":\n return regex_match_df.any(axis='columns')\n elif match_on == \"all\":\n return regex_match_df.all(axis='columns')\n else:\n raise ValueError(\"match_on must be either 'any' or 'all'\")\n\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_match_regex_list(self, column, regex_list,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n regex_matches = []\n for regex in regex_list:\n regex_matches.append(column.astype(str).str.contains(regex))\n regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)\n\n return ~regex_match_df.any(axis='columns')\n\n @DocInherit\n 
@MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_strftime_format(self, column, strftime_format,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None,\n meta=None):\n # Below is a simple validation that the provided format can both format and parse a datetime object.\n # %D is an example of a format that can format but not parse, e.g.\n try:\n datetime.strptime(datetime.strftime(\n datetime.now(), strftime_format), strftime_format)\n except ValueError as e:\n raise ValueError(\n \"Unable to use provided strftime_format. \" + e.message)\n\n def is_parseable_by_format(val):\n try:\n datetime.strptime(val, strftime_format)\n return True\n except TypeError as e:\n raise TypeError(\"Values passed to expect_column_values_to_match_strftime_format must be of type string.\\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.\")\n\n except ValueError as e:\n return False\n\n return column.map(is_parseable_by_format)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_dateutil_parseable(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def is_parseable(val):\n try:\n if type(val) != str:\n raise TypeError(\n \"Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.\")\n\n parse(val)\n return True\n\n except (ValueError, OverflowError):\n return False\n\n return column.map(is_parseable)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_json_parseable(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def is_json(val):\n try:\n json.loads(val)\n return True\n except:\n return False\n\n return column.map(is_json)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_json_schema(self, column, json_schema,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def matches_json_schema(val):\n try:\n val_json = json.loads(val)\n jsonschema.validate(val_json, json_schema)\n # jsonschema.validate raises an error if validation fails.\n # So if we make it this far, we know that the validation succeeded.\n return True\n except jsonschema.ValidationError:\n return False\n except jsonschema.SchemaError:\n raise\n except:\n raise\n\n return column.map(matches_json_schema)\n\n @DocInherit\n @MetaPandasDataset.column_aggregate_expectation\n def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(self, column, distribution,\n p_value=0.05, params=None,\n result_format=None,\n include_config=False,\n catch_exceptions=None, meta=None):\n column = self[column]\n\n if p_value <= 0 or p_value >= 1:\n raise ValueError(\"p_value must be between 0 and 1 exclusive\")\n\n # Validate params\n try:\n validate_distribution_parameters(\n distribution=distribution, params=params)\n except ValueError as e:\n raise e\n\n # Format arguments for scipy.kstest\n if (isinstance(params, dict)):\n positional_parameters = _scipy_distribution_positional_args_from_dict(\n distribution, params)\n else:\n positional_parameters = params\n\n # K-S Test\n ks_result = stats.kstest(column, distribution,\n args=positional_parameters)\n\n return {\n \"success\": 
ks_result[1] >= p_value,\n \"result\": {\n \"observed_value\": ks_result[1],\n \"details\": {\n \"expected_params\": positional_parameters,\n \"observed_ks_result\": ks_result\n }\n }\n }\n\n @DocInherit\n @MetaPandasDataset.column_aggregate_expectation\n def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(self, column, partition_object=None, p=0.05, bootstrap_samples=None, bootstrap_sample_size=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n column = self[column]\n\n if not is_valid_continuous_partition_object(partition_object):\n raise ValueError(\"Invalid continuous partition object.\")\n\n # TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object\n if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):\n raise ValueError(\"Partition endpoints must be finite.\")\n\n if \"tail_weights\" in partition_object and np.sum(partition_object[\"tail_weights\"]) > 0:\n raise ValueError(\"Partition cannot have tail weights -- endpoints must be finite.\")\n\n test_cdf = np.append(np.array([0]), np.cumsum(\n partition_object['weights']))\n\n def estimated_cdf(x):\n return np.interp(x, partition_object['bins'], test_cdf)\n\n if bootstrap_samples is None:\n bootstrap_samples = 1000\n\n if bootstrap_sample_size is None:\n # Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've\n # compressed via a partition.\n\n # Sampling too few elements will make the test insensitive to significant differences, especially\n # for nonoverlapping ranges.\n bootstrap_sample_size = len(partition_object['weights']) * 2\n\n results = [stats.kstest(\n np.random.choice(column, size=bootstrap_sample_size, replace=True),\n estimated_cdf)[1]\n for k in range(bootstrap_samples)]\n\n test_result = (1 + sum(x >= p for x in results)) / \\\n (bootstrap_samples + 1)\n\n hist, bin_edges = np.histogram(column, partition_object['bins'])\n below_partition = len(\n np.where(column < partition_object['bins'][0])[0])\n above_partition = len(\n np.where(column > partition_object['bins'][-1])[0])\n\n # Expand observed partition to report, if necessary\n if below_partition > 0 and above_partition > 0:\n observed_bins = [np.min(column)] + \\\n partition_object['bins'] + [np.max(column)]\n observed_weights = np.concatenate(\n ([below_partition], hist, [above_partition])) / len(column)\n elif below_partition > 0:\n observed_bins = [np.min(column)] + partition_object['bins']\n observed_weights = np.concatenate(\n ([below_partition], hist)) / len(column)\n elif above_partition > 0:\n observed_bins = partition_object['bins'] + [np.max(column)]\n observed_weights = np.concatenate(\n (hist, [above_partition])) / len(column)\n else:\n observed_bins = partition_object['bins']\n observed_weights = hist / len(column)\n\n observed_cdf_values = np.cumsum(observed_weights)\n\n return_obj = {\n \"success\": test_result > p,\n \"result\": {\n \"observed_value\": test_result,\n \"details\": {\n \"bootstrap_samples\": bootstrap_samples,\n \"bootstrap_sample_size\": bootstrap_sample_size,\n \"observed_partition\": {\n \"bins\": observed_bins,\n \"weights\": observed_weights.tolist()\n },\n \"expected_partition\": {\n \"bins\": partition_object['bins'],\n \"weights\": partition_object['weights']\n },\n \"observed_cdf\": {\n \"x\": observed_bins,\n \"cdf_values\": [0] + observed_cdf_values.tolist()\n },\n \"expected_cdf\": {\n \"x\": 
partition_object['bins'],\n \"cdf_values\": test_cdf.tolist()\n }\n }\n }\n }\n\n return return_obj\n\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_equal(self,\n column_A,\n column_B,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n return column_A == column_B\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_A_to_be_greater_than_B(self,\n column_A,\n column_B,\n or_equal=None,\n parse_strings_as_datetimes=None,\n allow_cross_type_comparisons=None,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n # FIXME\n if allow_cross_type_comparisons == True:\n raise NotImplementedError\n\n if parse_strings_as_datetimes:\n temp_column_A = column_A.map(parse)\n temp_column_B = column_B.map(parse)\n\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n if or_equal == True:\n return temp_column_A >= temp_column_B\n else:\n return temp_column_A > temp_column_B\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_in_set(self,\n column_A,\n column_B,\n value_pairs_set,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n temp_df = pd.DataFrame({\"A\": column_A, \"B\": column_B})\n value_pairs_set = {(x, y) for x, y in value_pairs_set}\n\n results = []\n for i, t in temp_df.iterrows():\n if pd.isnull(t[\"A\"]):\n a = None\n else:\n a = t[\"A\"]\n\n if pd.isnull(t[\"B\"]):\n b = None\n else:\n b = t[\"B\"]\n\n results.append((a, b) in value_pairs_set)\n\n return pd.Series(results, temp_df.index)\n\n @DocInherit\n @MetaPandasDataset.multicolumn_map_expectation\n def expect_multicolumn_values_to_be_unique(self,\n column_list,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n threshold = len(column_list.columns)\n # Do not dropna here, since we have separately dealt with na in decorator\n return column_list.nunique(dropna=False, axis=1) >= threshold\n"
] | [
[
"numpy.sum",
"pandas.Series",
"numpy.cumsum",
"numpy.interp",
"numpy.histogram",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.random.choice",
"numpy.count_nonzero",
"pandas.Timedelta",
"scipy.stats.kstest",
"numpy.max",
"pandas.concat",
"numpy.min",
"numpy.array",
"pandas.isnull",
"numpy.where"
]
] |
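The `MetaPandasDataset.column_map_expectation` decorator above turns a per-value check into a full expectation that handles nulls, the `mostly` threshold, and result formatting. A hypothetical usage sketch against the `PandasDataset` defined in this (older) version of great_expectations; the column name and values are made up:

```python
from great_expectations.dataset.pandas_dataset import PandasDataset

df = PandasDataset({"age": [23, 35, None, 41]})

# The decorator passes the "age" Series to the expectation, ignores nulls,
# and applies `mostly` to the fraction of passing values.
print(df.expect_column_values_to_not_be_null("age", mostly=0.7)["success"])   # True: 3/4 >= 0.7
print(df.expect_column_values_to_be_between("age", min_value=18, max_value=65)["success"])
```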
victimsnino/ReactivePlusPlus | [
"bb187cc52936bce7c1ef4899d7dbb9c970cef291"
] | [
"ci/create_graphs_for_benchmark_data.py"
] | [
"import plotly.offline as pyo\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport pandas as pd\nimport plotly.graph_objects as go\n\ndef rindex(lst, value):\n return len(lst) - lst[::-1].index(value) - 1\n \ndashboard = open(\"./gh-pages/benchmark.html\", 'w')\ndashboard.write(\"<html><head></head><body>\" + \"\\n\")\ndashboard.write(\"<p> TIP: Each graph can be zoomed in via selection of interested region! Double-click to return to original zoom mode </p>\")\nadd_js = True\n\ndef dump_plot(fig, name):\n global add_js\n global dashboard\n\n dashboard.write(f\"<details> <summary><b>{name}</b></summary>\")\n dashboard.write(pyo.plot(fig, include_plotlyjs=add_js, output_type='div'))\n dashboard.write(\"</details><br>\")\n\n add_js = False\n\n\nresults = pd.read_csv(\"./gh-pages/results.csv\", index_col=\"id\")\nall_commits = list(results[\"commit\"].unique())\ntake_last=20\n# duplicate last row to fix issue with splines\nresults = pd.concat([results, results[results['commit'] == results[\"commit\"].unique()[-1]]]).reset_index(drop=True)\n\ncolormap = px.colors.qualitative.Plotly\nfor platform, data in results.groupby(\"platform\", sort=False, as_index=False):\n dashboard.write(f\"<h2>{platform} </h2>\")\n for name, bench_data in data.groupby(\"benchmark_name\", sort=False, as_index=False):\n fig = go.Figure()\n for i, (test_case, test_cases_data) in enumerate(bench_data.groupby(\"test_case\", sort=False, as_index=False)):\n for source, source_data in test_cases_data.groupby(\"source\", sort=False, as_index=False):\n commit_indexes=[all_commits.index(c) for c in source_data[\"commit\"]]\n fig.add_trace(go.Scatter(x=commit_indexes,\n y=source_data[\"value\"],\n line_shape='spline',\n mode='lines+markers',\n marker_color=colormap[i],\n line_color=colormap[i],\n line_dash='solid' if source == 'rpp' else 'dot',\n name=f'{test_case}, {source}'))\n if source == 'rpp':\n fig.add_trace(go.Scatter(\n x=commit_indexes + commit_indexes[::-1],\n y=pd.concat([source_data['lowerBound'],\n source_data['upperBound'][::-1]]),\n fill='toself',\n fillcolor=colormap[i],\n line_color=colormap[i],\n name=f'{test_case}, {source}',\n showlegend=False,\n mode=\"lines\",\n opacity=0.3,\n line_shape='spline',\n hoverinfo='skip'\n ))\n\n min_val = bench_data.groupby(\"commit\", sort=False)[\"value\"].agg([\"min\"])[-take_last:].min().values[0]\n max_val = bench_data.groupby(\"commit\", sort=False)[\"value\"].agg([\"max\"])[-take_last:].max().values[0]\n diff = (max_val - min_val) * 0.05\n min_val -= diff\n max_val += diff\n fig.update_layout(\n hovermode=\"x unified\",\n title_x=0.5,\n title=name,\n xaxis_title=\"Commit\",\n yaxis_title=\"ns/iter\",\n legend_title=\"Legend Title\",\n xaxis=dict(\n tickmode='array',\n tickvals=list(range(0, len(all_commits))),\n ticktext=all_commits,\n tickangle=-35,\n rangeslider=dict(visible=True)\n ),\n yaxis=dict(\n # autorange=True,\n fixedrange=False\n ))\n\n fig['layout']['xaxis'].update(range=[len(all_commits)-take_last, len(all_commits)])\n fig['layout']['yaxis'].update(range=[min_val, max_val])\n \n dump_plot(fig, name)\n\n\ndashboard.write(\"</body></html>\" + \"\\n\")\ndashboard.close()\n"
] | [
[
"pandas.read_csv",
"pandas.concat"
]
] |
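One detail the benchmark script above relies on: `dump_plot` embeds every figure as an HTML `<div>` but ships plotly.js only with the first one, flipping `include_plotlyjs` to `False` afterwards. A minimal standalone sketch of that pattern, with a made-up output file and dummy traces:

```python
import plotly.offline as pyo
import plotly.graph_objects as go

add_js = True
with open("dashboard.html", "w") as page:
    page.write("<html><body>\n")
    for title in ("first", "second"):
        fig = go.Figure(go.Scatter(y=[1, 3, 2], name=title))
        page.write(pyo.plot(fig, include_plotlyjs=add_js, output_type="div"))
        add_js = False  # later figures reuse the plotly.js already embedded in the page
    page.write("</body></html>\n")
```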
broadinstitute/tissue_purifier | [
"989ce9d58bba99a3f1c49743eed22dcc64e5f159"
] | [
"src/tissue_purifier/utils/nms_util.py"
] | [
"import torch\nimport numpy\nfrom typing import Union, List, Any\n\n\nclass NonMaxSuppression:\n \"\"\"\n Given a set of bounding box defined over possibly different tissue\n Use Intersection_over_Minimum criteria to filter out overlapping proposals.\n \"\"\"\n\n @staticmethod\n @torch.no_grad()\n def compute_nm_mask(score: Union[torch.Tensor, numpy.ndarray],\n ids: Union[torch.Tensor, numpy.ndarray, List[Any]],\n patches_xywh: Union[torch.Tensor, numpy.ndarray],\n iom_threshold: float) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n Filter the proposals according to their score and their Intersection over Minimum.\n\n Args:\n score: score used to sort the proposals of shape (N)\n ids: vector or list of shape (N) with the (tissue) id.\n IoMIN is always zero between patches with different (tissue) ids.\n patches_xywh: coordinates with the proposals of shape (N, 4) where 4 stand for x,y,w,h.\n iom_threshold: threshold of Intersection over Minimum. If IoM is larger than this value the proposals\n will be suppressed during NMS. Only the proposal with larger score will survive.\n\n Returns:\n (nms_mask_n, iomin_nn) where nms_mask_n is a boolean tensor of shape (N) with True\n if the proposal survived NMS and iomin_nn with the value of the IoMIN among all possible pairs.\n \"\"\"\n\n def _to_numpy(_x):\n if isinstance(_x, torch.Tensor):\n return _x.detach().cpu().numpy()\n elif isinstance(_x, numpy.ndarray):\n return _x\n elif isinstance(_x, list):\n return numpy.array(_x)\n\n def _to_torch(_x):\n if isinstance(_x, torch.Tensor):\n return _x\n elif isinstance(_x, numpy.ndarray):\n return torch.from_numpy(_x)\n else:\n raise Exception(\"Expected a torch.tensor or a numpy.ndarray. Received {0}\".format(type(_x)))\n\n # the tissue ids can be a list of string. Therefore I can not convert to torch tensor directly.\n ids_numpy = _to_numpy(ids)\n assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4\n assert score.shape == ids_numpy.shape == patches_xywh[:, 0].shape\n\n # this is O(N^2) algorithm (all boxes compared to all other boxes) but it is very simple\n x, y, w, h = _to_torch(patches_xywh).unbind(dim=-1)\n overlap_measure_tmp_nn = NonMaxSuppression._compute_iomin(x=x, y=y, w=w, h=h)\n\n mask_same_id_nn_numpy = (ids_numpy == ids_numpy[:, None])\n mask_same_id_nn = _to_torch(mask_same_id_nn_numpy).to(device=overlap_measure_tmp_nn.device)\n overlap_measure_nn = overlap_measure_tmp_nn * mask_same_id_nn # if ids are different IoMIN = 0\n\n binarized_overlap_nn = (overlap_measure_nn > iom_threshold).float()\n nms_mask_n = NonMaxSuppression.perform_nms_selection(mask_overlap_nn=binarized_overlap_nn,\n score_n=score,\n possible_n=torch.ones_like(score).bool())\n return nms_mask_n, overlap_measure_nn\n\n @staticmethod\n def perform_nms_selection(mask_overlap_nn: torch.Tensor,\n score_n: torch.Tensor,\n possible_n: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given a set of n proposals and the (n x n) binarized mask which describes if two proposals are\n mutually exclusive it performs the greedy NMS in parallel (if possible).\n\n Args:\n mask_overlap_nn: Binarized overlap matrix with 1 if IoMIN > threshold and 0 otherwise, i.e 1 means that\n two proposals are incompatible, 0 means that they are compatible.\n score_n: Score of the proposal. 
Higher score proposal have precedence.\n possible_n: Vector with 1 if the proposal can be chosen and 0 otherwise.\n\n Note:\n The algorithm terminates when there are no more suitable proposals\n (because they have all been suppressed by higher scoring ones).\n\n Returns:\n mask_nms_n: A tensor with the same shape as :attr:'score_n'. The entries are 1 if that proposal\n has been selected (i.e. survived NMS) and 0 otherwise.\n \"\"\"\n # reshape\n score_1n = score_n.unsqueeze(-2)\n possible_1n = possible_n.unsqueeze(-2)\n idx_n1 = torch.arange(start=0, end=score_n.shape[-1], step=1, device=score_n.device).view(-1, 1).long()\n selected_n1 = torch.zeros_like(score_n).unsqueeze(dim=-1)\n\n # Greedy algorithm in a loop\n n_iter = 0\n while possible_1n.sum() > 0:\n n_iter += 1\n score_mask_nn = mask_overlap_nn * (score_1n * possible_1n)\n index_n1 = torch.max(score_mask_nn, keepdim=True, dim=-1)[1]\n selected_n1 += possible_1n.transpose(dim0=-1, dim1=-2) * (idx_n1 == index_n1)\n blocks_1n = torch.sum(mask_overlap_nn * selected_n1, keepdim=True, dim=-2)\n possible_1n *= (blocks_1n == 0)\n mask_selected_n = selected_n1.squeeze(dim=-1).bool()\n # print(\"DEBUG nms performed in \", n_iter)\n # print(\"DEBUG nms. Mask \", mask_selected_n.shape, mask_selected_n.sum(), mask_selected_n.dtype)\n return mask_selected_n\n\n @staticmethod\n def _unroll_and_compare(x_tmp: torch.Tensor, label: str) -> torch.Tensor:\n \"\"\" Given a vector of size: (*, n) creates an output of size (*, n, n)\n obtained by comparing all vector entries with all other vector entries\n The comparison is either: MIN,MAX \"\"\"\n if label == \"MAX\":\n y_tmp = torch.max(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))\n elif label == \"MIN\":\n y_tmp = torch.min(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))\n else:\n raise Exception(\"label is unknown. It is \", label)\n return y_tmp\n\n @staticmethod\n def _compute_iomin(\n x: torch.Tensor,\n y: torch.Tensor,\n w: torch.Tensor,\n h: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given x,y,w,h compute the Intersection over Min Area (IoMin) among all possible pairs.\n\n Args:\n x: torch.Tensor of shape: (n) with the x-coordinate\n y: torch.Tensor of shape: (n) with the y-coordinate\n w: torch.Tensor of shape: (n) with the width\n h: torch.Tensor of shape: (n) with the height\n\n Returns:\n A matrix of shape (n, n) with the IoMIN\n \"\"\"\n\n assert x.shape == y.shape == w.shape == h.shape\n\n # compute x1,x3,y1,y3 and area\n x1 = x\n x3 = x + w\n y1 = y\n y3 = y + h\n area = w * h\n\n min_area_nn = NonMaxSuppression._unroll_and_compare(area, \"MIN\")\n xi1_nn = NonMaxSuppression._unroll_and_compare(x1, \"MAX\")\n yi1_nn = NonMaxSuppression._unroll_and_compare(y1, \"MAX\")\n xi3_nn = NonMaxSuppression._unroll_and_compare(x3, \"MIN\")\n yi3_nn = NonMaxSuppression._unroll_and_compare(y3, \"MIN\")\n\n intersection_area_nn = torch.clamp(xi3_nn - xi1_nn, min=0) * torch.clamp(yi3_nn - yi1_nn, min=0)\n return intersection_area_nn / min_area_nn\n"
] | [
[
"torch.sum",
"torch.ones_like",
"torch.zeros_like",
"torch.no_grad",
"torch.from_numpy",
"torch.arange",
"torch.max",
"numpy.array",
"torch.clamp"
]
] |
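A hypothetical call to `NonMaxSuppression.compute_nm_mask` above, using three synthetic patches on the same tissue id; the two overlapping boxes exceed the IoMIN threshold, so only the higher-scoring one should survive. The import path mirrors this repo's layout and may differ in an installed package:

```python
import torch
from tissue_purifier.utils.nms_util import NonMaxSuppression  # path as in this repo

patches_xywh = torch.tensor([[0, 0, 10, 10],
                             [2, 2, 10, 10],     # heavy overlap with the first patch
                             [50, 50, 10, 10]])  # far away from both
score = torch.tensor([0.9, 0.5, 0.8])
ids = ["tissue_a", "tissue_a", "tissue_a"]

nms_mask_n, iomin_nn = NonMaxSuppression.compute_nm_mask(
    score=score, ids=ids, patches_xywh=patches_xywh, iom_threshold=0.5)
print(nms_mask_n)  # expected: tensor([True, False, True])
```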
Sergio0694/sepconv-gan | [
"82d908ed5c3dd55d7b2f8603450dac5108751a3b"
] | [
"training/networks/discriminators/vgg19.py"
] | [
"import tensorflow as tf\n\ndef get_network(x):\n '''Gets a discriminator network with the shared base of the VGG19 network.\n\n x(tf.Tensor) -- the VGG19 base network\n '''\n\n with tf.variable_scope('VGG19_top', None, [x], reuse=tf.AUTO_REUSE):\n conv1 = tf.layers.conv2d(x, 512, 3, activation=tf.nn.leaky_relu, padding='same')\n conv2 = tf.layers.conv2d(conv1, 512, 3, activation=tf.nn.leaky_relu, padding='same') + x\n pool = tf.layers.max_pooling2d(conv2, 3, 2, padding='valid')\n flat = tf.reshape(pool, [pool.shape[0], -1])\n d1 = tf.layers.dense(flat, 2048, activation=tf.nn.leaky_relu)\n dropout1 = tf.layers.dropout(d1, 0.8)\n d2 = tf.layers.dense(dropout1, 2048, activation=tf.nn.leaky_relu)\n dropout2 = tf.layers.dropout(d2, 0.8)\n d3 = tf.layers.dense(dropout2, 1)\n return d3"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.layers.max_pooling2d",
"tensorflow.layers.dense",
"tensorflow.layers.dropout"
]
] |
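A short TF 1.x sketch wiring `get_network` above onto a dummy feature map standing in for the VGG19 base output; the batch and spatial sizes are invented, and the head assumes 512 input channels so the residual addition producing `conv2` is shape-compatible:

```python
import tensorflow as tf
import vgg19  # assumes training/networks/discriminators is on the import path

features = tf.placeholder(tf.float32, shape=[4, 14, 14, 512], name="vgg19_features")
logits = vgg19.get_network(features)  # unbounded per-image scores, shape [4, 1]
print(logits.shape)
```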
juanjosegarciaripoll/seeq | [
"3554550c3348fbaae398737cf4ae5510a34d6665"
] | [
"seeq/test/test_parametric_control.py"
] | [
"\nfrom seeq.control import *\n\nimport unittest\n\nclass TestQControl(unittest.TestCase):\n π = np.pi\n σz = np.array([[1., 0.],[0., -1.]])\n σx = np.array([[0., 1.],[1., 0.]])\n σy = np.array([[0., -1.j],[1.j, 0.]])\n ψ0 = np.eye(2)\n\n def test_nothing(self):\n \"\"\"For a qubit to remain the same, we do nothing.\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x * (self.σx @ ψ)\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n\n def test_nothing2(self):\n \"\"\"For a qubit to remain the same, we cancel the frequency.\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x[0] * (self.σx @ ψ) + (1.0 - x[1]) * (self.σz @ ψ)\n r = parametric_control([1.0, 0.1], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 2)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n self.assertAlmostEqual(r.x[1], 1.0, delta=1e-7)\n\n def test_qubit_flip(self):\n \"\"\"Construct a π/2 pulse.\"\"\"\n Ug = -1j*self.σy\n H = lambda t, x, ψ: (x * self.σy) @ ψ\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-9, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], self.π/2., delta=1e-7)\n\n def test_nothing_derivative(self):\n \"\"\"For a qubit to remain the same, we do nothing (with gradients).\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x * (self.σx @ ψ)\n dH = lambda t, x, ψ: [self.σx @ ψ]\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, dH=dH, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n\n def test_qubit_flip_derivative(self):\n \"\"\"Construct a π/2 pulse (with gradients).\"\"\"\n Ug = -1j*self.σy\n H = lambda t, x, ψ: (x * self.σy) @ ψ\n dH = lambda t, x, ψ: [self.σy @ ψ]\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, dH=dH, tol=1e-9, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], self.π/2., delta=1e-7)\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom seeq.states import *\n"
] | [
[
"numpy.array",
"numpy.eye"
]
] |
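The same qubit-flip optimization as in `test_qubit_flip` above, written as a standalone script: `parametric_control` searches for the amplitude `x` that turns the identity into a `-1j*sigma_y` gate in time `T=1`, so the result should come out close to pi/2:

```python
import numpy as np
from seeq.control import parametric_control

sy = np.array([[0., -1.j], [1.j, 0.]])
psi0 = np.eye(2)
H = lambda t, x, psi: (x * sy) @ psi  # driven Hamiltonian applied to the state

r = parametric_control([1.0], H, psi0, T=1.0, Ug=-1j * sy, tol=1e-9, method='expm')
print(r.x[0], np.pi / 2)  # the two values should agree to ~1e-7
```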
wangdingyan/hybridUQ | [
"c141a4bec0e716a12444f7e9ab0d7c975df93184"
] | [
"chemprop/utils/uclass.py"
] | [
"import numpy as np\n\n\nclass uncertainty:\n def __init__(self):\n pass\n\n\nclass uncertainties:\n\n def __init__(self):\n self.uncertainty_collection = {}\n self.uncertainty_count = {}\n self.norm_func = {'MinMax' : lambda x: (x-np.min(x)) / (np.max(x)-np.min(x)),\n 'Zscore' : lambda x: (x-np.mean(x)) / np.std(x),\n 'Simple' : lambda x: x,\n 'Argsort': lambda x: np.argsort(np.argsort(x))}\n\n def add_unc(self,\n uc_name,\n uc_value):\n\n if uc_name not in self.uncertainty_collection:\n self.uncertainty_count[uc_name] = 1\n self.uncertainty_collection[uc_name] = uc_value\n else:\n self.uncertainty_count[uc_name] += 1\n self.uncertainty_collection[uc_name] += uc_value\n\n def mean(self):\n for name in self.uncertainty_collection:\n self.uncertainty_collection[name] = self.uncertainty_collection[name] / self.uncertainty_count[name]\n\n def get_dict(self):\n return self.uncertainty_collection\n\n def get_names(self):\n return set(self.uncertainty_collection.keys())\n\n def simply_add(self, name_list):\n return np.sum([self.uncertainty_collection[name] for name in name_list], axis=0)\n\n def ensemble(self, weights=None, norm_methods=None):\n if norm_methods is None:\n norm_methods = self.norm_func\n\n output = {}\n for weight_name in weights:\n if np.sum(list(weights[weight_name].values())) == 0:\n weights[weight_name] = {k:1.0 for k in weights[weight_name].keys()}\n\n for norm_name in norm_methods:\n output[f'{weight_name}_{norm_name}'] = np.mean([self.norm_func[norm_name](self.uncertainty_collection[uc_name])\\\n *weights[weight_name].get(uc_name, 0.) for uc_name in self.get_names()], axis=0)\n return output\n\n\n"
] | [
[
"numpy.sum",
"numpy.argsort",
"numpy.max",
"numpy.min",
"numpy.std",
"numpy.mean"
]
] |
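A hypothetical walkthrough of the `uncertainties` container above: accumulate two named uncertainty estimates over three ensemble members, average them with `mean()`, then combine them with a weight set and two of the built-in normalizations (the module path follows this repo's layout):

```python
import numpy as np
from chemprop.utils.uclass import uncertainties

unc = uncertainties()
for _ in range(3):  # e.g. three ensemble members contributing estimates
    unc.add_unc('mc_dropout', np.random.rand(5))
    unc.add_unc('ensemble_var', np.random.rand(5))
unc.mean()  # divide each accumulated sum by its count

weights = {'equal': {'mc_dropout': 1.0, 'ensemble_var': 1.0}}
combined = unc.ensemble(weights=weights, norm_methods=['MinMax', 'Zscore'])
print(sorted(combined))  # ['equal_MinMax', 'equal_Zscore']
```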
HSU-S21-CS232/final-th150 | [
"cf0004c7a9e72b08a0c1c9985c8c43e83a0fb650"
] | [
"EZ Queue/screen.py"
] | [
"import os\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom PIL import ImageGrab\n\n\nclass Screen(object):\n\n WINDOW_NAME = 'data'\n\n def __init__(self):\n self.image = None\n self.data = None\n self.event = None\n\n @property\n def inverted_image_size(self):\n return (self.image.size[1], self.image.size[0])\n\n def normalize_data(self):\n self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2BGR)\n\n def get_data(self):\n self.image = ImageGrab.grab()\n\n self.data = np.array(\n self.image.getdata(), dtype='uint8'\n ).reshape(self.inverted_image_size + (3,))\n\n self.normalize_data()\n\n def get_match(self, template):\n return cv2.matchTemplate(\n self.data, template.data, cv2.TM_CCOEFF_NORMED)\n\n def initialize_window(self):\n cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.WINDOW_NAME, 800, 600)\n\n def show_data(self, gray=False):\n cv2.imshow(self.WINDOW_NAME, self.data)\n cv2.waitKey(1)\n\n def draw_rectangle(self, point, size):\n cv2.rectangle(\n self.data, point,\n (point[0] + size[0], point[1] + size[1]),\n (0, 0, 255), 2)\n\n def capture(self):\n while True:\n self.get_data()\n location = self.check_template()\n if location:\n self.event.callback(location)\n break\n if (not location and self.event.timeout > 0\n and time.time() >= self.event.timeout):\n self.event.timeout_callback()\n break\n\n def assign_event(self, event):\n self.event = event\n self.capture()\n\n def check_template(self):\n match = self.get_match(self.event.template)\n locations = np.where(match >= self.event.template.threshold)\n\n try:\n location = next(zip(*locations[::-1]))\n except StopIteration:\n return\n\n return location if location else None\n"
] | [
[
"numpy.where"
]
] |
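The heart of `Screen.check_template` above is the OpenCV template match plus the `np.where` threshold scan. The same pattern shown standalone on a synthetic frame, so the first returned location is the patch's known position:

```python
import cv2
import numpy as np

frame = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
patch = frame[100:140, 200:260].copy()  # guaranteed match at x=200, y=100

match = cv2.matchTemplate(frame, patch, cv2.TM_CCOEFF_NORMED)
locations = np.where(match >= 0.95)
first = next(zip(*locations[::-1]), None)  # first (x, y) scoring above the threshold
print(first)  # (200, 100)
```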
HeyLey/catboost | [
"f472aed90604ebe727537d9d4a37147985e10ec2"
] | [
"contrib/python/numpy/numpy/lib/tests/test_type_check.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.compat import long\nfrom numpy.testing import (\n TestCase, assert_, assert_equal, assert_array_equal, run_module_suite\n )\nfrom numpy.lib.type_check import (\n common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,\n nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close\n )\n\n\ndef assert_all(x):\n assert_(np.all(x), x)\n\n\nclass TestCommonType(TestCase):\n def test_basic(self):\n ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)\n af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)\n af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)\n af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)\n acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)\n acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)\n assert_(common_type(ai32) == np.float64)\n assert_(common_type(af16) == np.float16)\n assert_(common_type(af32) == np.float32)\n assert_(common_type(af64) == np.float64)\n assert_(common_type(acs) == np.csingle)\n assert_(common_type(acd) == np.cdouble)\n\n\nclass TestMintypecode(TestCase):\n\n def test_default_1(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype), 'd')\n assert_equal(mintypecode('f'), 'f')\n assert_equal(mintypecode('d'), 'd')\n assert_equal(mintypecode('F'), 'F')\n assert_equal(mintypecode('D'), 'D')\n\n def test_default_2(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype+'f'), 'f')\n assert_equal(mintypecode(itype+'d'), 'd')\n assert_equal(mintypecode(itype+'F'), 'F')\n assert_equal(mintypecode(itype+'D'), 'D')\n assert_equal(mintypecode('ff'), 'f')\n assert_equal(mintypecode('fd'), 'd')\n assert_equal(mintypecode('fF'), 'F')\n assert_equal(mintypecode('fD'), 'D')\n assert_equal(mintypecode('df'), 'd')\n assert_equal(mintypecode('dd'), 'd')\n #assert_equal(mintypecode('dF',savespace=1),'F')\n assert_equal(mintypecode('dF'), 'D')\n assert_equal(mintypecode('dD'), 'D')\n assert_equal(mintypecode('Ff'), 'F')\n #assert_equal(mintypecode('Fd',savespace=1),'F')\n assert_equal(mintypecode('Fd'), 'D')\n assert_equal(mintypecode('FF'), 'F')\n assert_equal(mintypecode('FD'), 'D')\n assert_equal(mintypecode('Df'), 'D')\n assert_equal(mintypecode('Dd'), 'D')\n assert_equal(mintypecode('DF'), 'D')\n assert_equal(mintypecode('DD'), 'D')\n\n def test_default_3(self):\n assert_equal(mintypecode('fdF'), 'D')\n #assert_equal(mintypecode('fdF',savespace=1),'F')\n assert_equal(mintypecode('fdD'), 'D')\n assert_equal(mintypecode('fFD'), 'D')\n assert_equal(mintypecode('dFD'), 'D')\n\n assert_equal(mintypecode('ifd'), 'd')\n assert_equal(mintypecode('ifF'), 'F')\n assert_equal(mintypecode('ifD'), 'D')\n assert_equal(mintypecode('idF'), 'D')\n #assert_equal(mintypecode('idF',savespace=1),'F')\n assert_equal(mintypecode('idD'), 'D')\n\n\nclass TestIsscalar(TestCase):\n\n def test_basic(self):\n assert_(np.isscalar(3))\n assert_(not np.isscalar([3]))\n assert_(not np.isscalar((3,)))\n assert_(np.isscalar(3j))\n assert_(np.isscalar(long(10)))\n assert_(np.isscalar(4.0))\n\n\nclass TestReal(TestCase):\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(y, np.real(y))\n\n def test_cmplx(self):\n y = np.random.rand(10,)+1j*np.random.rand(10,)\n assert_array_equal(y.real, np.real(y))\n\n\nclass TestImag(TestCase):\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(0, np.imag(y))\n\n def test_cmplx(self):\n y = np.random.rand(10,)+1j*np.random.rand(10,)\n assert_array_equal(y.imag, 
np.imag(y))\n\n\nclass TestIscomplex(TestCase):\n\n def test_fail(self):\n z = np.array([-1, 0, 1])\n res = iscomplex(z)\n assert_(not np.sometrue(res, axis=0))\n\n def test_pass(self):\n z = np.array([-1j, 1, 0])\n res = iscomplex(z)\n assert_array_equal(res, [1, 0, 0])\n\n\nclass TestIsreal(TestCase):\n\n def test_pass(self):\n z = np.array([-1, 0, 1j])\n res = isreal(z)\n assert_array_equal(res, [1, 1, 0])\n\n def test_fail(self):\n z = np.array([-1j, 1, 0])\n res = isreal(z)\n assert_array_equal(res, [0, 1, 1])\n\n\nclass TestIscomplexobj(TestCase):\n\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(not iscomplexobj(z))\n z = np.array([-1j, 0, -1])\n assert_(iscomplexobj(z))\n\n\nclass TestIsrealobj(TestCase):\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(isrealobj(z))\n z = np.array([-1j, 0, -1])\n assert_(not isrealobj(z))\n\n\nclass TestIsnan(TestCase):\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isnan(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((1.,))/0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((-1.,))/0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array((0.,))/0.) == 1)\n\n def test_integer(self):\n assert_all(np.isnan(1) == 0)\n\n def test_complex(self):\n assert_all(np.isnan(1+1j) == 0)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array(0+0j)/0.) == 1)\n\n\nclass TestIsfinite(TestCase):\n # Fixme, wrong place, isfinite now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isfinite(z) == 1\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((1.,))/0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((-1.,))/0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((0.,))/0.) == 0)\n\n def test_integer(self):\n assert_all(np.isfinite(1) == 1)\n\n def test_complex(self):\n assert_all(np.isfinite(1+1j) == 1)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array(1+1j)/0.) == 0)\n\n\nclass TestIsinf(TestCase):\n # Fixme, wrong place, isinf now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isinf(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((1.,))/0.) == 1)\n\n def test_posinf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(1.,)/0.) == 1)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((-1.,))/0.) == 1)\n\n def test_neginf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(-1.)/0.) == 1)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((0.,))/0.) 
== 0)\n\n\nclass TestIsposinf(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isposinf(np.array((-1., 0, 1))/0.)\n assert_(vals[0] == 0)\n assert_(vals[1] == 0)\n assert_(vals[2] == 1)\n\n\nclass TestIsneginf(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isneginf(np.array((-1., 0, 1))/0.)\n assert_(vals[0] == 1)\n assert_(vals[1] == 0)\n assert_(vals[2] == 0)\n\n\nclass TestNanToNum(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = nan_to_num(np.array((-1., 0, 1))/0.)\n assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))\n assert_(vals[1] == 0)\n assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))\n\n def test_integer(self):\n vals = nan_to_num(1)\n assert_all(vals == 1)\n vals = nan_to_num([1])\n assert_array_equal(vals, np.array([1], np.int))\n\n def test_complex_good(self):\n vals = nan_to_num(1+1j)\n assert_all(vals == 1+1j)\n\n def test_complex_bad(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(0+1.j)/0.\n vals = nan_to_num(v)\n # !! This is actually (unexpectedly) zero\n assert_all(np.isfinite(vals))\n\n def test_complex_bad2(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(-1+1.j)/0.\n vals = nan_to_num(v)\n assert_all(np.isfinite(vals))\n # Fixme\n #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))\n # !! This is actually (unexpectedly) positive\n # !! inf. Comment out for now, and see if it\n # !! changes\n #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))\n\n\nclass TestRealIfClose(TestCase):\n\n def test_basic(self):\n a = np.random.rand(10)\n b = real_if_close(a+1e-15j)\n assert_all(isrealobj(b))\n assert_array_equal(a, b)\n b = real_if_close(a+1e-7j)\n assert_all(iscomplexobj(b))\n b = real_if_close(a+1e-7j, tol=1e-6)\n assert_all(isrealobj(b))\n\n\nclass TestArrayConversion(TestCase):\n\n def test_asfarray(self):\n a = asfarray(np.array([1, 2, 3]))\n assert_equal(a.__class__, np.ndarray)\n assert_(np.issubdtype(a.dtype, np.float))\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.issubdtype",
"numpy.sometrue",
"numpy.isscalar",
"numpy.isfinite",
"numpy.testing.assert_array_equal",
"numpy.compat.long",
"numpy.lib.type_check.common_type",
"numpy.random.rand",
"numpy.lib.type_check.isreal",
"numpy.isnan",
"numpy.lib.type_check.iscomplex",
"numpy.lib.type_check.iscomplexobj",
"numpy.lib.type_check.mintypecode",
"numpy.all",
"numpy.lib.type_check.isrealobj",
"numpy.isinf",
"numpy.errstate",
"numpy.lib.type_check.nan_to_num",
"numpy.lib.type_check.real_if_close",
"numpy.array",
"numpy.real",
"numpy.testing.assert_",
"numpy.imag"
]
] |
yusonghust/gcn | [
"4cacba4bd3d889a2139b19385774b2ee1cde80d4"
] | [
"graph.py"
] | [
"# -*- coding: utf-8 -*-\nimport networkx as nx\nimport numpy as np\nfrom utils import sparse_to_tuple\nimport scipy.sparse as sp\n\nclass Graph():\n def __init__(self,edgelist,weighted,directed,labelfile,featurefile):\n self.edgelist = edgelist\n self.weighted = weighted\n self.directed = directed\n self.G = self.build_graph()\n self.node_list = list(self.G.nodes())\n self.look_up = {}\n self.node_size = 0\n for node in self.node_list:\n self.look_up[node] = self.node_size\n self.node_size += 1\n self.labels = self.read_node_labels(labelfile)\n if featurefile is None:\n self.features = np.identity(n=len(self.node_list))\n #scipy.sparse.coo_matrix: A sparse matrix in COOrdinate format.\n #Where A[i[k], j[k]] = data[k].\n self.features = sparse_to_tuple(sp.coo_matrix(self.features))\n else:\n self.features = self.read_node_features(featurefile)\n\n\n def build_graph(self):\n '''\n Reads the input network using networkx.\n '''\n if self.weighted:\n G = nx.read_edgelist(self.edgelist, nodetype=int, data=(('weight',float),), create_using=nx.DiGraph())\n else:\n G = nx.read_edgelist(self.edgelist, nodetype=int, create_using=nx.DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n\n if not self.directed:\n G = G.to_undirected()\n return G\n\n def read_node_labels(self,filename):\n '''\n read node labels\n '''\n fin = open(filename, 'r')\n while 1:\n l = fin.readline()\n if l == '':\n break\n vec = l.split()\n self.G.nodes[int(vec[0])]['label'] = vec[1:]\n fin.close()\n\n def read_node_features(self,filename):\n '''\n read node features\n '''\n fin = open(filename, 'r')\n for l in fin.readlines():\n vec = l.split()\n self.G.nodes[int(vec[0])]['feature'] = np.array([float(x) for x in vec[1:]])\n fin.close()\n\n\n"
] | [
[
"scipy.sparse.coo_matrix"
]
] |
joesider9/forecasting_library | [
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197"
] | [
"Fuzzy_clustering/ver_tf2/Models_predict_manager.py",
"Fuzzy_clustering/version3/project_manager/PredictModelManager/CombineModelPredict.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport logging, shutil, glob\nimport pymongo, joblib\nfrom Fuzzy_clustering.ver_tf2.Clusterer import clusterer\nfrom Fuzzy_clustering.ver_tf2.Cluster_predict_regressors import cluster_predict\nfrom Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict\nfrom Fuzzy_clustering.ver_tf2.Combine_predict_model import Combine_overall_predict\nfrom Fuzzy_clustering.ver_tf2.util_database import write_database\n\nclass ModelPredictManager_ver2(object):\n\n def __init__(self, path_model):\n self.istrained = False\n self.path_model = path_model\n try:\n self.load()\n except:\n pass\n\n def init(self, static_data, data_variables, use_db=False):\n self.data_variables = data_variables\n self.static_data = static_data\n self.thres_split = static_data['clustering']['thres_split']\n self.thres_act = static_data['clustering']['thres_act']\n self.n_clusters = static_data['clustering']['n_clusters']\n self.rated = static_data['rated']\n self.var_imp = static_data['clustering']['var_imp']\n self.var_lin = static_data['clustering']['var_lin']\n self.var_nonreg = static_data['clustering']['var_nonreg']\n\n self.create_logger()\n self.use_db = use_db\n if use_db:\n self.db = self.open_db()\n\n\n def open_db(self):\n try:\n myclient = pymongo.MongoClient(\n \"mongodb://\" + self.static_data['url'] + \":\" + self.static_data['port'] + \"/\")\n\n project_db = myclient[self.static_data['_id']]\n except:\n self.logger.info('Cannot open Database')\n self.use_db = False\n project_db = None\n raise ConnectionError('Cannot open Database')\n self.logger.info('Open Database successfully')\n return project_db\n\n def load_data(self):\n data_path = self.static_data['path_data']\n X = pd.read_csv(os.path.join(data_path, 'dataset_X_test.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n if os.path.exists(os.path.join(data_path, 'dataset_y_test.csv')):\n y = pd.read_csv(os.path.join(data_path, 'dataset_y_test.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n else:\n y=None\n\n if os.path.exists(os.path.join(data_path, 'dataset_cnn_test.pickle')):\n X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn_test.pickle'))\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n else:\n X_cnn = np.array([])\n\n if os.path.exists(os.path.join(data_path, 'dataset_lstm_test.pickle')):\n X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm_test.pickle'))\n else:\n X_lstm = np.array([])\n\n self.logger.info('Data loaded successfully')\n return X, X_cnn, X_lstm, y\n\n\n def create_logger(self):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.INFO)\n handler = logging.FileHandler(os.path.join(self.static_data['path_project'], 'log_model_evaluation.log'), 'a')\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n self.logger.addHandler(handler)\n\n\n def predict_regressors(self, X_test, X_cnn_test, X_lstm_test, y_test=None):\n data_path = self.static_data['path_data']\n pred_cluster = dict()\n X_test = pd.DataFrame(self.sc.transform(X_test.values), columns=X_test.columns, index=X_test.index)\n if not hasattr(self, 'clusterer'):\n self.clusterer = clusterer(self.static_data['path_fuzzy_models'],\n self.static_data['clustering']['cluster_file'], self.static_data['type'])\n act_test = self.clusterer.compute_activations(X_test)\n act_test = 
self.check_if_all_nans(act_test)\n for clust in self.regressors.keys():\n if clust == 'Global':\n if len(self.regressors['Global']['models']) > 0:\n predict_module = global_predict(self.static_data)\n pred_cluster['Global'] = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm=X_lstm_test)\n if y_test is not None:\n pred_cluster['Global']['metrics'] = predict_module.evaluate(pred_cluster['Global'], self.scale_y.transform(y_test.values))\n pred_cluster['Global']['dates'] = X_test.index\n pred_cluster['Global']['index'] = np.arange(0, X_test.shape[0])\n else:\n dates = X_test.index[act_test[clust] >= self.thres_act]\n nind = np.where(act_test[clust] >= self.thres_act)[0]\n nind.sort()\n\n x = X_test.loc[dates]\n if y_test is not None:\n targ = y_test.loc[dates].values\n else:\n targ = None\n if len(X_cnn_test.shape) > 1:\n x_cnn = X_cnn_test[nind]\n else:\n x_cnn = np.array([])\n if len(X_lstm_test.shape) > 1:\n x_lstm = X_lstm_test[nind]\n else:\n x_lstm = np.array([])\n predict_module = cluster_predict(self.static_data, clust)\n pred_cluster[clust] = predict_module.predict(x.values, X_cnn=x_cnn, X_lstm=x_lstm)\n if targ is not None and targ.shape[0]>0:\n pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster[clust], self.scale_y.transform(targ))\n pred_cluster[clust]['dates'] = dates\n pred_cluster[clust]['index'] = nind\n predictions = dict()\n result_clust = pd.DataFrame()\n for clust in pred_cluster.keys():\n for method in pred_cluster[clust].keys():\n if not method in {'dates', 'index', 'metrics'}:\n if not method in predictions.keys():\n predictions[method] = pd.DataFrame(index=X_test.index, columns=[cl for cl in pred_cluster.keys()])\n predictions[method].loc[pred_cluster[clust]['dates'], clust] = pred_cluster[clust][method].ravel()\n elif method in {'metrics'}:\n result_clust = pd.concat([result_clust, pred_cluster[clust][method]['mae'].rename(clust)], axis=1)\n\n combine_overall = Combine_overall_predict(self.static_data)\n predictions_final = combine_overall.predict(pred_cluster, predictions)\n\n for method, pred in predictions_final.items():\n pred = self.scale_y.inverse_transform(pred.reshape(-1, 1))\n pred[np.where(pred<0)] = 0\n predictions_final[method] = pred\n\n if y_test is not None:\n result_clust.to_csv(os.path.join(data_path, 'result_of_clusters.csv'))\n\n return predictions_final\n\n def compute_metrics(self, pred, y):\n if self.rated is None:\n rated = y.ravel()\n else:\n rated = self.rated\n err = np.abs(pred.ravel() - y.ravel()) / rated\n sse = np.sum(np.square(pred.ravel() - y.ravel()))\n rms = np.sqrt(np.mean(np.square(err)))\n mae = np.mean(err)\n mse = sse / y.shape[0]\n\n return [sse, rms, mae, mse]\n\n def evaluate(self, pred_all, y):\n result = pd.DataFrame(index=[method for method in pred_all.keys()], columns=['sse', 'rms', 'mae', 'mse'])\n for method, pred in pred_all.items():\n if isinstance(pred, pd.DataFrame):\n result.loc[method] = self.compute_metrics(pred.values, y)\n else:\n result.loc[method] = self.compute_metrics(pred, y)\n\n return result\n\n def predict(self):\n if self.istrained:\n X, X_cnn, X_lstm, y = self.load_data()\n\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n X_cnn = X_cnn[index]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm)\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, 
index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n return predictions_final\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def predict_online(self, X, X_cnn= np.array([]), X_lstm= np.array([])):\n if len(X_cnn.shape)>1:\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n if self.istrained:\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = X.index[np.where(X['flux'] > 1e-8)[0]]\n X = X.loc[index]\n X_cnn = X_cnn[np.where(X['flux'] > 1e-8)[0]]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm)\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n return predictions_final\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def evaluate_all(self):\n data_path = self.static_data['path_data']\n if self.istrained:\n X, X_cnn, X_lstm, y = self.load_data()\n y_test = y.copy()\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n index = indices[index]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm, y)\n\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n if y_test is not None:\n result_all = self.evaluate(predictions_final, y_test.values)\n result_all.to_csv(os.path.join(data_path, 'result_final.csv'))\n joblib.dump(predictions_final, os.path.join(data_path, 'predictions_final.pickle'))\n y_test.to_csv(os.path.join(data_path, 'target_test.csv'))\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def check_if_all_nans(self, activations):\n\n\n if activations.isna().all(axis=1).any() == True:\n indices = activations.index[activations.isna().all(axis=1).to_numpy().ravel()]\n if indices.shape[0]>50:\n raise RuntimeError('Too many nans. 
Please check your model')\n for ind in indices:\n act = activations.loc[ind]\n clust = act.idxmax()\n activations.loc[ind, clust] = 0.1\n\n return activations\n\n def load(self):\n if os.path.exists(os.path.join(self.path_model, 'manager' + '.pickle')):\n try:\n f = open(os.path.join(self.path_model, 'manager' + '.pickle'), 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n if 'path_model' in tmp_dict.keys():\n del tmp_dict['path_model']\n self.__dict__.update(tmp_dict)\n except:\n raise ValueError('Cannot find model for %s', self.path_model)\n else:\n raise ValueError('Cannot find model for %s', self.path_model)\n\nif __name__ == '__main__':\n from util_database import write_database\n from Fuzzy_clustering.ver_tf2.Projects_train_manager import ProjectsTrainManager\n\n static_data = write_database()\n project_manager = ProjectsTrainManager(static_data)\n project_manager.initialize()\n project_manager.create_datasets(project_manager.data_eval, test=True)\n project = [pr for pr in project_manager.group_static_data if pr['_id'] == 'Lach'][0]\n static_data = project['static_data']\n\n model = ModelPredictManager(static_data['path_model'])\n model.init(project['static_data'], project_manager.data_variables)\n model.evaluate_all()",
"import os\nimport numpy as np\nimport joblib\nfrom Fuzzy_clustering.version3.project_manager.PredictModelManager.Sklearn_combine_predict import sklearn_model_predict\n\nclass CombineModelPredict(object):\n def __init__(self, static_data):\n self.static_data = static_data\n self.istrained = False\n self.model_dir = os.path.join(self.static_data['path_model'], 'Combine_module')\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n\n self.model_type = self.static_data['type']\n self.combine_methods = self.static_data['combine_methods']\n methods = [method for method in self.static_data['project_methods'].keys() if\n self.static_data['project_methods'][method] == True]\n\n\n\n try:\n self.load(self.model_dir)\n except:\n pass\n self.methods = []\n for method in methods:\n if method == 'RBF_ALL_CNN':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN', 'RBF-CNN'])\n elif method == 'RBF_ALL':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN'])\n else:\n self.methods.append(method)\n self.methods += self.combine_methods\n self.weight_size_full = len(self.methods)\n self.weight_size = len(self.combine_methods)\n self.rated = self.static_data['rated']\n self.n_jobs = self.static_data['sklearn']['njobs']\n self.data_dir = self.static_data['path_data']\n\n def bcp_predict(self, X, w):\n preds = []\n for inp in X:\n inp=inp.reshape(-1,1)\n mask=~np.isnan(inp)\n pred = np.matmul(w[mask.T]/np.sum(w[mask.T]), inp[mask])\n preds.append(pred)\n\n return np.array(preds)\n\n def predict(self, predictions):\n if self.istrained==True:\n pred_combine = dict()\n self.combine_methods = [method for method in self.combine_methods if method in predictions.keys()]\n combine_method = 'average'\n for method in self.methods:\n pred = predictions[method].mean(axis=1).values.astype('float').reshape(-1, 1)\n pred[np.where(pred < 0)] = 0\n pred_combine['average_' + method] = pred\n\n combine_method = 'bcp'\n for method in self.combine_methods:\n if 'bcp_'+method in self.models.keys():\n pred = self.bcp_predict(predictions[method].values.astype('float'), self.models['bcp_'+method])\n pred[np.where(pred < 0)] = 0\n pred_combine['bcp_' + method] = pred\n\n for method in self.combine_methods:\n X_pred = predictions[method].values.astype('float')\n X_pred[np.where(np.isnan(X_pred))] = 0\n mlp_model = sklearn_model_predict(self.model_dir + '/' + method, self.rated, 'mlp', self.n_jobs)\n if mlp_model.istrained == True:\n pred = mlp_model.predict(X_pred)\n pred[np.where(pred < 0)] = 0\n pred_combine['mlp_' + method] = pred\n else:\n raise ImportError('Combine overall model seems not trained')\n\n return pred_combine\n\n def load(self, pathname):\n cluster_dir = os.path.join(pathname)\n if os.path.exists(os.path.join(cluster_dir, 'combine_models.pickle')):\n try:\n f = open(os.path.join(cluster_dir, 'combine_models.pickle'), 'rb')\n tmp_dict = joblib.load(f)\n f.close()\n del tmp_dict['model_dir']\n self.__dict__.update(tmp_dict)\n except:\n raise ImportError('Cannot open RLS model')\n else:\n raise ImportError('Cannot find RLS model')"
] | [
[
"pandas.DataFrame",
"numpy.arange",
"numpy.array",
"numpy.where",
"numpy.square",
"numpy.mean"
],
[
"numpy.array",
"numpy.sum",
"numpy.where",
"numpy.isnan"
]
] |
Johnson-Lsx/espnet | [
"01214cff08cdd737bcab93dd62e127169394d073"
] | [
"espnet/nets/pytorch_backend/transducer/transformer_decoder.py"
] | [
"\"\"\"Decoder definition for transformer-transducer models.\"\"\"\n\nimport torch\n\nfrom espnet.nets.pytorch_backend.transducer.blocks import build_blocks\nfrom espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork\nfrom espnet.nets.pytorch_backend.transducer.utils import check_state\nfrom espnet.nets.pytorch_backend.transducer.utils import pad_batch_state\nfrom espnet.nets.pytorch_backend.transducer.utils import pad_sequence\nfrom espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm\nfrom espnet.nets.pytorch_backend.transformer.mask import subsequent_mask\nfrom espnet.nets.transducer_decoder_interface import TransducerDecoderInterface\n\n\nclass DecoderTT(TransducerDecoderInterface, torch.nn.Module):\n \"\"\"Decoder module for transformer-transducer models.\n\n Args:\n odim (int): dimension of outputs\n edim (int): dimension of encoder outputs\n jdim (int): dimension of joint-space\n dec_arch (list): list of layer definitions\n input_layer (str): input layer type\n repeat_block (int): repeat provided blocks N times if N > 1\n joint_activation_type (str) joint network activation type\n positional_encoding_type (str): positional encoding type\n positionwise_layer_type (str): linear\n positionwise_activation_type (str): positionwise activation type\n dropout_rate_embed (float): dropout rate for embedding layer (if specified)\n blank (int): blank symbol ID\n\n \"\"\"\n\n def __init__(\n self,\n odim,\n edim,\n jdim,\n dec_arch,\n input_layer=\"embed\",\n repeat_block=0,\n joint_activation_type=\"tanh\",\n positional_encoding_type=\"abs_pos\",\n positionwise_layer_type=\"linear\",\n positionwise_activation_type=\"relu\",\n dropout_rate_embed=0.0,\n blank=0,\n ):\n \"\"\"Construct a Decoder object for transformer-transducer models.\"\"\"\n torch.nn.Module.__init__(self)\n\n self.embed, self.decoders, ddim, _ = build_blocks(\n \"decoder\",\n odim,\n input_layer,\n dec_arch,\n repeat_block=repeat_block,\n positional_encoding_type=positional_encoding_type,\n positionwise_layer_type=positionwise_layer_type,\n positionwise_activation_type=positionwise_activation_type,\n dropout_rate_embed=dropout_rate_embed,\n padding_idx=blank,\n )\n\n self.after_norm = LayerNorm(ddim)\n\n self.joint_network = JointNetwork(odim, edim, ddim, jdim, joint_activation_type)\n\n self.dunits = ddim\n self.odim = odim\n\n self.blank = blank\n\n def init_state(self, batch_size=None, device=None, dtype=None):\n \"\"\"Initialize decoder states.\n\n Args:\n init_tensor (torch.Tensor): batch of input features (B, dec_dim)\n\n Returns:\n state (list): batch of decoder decoder states [L x None]\n\n \"\"\"\n state = [None] * len(self.decoders)\n\n return state\n\n def forward(self, tgt, tgt_mask, memory):\n \"\"\"Forward transformer-transducer decoder.\n\n Args:\n tgt (torch.Tensor): input token ids, int64 (batch, maxlen_out)\n if input_layer == \"embed\"\n input tensor\n (batch, maxlen_out, #mels) in the other cases\n tgt_mask (torch.Tensor): input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n memory (torch.Tensor): encoded memory, float32 (batch, maxlen_in, feat)\n\n Return:\n z (torch.Tensor): joint output (batch, maxlen_in, maxlen_out, odim)\n tgt_mask (torch.Tensor): score mask before softmax (batch, maxlen_out)\n\n \"\"\"\n tgt = self.embed(tgt)\n\n tgt, tgt_mask = self.decoders(tgt, tgt_mask)\n tgt = self.after_norm(tgt)\n\n h_enc = memory.unsqueeze(2)\n h_dec = tgt.unsqueeze(1)\n\n z = self.joint_network(h_enc, 
h_dec)\n\n return z, tgt_mask\n\n def score(self, hyp, cache):\n \"\"\"Forward one step.\n\n Args:\n hyp (dataclass): hypothesis\n cache (dict): states cache\n\n Returns:\n y (torch.Tensor): decoder outputs (1, dec_dim)\n (list): decoder states\n [L x (1, max_len, dec_dim)]\n lm_tokens (torch.Tensor): token id for LM (1)\n\n \"\"\"\n device = next(self.parameters()).device\n\n tgt = torch.tensor(hyp.yseq).unsqueeze(0).to(device=device)\n lm_tokens = tgt[:, -1]\n\n str_yseq = \"\".join([str(x) for x in hyp.yseq])\n\n if str_yseq in cache:\n y, new_state = cache[str_yseq]\n else:\n tgt_mask = subsequent_mask(len(hyp.yseq)).unsqueeze(0).to(device=device)\n\n state = check_state(hyp.dec_state, (tgt.size(1) - 1), self.blank)\n\n tgt = self.embed(tgt)\n\n new_state = []\n for s, decoder in zip(state, self.decoders):\n tgt, tgt_mask = decoder(tgt, tgt_mask, cache=s)\n new_state.append(tgt)\n\n y = self.after_norm(tgt[:, -1])\n\n cache[str_yseq] = (y, new_state)\n\n return y[0], new_state, lm_tokens\n\n def batch_score(self, hyps, batch_states, cache):\n \"\"\"Forward batch one step.\n\n Args:\n hyps (list): batch of hypotheses\n batch_states (list): decoder states\n [L x (B, max_len, dec_dim)]\n cache (dict): states cache\n\n Returns:\n batch_y (torch.Tensor): decoder output (B, dec_dim)\n batch_states (list): decoder states\n [L x (B, max_len, dec_dim)]\n lm_tokens (torch.Tensor): batch of token ids for LM (B)\n\n \"\"\"\n final_batch = len(hyps)\n device = next(self.parameters()).device\n\n process = []\n done = [None for _ in range(final_batch)]\n\n for i, hyp in enumerate(hyps):\n str_yseq = \"\".join([str(x) for x in hyp.yseq])\n\n if str_yseq in cache:\n done[i] = (*cache[str_yseq], hyp.yseq)\n else:\n process.append((str_yseq, hyp.yseq, hyp.dec_state))\n\n if process:\n batch = len(process)\n _tokens = pad_sequence([p[1] for p in process], self.blank)\n _states = [p[2] for p in process]\n\n batch_tokens = torch.LongTensor(_tokens).view(batch, -1).to(device=device)\n tgt_mask = (\n subsequent_mask(batch_tokens.size(-1))\n .unsqueeze(0)\n .expand(batch, -1, -1)\n .to(device=device)\n )\n\n dec_state = self.init_state()\n dec_state = self.create_batch_states(\n dec_state,\n _states,\n _tokens,\n )\n\n tgt = self.embed(batch_tokens)\n\n next_state = []\n for s, decoder in zip(dec_state, self.decoders):\n tgt, tgt_mask = decoder(tgt, tgt_mask, cache=s)\n next_state.append(tgt)\n\n tgt = self.after_norm(tgt[:, -1])\n\n j = 0\n for i in range(final_batch):\n if done[i] is None:\n new_state = self.select_state(next_state, j)\n\n done[i] = (tgt[j], new_state, process[j][2])\n cache[process[j][0]] = (tgt[j], new_state)\n\n j += 1\n\n batch_states = self.create_batch_states(\n batch_states, [d[1] for d in done], [d[2] for d in done]\n )\n batch_y = torch.stack([d[0] for d in done])\n\n lm_tokens = (\n torch.LongTensor([hyp.yseq[-1] for hyp in hyps])\n .view(final_batch)\n .to(device=device)\n )\n\n return batch_y, batch_states, lm_tokens\n\n def select_state(self, batch_states, idx):\n \"\"\"Get decoder state from batch of states, for given id.\n\n Args:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n idx (int): index to extract state from batch of states\n\n Returns:\n state_idx (list): decoder states for given id\n [L x (1, max_len, dec_dim)]\n\n \"\"\"\n if batch_states[0] is None:\n return batch_states\n\n state_idx = [batch_states[layer][idx] for layer in range(len(self.decoders))]\n\n return state_idx\n\n def create_batch_states(self, batch_states, l_states, 
l_tokens):\n \"\"\"Create batch of decoder states.\n\n Args:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n l_states (list): list of decoder states\n [B x [L x (1, max_len, dec_dim)]]\n l_tokens (list): list of token sequences for batch\n\n Returns:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n\n \"\"\"\n if batch_states[0] is None:\n return batch_states\n\n max_len = max([len(t) for t in l_tokens])\n\n for layer in range(len(self.decoders)):\n batch_states[layer] = pad_batch_state(\n [s[layer] for s in l_states], max_len, self.blank\n )\n\n return batch_states\n"
] | [
[
"torch.stack",
"torch.tensor",
"torch.nn.Module.__init__",
"torch.LongTensor"
]
] |
rgap/storm | [
"5f477d6fa58c6c1ec8d8e2b57c3b21844cae17ac"
] | [
"storm_kit/mpc/control/control_utils.py"
] | [
"#\n# MIT License\n#\n# Copyright (c) 2020-2021 NVIDIA CORPORATION.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.#\nimport math\n\nimport numpy as np\nimport torch\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nimport ghalton\n\n\ndef scale_ctrl(ctrl, action_lows, action_highs, squash_fn='clamp'):\n if len(ctrl.shape) == 1:\n ctrl = ctrl[np.newaxis, :, np.newaxis]\n act_half_range = (action_highs - action_lows) / 2.0\n act_mid_range = (action_highs + action_lows) / 2.0\n if squash_fn == 'clamp':\n # ctrl = torch.clamp(ctrl, action_lows[0], action_highs[0])\n ctrl = torch.max(torch.min(ctrl, action_highs), action_lows)\n return ctrl\n elif squash_fn == 'clamp_rescale':\n ctrl = torch.clamp(ctrl, -1.0, 1.0)\n elif squash_fn == 'tanh':\n ctrl = torch.tanh(ctrl)\n elif squash_fn == 'identity':\n return ctrl\n return act_mid_range.unsqueeze(0) + ctrl * act_half_range.unsqueeze(0)\n\n#######################\n## STOMP Covariance ##\n#######################\n\ndef get_stomp_cov(horizon, d_action,\n tensor_args={'device':torch.device('cpu'),'dtype':torch.float32},\n cov_mode='vel', RETURN_R=False):\n \"\"\" Computes the covariance matrix following STOMP motion planner\n\n Coefficients from here: https://en.wikipedia.org/wiki/Finite_difference_coefficient\n More info here: https://github.com/ros-industrial/stomp_ros/blob/7fe40fbe6ad446459d8d4889916c64e276dbf882/stomp_core/src/utils.cpp#L36\n \"\"\"\n acc_fd_array = [0,-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12, 0]\n #acc_fd_array = [1/90, -3/20, 3/2, -49/18, 3/2 , -3/20, 1/90 ]\n\n #jerk_fd_array = [0, 1 / 12.0, -17 / 12.0, 46 / 12.0, -46 / 12.0, 17 / 12.0, -1 / 12.0]\n jerk_fd_array = [1 / 8.0, -1, 13/8, 0 , -13/8, 1, -1/8]\n\n #snap_fd_array = [-1/6, 2.0, -13/2, 28/3, -13/2, 2, -1/6]\n snap_fd_array = [0, 1, -4, 6, -4, 1, 0]\n #vel_fd_array = [0, 1.0/12.0 , -2.0/3.0 , 0 , 2.0/3.0 , -1.0/12.0 , 0 ]\n vel_fd_array = [0, 0 , 1, -2 , 1,0, 0 ]\n \n fd_array = acc_fd_array\n A = torch.zeros((d_action * horizon, d_action * horizon), device=tensor_args['device'],dtype=torch.float64)\n\n\n if(cov_mode == 'vel'):\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3,4):\n #print(j)\n index = i + j\n if(index < 0):\n index = 0\n continue\n if(index >= horizon):\n index = horizon - 1\n continue\n A[k * horizon + i,k * horizon + index] = fd_array[j + 3]\n elif(cov_mode == 'acc'):\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3,4):\n #print(j)\n index = i + j\n if(index < 
0):\n index = 0\n continue\n if(index >= horizon):\n index = horizon - 1\n continue\n if(index >= horizon/2):\n #print(k * horizon + index - horizon//2)\n A[k * horizon + i,k * horizon - index - horizon//2 -1] = fd_array[j + 3] #* float((horizon-index) / horizon)\n else:\n A[k * horizon + i,k * horizon + index] = fd_array[j + 3] #* float(index/horizon) \n #plt.imshow(A)\n #plt.show()\n\n R = torch.matmul(A.transpose(-2,-1), A)\n #print(R[:horizon, :horizon])\n #plt.imshow(R)\n #plt.show()\n #print(R)\n #print(torch.det(R))\n \n cov = torch.inverse(R)\n cov = cov / torch.max(torch.abs(cov))\n #plt.imshow(cov)\n #plt.show()\n\n # also compute the cholesky decomposition:\n scale_tril = torch.zeros((d_action * horizon, d_action * horizon), **tensor_args)\n scale_tril = torch.linalg.cholesky(cov)\n '''\n k = 0\n act_cov_matrix = cov[k * horizon:k * horizon + horizon, k * horizon:k * horizon + horizon]\n print(act_cov_matrix.shape)\n print(torch.det(act_cov_matrix))\n local_cholesky = matrix_cholesky(act_cov_matrix)\n for k in range(d_action):\n \n scale_tril[k * horizon:k * horizon + horizon,k * horizon:k * horizon + horizon] = local_cholesky\n '''\n cov = cov.to(**tensor_args)\n scale_tril = scale_tril.to(**tensor_args) #* 0.1\n scale_tril = scale_tril / torch.max(scale_tril)\n if(RETURN_R):\n return cov, scale_tril, R\n return cov, scale_tril\n \n\n\n#######################\n## Gaussian Sampling ##\n#######################\n\n\ndef generate_noise(cov, shape, base_seed, filter_coeffs=None, device=torch.device('cpu')):\n \"\"\"\n Generate correlated Gaussian samples using autoregressive process\n \"\"\"\n torch.manual_seed(base_seed)\n beta_0, beta_1, beta_2 = filter_coeffs\n N = cov.shape[0]\n m = MultivariateNormal(loc=torch.zeros(N).to(device), covariance_matrix=cov)\n eps = m.sample(sample_shape=shape)\n # eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)\n if filter_coeffs is not None:\n for i in range(2, eps.shape[1]):\n eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]\n return eps \n\ndef generate_noise_np(cov, shape, base_seed, filter_coeffs=None):\n \"\"\"\n Generate correlated noisy samples using autoregressive process\n \"\"\"\n np.random.seed(base_seed)\n beta_0, beta_1, beta_2 = filter_coeffs\n N = cov.shape[0]\n eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)\n if filter_coeffs is not None:\n for i in range(2, eps.shape[1]):\n eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]\n return eps \n\n###########################\n## Quasi-Random Sampling ##\n###########################\n\ndef generate_prime_numbers(num):\n def is_prime(n):\n for j in range(2, ((n //2) + 1),1):\n if n % j == 0:\n return False\n return True\n\n primes = [0] * num #torch.zeros(num, device=device)\n primes[0] = 2\n curr_num = 1\n for i in range(1, num):\n while True:\n curr_num += 2\n if is_prime(curr_num):\n primes[i] = curr_num\n break\n \n return primes\n\ndef generate_van_der_corput_sample(idx, base):\n f, r = 1.0, 0\n while idx > 0:\n f /= base*1.0\n r += f * (idx % base)\n idx = idx // base\n return r\n\ndef generate_van_der_corput_samples_batch(idx_batch, base):\n inp_device = idx_batch.device\n batch_size = idx_batch.shape[0]\n f = 1.0 #torch.ones(batch_size, device=inp_device)\n r = torch.zeros(batch_size, device=inp_device)\n while torch.any(idx_batch > 0):\n f /= base*1.0\n r += f * (idx_batch % base) #* (idx_batch > 0)\n idx_batch = idx_batch // base\n return r\n\n\n# def 
generate_van_der_corput_samples_batch_2(idx_batch, bases):\n# inp_device = idx_batch.device\n# batch_size = idx_batch.shape[0]\n# f = torch.ones(batch_size, device=inp_device)\n# r = torch.zeros(batch_size, device=inp_device)\n \n# while torch.any(idx_batch > 0):\n# f /= bases*1.0\n# r += f * (idx_batch % base) #* (idx_batch > 0)\n# idx_batch = idx_batch // base\n \n# return r\n\ndef generate_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):\n if not use_ghalton:\n samples = torch.zeros(num_samples, ndims, device=device, dtype=float_dtype)\n if not bases:\n bases = generate_prime_numbers(ndims)\n idx_batch = torch.arange(1,num_samples+1, device=device)\n for dim in range(ndims):\n samples[:, dim] = generate_van_der_corput_samples_batch(idx_batch, bases[dim])\n else:\n \n if ndims <= 100:\n perms = ghalton.EA_PERMS[:ndims]\n sequencer = ghalton.GeneralizedHalton(perms)\n else:\n sequencer = ghalton.GeneralizedHalton(ndims, seed_val)\n samples = torch.tensor(sequencer.get(num_samples), device=device, dtype=float_dtype)\n return samples\n\n\ndef generate_gaussian_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):\n uniform_halton_samples = generate_halton_samples(num_samples, ndims, bases, use_ghalton, seed_val, device, float_dtype)\n\n gaussian_halton_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_halton_samples - 1)\n \n return gaussian_halton_samples\n\n\ndef generate_gaussian_sobol_samples(num_samples, ndims, seed_val, device=torch.device('cpu'), float_dtype=torch.float64):\n soboleng = torch.quasirandom.SobolEngine(dimension=ndims, scramble=True, seed=seed_val)\n uniform_sobol_samples = soboleng.draw(num_samples).to(device)\n\n gaussian_sobol_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_sobol_samples - 1)\n return gaussian_sobol_samples\n \n########################\n## Gaussian Utilities ##\n########################\n\n\ndef gaussian_logprob(mean, cov, x, cov_type=\"full\"):\n \"\"\"\n Calculate gaussian log prob for given input batch x\n Parameters\n ----------\n mean (np.ndarray): [N x num_samples] batch of means\n cov (np.ndarray): [N x N] covariance matrix\n x (np.ndarray): [N x num_samples] batch of sample values\n\n Returns\n --------\n log_prob (np.ndarray): [num_sampls] log probability of each sample\n \"\"\"\n N = cov.shape[0]\n if cov_type == \"diagonal\":\n cov_diag = cov.diagonal()\n cov_inv = np.diag(1.0 / cov_diag)\n cov_logdet = np.sum(np.log(cov_diag))\n else:\n cov_logdet = np.log(np.linalg.det(cov))\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n mahalanobis_dist = -0.5 * np.sum((diff @ cov_inv) * diff, axis=1)\n const1 = -0.5 * N * np.log(2.0 * np.pi) \n const2 = -0.5*cov_logdet\n log_prob = mahalanobis_dist + const1 + const2\n return log_prob\n\ndef gaussian_logprobgrad(mean, cov, x, cov_type=\"full\"):\n if cov_type == \"diagonal\":\n cov_inv = np.diag(1.0/cov.diagonal())\n else:\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n grad = diff @ cov_inv\n return grad\n\ndef gaussian_entropy(cov=None, L=None): #, cov_type=\"full\"):\n \"\"\"\n Entropy of multivariate gaussian given either covariance\n or cholesky decomposition of covariance\n \n \"\"\"\n if cov is not None:\n inp_device = cov.device\n cov_logdet = torch.log(torch.det(cov))\n # print(np.linalg.det(cov.cpu().numpy()))\n 
# print(torch.det(cov))\n N = cov.shape[0]\n\n else:\n inp_device = L.device\n cov_logdet = 2.0 * torch.sum(torch.log(torch.diagonal(L)))\n N = L.shape[0]\n # if cov_type == \"diagonal\":\n # cov_logdet = np.sum(np.log(cov.diagonal())) \n # else:\n # cov_logdet = np.log(np.linalg.det(cov))\n\n term1 = 0.5 * cov_logdet\n # pi = torch.tensor([math.pi], device=inp_device)\n # pre-calculate 1.0 + torch.log(2.0*pi) = 2.837877066\n term2 = 0.5 * N * 2.837877066\n\n ent = term1 + term2\n return ent.to(inp_device)\n\ndef gaussian_kl(mean0, cov0, mean1, cov1, cov_type=\"full\"):\n \"\"\"\n KL-divergence between Gaussians given mean and covariance\n KL(p||q) = E_{p}[log(p) - log(q)]\n\n \"\"\"\n N = cov0.shape[0]\n if cov_type == \"diagonal\":\n cov1_diag = cov1.diagonal()\n cov1_inv = np.diag(1.0 / cov1_diag)\n cov0_logdet = np.sum(np.log(cov0.diagonal()))\n cov1_logdet = np.sum(np.log(cov1_diag))\n else:\n cov1_inv = np.linalg.inv(cov1)\n cov0_logdet = np.log(np.linalg.det(cov0))\n cov1_logdet = np.log(np.linalg.det(cov1))\n\n term1 = 0.5 * np.trace(cov1_inv @ cov0)\n diff = (mean1 - mean0).T\n mahalanobis_dist = 0.5 * np.sum((diff @ cov1_inv) * diff, axis=1)\n term3 = 0.5 * (-1.0*N + cov1_logdet - cov0_logdet)\n return term1 + mahalanobis_dist + term3\n\n\n\ndef cost_to_go(cost_seq, gamma_seq):\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if torch.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted cost sequence\n # cost_seq = torch.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq = torch.fliplr(torch.cumsum(torch.fliplr(cost_seq), axis=-1)) # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\ndef cost_to_go_np(cost_seq, gamma_seq):\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if np.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted reward sequence\n cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\n\n############\n##Cholesky##\n############\ndef matrix_cholesky(A):\n L = torch.zeros_like(A) \n for i in range(A.shape[-1]):\n for j in range(i+1):\n s = 0.0\n for k in range(j):\n s = s + L[i,k] * L[j,k] \n \n L[i,j] = torch.sqrt(A[i,i] - s) if (i == j) else \\\n (1.0 / L[j,j] * (A[i,j] - s))\n return L\n\n# Batched Cholesky decomp\ndef batch_cholesky(A):\n L = torch.zeros_like(A)\n\n for i in range(A.shape[-1]):\n for j in range(i+1):\n s = 0.0\n for k in range(j):\n s = s + L[...,i,k] * L[...,j,k]\n\n L[...,i,j] = torch.sqrt(A[...,i,i] - s) if (i == j) else \\\n (1.0 / L[...,j,j] * (A[...,i,j] - s))\n return L\n"
] | [
[
"numpy.sum",
"torch.inverse",
"torch.min",
"numpy.diag",
"numpy.random.seed",
"torch.det",
"torch.sqrt",
"numpy.trace",
"numpy.log",
"torch.any",
"torch.max",
"torch.quasirandom.SobolEngine",
"torch.diagonal",
"torch.arange",
"torch.tanh",
"torch.device",
"numpy.zeros",
"torch.manual_seed",
"numpy.linalg.det",
"torch.tensor",
"torch.linalg.cholesky",
"numpy.cumsum",
"numpy.linalg.inv",
"torch.zeros_like",
"torch.erfinv",
"torch.fliplr",
"torch.abs",
"torch.zeros",
"torch.clamp"
]
] |
SamuelCahyawijaya/fast-transformers | [
"6ae8ed4cc50bd037968db4f5062e4d328aae73fe"
] | [
"tests/sparse_product/test_clustered_sparse_product_backward_cpu.py"
] | [
"#\n# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/\n# Written by Angelos Katharopoulos <[email protected]>,\n# Apoorv Vyas <[email protected]>\n#\n\nimport os\nfrom os import getenv\nimport time\nimport unittest\n\nimport torch\nfrom torch.nn.init import normal_\n\nfrom fast_transformers.aggregate import aggregate, broadcast\nfrom fast_transformers.hashing import compute_hashes\nfrom fast_transformers.clustering.hamming import cluster\nfrom fast_transformers.sparse_product import clustered_sparse_dot_product\n\ndef cluster_queries(Q, query_lengths, C, I, B):\n N, H, L, E = Q.shape\n planes = Q.new_empty((B, E+1))\n normal_(planes)\n planes[:, -1] = 0\n hashes = compute_hashes(Q.view(N*H*L, E), planes).view(N, H, L)\n # Cluster the hashes and return the cluster index per query\n groups, counts = cluster(\n hashes,\n query_lengths,\n clusters=C,\n iterations=I,\n bits=B\n )\n\n return groups, counts\n\n\nclass TestSparseProductBackward(unittest.TestCase):\n @property\n def device(self):\n return \"cpu\"\n\n def _zero_grad(self, Q, K):\n for x in [Q, K]:\n if x.grad is not None:\n x.grad[...] = 0\n\n def test_simple_grad(self):\n N = 2\n H = 2\n L = 1000\n E = 32\n S = 1000\n k = 32\n C = 50\n I = 5\n B = 16\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n topk_broadcast = broadcast(\n topk.float(),\n groups,\n torch.ones_like(counts, dtype=torch.float32),\n torch.zeros((N, H, L, k), device=Q.device)\n )\n\n\n self._zero_grad(Q, K)\n QK_full = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK_selected = QK_full[\n torch.arange(N).view(N, 1, 1, 1).to(self.device),\n torch.arange(H).view(1, H, 1, 1).to(self.device),\n torch.arange(L).view(1, 1, L, 1).to(self.device),\n topk_broadcast.long()\n ]\n\n QK_selected.sum().backward()\n grad = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n\n self._zero_grad(Q, K)\n QK_selected_hat = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n lengths\n )\n\n QK_selected_hat.sum().backward()\n grad_hat = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n self.assertLess(\n torch.abs(QK_selected - QK_selected_hat).max(),\n 1e-4\n )\n for g1, g2 in zip(grad, grad_hat):\n self.assertLess(\n torch.abs(g1 - g2).max(),\n 1e-4\n )\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_forward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n C = 100\n I = 10\n B = 32\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n\n self._zero_grad(Q, K)\n n_runs = 10\n s = time.time()\n for i in range(n_runs):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n e = time.time()\n t_full = (e - s) / n_runs\n\n self._zero_grad(Q, K)\n s = time.time()\n for i in range(n_runs):\n QK = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n 
lengths\n )\n e = time.time()\n t_sparse = (e - s) / n_runs\n print(\"Benchmark Forward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_forward_backward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n C = 100\n I = 10\n B = 32\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n\n self._zero_grad(Q, K)\n n_runs = 10\n s = time.time()\n for i in range(n_runs):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK.sum().backward()\n e = time.time()\n t_full = (e - s) / n_runs\n\n self._zero_grad(Q, K)\n s = time.time()\n for i in range(n_runs):\n QK = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n lengths\n )\n QK.sum().backward()\n e = time.time()\n t_sparse = (e - s) / n_runs\n print(\"Benchmark Forward-Backward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.ones_like",
"torch.randn",
"torch.nn.init.normal_",
"torch.full",
"torch.topk",
"torch.arange",
"torch.abs",
"torch.zeros",
"torch.einsum",
"torch.clone"
]
] |
caitsithx/dogs-vs-cats-redux | [
"3ff588cac9048a3c9f5a76de842a9cd2a4140218"
] | [
"cscreendataset.py"
] | [
"import os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision import transforms\n\nimport settings\n\n# import transforms\n\nDATA_DIR = settings.DATA_DIR\nTRAIN_DIR = DATA_DIR + '/train-640'\nTEST_DIR = DATA_DIR + '/test-640'\n\n\ndef pil_load(img_path):\n with open(img_path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\nclass CommonDataSet(data.Dataset):\n def __init__(self, file_list_path, train_data=True, has_label=True,\n transform=None, split=0.8):\n df_train = pd.read_csv(file_list_path)\n df_value = df_train.values\n df_value = np.random.permutation(df_value)\n if has_label:\n split_index = int(df_value.shape[0] * split)\n if train_data:\n split_data = df_value[:split_index]\n else:\n split_data = df_value[split_index:]\n # print(split_data.shape)\n file_names = [None] * split_data.shape[0]\n labels = []\n\n for index, line in enumerate(split_data):\n f = line[0]\n labels.append(line[1:])\n file_names[index] = os.path.join(TRAIN_DIR, str(f) + '.jpg')\n\n else:\n file_names = [None] * df_train.values.shape[0]\n for index, line in enumerate(df_train.values):\n f = line[0]\n file_names[index] = TEST_DIR + '/' + str(int(f)) + '.jpg'\n # print(filenames[:100])\n self.transform = transform\n self.num = len(file_names)\n self.file_names = file_names\n self.train_data = train_data\n self.has_label = has_label\n\n if has_label:\n self.labels = np.array(labels, dtype=np.float32)\n\n def __getitem__(self, index):\n img = pil_load(self.file_names[index])\n if self.transform is not None:\n img = self.transform(img)\n\n if self.has_label:\n label = self.labels[index]\n return img, label, self.file_names[index]\n else:\n return img, self.file_names[index]\n\n def __len__(self):\n return self.num\n\n\ndef randomRotate(img):\n d = random.randint(0, 4) * 90\n img2 = img.rotate(d, resample=Image.NEAREST)\n return img2\n\n\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Scale(320),\n transforms.RandomSizedCrop(224),\n # transforms.Scale(224),\n transforms.RandomHorizontalFlip(),\n transforms.Lambda(lambda x: randomRotate(x)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'trainv3': transforms.Compose([\n transforms.Scale(480),\n transforms.RandomSizedCrop(299),\n transforms.RandomHorizontalFlip(),\n transforms.Lambda(lambda x: randomRotate(x)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'valid': transforms.Compose([\n transforms.Scale(224),\n # transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'validv3': transforms.Compose([\n transforms.Scale(299),\n # transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'test': transforms.Compose([\n transforms.Scale(224),\n # transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'testv3': transforms.Compose([\n transforms.Scale(299),\n # transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n}\n\n'''\ndsets = {x: 
datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'valid']}\ndset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=batch_size,\n shuffle=True, num_workers=4)\n for x in ['train', 'valid']}\n\ndset_sizes = {x: len(dsets[x]) for x in ['train', 'valid']}\ndset_classes = dsets['train'].classes\nsave_array(CLASSES_FILE, dset_classes)\n'''\n\n\ndef get_train_loader(model, batch_size=16, shuffle=True):\n if model.name.startswith('inception'):\n transkey = 'trainv3'\n else:\n transkey = 'train'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n print(\"train batch_size %d \" % batch_size)\n dset = CommonDataSet(DATA_DIR + '/train_labels.csv',\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\ndef get_val_loader(model, batch_size=16, shuffle=True):\n if model.name.startswith('inception'):\n transkey = 'validv3'\n else:\n transkey = 'valid'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n # train_v2.csv\n dset = CommonDataSet(DATA_DIR + '/train_labels.csv', train_data=False,\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\ndef get_test_loader(model, batch_size=16, shuffle=False):\n if model.name.startswith('inception'):\n transkey = 'testv3'\n else:\n transkey = 'test'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n\n dset = CommonDataSet(DATA_DIR + '/sample_submission.csv', has_label=False,\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\nif __name__ == '__main__':\n loader = get_train_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, label, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n loader = get_val_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, label, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n loader = get_test_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n"
] | [
[
"pandas.read_csv",
"torch.utils.data.DataLoader",
"numpy.random.permutation",
"numpy.array"
]
] |
akashAD98/detectron2 | [
"295fbb8b96eda271869fc6955280d16596781766"
] | [
"detectron2/layers/batch_norm.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport torch\nimport torch.distributed as dist\nfrom fvcore.nn.distributed import differentiable_all_reduce\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..utils import comm, env\n\nfrom .wrappers import BatchNorm2d\n\n\nclass FrozenBatchNorm2d(nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n It contains non-trainable buffers called\n \"weight\" and \"bias\", \"running_mean\", \"running_var\",\n initialized to perform identity transformation.\n\n The pre-trained backbone models from Caffe2 only contain \"weight\" and \"bias\",\n which are computed from the original four parameters of BN.\n The affine transform `x * weight + bias` will perform the equivalent\n computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.\n When loading a backbone model from Caffe2, \"running_mean\" and \"running_var\"\n will be left unchanged as identity transformation.\n\n Other pre-trained backbone models may contain all 4 parameters.\n\n The forward is implemented by `F.batch_norm(..., training=False)`.\n \"\"\"\n\n _version = 3\n\n def __init__(self, num_features, eps=1e-5):\n super().__init__()\n self.num_features = num_features\n self.eps = eps\n self.register_buffer(\"weight\", torch.ones(num_features))\n self.register_buffer(\"bias\", torch.zeros(num_features))\n self.register_buffer(\"running_mean\", torch.zeros(num_features))\n self.register_buffer(\"running_var\", torch.ones(num_features) - eps)\n\n def forward(self, x):\n if x.requires_grad:\n # When gradients are needed, F.batch_norm will use extra memory\n # because its backward op computes gradients for weight/bias as well.\n scale = self.weight * (self.running_var + self.eps).rsqrt()\n bias = self.bias - self.running_mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n out_dtype = x.dtype # may be half\n return x * scale.to(out_dtype) + bias.to(out_dtype)\n else:\n # When gradients are not needed, F.batch_norm is a single fused op\n # and provide more optimization opportunities.\n return F.batch_norm(\n x,\n self.running_mean,\n self.running_var,\n self.weight,\n self.bias,\n training=False,\n eps=self.eps,\n )\n\n def _load_from_state_dict(\n self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n ):\n version = local_metadata.get(\"version\", None)\n\n if version is None or version < 2:\n # No running_mean/var in early versions\n # This will silent the warnings\n if prefix + \"running_mean\" not in state_dict:\n state_dict[prefix + \"running_mean\"] = torch.zeros_like(self.running_mean)\n if prefix + \"running_var\" not in state_dict:\n state_dict[prefix + \"running_var\"] = torch.ones_like(self.running_var)\n\n super()._load_from_state_dict(\n state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n )\n\n def __repr__(self):\n return \"FrozenBatchNorm2d(num_features={}, eps={})\".format(self.num_features, self.eps)\n\n @classmethod\n def convert_frozen_batchnorm(cls, module):\n \"\"\"\n Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.\n\n Args:\n module (torch.nn.Module):\n\n Returns:\n If module is BatchNorm/SyncBatchNorm, returns a new module.\n Otherwise, in-place convert module and return it.\n\n Similar to convert_sync_batchnorm in\n https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py\n \"\"\"\n bn_module = nn.modules.batchnorm\n bn_module = 
(bn_module.BatchNorm2d, bn_module.SyncBatchNorm)\n res = module\n if isinstance(module, bn_module):\n res = cls(module.num_features)\n if module.affine:\n res.weight.data = module.weight.data.clone().detach()\n res.bias.data = module.bias.data.clone().detach()\n res.running_mean.data = module.running_mean.data\n res.running_var.data = module.running_var.data\n res.eps = module.eps\n else:\n for name, child in module.named_children():\n new_child = cls.convert_frozen_batchnorm(child)\n if new_child is not child:\n res.add_module(name, new_child)\n return res\n\n\ndef get_norm(norm, out_channels):\n \"\"\"\n Args:\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\n or a callable that takes a channel number and returns\n the normalization layer as a nn.Module.\n\n Returns:\n nn.Module or None: the normalization layer\n \"\"\"\n if norm is None:\n return None\n if isinstance(norm, str):\n if len(norm) == 0:\n return None\n norm = {\n \"BN\": BatchNorm2d,\n # Fixed in https://github.com/pytorch/pytorch/pull/36382\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\n \"FrozenBN\": FrozenBatchNorm2d,\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\n # for debugging:\n \"nnSyncBN\": nn.SyncBatchNorm,\n \"naiveSyncBN\": NaiveSyncBatchNorm,\n }[norm]\n return norm(out_channels)\n\n\nclass NaiveSyncBatchNorm(BatchNorm2d):\n \"\"\"\n In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient\n when the batch size on each worker is different.\n (e.g., when scale augmentation is used, or when it is applied to mask head).\n\n This is a slower but correct alternative to `nn.SyncBatchNorm`.\n\n Note:\n There isn't a single definition of Sync BatchNorm.\n\n When ``stats_mode==\"\"``, this module computes overall statistics by using\n statistics of each worker with equal weight. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (N, H, W). This mode does not support inputs with zero batch size.\n\n When ``stats_mode==\"N\"``, this module computes overall statistics by weighting\n the statistics of each worker by their ``N``. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (H, W). It is slower than ``stats_mode==\"\"``.\n\n Even though the result of this module may not be the true statistics of all samples,\n it may still be reasonable because it might be preferrable to assign equal weights\n to all workers, regardless of their (H, W) dimension, instead of putting larger weight\n on larger images. 
From preliminary experiments, little difference is found between such\n a simplified implementation and an accurate computation of overall mean & variance.\n \"\"\"\n\n def __init__(self, *args, stats_mode=\"\", **kwargs):\n super().__init__(*args, **kwargs)\n assert stats_mode in [\"\", \"N\"]\n self._stats_mode = stats_mode\n\n def forward(self, input):\n if comm.get_world_size() == 1 or not self.training:\n return super().forward(input)\n\n B, C = input.shape[0], input.shape[1]\n\n half_input = input.dtype == torch.float16\n if half_input:\n # fp16 does not have good enough numerics for the reduction here\n input = input.float()\n mean = torch.mean(input, dim=[0, 2, 3])\n meansqr = torch.mean(input * input, dim=[0, 2, 3])\n\n if self._stats_mode == \"\":\n assert B > 0, 'SyncBatchNorm(stats_mode=\"\") does not support zero batch size.'\n vec = torch.cat([mean, meansqr], dim=0)\n vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())\n mean, meansqr = torch.split(vec, C)\n momentum = self.momentum\n else:\n if B == 0:\n vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)\n vec = vec + input.sum() # make sure there is gradient w.r.t input\n else:\n vec = torch.cat(\n [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0\n )\n vec = differentiable_all_reduce(vec * B)\n\n total_batch = vec[-1].detach()\n momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0\n mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero\n\n var = meansqr - mean * mean\n invstd = torch.rsqrt(var + self.eps)\n scale = self.weight * invstd\n bias = self.bias - mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n\n self.running_mean += momentum * (mean.detach() - self.running_mean)\n self.running_var += momentum * (var.detach() - self.running_var)\n ret = input * scale + bias\n if half_input:\n ret = ret.half()\n return ret\n"
] | [
[
"torch.ones_like",
"torch.ones",
"torch.nn.GroupNorm",
"torch.distributed.get_world_size",
"torch.split",
"torch.zeros_like",
"torch.nn.functional.batch_norm",
"torch.rsqrt",
"torch.zeros",
"torch.cat",
"torch.mean"
]
] |
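
A note on the record above: it pairs the detectron2-style batch_norm.py source with the torch APIs it calls. As an illustrative sketch (not part of the dataset), the snippet below shows the typical way `FrozenBatchNorm2d.convert_frozen_batchnorm` is applied to freeze every BatchNorm layer in an existing backbone; it assumes detectron2 and torchvision are installed, and resnet18 is only a stand-in model.

    # Sketch: freeze all BatchNorm layers of a torchvision backbone using the
    # FrozenBatchNorm2d class shown in the record above (exposed via detectron2.layers).
    import torch
    import torchvision
    from detectron2.layers import FrozenBatchNorm2d

    model = torchvision.models.resnet18()
    model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)  # BN/SyncBN -> FrozenBatchNorm2d

    x = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        y = model(x)  # batch statistics and affine parameters stay fixed
    print(y.shape)    # torch.Size([2, 1000])
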
DeuroIO/Deuro-tensorflow | [
"7d0fa4948a6232976c4828ef9041f92993503fd5"
] | [
"tensorflow/contrib/distribute/python/mirrored_strategy.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class MirroredStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nfrom functools import partial\nimport threading\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import shared_variable_creator\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import device_util\nfrom tensorflow.python.training import distribute as distribute_lib\nfrom tensorflow.python.util import nest\n\n\n# TODO(josh11b): Replace asserts in this file with if ...: raise ...\n\n\[email protected]\ndef _enter_graph(g):\n if context.executing_eagerly():\n with g.as_default(), context.eager_mode():\n yield\n else:\n with g.as_default():\n yield\n\n\ndef _cpu_device(device):\n cpu_device = tf_device.DeviceSpec.from_string(device)\n cpu_device.merge_from(tf_device.DeviceSpec(device_type=\"CPU\", device_index=0))\n return cpu_device.to_string()\n\n\nclass _RequestedStop(Exception):\n pass\n\n\n# _call_for_each_replica and _reduce_non_distributed_value are not members of\n# MirroredStrategy so that they are generally not allowed to use anything\n# specific to MirroredStrategy and thus can be shared with other distribution\n# strategies.\n\n\n# TODO(yuefengz): maybe create a common class for those who need to call this\n# _call_for_each_replica.\ndef _call_for_each_replica(distribution, fn, args, kwargs):\n \"\"\"Run `fn` in separate threads, once per replica/worker device.\n\n Args:\n distribution: the DistributionStrategy object.\n fn: function to run (will be run once per device, each in its own thread).\n args: positional arguments for `fn`\n kwargs: keyword arguments for `fn`.\n\n Returns:\n Merged return value of `fn` across all replicas.\n\n Raises:\n RuntimeError: If fn() calls get_replica_context().merge_call() a different\n number of times from the available devices.\n \"\"\"\n # TODO(josh11b): Add this option once we add synchronization to variable\n # creation. 
Until then, this is pretty unsafe to use.\n run_concurrently = False\n if not context.executing_eagerly():\n # Needed for per-thread device, etc. contexts in graph mode.\n ops.get_default_graph().switch_to_thread_local()\n\n coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))\n\n shared_variable_store = {}\n\n # TODO(isaprykin): Create these threads once instead of during every run()\n # call.\n threads = []\n for index, d in enumerate(distribution.extended.worker_devices):\n variable_creator_fn = shared_variable_creator.make_fn(\n shared_variable_store, index)\n t = MirroredExtended._MirroredReplicaThread( # pylint: disable=protected-access\n distribution, coord, d, variable_creator_fn, fn,\n *values.select_device(d, args), **values.select_device(d, kwargs))\n threads.append(t)\n\n for t in threads:\n t.start()\n\n # When `fn` starts `should_run` event is set on _MirroredReplicaThread\n # (`MRT`) threads. The execution waits until\n # `MRT.has_paused` is set, which indicates that either `fn` is\n # complete or a `get_replica_context().merge_call()` is called. If `fn` is\n # complete, then `MRT.done` is set to True. Otherwise, arguments\n # of `get_replica_context().merge_call` from all paused threads are grouped\n # and the `merge_fn` is performed. Results of the\n # `get_replica_context().merge_call` are then set to `MRT.merge_result`.\n # Each such `get_replica_context().merge_call` call returns the\n # `MRT.merge_result` for that thread when `MRT.should_run` event\n # is reset again. Execution of `fn` resumes.\n\n try:\n with coord.stop_on_exception():\n all_done = False\n while not all_done and not coord.should_stop():\n done = []\n if run_concurrently:\n for t in threads:\n t.should_run.set()\n for t in threads:\n t.has_paused.wait()\n t.has_paused.clear()\n if coord.should_stop():\n return None\n done.append(t.done)\n else:\n for t in threads:\n t.should_run.set()\n t.has_paused.wait()\n t.has_paused.clear()\n if coord.should_stop():\n return None\n done.append(t.done)\n if coord.should_stop():\n return None\n all_done = all(done)\n if not all_done:\n if any(done):\n raise RuntimeError(\"Some replicas made a different number of \"\n \"replica_context().merge_call() calls.\")\n # get_replica_context().merge_call() case\n merge_args = values.regroup({t.device: t.merge_args for t in threads})\n merge_kwargs = values.regroup(\n {t.device: t.merge_kwargs for t in threads})\n # We capture the name_scope of the MRT when we call merge_fn\n # to ensure that if we have opened a name scope in the MRT,\n # it will be respected when executing the merge function. 
We only\n # capture the name_scope from the first MRT and assume it is\n # the same for all other MRTs.\n mtt_captured_name_scope = threads[0].captured_name_scope\n with ops.name_scope(mtt_captured_name_scope):\n merge_result = threads[0].merge_fn(distribution, *merge_args,\n **merge_kwargs)\n for t in threads:\n t.merge_result = values.select_device(t.device, merge_result)\n finally:\n for t in threads:\n t.should_run.set()\n coord.join(threads)\n\n return values.regroup({t.device: t.main_result for t in threads})\n\n\ndef _reduce_non_distributed_value(extended, reduce_op, value, destinations):\n \"\"\"Reduce a non-DistributedValue `value` to `destinations`.\"\"\"\n if isinstance(value, values.DistributedValues):\n raise ValueError(\"You are passing a `DistributedValue` to \"\n \"`_reduce_non_distributed_value`, which is not allowed.\")\n\n # If the same value is present on all replicas then the PerReplica value will\n # be a single value. We also handle the case when `value` is a single value\n # and equal to 0.\n if value == 0:\n return 0\n # If there is only a single value and the reduce op is MEAN,\n # that value should be on all destinations.\n if reduce_op == reduce_util.ReduceOp.MEAN:\n return value\n\n cross_device_ops_lib.validate_destinations(destinations)\n # We do not support a reduce op of SUM if the value is the same across\n # all replicas. We call this as part of assign functions for MirroredVariables\n # and summing up identical values across replicas is not clearly defined.\n if (len(extended.worker_devices) != 1 or\n not cross_device_ops_lib.check_destinations(destinations)):\n raise ValueError(\"A non-DistributedValues value %s cannot be reduced with \"\n \"the given reduce op %s.\" % (value, reduce_op))\n # TODO(anjalisridhar): Moves these methods to a device utility file?\n devices = cross_device_ops_lib.get_devices_from(destinations)\n if len(devices) == 1:\n with ops.device(devices[0]):\n return array_ops.identity(value)\n else:\n value_updates = {}\n for d in devices:\n with ops.device(d):\n value_updates[d] = array_ops.identity(value)\n return values.Mirrored(value_updates)\n\n\ndef _create_mirrored_variable(devices, real_mirrored_creator, *args, **kwargs): # pylint: disable=g-missing-docstring\n # Figure out what collections this variable should be added to.\n # We'll add the MirroredVariable to those collections instead.\n collections = kwargs.pop(\"collections\", None)\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n kwargs[\"collections\"] = []\n\n # Get synchronization value\n synchronization = kwargs.get(\"synchronization\",\n variable_scope.VariableSynchronization.ON_WRITE)\n if synchronization == variable_scope.VariableSynchronization.NONE:\n raise ValueError(\"`NONE` variable synchronization mode is not \"\n \"supported with `Mirrored` distribution strategy. 
Please\"\n \" change the `synchronization` for variable: \" +\n kwargs[\"name\"])\n elif synchronization == variable_scope.VariableSynchronization.ON_READ:\n # Variables that are to be synced on read are replica local.\n is_replica_local = True\n kwargs[\"trainable\"] = False\n elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or\n synchronization == variable_scope.VariableSynchronization.AUTO):\n # `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`.\n is_replica_local = False\n else:\n raise ValueError(\"Invalid variable synchronization mode: \" +\n synchronization + \" for variable: \" + kwargs[\"name\"])\n\n # Get aggregation value\n aggregation = kwargs.pop(\"aggregation\",\n variable_scope.VariableAggregation.NONE)\n if aggregation not in (\n variable_scope.VariableAggregation.NONE,\n variable_scope.VariableAggregation.SUM,\n variable_scope.VariableAggregation.MEAN,\n variable_scope.VariableAggregation.ONLY_FIRST_REPLICA\n ):\n raise ValueError(\"Invalid variable aggregation mode: \" + aggregation +\n \" for variable: \" + kwargs[\"name\"])\n\n # Ignore user-specified caching device, not needed for mirrored variables.\n kwargs.pop(\"caching_device\", None)\n\n # TODO(josh11b,apassos): It would be better if variable initialization\n # was never recorded on the tape instead of having to do this manually\n # here.\n with tape.stop_recording():\n index = real_mirrored_creator(devices, *args, **kwargs)\n\n if is_replica_local:\n result = values.ReplicaLocalVariable(\n index, index[devices[0]], aggregation)\n else:\n result = values.MirroredVariable(index, index[devices[0]], aggregation)\n\n # Add the wrapped variable to the requested collections.\n # The handling of eager mode and the global step matches\n # ResourceVariable._init_from_args().\n if not context.executing_eagerly():\n g = ops.get_default_graph()\n # If \"trainable\" is True, next_creator() will add the member variables\n # to the TRAINABLE_VARIABLES collection, so we manually remove\n # them and replace with the MirroredVariable. 
We can't set\n # \"trainable\" to False for next_creator() since that causes functions\n # like implicit_gradients to skip those variables.\n if kwargs.get(\"trainable\", True):\n collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)\n l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)\n for v in index.values():\n if v in l:\n l.remove(v)\n g.add_to_collections(collections, result)\n elif ops.GraphKeys.GLOBAL_STEP in collections:\n ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)\n\n return result\n\n\nclass CoreMirroredStrategy(distribute_lib.DistributionStrategy):\n \"\"\"Mirrors vars to distribute across multiple devices and machines.\n\n *** core version ***\n\n This strategy uses one replica per device and sync replication for its\n multi-GPU version.\n\n When `cluster_spec` is given by the `configure` method., it turns into the\n mulit-worker version that works on multiple workers with in-graph replication.\n Note: `configure` will be called by higher-level APIs if running in\n distributed environment.\n\n There are several important concepts for distributed TensorFlow, e.g.\n `client`, `job`, 'task', `cluster`, `in-graph replication` and\n 'synchronous training' and they have already been defined in the\n [TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).\n The distribution strategy inherits these concepts as well and in addition to\n that we also clarify several more concepts:\n\n * **In-graph replication**: the `client` creates a single `tf.Graph` that\n specifies tasks for devices on all workers. The `client` then creates a\n client session which will talk to the `master` service of a `worker`. Then\n the `master` will partition the graph and distribute the work to all\n participating workers.\n * **Worker**: A `worker` is a TensorFlow `task` that usually maps to one\n physical machine. We will have multiple `worker`s with different `task`\n index. They all do similar things except for one worker checkpointing model\n variables, writing summaries, etc. in addition to its ordinary work.\n\n The multi-worker version of this class maps one replica to one device on a\n worker. It mirrors all model variables on all replicas. For example, if you\n have two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of\n the model variables on these 8 GPUs. Then like in MirroredStrategy, each\n replica performs their computation with their own copy of variables unless in\n cross-replica model where variable or tensor reduction happens.\n\n Args:\n devices: a list of device strings.\n num_gpus: number of GPUs. For local training, either specify `devices` or\n `num_gpus`. In distributed training, this must be specified as number of\n GPUs on each worker.\n num_gpus_per_worker: number of GPUs per worker. This is the same as\n `num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be\n specified.\n cross_device_ops: optional, a descedant of `CrossDeviceOps`. 
If this is not\n set, the `configure` method will try to find the best one.\n auto_shard_dataset: whether to auto-shard the dataset when there are\n multiple workers.\n \"\"\"\n\n def __init__(self,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n extended = CoreMirroredExtended(\n self, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops, auto_shard_dataset)\n super(CoreMirroredStrategy, self).__init__(extended)\n\n\nclass CoreMirroredExtended(distribute_lib.DistributionStrategyExtended):\n \"\"\"Implementation of CoreMirroredStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n super(CoreMirroredExtended, self).__init__(container_strategy)\n self._cross_device_ops = cross_device_ops\n self._auto_shard_dataset = auto_shard_dataset\n # Remember num GPUs which might be needed by `configure` method.\n if num_gpus is not None and num_gpus_per_worker is not None:\n raise ValueError(\n \"You cannot specify both `num_gpus` and `num_gpus_per_worker`.\")\n if num_gpus is not None:\n self._num_gpus = num_gpus\n else:\n self._num_gpus = num_gpus_per_worker\n\n self._initialize_local(self._num_gpus, devices)\n\n def _initialize_local(self, num_gpus, devices):\n \"\"\"Initializes the object for local training.\"\"\"\n self._cluster_spec = None\n # Convert `num_gpus` into `devices`, shouldn't specify both.\n if devices is None:\n if num_gpus is None:\n num_gpus = context.num_gpus()\n if num_gpus == 0:\n devices = [\"/device:CPU:0\"]\n else:\n devices = [\"/device:GPU:%d\" % d for d in range(num_gpus)]\n elif num_gpus is not None:\n raise ValueError(\"Must only specify one of `devices` and `num_gpus`.\")\n self._num_gpus = num_gpus\n # TODO(yuefengz): consider setting the default device.\n\n assert devices, \"Must specify at least one device.\"\n assert len(set(devices)) == len(devices), (\n \"No duplicates allowed in `devices` argument.\")\n # TODO(josh11b): Require at least 2 devices?\n self._devices = [device_util.resolve(d) for d in devices]\n self._canonical_device_set = set(self._devices)\n self._device_index = values.PerReplica(\n {d: i for i, d in enumerate(devices)})\n\n def _initialize_multi_worker(self, num_gpus, cluster_spec):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n self._cluster_spec = cluster_spec\n\n self._workers = []\n for job in [\"chief\", \"worker\"]:\n for task in range(len(cluster_spec.as_dict().get(job, []))):\n self._workers.append(\"/job:%s/task:%d\" % (job, task))\n\n if num_gpus is None:\n raise ValueError(\"`num_gpus` is required if `cluster_spec` is given.\")\n if num_gpus > 0:\n self._worker_devices = [\n (worker, [\n device_util.canonicalize(worker + \"/device:GPU:%d\" % gpu)\n for gpu in range(num_gpus)\n ]) for worker in self._workers\n ]\n else:\n self._worker_devices = [\n (worker, [device_util.canonicalize(worker, \"/device:CPU:0\")])\n for worker in self._workers\n ]\n\n devices = nest.flatten([l for _, l in self._worker_devices])\n\n # Setting `_default_device` will add a device scope in the\n # distribution.scope. We set the default device to the first worker. When\n # users specify device under distribution.scope by\n # with tf.device(\"/cpu:0\"):\n # ...\n # their ops will end up on the cpu device of its first worker, e.g.\n # \"/job:worker/task:0/device:CPU:0\". 
Note this is not used in replica mode.\n self._default_device = self._workers[0]\n\n assert devices, \"Must specify at least one device.\"\n assert len(set(devices)) == len(devices), (\n \"No duplicates allowed in `devices` argument.\")\n # TODO(josh11b): Require at least 2 devices?\n self._devices = [device_util.resolve(d) for d in devices]\n self._canonical_device_set = set(self._devices)\n self._device_index = values.PerReplica(\n {d: i for i, d in enumerate(devices)})\n\n def _create_variable(self, next_creator, *args, **kwargs):\n \"\"\"Create a mirrored variable. See `DistributionStrategy.scope`.\"\"\"\n colocate_with = kwargs.pop(\"colocate_with\", None)\n devices = self._get_devices_from(colocate_with)\n\n def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring\n index = {}\n for i, d in enumerate(devices):\n with ops.device(d):\n if i > 0:\n # Give replicas meaningful distinct names:\n var0name = index[devices[0]].name.split(\":\")[0]\n # We append a / to variable names created on replicas with id > 0 to\n # ensure that we ignore the name scope and instead use the given\n # name as the absolute name of the variable.\n kwargs[\"name\"] = \"%s/replica_%d/\" % (var0name, i)\n # Initialize replicas with the same value:\n def initial_value_fn(device=d):\n if context.executing_eagerly():\n init_value = index[devices[0]].value()\n return array_ops.identity(init_value)\n else:\n with ops.device(device):\n init_value = index[devices[0]].initial_value\n return array_ops.identity(init_value)\n kwargs[\"initial_value\"] = initial_value_fn\n with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):\n # Don't record operations (e.g. other variable reads) during\n # variable creation.\n with tape.stop_recording():\n v = next_creator(*args, **kwargs)\n assert not isinstance(v, values.DistributedVariable)\n index[d] = v\n return index\n\n return _create_mirrored_variable(devices, _real_mirrored_creator, *args,\n **kwargs)\n\n def _distribute_dataset(self, dataset_fn):\n if self._cluster_spec:\n return values.MultiWorkerDataset(\n partial(self._call_dataset_fn, dataset_fn), self._worker_devices,\n auto_shard=self._auto_shard_dataset)\n else:\n return values.PerReplicaDataset(\n self._call_dataset_fn(dataset_fn), self._devices)\n\n def _make_dataset_iterator(self, dataset):\n if self._cluster_spec:\n worker_device_pairs = self._worker_devices\n else:\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n return values.DatasetIterator(dataset, worker_device_pairs,\n self._num_replicas_in_sync)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n input_contexts = []\n if self._cluster_spec:\n num_workers = len(self._worker_devices)\n worker_device_pairs = self._worker_devices\n else:\n num_workers = 1\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n return values.InputFunctionIterator(\n input_fn, worker_device_pairs, input_contexts)\n\n # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.\n def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,\n initial_loop_values=None):\n if initial_loop_values is None:\n initial_loop_values = {}\n initial_loop_values = nest.flatten(initial_loop_values)\n\n ctx = values.MultiStepContext()\n def 
body(i, *args):\n \"\"\"A wrapper around `fn` to create the while loop body.\"\"\"\n del args\n fn_inputs = iterator.get_next()\n if not isinstance(fn_inputs, tuple):\n fn_inputs = (fn_inputs,)\n fn_result = fn(ctx, fn_inputs)\n for (name, output) in ctx.last_step_outputs.items():\n # Convert all outputs to tensors, potentially from `DistributedValues`.\n ctx.last_step_outputs[name] = self._unwrap(output)\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs\n\n # We capture the control_flow_context at this point, before we run `fn`\n # inside a while_loop. This is useful in cases where we might need to exit\n # these contexts and get back to the outer context to do some things, for\n # e.g. create an op which should be evaluated only once at the end of the\n # loop on the host. One such usage is in creating metrics' value op.\n self._outer_control_flow_context = (\n ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access\n\n cond = lambda i, *args: i < iterations\n i = constant_op.constant(0)\n loop_result = control_flow_ops.while_loop(\n cond, body, [i] + initial_loop_values, name=\"\",\n parallel_iterations=1, back_prop=False, swap_memory=False,\n return_same_structure=True)\n del self._outer_control_flow_context\n\n ctx.run_op = control_flow_ops.group(loop_result)\n\n # Convert the last_step_outputs from a list to the original dict structure\n # of last_step_outputs.\n last_step_tensor_outputs = loop_result[1:]\n last_step_tensor_outputs_dict = nest.pack_sequence_as(\n ctx.last_step_outputs, last_step_tensor_outputs)\n\n for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access\n output = last_step_tensor_outputs_dict[name]\n # For outputs that have already been reduced, wrap them in a Mirrored\n # container, else in a PerReplica container.\n if reduce_op is None:\n last_step_tensor_outputs_dict[name] = values.regroup(\n {d: t for d, t in zip(self._devices, output)}, values.PerReplica)\n else:\n assert len(output) == 1\n last_step_tensor_outputs_dict[name] = output[0]\n\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access\n return ctx\n\n def _broadcast_to(self, tensor, destinations):\n # This is both a fast path for Python constants, and a way to delay\n # converting Python values to a tensor until we know what type it\n # should be converted to. Otherwise we have trouble with:\n # global_step.assign_add(1)\n # since the `1` gets broadcast as an int32 but global_step is int64.\n if isinstance(tensor, (float, int)):\n return tensor\n # TODO(josh11b): In eager mode, use one thread per device, or async mode.\n return self._get_cross_device_ops().broadcast(\n tensor, destinations or self._devices)\n\n def _call_for_each_replica(self, fn, args, kwargs):\n return _call_for_each_replica(self._container_strategy(), fn, args, kwargs)\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n del task_type, task_id\n\n if session_config:\n session_config.isolate_session_state = True\n\n if cluster_spec:\n self._initialize_multi_worker(self._num_gpus, cluster_spec)\n\n if self._cross_device_ops is None:\n if self._cluster_spec:\n # It currently cannot detect the toplogy of remote workers. 
So we\n # hard-code the multi-worker all-reduce algorithm for now.\n if len(self._workers) == 1:\n # The default is \"nccl\".\n self._cross_device_ops = (\n cross_device_ops_lib.AllReduceCrossDeviceOps())\n else:\n # The default is hierarchical reduce and broadcast.\n self._cross_device_ops = cross_device_ops_lib.MultiWorkerAllReduce(\n self._workers, self._num_gpus)\n else:\n self._cross_device_ops = cross_device_ops_lib.choose_the_best(\n self._devices, session_config=session_config)\n\n def _get_cross_device_ops(self):\n if self._cross_device_ops is None:\n self._cross_device_ops = (\n cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps())\n return self._cross_device_ops\n\n def _reduce_to(self, reduce_op, value, destinations):\n assert not isinstance(value, values.Mirrored)\n if not isinstance(value, values.DistributedValues):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return _reduce_non_distributed_value(self, reduce_op, value,\n destinations)\n return self._get_cross_device_ops().reduce(\n reduce_op, value, destinations=destinations)\n\n def _batch_reduce_to(self, reduce_op, value_destination_pairs):\n return self._get_cross_device_ops().batch_reduce(reduce_op,\n value_destination_pairs)\n\n def _update(self, var, fn, args, kwargs, group):\n # TODO(josh11b): In eager mode, use one thread per device.\n assert isinstance(var, values.DistributedVariable)\n updates = {}\n for d, v in var._index.items(): # pylint: disable=protected-access\n name = \"update_%d\" % self._device_index.get(d)\n with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):\n # If args and kwargs are not mirrored, the value is returned as is.\n updates[d] = fn(v,\n *values.select_device_mirrored(d, args),\n **values.select_device_mirrored(d, kwargs))\n return values.update_regroup(self, updates, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n assert isinstance(colocate_with, list)\n # TODO(josh11b): In eager mode, use one thread per device.\n updates = {}\n for d in colocate_with:\n name = \"update_%d\" % self._device_index.get(d)\n with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):\n updates[d] = fn(*values.select_device_mirrored(d, args),\n **values.select_device_mirrored(d, kwargs))\n return values.update_regroup(self, updates, group)\n\n def read_var(self, replica_local_var):\n \"\"\"Read the aggregate value of a replica-local variable.\"\"\"\n if isinstance(replica_local_var, values.ReplicaLocalVariable):\n return replica_local_var._get_cross_replica() # pylint: disable=protected-access\n assert isinstance(replica_local_var, values.Mirrored)\n return array_ops.identity(replica_local_var.get())\n\n def _unwrap(self, val):\n if isinstance(val, values.DistributedValues):\n # Return in a deterministic order.\n if set(val.devices) == self._canonical_device_set:\n return [val.get(device=d) for d in self._devices]\n return [val.get(device=d) for d in sorted(val.devices)]\n return [val]\n\n def value_container(self, val):\n return values.value_container(val)\n\n @property\n def _num_replicas_in_sync(self):\n return len(self._devices)\n\n @property\n def worker_devices(self):\n # Make a copy to prevent users from accidentally mutating our copy.\n return list(self._devices)\n\n @property\n def parameter_devices(self):\n return list(self._devices)\n\n @property\n 
def experimental_between_graph(self):\n return False\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return True\n\n @property\n def should_save_summary(self):\n return True\n\n def non_slot_devices(self, var_list):\n del var_list\n return list(self._devices)\n\n def _get_devices_from(self, colocate_with=None):\n if colocate_with is None:\n return self._devices\n else:\n return cross_device_ops_lib.get_devices_from(colocate_with)\n\n class _MirroredReplicaThread(threading.Thread):\n \"\"\"A thread that runs() a function on a device.\"\"\"\n\n def __init__(self, dist, coord, device, variable_creator_fn, fn, *args,\n **kwargs):\n super(CoreMirroredExtended._MirroredReplicaThread, self).__init__() # pylint: disable=protected-access\n self.coord = coord\n self.distribution = dist\n self.device = device\n self.replica_id = dist.worker_devices.index(device)\n self.variable_creator_fn = variable_creator_fn\n # State needed to run and return the results of `fn`.\n self.main_fn = fn\n self.main_args = args\n self.main_kwargs = kwargs\n self.main_result = None\n self.done = False\n # State needed to run the next merge_call() (if any) requested via\n # ReplicaContext.\n self.merge_fn = None\n self.merge_args = None\n self.merge_kwargs = None\n self.merge_result = None\n self.captured_name_scope = None\n # We use a thread.Event for the main thread to signal when this\n # thread should start running (`should_run`), and another for\n # this thread to transfer control back to the main thread\n # (`has_paused`, either when it gets to a\n # `get_replica_context().merge_call` or when `fn` returns). In\n # either case the event starts cleared, is signaled by calling\n # set(). The receiving thread waits for the signal by calling\n # wait() and then immediately clearing the event using clear().\n self.should_run = threading.Event()\n self.has_paused = threading.Event()\n # These fields have to do with inheriting various contexts from the\n # parent thread:\n # pylint: disable=protected-access\n self.context_mode = context.context()._eager_context.mode\n if not context.context()._context_handle:\n context.context()._initialize_handle_and_devices()\n self.context_device_policy = (\n pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(\n context.context()._context_handle))\n self.graph = ops.get_default_graph()\n self._variable_creator_stack = self.graph._variable_creator_stack[:]\n self._captured_var_scope = variable_scope.get_variable_scope()\n # Adding a \"/\" at end lets us re-enter this scope later.\n self._name_scope = self.graph.get_name_scope()\n if self._name_scope:\n self._name_scope += \"/\"\n if self.replica_id > 0:\n if not self._name_scope:\n self._name_scope = \"\"\n self._name_scope += \"replica_%d/\" % self.replica_id\n\n def run(self):\n # pylint: disable=protected-access\n self.graph._variable_creator_stack = self._variable_creator_stack\n self.should_run.wait()\n self.should_run.clear()\n try:\n if self.coord.should_stop():\n return\n with self.coord.stop_on_exception(), \\\n context.context()._mode(self.context_mode), \\\n context.context().device_policy(self.context_device_policy), \\\n _enter_graph(self.graph), \\\n MirroredReplicaContext(self.distribution, constant_op.constant(\n self.replica_id, dtypes.int32)), \\\n ops.device(self.device), \\\n ops.name_scope(self._name_scope), \\\n variable_scope.variable_scope(\n self._captured_var_scope, reuse=self.replica_id > 0), \\\n 
variable_scope.variable_creator_scope(self.variable_creator_fn):\n self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)\n self.done = True\n finally:\n self.has_paused.set()\n\n\nclass MirroredStrategy(distribute_lib.DistributionStrategy):\n \"\"\"Mirrors vars to distribute across multiple devices and machines.\n\n *** contrib version ***\n\n This strategy uses one replica per device and sync replication for its\n multi-GPU version.\n\n When `cluster_spec` is given by the `configure` method., it turns into the\n mulit-worker version that works on multiple workers with in-graph replication.\n Note: `configure` will be called by higher-level APIs if running in\n distributed environment.\n\n There are several important concepts for distributed TensorFlow, e.g.\n `client`, `job`, 'task', `cluster`, `in-graph replication` and\n 'synchronous training' and they have already been defined in the\n [TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).\n The distribution strategy inherits these concepts as well and in addition to\n that we also clarify several more concepts:\n\n * **In-graph replication**: the `client` creates a single `tf.Graph` that\n specifies tasks for devices on all workers. The `client` then creates a\n client session which will talk to the `master` service of a `worker`. Then\n the `master` will partition the graph and distribute the work to all\n participating workers.\n * **Worker**: A `worker` is a TensorFlow `task` that usually maps to one\n physical machine. We will have multiple `worker`s with different `task`\n index. They all do similar things except for one worker checkpointing model\n variables, writing summaries, etc. in addition to its ordinary work.\n\n The multi-worker version of this class maps one replica to one device on a\n worker. It mirrors all model variables on all replicas. For example, if you\n have two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of\n the model variables on these 8 GPUs. Then like in MirroredStrategy, each\n replica performs their computation with their own copy of variables unless in\n cross-replica model where variable or tensor reduction happens.\n\n Args:\n devices: a list of device strings.\n num_gpus: number of GPUs. For local training, either specify `devices` or\n `num_gpus`. In distributed training, this must be specified as number of\n GPUs on each worker.\n num_gpus_per_worker: number of GPUs per worker. This is the same as\n `num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be\n specified.\n cross_device_ops: optional, a descedant of `CrossDeviceOps`. 
If this is not\n set, the `configure` method will try to find the best one.\n auto_shard_dataset: whether to auto-shard the dataset when there are\n multiple workers.\n cross_tower_ops: Deprecated alias for `cross_device_ops`.\n \"\"\"\n\n def __init__(self,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False,\n cross_tower_ops=None):\n assert not (cross_device_ops and cross_tower_ops)\n extended = MirroredExtended(\n self, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops or cross_tower_ops, auto_shard_dataset)\n super(MirroredStrategy, self).__init__(extended)\n\n\nclass MirroredExtended(CoreMirroredExtended):\n \"\"\"Implementation of (contrib) MirroredStrategy.\"\"\"\n\n # pylint: disable=useless-super-delegation\n def __init__(self,\n container_strategy,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n super(MirroredExtended, self).__init__(\n container_strategy, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops, auto_shard_dataset)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Make iterator from dataset without splitting the batch.\n\n This implementation is different than the one in\n `tf.distribute.MirroredStrategy` for purposes of backward compatibility.\n We treat the incoming dataset's batch size as per replica batch size.\n\n Args:\n dataset: `tf.data.Dataset` for input.\n Returns:\n An `InputIterator` which returns inputs for each step of the computation.\n \"\"\"\n if self._cluster_spec:\n worker_device_pairs = self._worker_devices\n else:\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n return values.DatasetIterator(dataset, worker_device_pairs)\n\n\nclass MirroredReplicaContext(distribute_lib.ReplicaContext):\n \"\"\"ReplicaContext used in MirroredStrategy.call_for_each_replica().\n\n Opened in `_MirroredReplicaThread`, to allow the user to invoke\n `MirroredStrategy`'s specific implementation of `merge_call()`,\n which works by delegating the function and its arguments to\n the main thread (the one that invoked\n `MirroredStrategy.call_for_each_replica()`).\n \"\"\"\n\n def _merge_call(self, fn, args, kwargs):\n \"\"\"Delegate to the main thread to actually perform merge_call().\"\"\"\n t = threading.current_thread() # a _MirroredReplicaThread\n t.merge_fn = fn\n t.merge_args = args\n t.merge_kwargs = kwargs\n t.captured_name_scope = t.graph.get_name_scope()\n # Adding a \"/\" at end lets us re-enter this scope later.\n if t.captured_name_scope:\n t.captured_name_scope += \"/\"\n t.has_paused.set()\n t.should_run.wait()\n t.should_run.clear()\n if t.coord.should_stop():\n raise _RequestedStop()\n return t.merge_result\n\n @property\n def devices(self):\n distribute_lib.require_replica_context(self)\n replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)\n return [self._distribution_strategy.worker_devices[replica_id]]\n"
] | [
[
"tensorflow.python.distribute.cross_device_ops.ReductionToOneDeviceCrossDeviceOps",
"tensorflow.python.distribute.values.InputFunctionIterator",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.distribute.cross_device_ops.check_destinations",
"tensorflow.python.distribute.cross_device_ops.validate_destinations",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.distribute.values.Mirrored",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.training.distribute.require_replica_context",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.training.distribute.InputContext",
"tensorflow.python.framework.device.DeviceSpec",
"tensorflow.python.distribute.values.value_container",
"tensorflow.python.distribute.shared_variable_creator.make_fn",
"tensorflow.python.distribute.values.DatasetIterator",
"tensorflow.python.distribute.values.update_regroup",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.distribute.values.select_device",
"tensorflow.python.eager.tape.stop_recording",
"tensorflow.python.distribute.values.MirroredVariable",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.distribute.values.MultiStepContext",
"tensorflow.python.distribute.values.select_device_mirrored",
"tensorflow.python.training.distribute.UpdateContext",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.distribute.cross_device_ops.AllReduceCrossDeviceOps",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.distribute.values.ReplicaLocalVariable",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.distribute.cross_device_ops.MultiWorkerAllReduce",
"tensorflow.python.training.device_util.resolve",
"tensorflow.python.distribute.values.regroup",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.training.device_util.canonicalize",
"tensorflow.python.distribute.cross_device_ops.choose_the_best",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.distribute.cross_device_ops.get_devices_from"
]
] |
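
For context on the record above: the contrib-era MirroredStrategy shown there was usually consumed through the Estimator API rather than instantiated directly. The sketch below is a minimal, assumed wiring for TensorFlow 1.x (tf.contrib no longer exists in TF 2.x); the tiny model_fn is purely illustrative and assumes `features` is a dense float tensor.

    # Minimal sketch (TensorFlow 1.x): hand a MirroredStrategy to an Estimator.
    import tensorflow as tf

    strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=2)

    def model_fn(features, labels, mode):
        # illustrative single-layer model; assumes `features` is a float tensor
        logits = tf.layers.dense(features, 10)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
            loss, global_step=tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    config = tf.estimator.RunConfig(train_distribute=strategy)
    estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
    # estimator.train(input_fn=...)  # input_fn is left to the caller
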
joeybose/Adversarial-Example-Games | [
"4219137e5263cd7de86687ed74cc1cef7497bb78"
] | [
"attacks/wilcoxon.py"
] | [
"import numpy as np\nimport ipdb\nfrom scipy.stats import wilcoxon, ttest_rel\n\n# MNIST\nmi_attack = [90.000000, 87.575768, 81.515160, 90.909088, 84.848480, 88.787872,\n 89.090904]\ndi_attack = [90.606056, 90.000000, 85.454552, 91.818176, 88.484856, 89.696968,\n 0.606071]\ntid_attack = [90.000000, 83.939400, 84.545456, 86.666664, 83.333336, 83.333336,\n 86.060608]\naeg_mnist = [88.095, 91.071, 88.690, 89.881, 85.714, 91.071, 91.667]\n\nw_mi, p_mi = wilcoxon(mi_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- MI-Attack vs. AEG: W: %f , P: %f\" %(w_mi, p_mi))\n\nw_di, p_di = wilcoxon(di_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- DI-Attack vs. AEG: W: %f , P: %f\" %(w_di, p_di))\n\nw_tid, p_tid = wilcoxon(tid_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- TID-Attack vs. AEG: W: %f , P: %f\" %(w_tid, p_tid))\n\n# CIFAR\nc_mi_attack = [48.176666, 60.848335, 57.434998, 49.005005, 64.980003,\n 60.071667]\nc_di_attack = [83.571671, 85.126671, 84.953331, 79.344994, 83.279999, 87.748329]\nc_tid_attack = [8.991667, 8.716668, 9.298335, 9.150001, 9.185000, 9.225000]\nc_sgm_attack = [55.240002, 63.230000, 58.849995, 49.519997, 66.979996,\n 68.919998]\naeg_cifar = [87.51, 87.353, 87.197, 86.761, 86.683, 86.529]\n\nc_w_mi, c_p_mi = wilcoxon(c_mi_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- MI-Attack vs. AEG: W: %f , P: %f\" %(c_w_mi, c_p_mi))\n\nc_w_di, c_p_di = wilcoxon(c_di_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- DI-Attack vs. AEG: W: %f , P: %f\" %(c_w_di, c_p_di))\n\nc_w_tid, c_p_tid = wilcoxon(c_tid_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- TID-Attack vs. AEG: W: %f , P: %f\" %(c_w_tid, c_p_tid))\n\nc_w_sgm, c_p_sgm = wilcoxon(c_sgm_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- SGM-Attack vs. AEG: W: %f , P: %f\" %(c_w_sgm, c_p_sgm))\n\n# T Test- MNIST\nw_mi, p_mi = ttest_rel(mi_attack, aeg_mnist)\nprint(\"T-Test MNIST-- MI-Attack vs. AEG: W: %f , P: %f\" %(w_mi, p_mi))\n\nw_di, p_di = ttest_rel(di_attack, aeg_mnist)\nprint(\"T-Test MNIST-- DI-Attack vs. AEG: W: %f , P: %f\" %(w_di, p_di))\n\nw_tid, p_tid = ttest_rel(tid_attack, aeg_mnist)\nprint(\"T-Test MNIST-- TID-Attack vs. AEG: W: %f , P: %f\" %(w_tid, p_tid))\n\n# T Test- CIFAR\nc_w_mi, c_p_mi = ttest_rel(c_mi_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- MI-Attack vs. AEG: W: %f , P: %f\" %(c_w_mi, c_p_mi))\n\nc_w_di, c_p_di = ttest_rel(c_di_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- DI-Attack vs. AEG: W: %f , P: %f\" %(c_w_di, c_p_di))\n\nc_w_tid, c_p_tid = ttest_rel(c_tid_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- TID-Attack vs. AEG: W: %f , P: %f\" %(c_w_tid, c_p_tid))\n\nc_w_sgm, c_p_sgm = ttest_rel(c_sgm_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- SGM-Attack vs. AEG: W: %f , P: %f\" %(c_w_sgm, c_p_sgm))\n"
] | [
[
"scipy.stats.ttest_rel",
"scipy.stats.wilcoxon"
]
] |
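
The script in the record above boils down to two paired tests from SciPy. A standalone illustration of the same calls, with arbitrary example numbers rather than the paper's results, looks like this:

    # Paired one-sided Wilcoxon signed-rank test and paired t-test (arbitrary numbers).
    from scipy.stats import wilcoxon, ttest_rel

    baseline = [90.0, 87.6, 81.5, 90.9, 84.8, 88.8, 89.1]
    proposed = [88.1, 91.1, 88.7, 89.9, 85.7, 91.1, 91.7]

    w, p_w = wilcoxon(baseline, proposed, alternative='less', zero_method='zsplit')
    t, p_t = ttest_rel(baseline, proposed)
    print(w, p_w, t, p_t)
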
patozavala/spectrareader | [
"ebd77ca568726936832e909c2f38c7b35fb35134"
] | [
"readers/readers.py"
] | [
"import os\nimport glob\nimport pandas as pd\n\nclass BaseReader():\n \"\"\"\n Implements several verifications and utilities for handling spectral files.\n \"\"\"\n def __init__(self):\n pass\n\n def check_file_if_exists(self,filepath):\n \"\"\"\n Verifies that a required file exists.\n \"\"\"\n try:\n f = open(filepath)\n f.close()\n except:\n raise Exception (filepath + ' does not exists.') \n\n def check_file_is_readable(self,filepath):\n \"\"\"\n Verifies that a required file is readable.\n \"\"\"\n try:\n f = open(filepath)\n f.readable()\n f.close()\n except:\n raise Exception (filepath + ' is not readable.')\n \n def check_dir_if_exist(self,dirpath):\n \"\"\"\n Verifies that a directory exists.\n \"\"\"\n if os.path.isdir(dirpath):\n return True\n else:\n raise Exception (dirpath + 'does not exists.')\n\nclass SpectraReader(BaseReader):\n \"\"\"\n SpectraReader reads .csv file with spectral information from objects. The spectrum is measured with laboratory and field spectrometers.\n SpectraReader allows handling the spectral information into a pandas dataframe. Each spectral measurement must follow the current protocols of the company.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n \n def read_spectrum(self, filepath: str) -> dict:\n \"\"\"\n Reads a .csv file with an spectroradiometer measurement. \n \"\"\"\n\n self.check_file_if_exists()\n self.check_file_is_readable()\n\n data = pd.read_csv(filepath)\n label = filepath.split(sep='_')[0]\n\n spectrum = {\n 'label': label,\n 'data': data,\n }\n return spectrum\n\n def read_multiple_spectra(self, dirpath: str) -> list:\n \"\"\"\n Reads multiple files from a directory an store each measurement into a Spectrum object.\n \"\"\"\n\n self.check_dir_if_exist()\n filepaths = glob.glob(dirpath + '/*.txt')\n spectra = []\n for file in filepaths:\n spectrum = self.read_single_file(file)\n spectra.append(spectrum)\n\n return spectra\n"
] | [
[
"pandas.read_csv"
]
] |
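
A hypothetical usage sketch for the reader classes in the record above; the import path mirrors the record's readers/readers.py layout, and the directory and file names are invented for illustration.

    # Assumed usage of SpectraReader; paths and file names are placeholders.
    from readers.readers import SpectraReader

    reader = SpectraReader()

    one = reader.read_spectrum('./campaign/leaf_001.csv')
    print(one['label'], len(one['data']))

    many = reader.read_multiple_spectra('./campaign')  # globs *.txt files in the directory
    print(len(many), 'spectra loaded')
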
mandubian/codenets | [
"63be72b706d57dbfb2ecec94adc203fc7bdfa3cf"
] | [
"codenets/codesearchnet/query_code_ast/dataset.py"
] | [
"import os\nimport sys\nfrom typing import Iterable, Union, Dict, Tuple, List, Callable, TypeVar, Optional, Any, cast\nimport numpy as np\nfrom pathlib import Path\nfrom loguru import logger\nfrom pathos.pools import ProcessPool\nimport itertools\nimport pickle\nimport random\nfrom dpu_utils.codeutils import split_identifier_into_parts\n\nfrom codenets.utils import _to_subtoken_stream, get_data_files_from_directory\nfrom codenets.codesearchnet.data import DatasetParams\nfrom codenets.codesearchnet.tokenizer_recs import TokenizerRecordable\nfrom codenets.codesearchnet.copied_code.utils import read_file_samples\nfrom codenets.codesearchnet.dataset_utils import (\n Samples,\n LangDataset,\n Compose,\n InputFeaturesToNpArray_RandomReplace,\n Tensorize,\n compute_language_weightings,\n)\nfrom codenets.codesearchnet.copied_code.metadata import QueryType\nfrom codenets.codesearchnet.data import InputFeatures\n\n\ndef convert_and_pad_token_sequence(\n tokenizer: TokenizerRecordable,\n token_sequence: List[str],\n output_tensor_size: int,\n token: str,\n prefix: Optional[str],\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Tensorise token sequence with padding; returning a mask for used elements as well.\n\n Args:\n tokenizer: Tokenizer.\n token_sequence: List of tokens in string form\n output_tensor_size: Size of the resulting tensor (i.e., length up which we pad / down to which we truncate.\n pad_from_left: Indicate if we are padding/truncating on the left side of string. [Default: False]\n\n Returns:\n Pair of numpy arrays. First is the actual tensorised token sequence, the second is a masking tensor\n that is 1.0 for those token indices that are actually used.\n \"\"\"\n if prefix is not None:\n token_sequence = [prefix, token] + token_sequence\n else:\n token_sequence = [token] + token_sequence\n token_ids, token_mask = tokenizer.encode_tokens([token_sequence], max_length=output_tensor_size)\n return token_ids[0], token_mask[0]\n\n\ndef load_data_from_sample_siamese(\n language: str,\n encoder_label: str,\n data_to_load: Any,\n function_name: Optional[str],\n tokenizer: TokenizerRecordable,\n fraction_using_func_name: float,\n min_len_func_name_for_query: int,\n use_subtokens: bool,\n mark_subtoken_end: bool,\n max_num_tokens: int,\n lang_token: str,\n query_token: str,\n) -> Optional[Dict[str, np.ndarray]]:\n \"\"\"\n Save two versions of both the code and the query: one using the docstring as the query and the other using the\n function-name as the query, and replacing the function name in the code with an out-of-vocab token.\n Sub-tokenizes, converts, and pads both versions, and rejects empty samples.\n \"\"\"\n result_holder: Dict[str, Any] = {}\n # Save the two versions of the code and query:\n data_holder = {QueryType.DOCSTRING.value: data_to_load, QueryType.FUNCTION_NAME.value: None}\n # Skip samples where the function name is very short, because it probably has too little information\n # to be a good search query.\n if fraction_using_func_name > 0.0 and function_name and len(function_name) >= min_len_func_name_for_query:\n if encoder_label == \"query\":\n # Set the query tokens to the function name, broken up into its sub-tokens:\n data_holder[QueryType.FUNCTION_NAME.value] = split_identifier_into_parts(function_name)\n elif encoder_label == \"code\":\n # In the code, replace the function name with the out-of-vocab token everywhere it appears:\n data_holder[QueryType.FUNCTION_NAME.value] = [\n tokenizer.unk_token() if token == function_name else token for token in data_to_load\n ]\n 
else:\n return None\n\n # Sub-tokenize, convert, and pad both versions:\n for key, data in data_holder.items():\n # if hyperparameters[f\"{encoder_label}_use_subtokens\"]:\n if use_subtokens:\n data = _to_subtoken_stream(data, mark_subtoken_end=mark_subtoken_end)\n\n logger.debug(\"\")\n if encoder_label == \"code\":\n tokens, tokens_mask = convert_and_pad_token_sequence(\n tokenizer=tokenizer,\n token_sequence=list(data),\n output_tensor_size=max_num_tokens,\n token=lang_token,\n prefix=language,\n )\n elif encoder_label == \"query\":\n tokens, tokens_mask = convert_and_pad_token_sequence(\n tokenizer=tokenizer,\n token_sequence=list(data),\n output_tensor_size=max_num_tokens,\n token=query_token,\n prefix=None,\n )\n # Note that we share the result_holder with different encoders, and so we need to make our identifiers\n # unique-ish\n result_holder[f\"{encoder_label}_tokens_{key}\"] = tokens\n result_holder[f\"{encoder_label}_tokens_mask_{key}\"] = tokens_mask\n\n if (\n result_holder[f\"{encoder_label}_tokens_mask_{QueryType.DOCSTRING.value}\"] is None\n or int(np.sum(result_holder[f\"{encoder_label}_tokens_mask_{QueryType.DOCSTRING.value}\"])) == 0\n ):\n return None\n\n return result_holder\n\n\ndef parse_data_file_siamese_tokenizer(\n data_file: Path, data_params: DatasetParams, tokenizer: TokenizerRecordable, lang_token: str, query_token: str\n) -> Tuple[str, int, Samples]:\n logger.info(f\"Reading samples from {data_file}\")\n filename = os.path.basename(data_file)\n file_language = filename.split(\"_\")[0]\n\n samples = list(read_file_samples(data_file))\n\n ds: List[Dict[str, Union[str, int]]] = []\n for raw_sample in samples:\n language = raw_sample[\"language\"]\n if language.startswith(\"python\"): # In some datasets, we use 'python-2.7' and 'python-3'\n language = \"python\"\n\n if language != file_language:\n logger.error(f\"file with different language {language} from filename {file_language}\")\n sys.exit(f\"file with multiple language {language} from filename {file_language}\")\n\n # the load_data_from_sample method call places processed data into sample, and\n # returns a boolean flag indicating if sample should be used\n function_name = raw_sample.get(\"func_name\")\n data_code = load_data_from_sample_siamese(\n language=language,\n encoder_label=\"code\",\n data_to_load=raw_sample[\"code_tokens\"],\n function_name=function_name,\n tokenizer=tokenizer,\n fraction_using_func_name=data_params.fraction_using_func_name,\n min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.code_max_num_tokens,\n lang_token=lang_token,\n query_token=query_token,\n )\n\n # query doesn't use the language\n data_query = load_data_from_sample_siamese(\n language=language,\n encoder_label=\"query\",\n data_to_load=[d.lower() for d in raw_sample[\"docstring_tokens\"]],\n function_name=function_name,\n tokenizer=tokenizer,\n fraction_using_func_name=data_params.fraction_using_func_name,\n min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.query_max_num_tokens,\n lang_token=lang_token,\n query_token=query_token,\n )\n\n if data_code is not None and data_query is not None:\n d = {\"language\": language, \"similarity\": 1, **data_code, **data_query}\n ds.append(d)\n\n logger.debug(f\"Parsed file {data_file}: language 
{file_language} [{len(ds)} samples]\")\n\n return (file_language, len(ds), ds)\n\n\nT_Single = TypeVar(\"T_Single\")\n\n\ndef load_data_from_files(\n data_files: Iterable[Path],\n data_params: DatasetParams,\n tokenizer: TokenizerRecordable,\n # humm that is not very nice type signature... need to create interface for that\n parse_callback: Callable[[Path, DatasetParams, TokenizerRecordable], Tuple[str, int, Iterable[T_Single]]],\n parallelize: bool = True,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n tasks_as_args = [[data_file, data_params, tokenizer] for data_file in data_files]\n\n if parallelize:\n pool = ProcessPool()\n\n # needed that hack to work... issues with serialization of classes\n # doesn't work with basic multiprocessing so needed pathos\n def cb(x):\n return parse_callback(*x)\n\n per_file_results = list(pool.map(cb, tasks_as_args))\n else:\n per_file_results = [parse_callback(*task_args) for task_args in tasks_as_args] # type: ignore\n\n lang_samples_iter: Dict[str, Tuple[int, List[Iterable[T_Single]]]] = {}\n for (lang, lg, samples_iter) in per_file_results:\n if lang not in lang_samples_iter:\n lang_samples_iter[lang] = (0, [])\n (lg0, iters) = lang_samples_iter[lang]\n iters.append(samples_iter)\n lang_samples_iter[lang] = (lg0 + lg, iters)\n\n lang_samples: Dict[str, Tuple[int, Iterable[T_Single]]] = {}\n for (lang, (lg, iters)) in lang_samples_iter.items():\n lang_samples[lang] = (lg, itertools.chain(*iters))\n\n return lang_samples\n\n\ndef load_data_from_files_raw(\n data_files: Iterable[Path],\n # humm that is not very nice type signature... need to create interface for that\n parse_callback: Callable[..., Tuple[str, int, Iterable[T_Single]]], # type: ignore\n parallelize: bool,\n *args,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n tasks_as_args = [[data_file, *args] for data_file in data_files]\n\n if parallelize:\n pool = ProcessPool()\n\n # needed that hack to work... 
issues with serialization of classes\n # doesn't work with basic multiprocessing so needed pathos\n def cb(x):\n return parse_callback(*x)\n\n per_file_results = list(pool.map(cb, tasks_as_args))\n else:\n per_file_results = [parse_callback(*task_args) for task_args in tasks_as_args] # type: ignore\n\n lang_samples_iter: Dict[str, Tuple[int, List[Iterable[T_Single]]]] = {}\n for (lang, lg, samples_iter) in per_file_results:\n if lang not in lang_samples_iter:\n lang_samples_iter[lang] = (0, [])\n (lg0, iters) = lang_samples_iter[lang]\n iters.append(samples_iter)\n lang_samples_iter[lang] = (lg0 + lg, iters)\n\n lang_samples: Dict[str, Tuple[int, Iterable[T_Single]]] = {}\n for (lang, (lg, iters)) in lang_samples_iter.items():\n lang_samples[lang] = (lg, itertools.chain(*iters))\n\n return lang_samples\n\n\ndef load_data_from_dirs_siamese_tokenizer(\n data_dirs: List[Path],\n tokenizer: TokenizerRecordable,\n data_params: DatasetParams,\n parse_callback: Callable[[Path, DatasetParams, TokenizerRecordable], Tuple[str, int, Iterable[T_Single]]],\n max_files_per_dir: Optional[int] = None,\n parallelize: bool = True,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n return load_data_from_files(\n data_files=list(get_data_files_from_directory(data_dirs, max_files_per_dir)),\n data_params=data_params,\n tokenizer=tokenizer,\n parse_callback=parse_callback,\n parallelize=parallelize,\n )\n\n\ndef load_data_from_dirs(\n data_dirs: List[Path],\n parse_callback: Callable[..., Tuple[str, int, Iterable[T_Single]]], # type: ignore\n max_files_per_dir: Optional[int],\n parallelize: bool,\n *args,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n return load_data_from_files_raw(\n list(get_data_files_from_directory(data_dirs, max_files_per_dir)), parse_callback, parallelize, *args\n )\n\n\ndef build_lang_dataset_siamese_tokenizer(\n dirs: List[Path],\n name: str,\n data_params: DatasetParams,\n tokenizer: TokenizerRecordable,\n lang_token: str,\n query_token: str,\n fraction_using_func_name: float,\n query_random_token_frequency: float,\n common_tokens: Dict[int, List[int]], # list of token ID\n use_lang_weights: bool,\n lang_ids: Dict[str, int],\n pickle_path=\".\",\n parallelize: bool = False,\n embedding_model=None,\n) -> LangDataset:\n def build_input_features_from_dict(sample: Dict[str, Union[str, int, np.ndarray]]) -> InputFeatures:\n \"\"\"Build InputFeature from Dict by randomizing between using docstring or function name for query\"\"\"\n return InputFeatures(\n language=data_params.lang_ids[cast(str, sample[\"language\"])],\n similarity=cast(int, sample[\"similarity\"]),\n query_tokens=sample[\"query_tokens_func_name_as_query\"],\n query_tokens_mask=sample[\"query_tokens_mask_func_name_as_query\"],\n query_docstring_tokens=sample[\"query_tokens_docstring_as_query\"],\n query_docstring_tokens_mask=sample[\"query_tokens_mask_docstring_as_query\"],\n code_tokens=sample[\"code_tokens_func_name_as_query\"],\n code_tokens_mask=sample[\"code_tokens_mask_func_name_as_query\"],\n )\n\n def parser(\n data_file: Path, data_params: DatasetParams, tokenizer: TokenizerRecordable\n ) -> Tuple[str, int, Iterable[InputFeatures]]:\n (lang, lg, feats) = parse_data_file_siamese_tokenizer(\n data_file, data_params, tokenizer, lang_token, query_token\n )\n return (lang, lg, list(map(build_input_features_from_dict, feats)))\n\n # Train Data\n if not os.path.exists(pickle_path):\n os.makedirs(pickle_path)\n\n pickle_file = Path(pickle_path) / f\"{name}_samples.p\"\n loaded_samples: Dict[str, Tuple[int, 
Iterable[InputFeatures]]]\n\n if os.path.exists(pickle_file):\n logger.debug(f\"Loading dataset {name} raw samples from pickled {pickle_file}\")\n loaded_samples = pickle.load(open(pickle_file, \"rb\"))\n else:\n logger.debug(f\"Building dataset {name} from {dirs}\")\n loaded_samples = load_data_from_dirs_siamese_tokenizer(\n data_dirs=dirs, tokenizer=tokenizer, data_params=data_params, parse_callback=parser, parallelize=parallelize\n )\n nb = 0\n for lang, (lg, ss) in loaded_samples.items():\n ll = list(ss)\n loaded_samples[lang] = (lg, ll)\n nb += len(ll)\n pickle.dump(loaded_samples, open(pickle_file, \"wb\"))\n logger.debug(f\"Pickled dataset {name} [{nb} raw samples] to {pickle_file}\")\n\n lang_weights = compute_language_weightings(loaded_samples, lang_ids)\n logger.debug(f\"lang_weights {lang_weights}\")\n\n transform = Compose(\n [\n InputFeaturesToNpArray_RandomReplace(\n lang_weights=lang_weights,\n fraction_using_func_name=fraction_using_func_name,\n query_random_token_frequency=query_random_token_frequency,\n common_tokens=common_tokens,\n ),\n Tensorize(),\n ]\n )\n dataset = LangDataset(\n loaded_samples,\n lang_ids=data_params.lang_ids,\n transform=transform,\n use_lang_weights=use_lang_weights,\n embedding_model=embedding_model,\n tokenizer=tokenizer,\n emb_annoy_path=Path(pickle_path) / f\"{name}_embeddings.ann\",\n )\n logger.debug(f\"Loaded {name} lang dataset [{len(dataset)} samples]\")\n return dataset\n"
] | [
[
"numpy.sum"
]
] |
will-duncan/ramp_systems | [
"7db1964af6bdb26ee4fed25131a12f9294c4cc1d"
] | [
"src/ramp_to_hill/hill_system.py"
] | [
"import numpy as np\nfrom scipy.integrate import solve_ivp\n\n\ndef HS_ode(t,y,HS):\n rhs = -HS.gamma*y + HS.lambda_value(y)\n return rhs\n\ndef at_HS_equilibrium(t,y,HS,tol = 1e-3):\n val = np.linalg.norm(HS_ode(t,y,HS)) - tol\n if val < 0:\n return 0\n else:\n return val\n\ndef simulate_HS(x0,HS,max_time,tol = 1e-3):\n \"\"\"\n Simulate the hill system ODE. Terminate simulation if an equilibrium is found. \n Input:\n x0 - initial condition\n HS - HillSystemParameter object\n max_time - time at which to terminate the simulation if an equilibrium hasn't been found\n Output:\n sol - output of solve_ivp\n \"\"\"\n ode = lambda t,y: HS_ode(t,y,HS)\n at_equilibrium = lambda t,y: at_HS_equilibrium(t,y,HS,tol)\n at_equilibrium.terminal = True\n integration_interval = (0,max_time)\n sol = solve_ivp(ode,integration_interval,x0,method = 'BDF',events = at_equilibrium)\n return sol\n\n\ndef find_equilibrium(x0,HS,max_time,tol = 1e-3):\n \"\"\"\n Simulate the ODE to equilibrium starting from x0\n Input: \n x0 - initial condition\n HS - HillSystemParameter object\n max_time - run the ode from time points [0,max_time]. If the solver reaches\n max_time before finding an equilibrium, then report that an equilibrium \n was not found\n Output:\n x - value of the equilibrium, if found within max_time. If not found, returns -1\n \"\"\"\n ode = lambda t,y: HS_ode(t,y,HS)\n at_equilibrium = lambda t,y: at_HS_equilibrium(t,y,HS,tol)\n # def ode(t,y,HS = HS):\n # rhs = -HS.gamma*y + HS.lambda_value(y)\n # return rhs\n # def at_equilibrium(t,y,HS = HS,tol = tol):\n # val = np.linalg.norm(ode(t,y)) - tol\n # if val < 0:\n # return 0\n # else:\n # return val\n at_equilibrium.terminal = True\n integration_interval = (0,max_time)\n sol = solve_ivp(ode,integration_interval,x0,method = 'BDF',events = at_equilibrium)\n if sol.status == 1: #at_equilibrium triggered stopping integration\n return sol.y[:,-1]\n else: \n return -1\n\ndef find_hill_equilibria_from_FPs(FPs,HS,RS,max_time,tol = 1e-3):\n \"\"\"\n Use DSGRN equilibria as initial conditions for finding Hill equilibria. \n Input: \n FPs - list of fixed point coordinates computed by DSGRN\n HS - HillSystemParameter object\n RS - RampSystem object\n max_time - maximum time to run the ODE for each equilibrium search attempt. \n Output:\n eq - list of Nx1 numpy arrays. An entry is -1 if find_equilibrium didn't find an\n equilibrium within max_time. 
len(eq) == len(FPs)\n \"\"\"\n reg_DSGRN_equilibria = RS.reg_equilibria_from_FPs(FPs)\n hill_eq = [find_equilibrium(x0.reshape([x0.shape[0]]),HS,max_time,tol = tol) for x0 in reg_DSGRN_equilibria]\n return hill_eq\n\ndef num_unique_vectors(vectors,tol = 1e-3):\n \"\"\"\n Given a list of vectors, count the number which are unique up to some tolerance\n \"\"\"\n repeat_indices = []\n num_unique = 0\n for j, vec0 in enumerate(vectors):\n if j in repeat_indices:\n continue\n num_unique += 1\n for i, vec1 in enumerate(vectors[j+1:]):\n i = i+j+1\n if i in repeat_indices:\n continue\n if np.allclose(vec0,vec1,rtol = tol):\n repeat_indices.append(i)\n return num_unique\n\ndef hill_value(x,hill_parameter):\n sign = hill_parameter.sign\n theta = hill_parameter.theta\n Delta = hill_parameter.Delta\n L = hill_parameter.L\n n = hill_parameter.n\n if sign == 1:\n return L + Delta/((theta/x)**n + 1)\n if sign == -1:\n return L + Delta/((x/theta)**n + 1)\n\ndef hill_second_derivative_root(*args):\n if len(args) == 1:\n hill_parameter = args[0]\n theta = hill_parameter.theta\n n = hill_parameter.n\n elif len(args) == 2:\n theta = args[0]\n n = args[1]\n else:\n raise TypeError('hill_second_derivative_root() takes 1 or 2 position arguments\\\n but {} were given.'.format(len(args)))\n return theta*((n-1)/(n+1))**(1/n)\n\ndef hill_derivative_magnitude(x,*args):\n if len(args) == 1:\n hill_parameter = args[0]\n sign = hill_parameter.sign\n Delta = hill_parameter.Delta\n theta = hill_parameter.theta\n n = hill_parameter.n\n elif len(args) == 4:\n sign = args[0]\n Delta = args[1]\n theta = args[2]\n n = args[3]\n else: \n raise TypeError('hill_derivative() takes 1 or 4 positiional arguments\\\n but {} were given.'.format(len(args)))\n if n == np.inf:\n if theta == x:\n return np.inf\n else: \n return 0\n return Delta*n/(theta*(theta/x)**(n-1) + 2*x + x*(x/theta)**n)\n\ndef make_hill_coefficient_array(Network,n):\n \"\"\"\n Make a hill coefficient array consistent with the network topology with each\n hill coefficient equal to n\n Input:\n Network - DSGRN network object\n n - float or integer greater than 1\n Output:\n numpy array with entry [i,j] equal to n if j->i is an edge and 0 otherwise\n \"\"\"\n N = Network.size()\n hill_coefficients = np.zeros([N,N])\n for j in range(N):\n for i in Network.outputs(j):\n hill_coefficients[i,j] = n\n return hill_coefficients\n\ndef make_sign_from_network(Network):\n \"\"\"\n Make an NxN numpy array describing the interaction sign between edges\n Input:\n Network - DSGRN network object\n Output:\n numpy array with 1 if j->i, -1 if j-|i, and 0 otherwise. 
\n \"\"\"\n N = Network.size()\n sign = np.zeros([N,N])\n for j in range(N):\n for i in Network.outputs(j):\n sign[i,j] = 1 if Network.interaction(j,i) else -1\n return sign\n\nclass HillParameter:\n\n def __init__(self,sign,L,Delta,theta,n):\n \"\"\"\n Input:\n sign - either 1 or -1\n L,Delta,theta,n - parameters for a hill function\n \"\"\"\n self.sign = sign\n self.L = L\n self.Delta = Delta\n self.theta = theta\n self.n = n\n \n def __repr__(self):\n sign = self.sign\n L = self.L\n Delta = self.Delta\n theta = self.theta\n n = self.n\n return 'HillParameter({},{},{},{},{})'.format(sign,L,Delta,theta,n)\n\n def func_value(self,x):\n return hill_value(x,self)\n\n def dx_value(self,x):\n return self.sign*hill_derivative_magnitude(x,self)\n\n\nclass HillSystemParameter:\n\n def __init__(self,Network,sign,L,Delta,theta,n,gamma):\n \"\"\"\n Input:\n gamma - length N lists\n sign,L,Delta,theta,n - NxN arrays\n \"\"\"\n self.Network = Network\n N = Network.size()\n self.sign = np.array(sign)\n self.L = np.array(L)\n self.Delta = np.array(Delta)\n self.theta = np.array(theta)\n self.n = np.array(n)\n self.gamma = np.array(gamma).reshape([N])\n \n def __eq__(self,other):\n if isinstance(other,HillSystemParameter):\n return np.array_equal(self.sign,other.sign) and np.array_equal(self.L, other.L) \\\n and np.array_equal(self.Delta, other.Delta) and np.array_equal(self.theta,other.theta) \\\n and np.array_equal(self.n, other.n) and np.array_equal(self.gamma,other.gamma)\n else: \n return False\n \n\n def hill_parameter(self,i,j):\n return HillParameter(self.sign[i,j],self.L[i,j],self.Delta[i,j],self.theta[i,j],self.n[i,j])\n\n\n def lambda_value(self,x):\n Network = self.Network\n N = Network.size()\n val = np.zeros([N])\n for i in range(N):\n cur_prod = 1\n for source_set in Network.logic(i):\n cur_sum = 0\n for j in source_set:\n cur_param = self.hill_parameter(i,j)\n cur_sum += cur_param.func_value(x[j])\n cur_prod *= cur_sum\n val[i] = cur_prod\n return val\n\n def is_equilibrium(self,x,tol = 1e-4):\n N = self.Network.size()\n x = np.array(x).reshape([N])\n return np.allclose(self.lambda_value(x)-self.gamma*x,np.zeros([N]),atol=tol)\n\n def Jacobian(self,x):\n N = self.Network.size()\n J = np.diag(-self.gamma)\n for i in range(N):\n for j in self.Network.inputs(i):\n cur_prod = 1\n for source_set in self.Network.logic(i):\n cur_sum = 0\n if j in source_set:\n cur_sum = self.hill_parameter(i,j).dx_value(x[j])\n else:\n for k in source_set:\n cur_sum += self.hill_parameter(i,k).func_value(x[k])\n cur_prod *= cur_sum\n J[i,j] += cur_prod\n return j\n\n\n def is_saddle(self,x,tol = 1e-4):\n N = self.Network.size()\n x = np.array(x).reshape([N,1])\n if not self.is_equilibrium(x,tol=tol):\n return False\n J = self.Jacobian(x)\n if np.linalg.matrix_rank(J) == N:\n return False\n return True\n\n\n"
] | [
[
"numpy.allclose",
"numpy.zeros",
"numpy.diag",
"scipy.integrate.solve_ivp",
"numpy.linalg.matrix_rank",
"numpy.array_equal",
"numpy.array"
]
] |
taureandyernv/cuml | [
"c92b594d3bda342c64d88a9c44b5d6e507b13f6c"
] | [
"python/cuml/test/test_tsne.py"
] | [
"\nfrom cuml.manifold import TSNE\n\nfrom sklearn.manifold.t_sne import trustworthiness\nfrom sklearn import datasets\nimport pandas as pd\nimport numpy as np\nimport cudf\nimport pytest\n\ndataset_names = ['digits', 'boston', 'iris', 'breast_cancer',\n 'diabetes']\n\n\[email protected]('name', dataset_names)\ndef test_tsne(name):\n \"\"\"\n This tests how TSNE handles a lot of input data across time.\n (1) cuDF DataFrames are passed input\n (2) Numpy arrays are passed in\n (3) Params are changed in the TSNE class\n (4) The class gets re-used across time\n (5) Trustworthiness is checked\n (6) Tests NAN in TSNE output for learning rate explosions\n (7) Tests verbosity\n \"\"\"\n datasets\n X = eval(\"datasets.load_{}\".format(name))().data\n X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X))\n\n for i in range(3):\n print(\"iteration = \", i)\n\n tsne = TSNE(2, random_state=i, verbose=0, learning_rate=2+i)\n\n Y = tsne.fit_transform(X_cudf).to_pandas().values\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Reuse\n Y = tsne.fit_transform(X)\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Again\n tsne = TSNE(2, random_state=i+2, verbose=1, learning_rate=2+i+2)\n\n Y = tsne.fit_transform(X_cudf).to_pandas().values\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Reuse\n Y = tsne.fit_transform(X)\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n"
] | [
[
"sklearn.manifold.t_sne.trustworthiness",
"pandas.DataFrame",
"numpy.isnan"
]
] |
KiriLev/albumentations | [
"c91b67c710d20755d04166b7b5e41d430aef9662"
] | [
"tests/test_serialization.py"
] | [
"import random\nfrom unittest.mock import patch\n\nimport cv2\nimport pytest\nimport numpy as np\nimport imgaug as ia\n\nimport albumentations as A\nimport albumentations.augmentations.functional as F\nfrom .utils import OpenMock\n\nTEST_SEEDS = (0, 1, 42, 111, 9999)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.CoarseDropout, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.OpticalDistortion, {}],\n [A.GridDistortion, {}],\n [A.ElasticTransform, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.SmallestMaxSize, {}],\n [A.LongestMaxSize, {}],\n [A.RandomGridShuffle, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.Downscale, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\nAUGMENTATION_CLS_PARAMS = (\n [\n [\n A.ImageCompression,\n {\n \"quality_lower\": 10,\n \"quality_upper\": 80,\n \"compression_type\": A.ImageCompression.ImageCompressionType.WEBP,\n },\n ],\n [A.JpegCompression, {\"quality_lower\": 10, \"quality_upper\": 80}],\n [A.HueSaturationValue, {\"hue_shift_limit\": 70, \"sat_shift_limit\": 95, \"val_shift_limit\": 55}],\n [A.RGBShift, {\"r_shift_limit\": 70, \"g_shift_limit\": 80, \"b_shift_limit\": 40}],\n [A.RandomBrightnessContrast, {\"brightness_limit\": 0.5, \"contrast_limit\": 0.8}],\n [A.Blur, {\"blur_limit\": 3}],\n [A.MotionBlur, {\"blur_limit\": 3}],\n [A.MedianBlur, {\"blur_limit\": 3}],\n [A.GaussianBlur, {\"blur_limit\": 3}],\n [A.GaussNoise, {\"var_limit\": (20, 90)}],\n [A.CLAHE, {\"clip_limit\": 2, \"tile_grid_size\": (12, 12)}],\n [A.RandomGamma, {\"gamma_limit\": (10, 90)}],\n [A.Cutout, {\"num_holes\": 4, \"max_h_size\": 4, \"max_w_size\": 4}],\n [A.CoarseDropout, {\"max_holes\": 4, \"max_height\": 4, \"max_width\": 4}],\n [A.RandomSnow, {\"snow_point_lower\": 0.2, \"snow_point_upper\": 0.4, \"brightness_coeff\": 4}],\n [\n A.RandomRain,\n {\n \"slant_lower\": -5,\n \"slant_upper\": 5,\n \"drop_length\": 15,\n \"drop_width\": 2,\n \"drop_color\": (100, 100, 100),\n \"blur_value\": 3,\n \"brightness_coefficient\": 0.5,\n 
\"rain_type\": \"heavy\",\n },\n ],\n [A.RandomFog, {\"fog_coef_lower\": 0.2, \"fog_coef_upper\": 0.8, \"alpha_coef\": 0.11}],\n [\n A.RandomSunFlare,\n {\n \"flare_roi\": (0.1, 0.1, 0.9, 0.6),\n \"angle_lower\": 0.1,\n \"angle_upper\": 0.95,\n \"num_flare_circles_lower\": 7,\n \"num_flare_circles_upper\": 11,\n \"src_radius\": 300,\n \"src_color\": (200, 200, 200),\n },\n ],\n [\n A.RandomShadow,\n {\n \"shadow_roi\": (0.1, 0.4, 0.9, 0.9),\n \"num_shadows_lower\": 2,\n \"num_shadows_upper\": 4,\n \"shadow_dimension\": 8,\n },\n ],\n [\n A.PadIfNeeded,\n {\"min_height\": 512, \"min_width\": 512, \"border_mode\": cv2.BORDER_CONSTANT, \"value\": (10, 10, 10)},\n ],\n [\n A.Rotate,\n {\n \"limit\": 120,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ShiftScaleRotate,\n {\n \"shift_limit\": 0.2,\n \"scale_limit\": 0.2,\n \"rotate_limit\": 70,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ShiftScaleRotate,\n {\n \"shift_limit_x\": 0.3,\n \"shift_limit_y\": 0.4,\n \"scale_limit\": 0.2,\n \"rotate_limit\": 70,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.OpticalDistortion,\n {\n \"distort_limit\": 0.2,\n \"shift_limit\": 0.2,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.GridDistortion,\n {\n \"num_steps\": 10,\n \"distort_limit\": 0.5,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ElasticTransform,\n {\n \"alpha\": 2,\n \"sigma\": 25,\n \"alpha_affine\": 40,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.CropNonEmptyMaskIfExists, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.Crop, {\"x_max\": 64, \"y_max\": 64}],\n [A.ToFloat, {\"max_value\": 16536}],\n [A.Normalize, {\"mean\": (0.385, 0.356, 0.306), \"std\": (0.129, 0.124, 0.125), \"max_pixel_value\": 100.0}],\n [A.RandomBrightness, {\"limit\": 0.4}],\n [A.RandomContrast, {\"limit\": 0.4}],\n [A.RandomScale, {\"scale_limit\": 0.2, \"interpolation\": cv2.INTER_CUBIC}],\n [A.Resize, {\"height\": 64, \"width\": 64}],\n [A.SmallestMaxSize, {\"max_size\": 64, \"interpolation\": cv2.INTER_CUBIC}],\n [A.LongestMaxSize, {\"max_size\": 128, \"interpolation\": cv2.INTER_CUBIC}],\n [A.RandomGridShuffle, {\"grid\": (5, 5)}],\n [A.Solarize, {\"threshold\": 32}],\n [A.Posterize, {\"num_bits\": 1}],\n [A.Equalize, {\"mode\": \"pil\", \"by_channels\": False}],\n [A.MultiplicativeNoise, {\"multiplier\": (0.7, 2.3), \"per_channel\": True, \"elementwise\": True}],\n [\n A.ColorJitter,\n {\"brightness\": [0.2, 0.3], \"contrast\": [0.7, 0.9], \"saturation\": [1.2, 1.7], \"hue\": [-0.2, 0.1]},\n ],\n [\n A.Perspective,\n {\n \"scale\": 0.5,\n \"keep_size\": False,\n \"pad_mode\": cv2.BORDER_REFLECT_101,\n \"pad_val\": 10,\n \"mask_pad_val\": 100,\n \"fit_output\": True,\n \"interpolation\": cv2.INTER_CUBIC,\n },\n ],\n [A.Sharpen, {\"alpha\": [0.2, 0.5], \"lightness\": [0.5, 1.0]}],\n ],\n)\n\n\[email protected]([\"augmentation_cls\", \"params\"], *AUGMENTATION_CLS_PARAMS)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", 
TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization_with_custom_parameters(\n augmentation_cls, params, p, seed, image, mask, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected]([\"augmentation_cls\", \"params\"], *AUGMENTATION_CLS_PARAMS)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\[email protected](\"data_format\", (\"yaml\",))\ndef test_augmentations_serialization_to_file_with_custom_parameters(\n augmentation_cls, params, p, seed, image, mask, always_apply, data_format\n):\n with patch(\"builtins.open\", OpenMock()):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n filepath = \"serialized.{}\".format(data_format)\n A.save(aug, filepath, data_format=data_format)\n deserialized_aug = A.load(filepath, data_format=data_format)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.GaussNoise, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.Crop, {\"x_max\": 64, \"y_max\": 64}],\n [A.FromFloat, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.Resize, {\"height\": 64, \"width\": 64}],\n [A.SmallestMaxSize, {}],\n [A.LongestMaxSize, {}],\n [A.RandomSizedBBoxSafeCrop, {\"height\": 50, \"width\": 50}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_for_bboxes_serialization(\n augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, 
bboxes=albumentations_bboxes)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.GaussNoise, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.FromFloat, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_for_keypoints_serialization(augmentation_cls, params, p, seed, image, keypoints, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, keypoints=keypoints)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n 
],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_for_bboxes_serialization(\n augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, bboxes=albumentations_bboxes)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_for_keypoints_serialization(\n augmentation_cls, params, p, seed, image, keypoints, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, keypoints=keypoints)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\", \"call_params\"],\n [[A.RandomCropNearBBox, {\"max_part_shift\": 0.15}, {\"cropping_bbox\": [-59, 77, 177, 231]}]],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization_with_call_params(\n augmentation_cls, params, call_params, p, seed, image, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n annotations = {\"image\": image, **call_params}\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(**annotations)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(**annotations)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n\n\ndef test_from_float_serialization(float_image):\n aug = A.FromFloat(p=1, dtype=\"uint8\")\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n aug_data = aug(image=float_image)\n deserialized_aug_data = deserialized_aug(image=float_image)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n\n\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization(seed, image, mask):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose(\n [\n A.Resize(1024, 1024),\n A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),\n A.OneOf(\n [\n A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),\n A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),\n ]\n ),\n ]\n ),\n A.Compose(\n [\n A.Resize(1024, 1024),\n 
A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),\n A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),\n ]\n ),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ]\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"bboxes\", \"bbox_format\", \"labels\"],\n [\n ([(20, 30, 40, 50)], \"coco\", [1]),\n ([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], \"coco\", [1, 2]),\n ([(20, 30, 60, 80)], \"pascal_voc\", [2]),\n ([(20, 30, 60, 80, 99)], \"pascal_voc\", [1]),\n ([(0.2, 0.3, 0.4, 0.5)], \"yolo\", [2]),\n ([(0.2, 0.3, 0.4, 0.5, 99)], \"yolo\", [1]),\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),\n A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ],\n bbox_params={\"format\": bbox_format, \"label_fields\": [\"labels\"]},\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, bboxes=bboxes, labels=labels)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"keypoints\", \"keypoint_format\", \"labels\"],\n [\n ([(20, 30, 40, 50)], \"xyas\", [1]),\n ([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], \"xy\", [1, 2]),\n ([(20, 30, 60, 80)], \"yx\", [2]),\n ([(20, 30, 60, 80, 99)], \"xys\", [1]),\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),\n A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ],\n keypoint_params={\"format\": keypoint_format, \"label_fields\": [\"labels\"]},\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, keypoints=keypoints, labels=labels)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ChannelShuffle, {}],\n [A.GaussNoise, {}],\n [A.Cutout, {}],\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.CLAHE, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n 
[A.ToGray, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.OpticalDistortion, {}],\n [A.GridDistortion, {}],\n [A.ElasticTransform, {}],\n [A.Normalize, {}],\n [A.ToFloat, {}],\n [A.FromFloat, {}],\n [A.RandomGridShuffle, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_additional_targets_for_image_only_serialization(augmentation_cls, params, image, seed):\n aug = A.Compose([augmentation_cls(always_apply=True, **params)], additional_targets={\"image2\": \"image\"})\n image2 = image.copy()\n\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, image2=image2)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, image2=image2)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"image2\"], deserialized_aug_data[\"image2\"])\n\n\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"p\", [1])\ndef test_lambda_serialization(image, mask, albumentations_bboxes, keypoints, seed, p):\n def vflip_image(image, **kwargs):\n return F.vflip(image)\n\n def vflip_mask(mask, **kwargs):\n return F.vflip(mask)\n\n def vflip_bbox(bbox, **kwargs):\n return F.bbox_vflip(bbox, **kwargs)\n\n def vflip_keypoint(keypoint, **kwargs):\n return F.keypoint_vflip(keypoint, **kwargs)\n\n aug = A.Lambda(name=\"vflip\", image=vflip_image, mask=vflip_mask, bbox=vflip_bbox, keypoint=vflip_keypoint, p=p)\n\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={\"vflip\": aug})\n set_seed(seed)\n aug_data = aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n"
] | [
[
"numpy.random.seed",
"numpy.array_equal"
]
] |
SarahGuo1999/SiamR-CNN | [
"df9b428aeb90da0c8b2c8076f54f632efb07366c"
] | [
"train.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train.py\n\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport cv2\nimport six\nimport shutil\n\nassert six.PY3, \"FasterRCNN requires Python 3!\"\nimport tensorflow as tf\nimport tqdm\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack import *\nfrom tensorpack.tfutils import optimizer\nfrom tensorpack.tfutils.common import get_tf_version_tuple, get_tensors_by_names\nfrom tensorpack.tfutils.summary import add_moving_summary\nfrom tensorpack.tfutils.varreplace import freeze_variables\n\nimport model_frcnn\nimport model_mrcnn\nfrom basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone, backbone_scope\nfrom dataset import DetectionDataset\nfrom config import finalize_configs, config as cfg\nfrom data import get_all_anchors, get_all_anchors_fpn, get_train_dataflow\nfrom eval_utils import EvalCallback\nfrom model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align\nfrom model_cascade import CascadeRCNNHead, CascadeRCNNHeadWithHardExamples\nfrom model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses\nfrom model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets\nfrom model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head\nfrom model_rpn import generate_rpn_proposals, rpn_head, rpn_losses\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nclass DetectionModel(ModelDesc):\n def preprocess(self, image):\n image = tf.expand_dims(image, 0)\n image = image_preprocess(image, bgr=True)\n return tf.transpose(image, [0, 3, 1, 2])\n\n @property\n def training(self):\n return get_current_tower_context().is_training\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)\n tf.summary.scalar('learning_rate-summary', lr)\n\n # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.\n lr = lr / 8.\n opt = tf.train.MomentumOptimizer(lr, 0.9)\n if cfg.TRAIN.NUM_GPUS < 8:\n opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)\n return opt\n\n def get_inference_tensor_names(self):\n \"\"\"\n Returns two lists of tensor names to be used to create an inference callable.\n\n Returns:\n [str]: input names\n [str]: output names\n \"\"\"\n if cfg.MODE_THIRD_STAGE:\n out = ['output/boxes', 'output/scores', 'third_stage_features_out', 'ff_gt_tracklet_scores',\n 'sparse_tracklet_scores', 'tracklet_score_indices']\n else:\n out = ['output/boxes', 'output/scores', 'output/labels']\n if cfg.MODE_MASK:\n out.append('output/masks')\n if cfg.EXTRACT_GT_FEATURES:\n return ['image', 'roi_boxes'], ['boxes_for_extraction', 'features_for_extraction']\n else:\n return ['image'], out\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n\n image = self.preprocess(inputs['image']) # 1CHW\n\n features = self.backbone(image)\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n if cfg.EXTRACT_GT_FEATURES:\n anchor_inputs[\"roi_boxes\"] = inputs[\"roi_boxes\"]\n proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?\n\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n head_losses = self.roi_heads(image, features, proposals, targets)\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + 
head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n\nclass ResNetC4Model(DetectionModel):\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image'),\n tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),\n tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def backbone(self, image):\n return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]\n\n def rpn(self, image, features, inputs):\n featuremap = features[0]\n rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)\n anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])\n anchors = anchors.narrow_to(featuremap)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox\n proposal_boxes, proposal_scores = generate_rpn_proposals(\n tf.reshape(pred_boxes_decoded, [-1, 4]),\n tf.reshape(rpn_label_logits, [-1]),\n image_shape2d,\n cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,\n cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)\n\n if self.training:\n losses = rpn_losses(\n anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n featuremap = features[0]\n\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n # sample proposal boxes in training\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n # The boxes to be used to crop RoIs.\n # Use all proposal boxes in inference\n\n boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)\n roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)\n\n feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7\n # Keep C5 feature to be shared with mask branch\n feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)\n\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,\n tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n # In training, mask branch shares the same C5 feature.\n fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 14,\n pad_border=False) # nfg x 1x14x14\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = 
clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n\n if cfg.MODE_MASK:\n roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)\n feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNModel(DetectionModel):\n\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image')]\n num_anchors = len(cfg.RPN.ANCHOR_RATIOS)\n for k in range(len(cfg.FPN.ANCHOR_STRIDES)):\n ret.extend([\n tf.placeholder(tf.int32, (None, None, num_anchors),\n 'anchor_labels_lvl{}'.format(k + 2)),\n tf.placeholder(tf.float32, (None, None, num_anchors, 4),\n 'anchor_boxes_lvl{}'.format(k + 2))])\n ret.extend([\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n if cfg.EXTRACT_GT_FEATURES:\n ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))\n return ret\n\n def slice_feature_and_anchors(self, p23456, anchors):\n for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):\n with tf.name_scope('FPN_slice_lvl{}'.format(i)):\n anchors[i] = anchors[i].narrow_to(p23456[i])\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n p23456 = fpn_model('fpn', c2345)\n return p23456\n\n def rpn(self, image, features, inputs):\n if cfg.EXTRACT_GT_FEATURES:\n boxes = inputs['roi_boxes']\n return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)\n\n assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n all_anchors_fpn = get_all_anchors_fpn()\n multilevel_anchors = [RPNAnchors(\n all_anchors_fpn[i],\n inputs['anchor_labels_lvl{}'.format(i + 2)],\n inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]\n self.slice_feature_and_anchors(features, multilevel_anchors)\n\n # Multi-Level RPN Proposals\n rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))\n for pi in features]\n multilevel_label_logits = [k[0] for k in rpn_outputs]\n multilevel_box_logits = [k[1] for k in rpn_outputs]\n multilevel_pred_boxes = [anchor.decode_logits(logits)\n for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]\n\n proposal_boxes, proposal_scores = generate_fpn_proposals(\n multilevel_pred_boxes, multilevel_label_logits, image_shape2d)\n\n if self.training:\n losses = multilevel_rpn_losses(\n multilevel_anchors, multilevel_label_logits, multilevel_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n 
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if not cfg.FPN.CASCADE:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n\n head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(\n 'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,\n gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n else:\n def roi_func(boxes):\n return multilevel_roi_align(features[:4], boxes, 7)\n\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if cfg.EXTRACT_GT_FEATURES:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n tf.identity(roi_feature_fastrcnn, \"rpn/feature\")\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNTrackModel(ResNetFPNModel):\n def inputs(self):\n ret = super().inputs()\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ref_features'))\n else:\n ret.append(tf.placeholder(tf.float32, (None, None, 3), 'ref_image'))\n ret.append(tf.placeholder(tf.float32, (4,), 'ref_box'))\n if cfg.MODE_THIRD_STAGE:\n ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ff_gt_tracklet_feat'))\n ret.append(tf.placeholder(tf.float32, (None, 256, 7, 7), 'active_tracklets_feats'))\n ret.append(tf.placeholder(tf.float32, (None, 4), 'active_tracklets_boxes'))\n ret.append(tf.placeholder(tf.float32, (), 'tracklet_distance_threshold'))\n if cfg.MODE_HARD_MINING:\n ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 'hard_negative_features'))\n if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:\n ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 
'hard_positive_features'))\n ret.append(tf.placeholder(tf.float32, (None, 3), 'hard_positive_ious'))\n ret.append(tf.placeholder(tf.float32, (None, 4), 'hard_positive_gt_boxes'))\n ret.append(tf.placeholder(tf.float32, (None, 3, 4), 'hard_positive_jitter_boxes'))\n if cfg.EXTRACT_GT_FEATURES:\n ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))\n return ret\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n with backbone_scope(freeze=cfg.BACKBONE.FREEZE_AT > 3):\n p23456 = fpn_model('fpn', c2345)\n return p23456, c2345\n\n def rpn(self, image, features, inputs):\n if cfg.EXTRACT_GT_FEATURES:\n boxes = inputs['roi_boxes']\n return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)\n\n if cfg.BACKBONE.FREEZE_AT > 3:\n with freeze_variables(stop_gradient=False, skip_collection=True):\n return super().rpn(image, features, inputs)\n else:\n return super().rpn(image, features, inputs)\n\n def roi_heads(self, image, ref_features, ref_box, features, proposals, targets, hard_negative_features=None,\n hard_positive_features=None, hard_positive_ious=None, hard_positive_gt_boxes=None,\n hard_positive_jitter_boxes=None, precomputed_ref_features=None):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if precomputed_ref_features is None:\n roi_aligned_ref_features = multilevel_roi_align(ref_features[:4], ref_box[tf.newaxis], 7)\n else:\n roi_aligned_ref_features = precomputed_ref_features[tf.newaxis]\n\n if cfg.MODE_SHARED_CONV_REDUCE:\n scope = tf.get_variable_scope()\n else:\n scope = \"\"\n\n assert cfg.FPN.CASCADE\n\n def roi_func(boxes, already_aligned_features=None):\n if already_aligned_features is None:\n aligned_features = multilevel_roi_align(features[:4], boxes, 7)\n else:\n # for hard example mining\n aligned_features = already_aligned_features\n tiled = tf.tile(roi_aligned_ref_features, [tf.shape(aligned_features)[0], 1, 1, 1])\n concat_features = tf.concat((tiled, aligned_features), axis=1)\n\n with argscope(Conv2D, data_format='channels_first',\n kernel_initializer=tf.variance_scaling_initializer(\n scale=2.0, mode='fan_out',\n distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n reduced_features = Conv2D('conv_reduce', concat_features, 256, 1, activation=None)\n return reduced_features\n\n if cfg.MODE_HARD_MINING and self.training:\n fastrcnn_head = CascadeRCNNHeadWithHardExamples(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS, hard_negative_features,\n hard_positive_features, cfg.HARD_NEGATIVE_LOSS_SCALING_FACTOR,\n cfg.HARD_POSITIVE_LOSS_SCALING_FACTOR, hard_positive_ious, hard_positive_gt_boxes,\n hard_positive_jitter_boxes)\n else:\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if cfg.EXTRACT_GT_FEATURES:\n # get boxes and features for each of the three cascade stages!\n b0 = proposals.boxes\n b1, b2, _ = fastrcnn_head._cascade_boxes\n f0 = multilevel_roi_align(features[:4], b0, 7)\n f1 = multilevel_roi_align(features[:4], b1, 7)\n f2 = multilevel_roi_align(features[:4], b2, 7)\n tf.concat([b0, b1, b2], axis=0, 
name=\"boxes_for_extraction\")\n tf.concat([f0, f1, f2], axis=0, name=\"features_for_extraction\")\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n\n if cfg.MEASURE_IOU_DURING_TRAINING:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output_train')\n # if predictions are empty, this might break...\n # to prevent, stack dummy box\n boxes_for_iou = tf.concat([final_boxes[:1], tf.constant([[0.0, 0.0, 1.0, 1.0]],\n dtype=tf.float32)], axis=0)\n from examples.FasterRCNN.utils.box_ops import pairwise_iou\n iou_at_1 = tf.identity(pairwise_iou(gt_boxes[:1], boxes_for_iou)[0, 0], name=\"train_iou_at_1\")\n add_moving_summary(iou_at_1)\n\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n image = self.preprocess(inputs['image']) # 1CHW\n\n fpn_features, backbone_features = self.backbone(image)\n\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n ref_features = None\n ref_box = None\n else:\n ref_image = self.preprocess(inputs['ref_image']) # 1CHW\n ref_box = inputs['ref_box']\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n ref_features, _ = self.backbone(ref_image)\n\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n if cfg.EXTRACT_GT_FEATURES:\n anchor_inputs[\"roi_boxes\"] = inputs[\"roi_boxes\"]\n proposals, rpn_losses = self.rpn(image, fpn_features, anchor_inputs) # inputs?\n\n second_stage_features = fpn_features\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n\n hard_negative_features = None\n hard_positive_features = None\n hard_positive_ious = None\n 
hard_positive_gt_boxes = None\n hard_positive_jitter_boxes = None\n if cfg.MODE_HARD_MINING:\n hard_negative_features = inputs['hard_negative_features']\n if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:\n hard_positive_features = inputs['hard_positive_features']\n hard_positive_ious = inputs['hard_positive_ious']\n hard_positive_gt_boxes = inputs['hard_positive_gt_boxes']\n hard_positive_jitter_boxes = inputs['hard_positive_jitter_boxes']\n\n precomputed_ref_features = None\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n precomputed_ref_features = inputs['ref_features']\n\n # Extend proposals by previous frame detections\n if not self.training and cfg.MODE_THIRD_STAGE and cfg.EXTEND_PROPOSALS_BY_ACTIVE_TRACKLETS:\n proposal_boxes = proposals.boxes\n tracklet_boxes = inputs['active_tracklets_boxes']\n concat_boxes = tf.concat([proposal_boxes, tracklet_boxes], axis=0)\n proposals = BoxProposals(concat_boxes)\n\n head_losses = self.roi_heads(image, ref_features, ref_box, second_stage_features, proposals, targets,\n hard_negative_features, hard_positive_features, hard_positive_ious,\n hard_positive_gt_boxes, hard_positive_jitter_boxes,\n precomputed_ref_features=precomputed_ref_features)\n\n if cfg.MODE_THIRD_STAGE:\n self._run_third_stage(inputs, second_stage_features, tf.shape(image)[2:4])\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n def _run_third_stage(self, inputs, second_stage_features, image_hw):\n boxes, scores = get_tensors_by_names(['output/boxes', 'output/scores'])\n # let's fix (as in finalize) the boxes, so we can roi align only one time\n aligned_features_curr = multilevel_roi_align(second_stage_features[:4], boxes, 7)\n # these also need to be extracted!\n aligned_features_curr = tf.identity(aligned_features_curr, name='third_stage_features_out')\n\n ff_gt_tracklet_scores, _ = self._score_for_third_stage(ref_feats=inputs['ff_gt_tracklet_feat'][tf.newaxis],\n det_feats=aligned_features_curr)\n tf.identity(ff_gt_tracklet_scores, name='ff_gt_tracklet_scores')\n sparse_tracklet_scores, tracklet_score_indices = self._score_for_third_stage(\n ref_feats=inputs['active_tracklets_feats'], det_feats=aligned_features_curr,\n dense=False, ref_boxes=inputs['active_tracklets_boxes'], det_boxes=boxes, image_hw=image_hw,\n tracklet_distance_threshold=inputs['tracklet_distance_threshold'])\n tf.identity(sparse_tracklet_scores, name='sparse_tracklet_scores')\n tf.identity(tracklet_score_indices, name='tracklet_score_indices')\n\n def _score_for_third_stage(self, ref_feats, det_feats, dense=True, ref_boxes=None, det_boxes=None, image_hw=None,\n tracklet_distance_threshold=0.08):\n # build all pairs\n n_refs = tf.shape(ref_feats)[0]\n n_dets = tf.shape(det_feats)[0]\n active_tracklets_tiled = tf.tile(ref_feats[:, tf.newaxis], multiples=[1, n_dets, 1, 1, 1])\n dets_tiled = tf.tile(det_feats[tf.newaxis], multiples=[n_refs, 1, 1, 1, 1])\n concated = tf.concat([active_tracklets_tiled, dets_tiled], axis=2)\n\n if not dense:\n # use boxes to prune the connectivity\n assert ref_boxes is not None\n assert det_boxes is not None\n assert image_hw is not None\n\n def xyxy_to_cxcywh(boxes_xyxy):\n wh = boxes_xyxy[:, 2:] - boxes_xyxy[:, :2]\n c = boxes_xyxy[:, :2] + wh / 2\n boxes_cwh = tf.concat((c, wh), axis=1)\n return boxes_cwh\n\n active_tracklets_boxes_cxcywh = xyxy_to_cxcywh(ref_boxes)\n 
boxes_cxcywh = xyxy_to_cxcywh(det_boxes)\n # normalize by image size\n h = image_hw[0]\n w = image_hw[1]\n norm = tf.cast(tf.stack([w, h, w, h], axis=0), tf.float32)\n diffs = tf.abs(active_tracklets_boxes_cxcywh[:, tf.newaxis] - boxes_cxcywh[tf.newaxis]) / norm[\n tf.newaxis, tf.newaxis]\n\n # use distances of boxes, first frame scores (\"scores\") to prune\n thresholds = tf.stack([tracklet_distance_threshold] * 4, axis=0)\n keep_mask = tf.reduce_all(diffs < thresholds, axis=2)\n\n indices = tf.where(keep_mask)\n flattened = tf.boolean_mask(concated, keep_mask)\n else:\n indices = None\n flattened = tf.reshape(\n concated, [tf.shape(concated)[0] * tf.shape(concated)[1]] + [int(x) for x in concated.shape[2:]])\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if cfg.MODE_SHARED_CONV_REDUCE:\n scope = tf.get_variable_scope()\n else:\n scope = \"\"\n all_posteriors = []\n # do this for every cascade stage\n for idx in range(3):\n with tf.variable_scope('cascade_rcnn_stage{}'.format(idx + 1), reuse=True):\n with argscope(Conv2D, data_format='channels_first'):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n reduced_features = Conv2D('conv_reduce', flattened, 256, 1, activation=None)\n head_feats = fastrcnn_head_func('head', reduced_features)\n with tf.variable_scope('outputs_new', reuse=True):\n classification = FullyConnected('class', head_feats, 2)\n posteriors = tf.nn.softmax(classification)\n all_posteriors.append(posteriors)\n posteriors = (all_posteriors[0] + all_posteriors[1] + all_posteriors[2]) / tf.constant(3.0, dtype=tf.float32)\n scores = posteriors[:, 1]\n return scores, indices\n\n def get_inference_tensor_names(self):\n inp, out = super().get_inference_tensor_names()\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n inp.append('ref_features')\n else:\n inp.append('ref_image')\n inp.append('ref_box')\n if cfg.MODE_THIRD_STAGE:\n inp.append('ff_gt_tracklet_feat')\n inp.append('active_tracklets_feats')\n inp.append('active_tracklets_boxes')\n inp.append('tracklet_distance_threshold')\n return inp, out\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')\n parser.add_argument('--logdir', help='log directory', default='train_log/siamrcnn')\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n if get_tf_version_tuple() < (1, 6):\n # https://github.com/tensorflow/tensorflow/issues/14657\n logger.warn(\"TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.\")\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n\n MODEL = ResNetFPNTrackModel()\n DetectionDataset() # initialize the config with information from our dataset\n\n is_horovod = cfg.TRAINER == 'horovod'\n if is_horovod:\n hvd.init()\n logger.info(\"Horovod Rank={}, Size={}\".format(hvd.rank(), hvd.size()))\n\n if not is_horovod or hvd.rank() == 0:\n # keep the old log folder if already existing! (before it would just delete it)\n logger.set_logger_dir(args.logdir, 'k')\n # logger.set_logger_dir(args.logdir, 'd')\n\n finalize_configs(is_training=True)\n stepnum = cfg.TRAIN.STEPS_PER_EPOCH\n\n # warmup is step based, lr is epoch based\n init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)\n warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]\n warmup_end_epoch = cfg.TRAIN.WARMUP * 1. 
/ stepnum\n lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]\n\n factor = 8. / cfg.TRAIN.NUM_GPUS\n for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):\n mult = 0.1 ** (idx + 1)\n lr_schedule.append(\n (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))\n logger.info(\"Warm Up Schedule (steps, value): \" + str(warmup_schedule))\n logger.info(\"LR Schedule (epochs, value): \" + str(lr_schedule))\n train_dataflow = get_train_dataflow()\n # This is what's commonly referred to as \"epochs\"\n total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()\n logger.info(\"Total passes of the training set is: {:.5g}\".format(total_passes))\n\n callbacks = [\n PeriodicCallback(\n ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),\n # every_k_epochs=1),\n every_k_epochs=20),\n # linear warmup\n ScheduledHyperParamSetter(\n 'learning_rate', warmup_schedule, interp='linear', step_based=True),\n ScheduledHyperParamSetter('learning_rate', lr_schedule),\n PeakMemoryTracker(),\n EstimatedTimeLeft(median=True),\n SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout\n ] + [\n EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)\n for dataset in cfg.DATA.VAL\n ]\n if not is_horovod:\n callbacks.append(GPUUtilizationTracker())\n\n start_epoch = cfg.TRAIN.STARTING_EPOCH\n if is_horovod and hvd.rank() > 0:\n session_init = None\n else:\n # first try to find existing model\n checkpoint_path = os.path.join(args.logdir, \"checkpoint\")\n if os.path.exists(checkpoint_path):\n session_init = get_model_loader(checkpoint_path)\n start_step = int(session_init.path.split(\"-\")[-1])\n start_epoch = start_step // stepnum\n logger.info(\n \"initializing from existing model, \" + session_init.path + \", starting from epoch \" + str(start_epoch))\n else:\n if args.load:\n session_init = get_model_loader(args.load)\n else:\n session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None\n\n max_epoch = min(cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum, cfg.TRAIN.MAX_NUM_EPOCHS)\n\n traincfg = TrainConfig(\n model=MODEL,\n data=QueueInput(train_dataflow),\n callbacks=callbacks,\n steps_per_epoch=stepnum,\n # max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,\n max_epoch=max_epoch,\n session_init=session_init,\n starting_epoch=start_epoch\n )\n if is_horovod:\n trainer = HorovodTrainer(average=False)\n else:\n # nccl mode appears faster than cpu mode\n trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')\n launch_train_with_config(traincfg, trainer)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.reshape",
"tensorflow.train.MomentumOptimizer",
"tensorflow.gather_nd",
"tensorflow.sigmoid",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.abs",
"tensorflow.concat",
"tensorflow.get_variable_scope",
"tensorflow.identity",
"tensorflow.nn.softmax",
"tensorflow.reduce_all",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.stack",
"tensorflow.add_n",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.boolean_mask",
"tensorflow.tile",
"tensorflow.size",
"tensorflow.placeholder",
"tensorflow.where",
"tensorflow.get_variable"
]
] |
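The training graph in the file above logs a "train_iou_at_1" summary by comparing the top-scoring detection against the first ground-truth box via pairwise_iou. For quick reference, here is a minimal NumPy re-derivation of pairwise IoU for [x1, y1, x2, y2] boxes; it is illustrative only and is not the examples.FasterRCNN.utils.box_ops implementation.

import numpy as np

def pairwise_iou(boxes_a, boxes_b):
    """Return an |A| x |B| matrix of IoU values between two box sets."""
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # Intersection corners, broadcast to shape (|A|, |B|, 2)
    top_left = np.maximum(boxes_a[:, None, :2], boxes_b[None, :, :2])
    bottom_right = np.minimum(boxes_a[:, None, 2:], boxes_b[None, :, 2:])
    wh = np.clip(bottom_right - top_left, 0.0, None)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

# Example: one ground-truth box against two detections.
gt = np.array([[0.0, 0.0, 10.0, 10.0]])
det = np.array([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
print(pairwise_iou(gt, det))  # [[1.0, ~0.143]]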
yxfish13/plan_enumerator | [
"e081b4c6eb3b373c4b8d97fdb88c5c4de9c77ba3"
] | [
"TreeLSTM.py"
] | [
"# Copyright 2018-2021 Xiang Yu(x-yu17(at)mails.tsinghua.edu.cn)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport torch\nfrom torch.nn import init\nimport torchfold\nimport torch.nn as nn\nfrom ImportantConfig import Config\n\nconfig = Config()\n\nclass TreeLSTM(nn.Module):\n def __init__(self, num_units):\n super(TreeLSTM, self).__init__()\n self.num_units = num_units\n self.FC1 = nn.Linear(num_units, 5 * num_units)\n self.FC2 = nn.Linear(num_units, 5 * num_units)\n self.FC0 = nn.Linear(num_units, 5 * num_units)\n self.LNh = nn.LayerNorm(num_units,elementwise_affine = False)\n self.LNc = nn.LayerNorm(num_units,elementwise_affine = False)\n def forward(self, left_in, right_in,inputX):\n lstm_in = self.FC1(left_in[0])\n lstm_in += self.FC2(right_in[0])\n lstm_in += self.FC0(inputX)\n a, i, f1, f2, o = lstm_in.chunk(5, 1)\n c = (a.tanh() * i.sigmoid() + f1.sigmoid() * left_in[1] +\n f2.sigmoid() * right_in[1])\n h = o.sigmoid() * c.tanh()\n h = self.LNh(h)\n return h,c\nclass TreeRoot(nn.Module):\n def __init__(self,num_units):\n super(TreeRoot, self).__init__()\n self.num_units = num_units\n self.FC = nn.Linear(num_units, num_units)\n if config.rootPool == 'meanPool':\n self.sum_pooling = nn.AdaptiveAvgPool2d((1,num_units))\n else:\n self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))\n \n # self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))\n # self.max_pooling = nn.AdaptiveAvgPool2d((1,num_units))\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n def forward(self, tree_list):\n\n return self.relu(self.FC(self.sum_pooling(tree_list)).view(-1,self.num_units))\n\nclass SPINN(nn.Module):\n\n def __init__(self, n_classes, size, n_words, mask_size,device,max_column_in_table = 15):\n super(SPINN, self).__init__()\n self.size = size\n self.tree_lstm = TreeLSTM(size)\n self.tree_root = TreeRoot(size)\n self.FC = nn.Linear(size*2, size)\n self.table_embeddings = nn.Embedding(n_words, size)#2 * max_column_in_table * size)\n self.column_embeddings = nn.Embedding(n_words, (1+2 * max_column_in_table) * size)\n self.out = nn.Linear(size*2, size)\n self.out2 = nn.Linear(size, n_classes)\n self.outFc = nn.Linear(mask_size, size)\n\n if config.rootPool == 'meanPool':\n self.max_pooling = nn.AdaptiveAvgPool2d((1,size))\n else:\n self.max_pooling = nn.AdaptiveMaxPool2d((1,size))\n self.max_pooling = nn.AdaptiveMaxPool2d((1,size))\n self.relu = nn.ReLU()\n self.sigmoid = nn.ReLU()\n self.leafFC = nn.Linear(size, size)\n self.sigmoid = nn.Sigmoid()\n self.LN1 = nn.LayerNorm(size,)\n self.LN2 = nn.LayerNorm(size,)\n self.max_column_in_table = max_column_in_table\n self.leafLn = nn.LayerNorm(size,elementwise_affine = False)\n self.device = device\n self.sigmoid = nn.Sigmoid()\n\n def leaf(self, word_id, table_fea=None):\n # print('tlstm_wi',word_id)\n all_columns = table_fea.view(-1,self.max_column_in_table*2+1,1)*self.column_embeddings(word_id).reshape(-1,2 * self.max_column_in_table+1,self.size)\n all_columns = self.relu(self.leafFC(all_columns))\n table_emb = 
self.max_pooling(all_columns.view(-1,self.max_column_in_table*2+1,self.size)).view(-1,self.size)\n return self.leafLn(table_emb), torch.zeros(word_id.size()[0], self.size,device = self.device,dtype = torch.float32)\n def inputX(self,left_emb,right_emb):\n cat_emb = torch.cat([left_emb,right_emb],dim = 1)\n return self.relu(self.FC(cat_emb))\n def childrenNode(self, left_h, left_c, right_h, right_c,inputX):\n return self.tree_lstm((left_h, left_c), (right_h, right_c),inputX)\n def root(self,tree_list):\n return self.tree_root(tree_list).view(-1,self.size)\n def logits(self, encoding,join_matrix,prt=False):\n encoding = self.root(encoding.view(1,-1,self.size))\n # if prt:\n # print(encoding)\n matrix = self.relu(self.outFc(join_matrix))\n # outencoding = torch.cat([encoding,encoding],dim = 1)\n outencoding = torch.cat([encoding,matrix],dim = 1)\n return self.out2(self.relu(self.out(outencoding)))"
] | [
[
"torch.nn.Linear",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.cat"
]
] |
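TreeLSTM.forward in the file above merges a left and a right child state with a five-way gate split. The standalone PyTorch sketch below reproduces just that gating arithmetic so it can be run in isolation; the class name BinaryTreeLSTMCell is hypothetical, and the repository's version additionally adds an input projection (FC0) and LayerNorm on h.

import torch
import torch.nn as nn

class BinaryTreeLSTMCell(nn.Module):
    def __init__(self, hidden):
        super().__init__()
        self.left = nn.Linear(hidden, 5 * hidden)
        self.right = nn.Linear(hidden, 5 * hidden)

    def forward(self, left, right):
        (h_l, c_l), (h_r, c_r) = left, right
        # One linear map per child, then split into candidate, input,
        # two forget gates (one per child) and output gate.
        a, i, f_l, f_r, o = (self.left(h_l) + self.right(h_r)).chunk(5, dim=1)
        c = a.tanh() * i.sigmoid() + f_l.sigmoid() * c_l + f_r.sigmoid() * c_r
        h = o.sigmoid() * c.tanh()
        return h, c

cell = BinaryTreeLSTMCell(8)
zeros = torch.zeros(2, 8)  # batch of 2, hidden size 8
h, c = cell((torch.randn(2, 8), zeros), (torch.randn(2, 8), zeros))
print(h.shape, c.shape)  # torch.Size([2, 8]) torch.Size([2, 8])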
ChristianOrr/Real-time-self-adaptive-deep-stereo | [
"29bbfb212ff7a62769d39f0fe15ecb2f408ac535"
] | [
"custom_models_functional.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom keras.engine import data_adapter\nfrom matplotlib import cm\n\n\ndef colorize_img(value, vmin=None, vmax=None, cmap='jet'):\n \"\"\"\n A utility function for TensorFlow that maps a grayscale image to a matplotlib colormap for use with TensorBoard image summaries.\n By default it will normalize the input value to the range 0..1 before mapping to a grayscale colormap.\n Arguments:\n - value: 4D Tensor of shape [batch_size,height, width,1]\n - vmin: the minimum value of the range used for normalization. (Default: value minimum)\n - vmax: the maximum value of the range used for normalization. (Default: value maximum)\n - cmap: a valid cmap named for use with matplotlib's 'get_cmap'.(Default: 'gray')\n \n Returns a 3D tensor of shape [batch_size,height, width,3].\n \"\"\"\n # Uncomment the code below if disparity isnt normalised already\n # # normalize\n # vmin = tf.reduce_min(value) if vmin is None else vmin\n # vmax = tf.reduce_max(value) if vmax is None else vmax\n # value = (value - vmin) / (vmax - vmin) # vmin..vmax\n\n # quantize\n indices = tf.cast(tf.round(value[:,:,:,0]*255), dtype=tf.int32)\n\n # gather\n color_map = cm.get_cmap(cmap)\n colors = color_map(np.arange(256))[:,:3]\n colors = tf.constant(colors, dtype=tf.float32)\n value = tf.gather(colors, indices)\n return value\n\n\n# https://github.com/philferriere/tfoptflow/blob/bdc7a72e78008d1cd6db46e4667dffc2bab1fe9e/tfoptflow/core_costvol.py\ndef StereoCostVolume(name=\"cost_volume\", search_range=2):\n \"\"\"Build cost volume for associating a pixel from the left image with its corresponding pixels in the right image.\n Args:\n c1: Level of the feature pyramid of the left image\n warp: Warped level of the feature pyramid of the right image\n search_range: Search range (maximum displacement)\n \"\"\"\n def _block(inputs):\n\n def internal_fn(inputs):\n c1, warp = inputs\n padded_lvl = tf.pad(warp, [[0, 0], [0, 0], [search_range, search_range], [0, 0]])\n width = c1.shape.as_list()[2]\n max_offset = search_range * 2 + 1\n\n cost_vol = []\n for i in range(0, max_offset):\n slice = padded_lvl[:, :, i:width+i, :]\n cost = tf.reduce_mean(c1 * slice, axis=3, keepdims=True)\n cost_vol.append(cost)\n\n cost_vol = tf.concat(cost_vol, axis=3)\n cost_curve = tf.concat([c1, cost_vol], axis=3)\n\n return cost_curve\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(inputs)\n #\n # cost_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # cost_model.shape = keras_output.shape\n #\n # return cost_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef BuildIndices(name=\"build_indices\", batch_size=1):\n \"\"\"\n Given a flow or disparity generate the coordinates\n of source pixels to sample from [batch, height_t, width_t, 2]\n Args:\n coords: Generic optical flow or disparity\n Returns:\n coordinates to sample from.\n\n \"\"\"\n def _block(coords):\n\n def internal_fn(coords):\n _, height, width, _ = coords.get_shape().as_list()\n\n pixel_coords = np.ones((1, height, width, 2), dtype=np.float32)\n batches_coords = np.ones((batch_size, height, width, 1), dtype=np.float32)\n\n for i in range(0, batch_size):\n batches_coords[i][:][:][:] = i\n # build pixel coordinates and their disparity\n for i in range(0, height):\n for j in range(0, width):\n pixel_coords[0][i][j][0] = j\n pixel_coords[0][i][j][1] = i\n\n pixel_coords = tf.constant(pixel_coords, tf.float32)\n output = 
tf.concat([batches_coords, pixel_coords + coords], -1)\n return output\n\n # keras_inputs = tf.keras.layers.Input(shape=coords.shape[1:])\n # keras_output = internal_fn(keras_inputs)\n #\n # indices_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # indices_model.shape = keras_output.shape\n #\n # return indices_model(coords)\n return internal_fn(coords)\n\n return _block\n\n\ndef Warp(name=\"warp\"):\n \"\"\"\n Construct a new image by bilinear sampling from the input image.\n The right image is warpt into the lefts position.\n Points falling outside the source image boundary have value 0.\n Args:\n imgs: source right images to be sampled from [batch, height_s, width_s, channels]\n coords: coordinates of source pixels to sample from [batch, height_t, width_t, 2]. \n height_t/width_t correspond to the dimensions of the outputimage (don't need to be the same as height_s/width_s). \n The two channels correspond to x and y coordinates respectively.\n Returns:\n A new sampled image [batch, height_t, width_t, channels],\n which ideally is very similar to the left image\n \"\"\"\n\n def _block(inputs):\n def internal_fn(inputs):\n imgs, coords = inputs\n coord_b, coords_x, coords_y = tf.split(coords, [1, 1, 1], axis=3)\n\n coords_x = tf.cast(coords_x, 'float32')\n coords_y = tf.cast(coords_y, 'float32')\n\n x0 = tf.floor(coords_x)\n x1 = x0 + 1\n y0 = tf.floor(coords_y)\n\n y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')\n x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')\n zero = tf.zeros([1],dtype=tf.float32)\n\n x0_safe = tf.clip_by_value(x0, zero[0], x_max)\n y0_safe = tf.clip_by_value(y0, zero[0], y_max)\n x1_safe = tf.clip_by_value(x1, zero[0], x_max)\n\n # bilinear interp weights, with points outside the grid having weight 0\n wt_x0 = (x1 - coords_x) * tf.cast(tf.equal(x0, x0_safe), 'float32')\n wt_x1 = (coords_x - x0) * tf.cast(tf.equal(x1, x1_safe), 'float32')\n\n\n im00 = tf.cast(tf.gather_nd(imgs, tf.cast(\n tf.concat([coord_b, y0_safe, x0_safe], -1), 'int32')), 'float32')\n im01 = tf.cast(tf.gather_nd(imgs, tf.cast(\n tf.concat([coord_b, y0_safe, x1_safe], -1), 'int32')), 'float32')\n\n output = tf.add_n([\n wt_x0 * im00, wt_x1 * im01\n ])\n return output\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(keras_inputs)\n #\n # warp_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # warp_model.shape = keras_output.shape\n #\n # return warp_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef StereoContextNetwork(name=\"residual_refinement_network\", batch_size=1, output_height=320, output_width=1216):\n \"\"\"\n Final Layer in MADNet.\n Calculates the reprojection loss if training=True.\n Args:\n input: left_F2 tensor\n disp: D2 disparity from M2 module\n final_left: full resolution RGB left image\n final_right: full resolution RGB right image\n Returns:\n Full resolution disparity in float32 normalized 0-1\n \"\"\"\n act = tf.keras.layers.Activation(tf.nn.leaky_relu)\n context1 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=act, use_bias=True, name=\"context1\")\n context2 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=2, padding=\"same\", activation=act, use_bias=True, name=\"context2\")\n context3 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=4, padding=\"same\", activation=act, use_bias=True, name=\"context3\")\n context4 = 
tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), dilation_rate=8, padding=\"same\", activation=act, use_bias=True, name=\"context4\")\n context5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), dilation_rate=16, padding=\"same\", activation=act, use_bias=True, name=\"context5\")\n context6 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=act, use_bias=True, name=\"context6\")\n context7 = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=\"linear\", use_bias=True, name=\"context7\")\n add = tf.keras.layers.Add(name=\"context_disp\")\n concat = tf.keras.layers.Concatenate(axis=-1)\n\n def _block(inputs):\n def internal_fn(inputs):\n input, disp = inputs\n #volume = concat([input, disp])\n volume = tf.keras.layers.concatenate([input, disp], axis=-1)\n\n x = context1(volume)\n x = context2(x)\n x = context3(x)\n x = context4(x)\n x = context5(x)\n x = context6(x)\n x = context7(x)\n\n context_disp = add([disp, x])\n final_disparity = tf.keras.layers.Resizing(name=\"final_disparity\", height=output_height, width=output_width, interpolation='bilinear')(context_disp)\n\n return final_disparity\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(keras_inputs)\n #\n # refinement_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # refinement_model.shape = keras_output.shape\n # return refinement_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef StereoEstimator(name=\"volume_filtering\"):\n \"\"\"\n This is the stereo estimation network at resolution n.\n It uses the costs (from the pixel difference between the warped right image \n and the left image) combined with the upsampled disparity from the previous\n layer (when the layer is not the last layer).\n\n The output is predicted disparity for the network at resolution n.\n \"\"\"\n act = tf.keras.layers.Activation(tf.nn.leaky_relu)\n disp1 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp1\")\n disp2 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp2\")\n disp3 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp3\")\n disp4 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp4\")\n disp5 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp5\")\n disp6 = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), strides=1, padding=\"same\", activation=\"linear\", use_bias=True, name=\"disp6\")\n concat = tf.keras.layers.Concatenate(axis=-1)\n\n def _block(inputs):\n def internal_fn(inputs):\n if type(inputs) is list:\n costs, upsampled_disp = inputs\n # volume = concat([costs, upsampled_disp])\n volume = tf.keras.layers.concatenate([costs, upsampled_disp], axis=-1)\n else:\n volume = inputs\n\n x = disp1(volume)\n x = disp2(x)\n x = disp3(x)\n x = disp4(x)\n x = disp5(x)\n x = disp6(x)\n return x\n\n # if type(inputs) is list:\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # else:\n # keras_inputs = tf.keras.layers.Input(shape=inputs.shape[1:])\n #\n # keras_output = internal_fn(keras_inputs)\n #\n # 
estimator_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # estimator_model.shape = keras_output.shape\n # return estimator_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\ndef ModuleM(name, layer, search_range=2, batch_size=1):\n \"\"\"\n Module MX is a sub-module of MADNet, which can be trained individually for \n online adaptation using the MAD (Modular ADaptaion) method.\n \"\"\"\n cost_volume = StereoCostVolume(name=f\"cost_{layer}\", search_range=search_range)\n stereo_estimator = StereoEstimator(name=f\"volume_filtering_{layer}\")\n build_indices = BuildIndices(name=f\"build_indices_{layer}\", batch_size=batch_size)\n warp = Warp(name=f\"warp_{layer}\")\n\n def _block(inputs):\n def internal_fn(inputs):\n # Check if layer is the bottom of the pyramid\n if len(inputs) == 3:\n left, right, prev_disp = inputs\n # Upsample disparity from previous layer\n upsampled_disp = tf.keras.layers.Resizing(name=f\"upsampled_disp_{layer}\", height=height, width=width, interpolation='bilinear')(prev_disp)\n coords = tf.keras.layers.concatenate([upsampled_disp, tf.zeros_like(upsampled_disp)], -1)\n indices = build_indices(coords)\n # Warp the right image into the left using upsampled disparity\n warped_left = warp([right, indices])\n else:\n left, right = inputs\n # No previous disparity exits, so use right image instead of warped left\n warped_left = right\n\n costs = cost_volume([left, warped_left])\n\n # Get the disparity using cost volume between left and warped left images\n if len(inputs) == 3:\n module_disparity = stereo_estimator([costs, prev_disp])\n else:\n module_disparity = stereo_estimator(costs)\n\n return module_disparity\n\n # if len(inputs) == 3:\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # else:\n # keras_inputs = [tf.keras.layers.Input(shape=inputs[0].shape[1:]), tf.keras.layers.Input(shape=inputs[1].shape[1:])]\n # keras_output = internal_fn(keras_inputs)\n #\n # module_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # module_model.shape = keras_output.shape\n # return module_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\n\n\nheight = 320\nwidth = 1216\nsearch_range = 2\nbatch_size = 1\n\n# Initializing the layers\nact = tf.keras.layers.Activation(tf.nn.leaky_relu)\n# Left image feature pyramid (feature extractor)\n# F1\nleft_conv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv1\", \ninput_shape=(height, width, 3, ))\nleft_conv2 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv2\")\n# F2\nleft_conv3 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv3\")\nleft_conv4 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv4\")\n# F3\nleft_conv5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv5\")\nleft_conv6 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv6\")\n# F4\nleft_conv7 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv7\")\nleft_conv8 = 
tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv8\")\n# F5\nleft_conv9 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv9\")\nleft_conv10 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv10\")\n# F6\nleft_conv11 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv11\")\nleft_conv12 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv12\") \n# Right image feature pyramid (feature extractor)\n# F1\nright_conv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv1\", \ninput_shape=(height, width, 3, ))\nright_conv2 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv2\")\n# F2\nright_conv3 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv3\")\nright_conv4 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv4\")\n# F3\nright_conv5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv5\")\nright_conv6 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv6\")\n# F4\nright_conv7 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv7\")\nright_conv8 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv8\")\n# F5\nright_conv9 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv9\")\nright_conv10 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv10\")\n# F6\nright_conv11 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv11\")\nright_conv12 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv12\")\n\n#############################SCALE 6#################################\nM6 = ModuleM(name=\"M6\", layer=\"6\", search_range=search_range, batch_size=batch_size)\n############################SCALE 5###################################\nM5 = ModuleM(name=\"M5\", layer=\"5\", search_range=search_range, batch_size=batch_size)\n############################SCALE 4###################################\nM4 = ModuleM(name=\"M4\", layer=\"4\", search_range=search_range, batch_size=batch_size)\n############################SCALE 3###################################\nM3 = ModuleM(name=\"M3\", layer=\"3\", search_range=search_range, batch_size=batch_size)\n############################SCALE 2###################################\nM2 = ModuleM(name=\"M2\", layer=\"2\", search_range=search_range, 
batch_size=batch_size)\n############################REFINEMENT################################\nrefinement_module = StereoContextNetwork(batch_size=batch_size, output_height=height, output_width=width)\n\n\n# Build the model\n# Left and right image inputs\nleft_input = tf.keras.layers.Input(shape=[height, width, 3])\nright_input = tf.keras.layers.Input(shape=[height, width, 3])\n\n#######################PYRAMID FEATURES###############################\n# Left image feature pyramid (feature extractor)\n# F1\nleft_pyramid = left_conv1(left_input)\nleft_F1 = left_conv2(left_pyramid)\n# F2\nleft_pyramid = left_conv3(left_F1)\nleft_F2 = left_conv4(left_pyramid)\n# F3\nleft_pyramid = left_conv5(left_F2)\nleft_F3 = left_conv6(left_pyramid)\n# F4\nleft_pyramid = left_conv7(left_F3)\nleft_F4 = left_conv8(left_pyramid)\n# F5\nleft_pyramid = left_conv9(left_F4)\nleft_F5 = left_conv10(left_pyramid)\n# F6\nleft_pyramid = left_conv11(left_F5)\nleft_F6 = left_conv12(left_pyramid)\n\n# Right image feature pyramid (feature extractor)\n# F1\nright_pyramid = right_conv1(right_input)\nright_F1 = right_conv2(right_pyramid)\n# F2\nright_pyramid = right_conv3(right_F1)\nright_F2 = right_conv4(right_pyramid)\n# F3\nright_pyramid = right_conv5(right_F2)\nright_F3 = right_conv6(right_pyramid)\n# F4\nright_pyramid = right_conv7(right_F3)\nright_F4 = right_conv8(right_pyramid)\n# F5\nright_pyramid = right_conv9(right_F4)\nright_F5 = right_conv10(right_pyramid)\n# F6\nright_pyramid = right_conv11(right_F5)\nright_F6 = right_conv12(right_pyramid)\n\n\n#############################SCALE 6#################################\nD6 = M6([left_F6, right_F6])\n############################SCALE 5###################################\nD5 = M5([left_F5, right_F5, D6])\n############################SCALE 4###################################\nD4 = M4([left_F4, right_F4, D5])\n############################SCALE 3###################################\nD3 = M3([left_F3, right_F3, D4])\n############################SCALE 2###################################\nD2 = M2([left_F2, right_F2, D3])\n############################REFINEMENT################################\nfinal_disparity = refinement_module([left_F2, D2])\n\n\nmodel = tf.keras.Model(inputs={\"left_input\": left_input, \"right_input\": right_input}, outputs=final_disparity, name=\"MADNet\")\nmodel.summary()\n"
] | [
[
"tensorflow.keras.layers.Resizing",
"numpy.ones",
"tensorflow.keras.layers.Concatenate",
"tensorflow.round",
"matplotlib.cm.get_cmap",
"tensorflow.keras.layers.Add",
"tensorflow.concat",
"tensorflow.keras.layers.Conv2D",
"tensorflow.split",
"tensorflow.keras.layers.Activation",
"tensorflow.clip_by_value",
"tensorflow.constant",
"tensorflow.add_n",
"tensorflow.shape",
"tensorflow.keras.layers.concatenate",
"tensorflow.zeros_like",
"numpy.arange",
"tensorflow.cast",
"tensorflow.floor",
"tensorflow.pad",
"tensorflow.zeros",
"tensorflow.equal",
"tensorflow.reduce_mean",
"tensorflow.keras.Model",
"tensorflow.gather",
"tensorflow.keras.layers.Input"
]
] |
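StereoCostVolume in the file above correlates left features with horizontally shifted right features over a fixed search range. Below is a minimal TensorFlow sketch of that 1-D correlation cost volume; the function name stereo_cost_volume and the toy shapes are illustrative assumptions, not part of the repository.

import tensorflow as tf

def stereo_cost_volume(left, right, search_range=2):
    # Pad the right features along the width axis, then correlate the left
    # features with each horizontal shift and average over channels.
    padded = tf.pad(right, [[0, 0], [0, 0], [search_range, search_range], [0, 0]])
    width = left.shape[2]
    costs = []
    for offset in range(2 * search_range + 1):
        shifted = padded[:, :, offset:width + offset, :]
        costs.append(tf.reduce_mean(left * shifted, axis=3, keepdims=True))
    # Concatenate the per-offset costs onto the left features (as the file does).
    return tf.concat([left, tf.concat(costs, axis=3)], axis=3)

left = tf.random.normal([1, 8, 16, 32])
right = tf.random.normal([1, 8, 16, 32])
print(stereo_cost_volume(left, right).shape)  # (1, 8, 16, 37)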
antonevenepoel/open_spiel | [
"f2f0c786410018675fc40e9a5b82c40814555fa8"
] | [
".nox/tests/lib/python3.7/site-packages/nashpy/polytope/polytope.py"
] | [
"\"\"\"A class for a normal form game\"\"\"\nfrom itertools import product\n\nimport numpy as np\nfrom scipy.optimize import linprog\nfrom scipy.spatial import HalfspaceIntersection\n\n\ndef build_halfspaces(M):\n \"\"\"\n Build a matrix representation for a halfspace corresponding to:\n\n Mx <= 1 and x >= 0\n\n This is of the form:\n\n [M: -1]\n [-1: 0]\n\n As specified in\n https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html\n\n Parameters\n ----------\n\n M: a numpy array\n\n Returns:\n --------\n\n Numpy array\n \"\"\"\n number_of_strategies, dimension = M.shape\n b = np.append(-np.ones(number_of_strategies), np.zeros(dimension))\n M = np.append(M, -np.eye(dimension), axis=0)\n halfspaces = np.column_stack((M, b.transpose()))\n return halfspaces\n\n\ndef find_feasible_point(halfspaces):\n \"\"\"\n Use linear programming to find a point inside the halfspaces (needed to\n define it).\n\n Code taken from scipy documentation:\n https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html\n\n Parameters\n ----------\n\n halfspaces: a matrix representation of halfspaces\n\n Returns:\n --------\n\n numpy array\n \"\"\"\n norm_vector = np.reshape(\n np.linalg.norm(halfspaces[:, :-1], axis=1), (halfspaces.shape[0], 1)\n )\n c = np.zeros((halfspaces.shape[1],))\n c[-1] = -1\n A = np.hstack((halfspaces[:, :-1], norm_vector))\n b = -halfspaces[:, -1:]\n res = linprog(c, A_ub=A, b_ub=b)\n return res.x[:-1]\n\n\ndef labels(vertex, halfspaces):\n \"\"\"\n Return the labels of the facets on which lie a given vertex. This is\n calculated by carrying out the matrix multiplictation.\n\n Parameters\n ----------\n\n vertex: a numpy array\n halfspaces: a numpy array\n\n Returns\n -------\n\n set\n \"\"\"\n b = halfspaces[:, -1]\n M = halfspaces[:, :-1]\n return set(np.where(np.isclose(np.dot(M, vertex), -b))[0])\n\n\ndef non_trivial_vertices(halfspaces):\n \"\"\"\n Returns all vertex, label pairs (ignoring the origin).\n\n Parameters:\n\n halfspaces: a numpy array\n\n Returns:\n\n generator\n \"\"\"\n feasible_point = find_feasible_point(halfspaces)\n hs = HalfspaceIntersection(halfspaces, feasible_point)\n hs.close()\n return ((v, labels(v, halfspaces)) for v in hs.intersections if max(v) > 0)\n"
] | [
[
"numpy.ones",
"numpy.eye",
"numpy.zeros",
"scipy.spatial.HalfspaceIntersection",
"scipy.optimize.linprog",
"numpy.hstack",
"numpy.dot",
"numpy.linalg.norm"
]
] |
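build_halfspaces and find_feasible_point in the file above encode the best-response polytope {x : Mx <= 1, x >= 0} in SciPy's [A | b] convention (rows satisfy A x + b <= 0) and locate an interior point with a Chebyshev-centre style LP before enumerating vertices. A small self-contained sketch of the same construction for a toy 2x2 payoff matrix, assuming SciPy is available:

import numpy as np
from scipy.optimize import linprog
from scipy.spatial import HalfspaceIntersection

M = np.array([[3.0, 1.0], [1.0, 3.0]])
n_strategies, dim = M.shape
A = np.vstack([M, -np.eye(dim)])                       # Mx <= 1 and -x <= 0
b = np.concatenate([-np.ones(n_strategies), np.zeros(dim)])
halfspaces = np.column_stack([A, b])                   # rows are [A_i | b_i]

# Chebyshev-centre LP (same trick as find_feasible_point): maximise the radius
# of a ball that fits inside every halfspace to obtain a strictly interior point.
norms = np.linalg.norm(A, axis=1, keepdims=True)
res = linprog(c=np.r_[np.zeros(dim), -1.0], A_ub=np.hstack([A, norms]), b_ub=-b)
interior = res.x[:-1]

vertices = HalfspaceIntersection(halfspaces, interior).intersections
print(np.round(vertices, 3))  # origin plus the non-trivial polytope vertices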
amspector100/knockpy | [
"c4980ebd506c110473babd85836dbd8ae1d548b7"
] | [
"knockpy/kpytorch/deeppink.py"
] | [
"import warnings\nimport numpy as np\nimport scipy as sp\nfrom scipy import stats\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .. import utilities\n\n\ndef create_batches(features, y, batchsize):\n\n # Create random indices to reorder datapoints\n n = features.shape[0]\n p = features.shape[1]\n inds = torch.randperm(n)\n\n # Iterate through and create batches\n i = 0\n batches = []\n while i < n:\n batches.append([features[inds][i : i + batchsize], y[inds][i : i + batchsize]])\n i += batchsize\n return batches\n\n\nclass DeepPinkModel(nn.Module):\n def __init__(self, p, hidden_sizes=[64], y_dist=\"gaussian\", normalize_Z=True):\n \"\"\"\n Adapted from https://arxiv.org/pdf/1809.01185.pdf.\n\n The module has two components:\n 1. A sparse linear layer with dimension 2*p to p.\n However, there are only 2*p weights (each feature\n and knockoff points only to their own unique node).\n This is (maybe?) followed by a ReLU activation.\n 2. A multilayer perceptron (MLP)\n\n Parameters\n ----------\n p : int\n The dimensionality of the data\n hidden_sizes: list\n A list of hidden sizes for the mlp layer(s). \n Defaults to [64].\n normalize_Z : bool\n If True, the first sparse linear layer is normalized\n so the weights for each feature/knockoff pair have an\n l1 norm of 1. This can modestly improve power in some\n settings.\n \"\"\"\n\n super().__init__()\n\n # Initialize weight for first layer\n self.p = p\n self.y_dist = y_dist\n self.Z_weight = nn.Parameter(torch.ones(2 * p))\n self.norm_Z_weight = normalize_Z\n\n # Save indices/reverse indices to prevent violations of FDR control\n self.inds, self.rev_inds = utilities.random_permutation_inds(2 * p)\n self.feature_inds = self.rev_inds[0:self.p]\n self.ko_inds = self.rev_inds[self.p:]\n\n # Create MLP layers\n mlp_layers = [nn.Linear(p, hidden_sizes[0])]\n for i in range(len(hidden_sizes) - 1):\n mlp_layers.append(nn.ReLU())\n mlp_layers.append(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))\n # Prepare for either MSE loss or cross entropy loss\n mlp_layers.append(nn.ReLU())\n if y_dist == \"gaussian\":\n mlp_layers.append(nn.Linear(hidden_sizes[-1], 1))\n else:\n mlp_layers.append(nn.Linear(hidden_sizes[-1], 2))\n\n # Then create MLP\n self.mlp = nn.Sequential(*mlp_layers)\n\n def _fetch_Z_weight(self):\n\n # Possibly don't normalize\n if not self.norm_Z_weight:\n return self.Z_weight\n\n # Else normalize, first construct denominator \n normalizer = torch.abs(self.Z_weight[self.feature_inds]) + torch.abs(\n self.Z_weight[self.ko_inds]\n )\n # Normalize\n Z = torch.abs(self.Z_weight[self.feature_inds]) / normalizer\n Ztilde = torch.abs(self.Z_weight[self.ko_inds]) / normalizer\n # Concatenate and reshuffle\n return torch.cat([Z, Ztilde], dim=0)[self.inds]\n\n def forward(self, features):\n \"\"\"\n Note: features are now shuffled\n \"\"\"\n\n # First layer: pairwise weights (and sum)\n if not isinstance(features, torch.Tensor):\n features = torch.tensor(features).float()\n features = features[:, self.inds] # shuffle features to prevent FDR violations\n features = self._fetch_Z_weight().unsqueeze(dim=0) * features\n features = features[:, self.feature_inds] - features[:, self.ko_inds]\n\n # Apply MLP\n return self.mlp(features)\n\n def predict(self, features):\n \"\"\"\n Wraps forward method, for compatibility\n with sklearn classes.\n \"\"\"\n with torch.no_grad():\n return self.forward(features).numpy()\n\n def l1norm(self):\n out = 0\n for parameter in self.mlp.parameters():\n out += torch.abs(parameter).sum()\n 
out += torch.abs(self.Z_weight).sum() # This is just for stability\n return out\n\n def l2norm(self):\n out = 0\n for parameter in self.mlp.parameters():\n out += (parameter ** 2).sum()\n out += (self.Z_weight ** 2).sum()\n return out\n\n def feature_importances(self, weight_scores=True):\n\n with torch.no_grad():\n # Calculate weights from MLP\n if weight_scores:\n layers = list(self.mlp.named_children())\n W = layers[0][1].weight.detach().numpy().T\n for layer in layers[1:]:\n if isinstance(layer[1], nn.ReLU):\n continue\n weight = layer[1].weight.detach().numpy().T\n W = np.dot(W, weight)\n W = W.squeeze(-1)\n else:\n W = np.ones(self.p)\n\n # Multiply by Z weights\n Z = self._fetch_Z_weight().numpy()\n feature_imp = Z[self.feature_inds] * W\n knockoff_imp = Z[self.ko_inds] * W\n return np.concatenate([feature_imp, knockoff_imp])\n\n\ndef train_deeppink(\n model,\n features,\n y,\n batchsize=100,\n num_epochs=50,\n lambda1=None,\n lambda2=None,\n verbose=True,\n **kwargs,\n):\n\n # Infer n, p, set default lambda1, lambda2\n n = features.shape[0]\n p = int(features.shape[1] / 2)\n if lambda1 is None:\n lambda1 = 10 * np.sqrt(np.log(p) / n)\n if lambda2 is None:\n lambda2 = 0\n\n # Batchsize can't be bigger than n\n batchsize = min(features.shape[0], batchsize)\n\n # Create criterion\n features, y = map(lambda x: torch.tensor(x).detach().float(), (features, y))\n if model.y_dist == \"gaussian\":\n criterion = nn.MSELoss(reduction=\"sum\")\n else:\n criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n y = y.long()\n\n # Create optimizer\n opt = torch.optim.Adam(model.parameters(), **kwargs)\n\n # Loop through epochs\n for j in range(num_epochs):\n\n # Create batches, loop through\n batches = create_batches(features, y, batchsize=batchsize)\n predictive_loss = 0\n for Xbatch, ybatch in batches:\n\n # Forward pass and loss\n output = model(Xbatch)\n loss = criterion(output, ybatch.unsqueeze(-1))\n predictive_loss += loss\n\n # Add l1 and l2 regularization\n loss += lambda1 * model.l1norm()\n loss += lambda2 * model.l2norm()\n\n # Step\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if verbose and j % 10 == 0:\n print(f\"At epoch {j}, mean loss is {predictive_loss / n}\")\n\n return model\n"
] | [
[
"numpy.ones",
"torch.ones",
"torch.nn.Linear",
"torch.nn.MSELoss",
"numpy.dot",
"torch.no_grad",
"torch.tensor",
"torch.nn.CrossEntropyLoss",
"numpy.log",
"torch.abs",
"torch.nn.Sequential",
"torch.randperm",
"numpy.concatenate",
"torch.nn.ReLU",
"torch.cat"
]
] |
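DeepPinkModel in the file above routes each feature/knockoff pair through a single pair of scalar weights before the MLP, so that feature importances can later be compared against knockoff importances. The sketch below shows only that pairwise first layer; PairwiseLayer is a hypothetical name, and it omits the index permutation and Z-weight normalization the real model uses.

import torch
import torch.nn as nn

class PairwiseLayer(nn.Module):
    def __init__(self, p):
        super().__init__()
        self.z = nn.Parameter(torch.ones(p))        # weights for original features
        self.z_tilde = nn.Parameter(torch.ones(p))  # weights for knockoffs

    def forward(self, x):
        # x has shape (n, 2p): first p columns are features, last p are knockoffs.
        p = self.z.shape[0]
        return self.z * x[:, :p] - self.z_tilde * x[:, p:]

p = 4
model = nn.Sequential(PairwiseLayer(p), nn.Linear(p, 16), nn.ReLU(), nn.Linear(16, 1))
out = model(torch.randn(8, 2 * p))
print(out.shape)  # torch.Size([8, 1])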
netoaraujjo/hal | [
"0cd66d5548659c4dde70381ad21ba5b9d8213365"
] | [
"clustering/agglomerative_clustering.py"
] | [
"#-*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering\nfrom sklearn.externals.joblib import Memory\nfrom .clustering import Clustering\n\nclass AgglomerativeClustering(Clustering):\n \"\"\"docstring for AgglomerativeClustering.\"\"\"\n def __init__(self, data, n_clusters = 2, affinity = 'euclidean',\n memory = Memory(cachedir = None), connectivity = None,\n compute_full_tree = 'auto', linkage = 'ward',\n pooling_func = np.mean):\n super(AgglomerativeClustering, self).__init__()\n self.data = data\n self.n_clusters = n_clusters\n self.affinity = affinity\n self.memory = memory\n self.connectivity = connectivity\n self.compute_full_tree = compute_full_tree\n self.linkage = linkage\n self.pooling_func = pooling_func\n\n\n\n def execute(self):\n \"\"\"Constroi o modelo de clusterizacao.\"\"\"\n self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,\n affinity = self.affinity,\n memory = self.memory,\n connectivity = self.connectivity,\n compute_full_tree = self.compute_full_tree,\n linkage = self.linkage,\n pooling_func = self.pooling_func).fit(self.data)\n\n self.clusters = super().make_clusters(self.data, self.model.labels_)\n\n\n @property\n def labels_(self):\n \"\"\"Retorna os labels dos elementos do dataset.\"\"\"\n return self.model.labels_\n\n\n @property\n def clusters_(self):\n \"\"\"Retorna um dicionaro onde os indices dos grupos sao as chaves.\"\"\"\n return self.clusters\n\n\n @property\n def model_(self):\n \"\"\"Retorna o modelo de agrupamento.\"\"\"\n return self.model\n"
] | [
[
"sklearn.externals.joblib.Memory",
"sklearn.cluster.AgglomerativeClustering"
]
] |
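The wrapper in the file above ultimately calls scikit-learn's AgglomerativeClustering and then groups sample indices by label. A minimal usage sketch follows; note that the memory, affinity and pooling_func arguments the wrapper forwards have been deprecated or removed in recent scikit-learn releases, so the sketch sticks to n_clusters and linkage.

import numpy as np
from sklearn.cluster import AgglomerativeClustering

data = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
model = AgglomerativeClustering(n_clusters=2, linkage="ward").fit(data)

# Group sample indices by cluster label, mirroring what make_clusters produces.
clusters = {}
for index, label in enumerate(model.labels_):
    clusters.setdefault(int(label), []).append(index)
print(model.labels_, clusters)  # e.g. [0 0 1 1] and {0: [0, 1], 1: [2, 3]}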
renier/qiskit-terra | [
"1f5e4c8f6768dfac5d68f39e9d38fdd783ba1346"
] | [
"qiskit/quantum_info/states/statevector.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nStatevector quantum state class.\n\"\"\"\n\nimport copy\nimport re\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.states.quantum_state import QuantumState\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\n\n\nclass Statevector(QuantumState):\n \"\"\"Statevector class\"\"\"\n\n def __init__(self, data, dims=None):\n \"\"\"Initialize a statevector object.\n\n Args:\n data (vector_like): a complex statevector.\n dims (int or tuple or list): Optional. The subsystem dimension of\n the state (See additional information).\n\n Raises:\n QiskitError: if input data is not valid.\n\n Additional Information:\n The ``dims`` kwarg can be None, an integer, or an iterable of\n integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` or ``None`` -- the length of the input vector\n specifies the total dimension of the density matrix. If it is a\n power of two the state will be initialized as an N-qubit state.\n If it is not a power of two the state will have a single\n d-dimensional subsystem.\n \"\"\"\n if isinstance(data, (list, np.ndarray)):\n # Finally we check if the input is a raw vector in either a\n # python list or numpy array format.\n self._data = np.asarray(data, dtype=complex)\n elif isinstance(data, Statevector):\n self._data = data._data\n if dims is None:\n dims = data._dims\n elif isinstance(data, Operator):\n # We allow conversion of column-vector operators to Statevectors\n input_dim, _ = data.dim\n if input_dim != 1:\n raise QiskitError(\"Input Operator is not a column-vector.\")\n self._data = np.ravel(data.data)\n else:\n raise QiskitError(\"Invalid input data format for Statevector\")\n # Check that the input is a numpy vector or column-vector numpy\n # matrix. 
If it is a column-vector matrix reshape to a vector.\n ndim = self._data.ndim\n shape = self._data.shape\n if ndim != 1:\n if ndim == 2 and shape[1] == 1:\n self._data = np.reshape(self._data, shape[0])\n elif ndim != 2 or shape[1] != 1:\n raise QiskitError(\"Invalid input: not a vector or column-vector.\")\n super().__init__(self._automatic_dims(dims, shape[0]))\n\n def __eq__(self, other):\n return super().__eq__(other) and np.allclose(\n self._data, other._data, rtol=self.rtol, atol=self.atol)\n\n def __repr__(self):\n prefix = 'Statevector('\n pad = len(prefix) * ' '\n return '{}{},\\n{}dims={})'.format(\n prefix, np.array2string(\n self.data, separator=', ', prefix=prefix),\n pad, self._dims)\n\n @property\n def data(self):\n \"\"\"Return data.\"\"\"\n return self._data\n\n def is_valid(self, atol=None, rtol=None):\n \"\"\"Return True if a Statevector has norm 1.\"\"\"\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n norm = np.linalg.norm(self.data)\n return np.allclose(norm, 1, rtol=rtol, atol=atol)\n\n def to_operator(self):\n \"\"\"Convert state to a rank-1 projector operator\"\"\"\n mat = np.outer(self.data, np.conj(self.data))\n return Operator(mat, input_dims=self.dims(), output_dims=self.dims())\n\n def conjugate(self):\n \"\"\"Return the conjugate of the operator.\"\"\"\n return Statevector(np.conj(self.data), dims=self.dims())\n\n def trace(self):\n \"\"\"Return the trace of the quantum state as a density matrix.\"\"\"\n return np.sum(np.abs(self.data) ** 2)\n\n def purity(self):\n \"\"\"Return the purity of the quantum state.\"\"\"\n # For a valid statevector the purity is always 1, however if we simply\n # have an arbitrary vector (not correctly normalized) then the\n # purity is equivalent to the trace squared:\n # P(|psi>) = Tr[|psi><psi|psi><psi|] = |<psi|psi>|^2\n return self.trace() ** 2\n\n def tensor(self, other):\n \"\"\"Return the tensor product state self ⊗ other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product operator self ⊗ other.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n dims = other.dims() + self.dims()\n data = np.kron(self._data, other._data)\n return Statevector(data, dims)\n\n def expand(self, other):\n \"\"\"Return the tensor product state other ⊗ self.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product state other ⊗ self.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n dims = self.dims() + other.dims()\n data = np.kron(other._data, self._data)\n return Statevector(data, dims)\n\n def _add(self, other):\n \"\"\"Return the linear combination self + other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the linear combination self + other.\n\n Raises:\n QiskitError: if other is not a quantum state, or has\n incompatible dimensions.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n if self.dim != other.dim:\n raise QiskitError(\"other Statevector has different dimensions.\")\n return Statevector(self.data + other.data, self.dims())\n\n def _multiply(self, other):\n \"\"\"Return the scalar multiplied state self * other.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n Statevector: the scalar multiplied state other * self.\n\n Raises:\n QiskitError: if 
other is not a valid complex number.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n return Statevector(other * self.data, self.dims())\n\n def evolve(self, other, qargs=None):\n \"\"\"Evolve a quantum state by the operator.\n\n Args:\n other (Operator): The operator to evolve by.\n qargs (list): a list of Statevector subsystem positions to apply\n the operator on.\n\n Returns:\n Statevector: the output quantum state.\n\n Raises:\n QiskitError: if the operator dimension does not match the\n specified Statevector subsystem dimensions.\n \"\"\"\n if qargs is None:\n qargs = getattr(other, 'qargs', None)\n\n # Get return vector\n ret = copy.copy(self)\n\n # Evolution by a circuit or instruction\n if isinstance(other, QuantumCircuit):\n other = other.to_instruction()\n if isinstance(other, Instruction):\n if self.num_qubits is None:\n raise QiskitError(\"Cannot apply QuantumCircuit to non-qubit Statevector.\")\n return self._evolve_instruction(ret, other, qargs=qargs)\n\n # Evolution by an Operator\n if not isinstance(other, Operator):\n other = Operator(other)\n\n # check dimension\n if self.dims(qargs) != other.input_dims():\n raise QiskitError(\n \"Operator input dimensions are not equal to statevector subsystem dimensions.\"\n )\n return Statevector._evolve_operator(ret, other, qargs=qargs)\n\n def equiv(self, other, rtol=None, atol=None):\n \"\"\"Return True if statevectors are equivalent up to global phase.\n\n Args:\n other (Statevector): a statevector object.\n rtol (float): relative tolerance value for comparison.\n atol (float): absolute tolerance value for comparison.\n\n Returns:\n bool: True if statevectors are equivalent up to global phase.\n \"\"\"\n if not isinstance(other, Statevector):\n try:\n other = Statevector(other)\n except QiskitError:\n return False\n if self.dim != other.dim:\n return False\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n return matrix_equal(self.data, other.data, ignore_phase=True,\n rtol=rtol, atol=atol)\n\n def expectation_value(self, oper, qargs=None):\n \"\"\"Compute the expectation value of an operator.\n\n Args:\n oper (Operator): an operator to evaluate expval of.\n qargs (None or list): subsystems to apply operator on.\n\n Returns:\n complex: the expectation value.\n \"\"\"\n val = self.evolve(oper, qargs=qargs)\n conj = self.conjugate()\n return np.dot(conj.data, val.data)\n\n def probabilities(self, qargs=None, decimals=None):\n \"\"\"Return the subsystem measurement probability vector.\n\n Measurement probabilities are with respect to measurement in the\n computation (diagonal) basis.\n\n Args:\n qargs (None or list): subsystems to return probabilities for,\n if None return for all subsystems (Default: None).\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done (Default: None).\n\n Returns:\n np.array: The Numpy vector array of probabilities.\n\n Examples:\n\n Consider a 2-qubit product state\n :math:`|\\\\psi\\\\rangle=|+\\\\rangle\\\\otimes|0\\\\rangle`.\n\n .. 
jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities()\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring only qubit-0\n probs_qubit_0 = psi.probabilities([0])\n print('Qubit-0 probs: {}'.format(probs_qubit_0))\n\n # Probabilities for measuring only qubit-1\n probs_qubit_1 = psi.probabilities([1])\n print('Qubit-1 probs: {}'.format(probs_qubit_1))\n\n We can also permute the order of qubits in the ``qargs`` list\n to change the qubit position in the probabilities output\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities([0, 1])\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring both qubits\n # but swapping qubits 0 and 1 in output\n probs_swapped = psi.probabilities([1, 0])\n print('Swapped probs: {}'.format(probs_swapped))\n \"\"\"\n probs = self._subsystem_probabilities(\n np.abs(self.data) ** 2, self._dims, qargs=qargs)\n if decimals is not None:\n probs = probs.round(decimals=decimals)\n return probs\n\n def reset(self, qargs=None):\n \"\"\"Reset state or subsystems to the 0-state.\n\n Args:\n qargs (list or None): subsystems to reset, if None all\n subsystems will be reset to their 0-state\n (Default: None).\n\n Returns:\n Statevector: the reset state.\n\n Additional Information:\n If all subsystems are reset this will return the ground state\n on all subsystems. If only a some subsystems are reset this\n function will perform a measurement on those subsystems and\n evolve the subsystems so that the collapsed post-measurement\n states are rotated to the 0-state. The RNG seed for this\n sampling can be set using the :meth:`seed` method.\n \"\"\"\n if qargs is None:\n # Resetting all qubits does not require sampling or RNG\n state = np.zeros(self._dim, dtype=complex)\n state[0] = 1\n return Statevector(state, dims=self._dims)\n\n # Sample a single measurement outcome\n dims = self.dims(qargs)\n probs = self.probabilities(qargs)\n sample = self._rng.choice(len(probs), p=probs, size=1)\n\n # Convert to projector for state update\n proj = np.zeros(len(probs), dtype=complex)\n proj[sample] = 1 / np.sqrt(probs[sample])\n\n # Rotate outcome to 0\n reset = np.eye(len(probs))\n reset[0, 0] = 0\n reset[sample, sample] = 0\n reset[0, sample] = 1\n\n # compose with reset projection\n reset = np.dot(reset, np.diag(proj))\n return self.evolve(\n Operator(reset, input_dims=dims, output_dims=dims),\n qargs=qargs)\n\n def to_counts(self):\n \"\"\"Returns the statevector as a counts dict\n of probabilities.\n\n DEPRECATED: use :meth:`probabilities_dict` instead.\n\n Returns:\n dict: Counts of probabilities.\n \"\"\"\n warnings.warn(\n 'The `Statevector.to_counts` method is deprecated as of 0.13.0,'\n ' and will be removed no earlier than 3 months after that '\n 'release date. You should use the `Statevector.probabilities_dict`'\n ' method instead.', DeprecationWarning, stacklevel=2)\n return self.probabilities_dict()\n\n @classmethod\n def from_label(cls, label):\n \"\"\"Return a tensor product of Pauli X,Y,Z eigenstates.\n\n .. 
list-table:: Single-qubit state labels\n :header-rows: 1\n\n * - Label\n - Statevector\n * - ``\"0\"``\n - :math:`[1, 0]`\n * - ``\"1\"``\n - :math:`[0, 1]`\n * - ``\"+\"``\n - :math:`[1 / \\\\sqrt{2}, 1 / \\\\sqrt{2}]`\n * - ``\"-\"``\n - :math:`[1 / \\\\sqrt{2}, -1 / \\\\sqrt{2}]`\n * - ``\"r\"``\n - :math:`[1 / \\\\sqrt{2}, i / \\\\sqrt{2}]`\n * - ``\"l\"``\n - :math:`[1 / \\\\sqrt{2}, -i / \\\\sqrt{2}]`\n\n Args:\n label (string): a eigenstate string ket label (see table for\n allowed values).\n\n Returns:\n Statevector: The N-qubit basis state density matrix.\n\n Raises:\n QiskitError: if the label contains invalid characters, or the\n length of the label is larger than an explicitly\n specified num_qubits.\n \"\"\"\n # Check label is valid\n if re.match(r'^[01rl\\-+]+$', label) is None:\n raise QiskitError('Label contains invalid characters.')\n # We can prepare Z-eigenstates by converting the computational\n # basis bit-string to an integer and preparing that unit vector\n # However, for X-basis states, we will prepare a Z-eigenstate first\n # then apply Hadamard gates to rotate 0 and 1s to + and -.\n z_label = label\n xy_states = False\n if re.match('^[01]+$', label) is None:\n # We have X or Y eigenstates so replace +,r with 0 and\n # -,l with 1 and prepare the corresponding Z state\n xy_states = True\n z_label = z_label.replace('+', '0')\n z_label = z_label.replace('r', '0')\n z_label = z_label.replace('-', '1')\n z_label = z_label.replace('l', '1')\n # Initialize Z eigenstate vector\n num_qubits = len(label)\n data = np.zeros(1 << num_qubits, dtype=complex)\n pos = int(z_label, 2)\n data[pos] = 1\n state = Statevector(data)\n if xy_states:\n # Apply hadamards to all qubits in X eigenstates\n x_mat = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)\n # Apply S.H to qubits in Y eigenstates\n y_mat = np.dot(np.diag([1, 1j]), x_mat)\n for qubit, char in enumerate(reversed(label)):\n if char in ['+', '-']:\n state = state.evolve(x_mat, qargs=[qubit])\n elif char in ['r', 'l']:\n state = state.evolve(y_mat, qargs=[qubit])\n return state\n\n @staticmethod\n def from_int(i, dims):\n \"\"\"Return a computational basis statevector.\n\n Args:\n i (int): the basis state element.\n dims (int or tuple or list): The subsystem dimensions of the statevector\n (See additional information).\n\n Returns:\n Statevector: The computational basis state :math:`|i\\\\rangle`.\n\n Additional Information:\n The ``dims`` kwarg can be an integer or an iterable of integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` -- the integer specifies the total dimension of the\n state. If it is a power of two the state will be initialized\n as an N-qubit state. 
If it is not a power of two the state\n will have a single d-dimensional subsystem.\n \"\"\"\n size = np.product(dims)\n state = np.zeros(size, dtype=complex)\n state[i] = 1.0\n return Statevector(state, dims=dims)\n\n @classmethod\n def from_instruction(cls, instruction):\n \"\"\"Return the output statevector of an instruction.\n\n The statevector is initialized in the state :math:`|{0,\\\\ldots,0}\\\\rangle` of the\n same number of qubits as the input instruction or circuit, evolved\n by the input instruction, and the output statevector returned.\n\n Args:\n instruction (qiskit.circuit.Instruction or QuantumCircuit): instruction or circuit\n\n Returns:\n Statevector: The final statevector.\n\n Raises:\n QiskitError: if the instruction contains invalid instructions for\n the statevector simulation.\n \"\"\"\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an the statevector in the all |0> state\n init = np.zeros(2 ** instruction.num_qubits, dtype=complex)\n init[0] = 1.0\n vec = Statevector(init, dims=instruction.num_qubits * (2,))\n return Statevector._evolve_instruction(vec, instruction)\n\n def to_dict(self, decimals=None):\n r\"\"\"Convert the statevector to dictionary form.\n\n This dictionary representation uses a Ket-like notation where the\n dictionary keys are qudit strings for the subsystem basis vectors.\n If any subsystem has a dimension greater than 10 comma delimiters are\n inserted between integers so that subsystems can be distinguished.\n\n Args:\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done\n (Default: None).\n\n Returns:\n dict: the dictionary form of the Statevector.\n\n Example:\n\n The ket-form of a 2-qubit statevector\n :math:`|\\psi\\rangle = |-\\rangle\\otimes |0\\rangle`\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('-0')\n print(psi.to_dict())\n\n For non-qubit subsystems the integer range can go from 0 to 9. For\n example in a qutrit system\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(9)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(3, 3))\n print(psi.to_dict())\n\n For large subsystem dimensions delimeters are required. The\n following example is for a 20-dimensional system consisting of\n a qubit and 10-dimensional qudit.\n\n .. 
jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(2 * 10)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(2, 10))\n print(psi.to_dict())\n \"\"\"\n return self._vector_to_dict(self.data,\n self._dims,\n decimals=decimals,\n string_labels=True)\n\n @property\n def _shape(self):\n \"\"\"Return the tensor shape of the matrix operator\"\"\"\n return tuple(reversed(self.dims()))\n\n @staticmethod\n def _evolve_operator(statevec, oper, qargs=None):\n \"\"\"Evolve a qudit statevector\"\"\"\n is_qubit = bool(statevec.num_qubits and oper.num_qubits)\n\n if qargs is None:\n # Full system evolution\n statevec._data = np.dot(oper._data, statevec._data)\n if not is_qubit:\n statevec._set_dims(oper._output_dims)\n return statevec\n\n # Calculate contraction dimensions\n if is_qubit:\n # Qubit contraction\n new_dim = statevec._dim\n num_qargs = statevec.num_qubits\n else:\n # Qudit contraction\n new_dims = list(statevec._dims)\n for i, qubit in enumerate(qargs):\n new_dims[qubit] = oper._output_dims[i]\n new_dim = np.product(new_dims)\n num_qargs = len(new_dims)\n\n # Get transpose axes\n indices = [num_qargs - 1 - i for i in reversed(qargs)]\n axes = indices + [i for i in range(num_qargs) if i not in indices]\n axes_inv = np.argsort(axes).tolist()\n\n # Calculate contraction dimensions\n if is_qubit:\n pre_tensor_shape = num_qargs * (2,)\n post_tensor_shape = pre_tensor_shape\n contract_shape = (1 << oper.num_qubits, 1 << (num_qargs - oper.num_qubits))\n else:\n contract_dim = np.product(oper._input_dims)\n pre_tensor_shape = statevec._shape\n contract_shape = (contract_dim, statevec._dim // contract_dim)\n post_tensor_shape = list(reversed(oper._output_dims)) + [\n pre_tensor_shape[i] for i in range(num_qargs) if i not in indices]\n\n # reshape input for contraction\n statevec._data = np.reshape(np.transpose(\n np.reshape(statevec.data, pre_tensor_shape), axes), contract_shape)\n statevec._data = np.reshape(np.dot(oper.data, statevec._data), post_tensor_shape)\n statevec._data = np.reshape(np.transpose(statevec._data, axes_inv), new_dim)\n\n # Update dimension\n if not is_qubit:\n statevec._set_dims(new_dims)\n return statevec\n\n @staticmethod\n def _evolve_instruction(statevec, obj, qargs=None):\n \"\"\"Update the current Statevector by applying an instruction.\"\"\"\n from qiskit.circuit.reset import Reset\n from qiskit.circuit.barrier import Barrier\n\n mat = Operator._instruction_to_matrix(obj)\n if mat is not None:\n # Perform the composition and inplace update the current state\n # of the operator\n return Statevector._evolve_operator(statevec, Operator(mat), qargs=qargs)\n\n # Special instruction types\n if isinstance(obj, Reset):\n statevec._data = statevec.reset(qargs)._data\n return statevec\n if isinstance(obj, Barrier):\n return statevec\n\n # If the instruction doesn't have a matrix defined we use its\n # circuit decomposition definition if it exists, otherwise we\n # cannot compose this gate and raise an error.\n if obj.definition is None:\n raise QiskitError('Cannot apply Instruction: {}'.format(obj.name))\n if not isinstance(obj.definition, QuantumCircuit):\n raise QiskitError('{} instruction definition is {}; expected QuantumCircuit'.format(\n obj.name, type(obj.definition)))\n if obj.definition.global_phase:\n statevec._data *= np.exp(1j * float(obj.definition.global_phase))\n qubits = {qubit: i for i, qubit in enumerate(obj.definition.qubits)}\n for instr, qregs, cregs in obj.definition:\n if 
cregs:\n raise QiskitError(\n 'Cannot apply instruction with classical registers: {}'.format(\n instr.name))\n # Get the integer position of the flat register\n if qargs is None:\n new_qargs = [qubits[tup] for tup in qregs]\n else:\n new_qargs = [qargs[qubits[tup]] for tup in qregs]\n Statevector._evolve_instruction(statevec, instr, qargs=new_qargs)\n return statevec\n"
] | [
[
"numpy.allclose",
"numpy.transpose",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.array2string",
"numpy.diag",
"numpy.conj",
"numpy.reshape",
"numpy.abs",
"numpy.asarray",
"numpy.argsort",
"numpy.ravel",
"numpy.product",
"numpy.sqrt",
"numpy.kron",
"numpy.linalg.norm"
]
] |
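A minimal usage sketch for the Statevector helpers documented in the row above (probabilities, probabilities_dict, from_int). It assumes a Qiskit installation that exposes qiskit.quantum_info.Statevector and only restates behaviour already described in those docstrings; it is an illustration, not part of the stored file.

    from qiskit.quantum_info import Statevector

    psi = Statevector.from_label('+0')          # qubit-1 in |+>, qubit-0 in |0>
    print(psi.probabilities())                  # full distribution: [0.5, 0. , 0.5, 0. ]
    print(psi.probabilities([1]))               # marginal for qubit-1: [0.5, 0.5]
    print(psi.probabilities_dict(decimals=3))   # ket-style keys: {'00': 0.5, '10': 0.5}

    # from_int builds a computational basis state for arbitrary subsystem dimensions
    qutrit_pair = Statevector.from_int(4, dims=(3, 3))
    print(qutrit_pair.to_dict())                # {'11': (1+0j)} for the 3x3 system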
amorehead/metrics | [
"2e4cb70c46bd775629ceb9d710bc581af8bf92c5"
] | [
"torchmetrics/classification/f_beta.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.f_beta import _fbeta_compute, _fbeta_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass FBeta(Metric):\n r\"\"\"\n Computes `F-score <https://en.wikipedia.org/wiki/F-score>`_, specifically:\n\n .. math::\n F_\\beta = (1 + \\beta^2) * \\frac{\\text{precision} * \\text{recall}}\n {(\\beta^2 * \\text{precision}) + \\text{recall}}\n\n Where :math:`\\beta` is some positive real factor. Works with binary, multiclass, and multilabel data.\n Accepts probabilities from a model output or integer class values in prediction.\n Works with multi-dimensional preds and target.\n\n Forward accepts\n\n - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes\n - ``target`` (long tensor): ``(N, ...)``\n\n If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument\n to convert into integer labels. This is the case for binary and multi-label probabilities.\n\n If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.\n\n Args:\n num_classes: Number of classes in the dataset.\n beta: Beta coefficient in the F measure.\n threshold:\n Threshold value for binary or multi-label probabilities. default: 0.5\n\n average:\n - ``'micro'`` computes metric globally\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``'none'`` or ``None`` computes and returns the metric per class\n\n multilabel: If predictions are from multilabel classification.\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. 
default: None (which selects the entire world)\n\n Example:\n\n >>> from torchmetrics import FBeta\n >>> target = torch.tensor([0, 1, 2, 0, 1, 2])\n >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])\n >>> f_beta = FBeta(num_classes=3, beta=0.5)\n >>> f_beta(preds, target)\n tensor(0.3333)\n\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n beta: float = 1.0,\n threshold: float = 0.5,\n average: str = \"micro\",\n multilabel: bool = False,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n self.num_classes = num_classes\n self.beta = beta\n self.threshold = threshold\n self.average = average\n self.multilabel = multilabel\n\n allowed_average = (\"micro\", \"macro\", \"weighted\", \"none\", None)\n if self.average not in allowed_average:\n raise ValueError(\n 'Argument `average` expected to be one of the following:'\n f' {allowed_average} but got {self.average}'\n )\n\n self.add_state(\"true_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n self.add_state(\"predicted_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n self.add_state(\"actual_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor):\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n true_positives, predicted_positives, actual_positives = _fbeta_update(\n preds, target, self.num_classes, self.threshold, self.multilabel\n )\n\n self.true_positives += true_positives\n self.predicted_positives += predicted_positives\n self.actual_positives += actual_positives\n\n def compute(self) -> Tensor:\n \"\"\"\n Computes fbeta over state.\n \"\"\"\n return _fbeta_compute(\n self.true_positives, self.predicted_positives, self.actual_positives, self.beta, self.average\n )\n\n\nclass F1(FBeta):\n \"\"\"\n Computes F1 metric. F1 metrics correspond to a harmonic mean of the\n precision and recall scores.\n\n Works with binary, multiclass, and multilabel data.\n Accepts logits from a model output or integer class values in prediction.\n Works with multi-dimensional preds and target.\n\n Forward accepts\n\n - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes\n - ``target`` (long tensor): ``(N, ...)``\n\n If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument.\n This is the case for binary and multi-label logits.\n\n If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.\n\n Args:\n num_classes: Number of classes in the dataset.\n threshold:\n Threshold value for binary or multi-label logits. default: 0.5\n\n average:\n - ``'micro'`` computes metric globally\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``'none'`` or ``None`` computes and returns the metric per class\n\n multilabel: If predictions are from multilabel classification.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False. 
default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n\n Example:\n >>> from torchmetrics import F1\n >>> target = torch.tensor([0, 1, 2, 0, 1, 2])\n >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])\n >>> f1 = F1(num_classes=3)\n >>> f1(preds, target)\n tensor(0.3333)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n threshold: float = 0.5,\n average: str = \"micro\",\n multilabel: bool = False,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n if multilabel is not False:\n rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')\n\n super().__init__(\n num_classes=num_classes,\n beta=1.0,\n threshold=threshold,\n average=average,\n multilabel=multilabel,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n"
] | [
[
"torch.zeros"
]
] |
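A short sketch of the update/compute cycle for the FBeta and F1 metrics defined in the row above, assuming the same torchmetrics version (top-level FBeta/F1 classes) and reusing the tensors from the class docstrings.

    import torch
    from torchmetrics import F1, FBeta

    target = torch.tensor([0, 1, 2, 0, 1, 2])
    preds = torch.tensor([0, 2, 1, 0, 0, 1])

    f_beta = FBeta(num_classes=3, beta=0.5, average="none")  # one F0.5 score per class
    f1 = F1(num_classes=3)                                    # micro-averaged F1

    # Batched usage: update() accumulates state, compute() reduces it.
    for p, t in zip(preds.chunk(2), target.chunk(2)):
        f_beta.update(p, t)
        f1.update(p, t)

    print(f_beta.compute())  # per-class F0.5 scores
    print(f1.compute())      # tensor(0.3333), matching the docstring example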
stewue/masterthesis-evaluation | [
"0fb825e196f386c628f95524aa9c80af2126617e"
] | [
"RQ1_Python/execution_time_per_benchmark.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.ticker import PercentFormatter\n\ndata = pd.read_csv('C:\\\\Users\\\\stewue\\\\OneDrive - Wuersten\\\\Uni\\\\19_HS\\\\Masterarbeit\\\\Repo\\\\Evaluation\\\\RQ1_Results\\\\aggregated\\\\executiontime.csv')\ntotalTime = data['executionTime'] * data['parameterizationCombinations'] / 60\n\nall, base = np.histogram(totalTime, bins=1000, range=[0, 30], weights=np.ones(len(totalTime)) / len(totalTime))\ncumulative = np.cumsum(all)\n\nfig = plt.figure()\ntotal = totalTime.shape[0]\n\n# absolute\nax1 = fig.add_subplot()\nax1.plot(base[:-1], cumulative * total)\nax1.set_ylabel('# benchmarks')\n\n# relative\nax2 = ax1.twinx()\nplt.gca().yaxis.set_major_formatter(PercentFormatter(1, 0))\nax2.plot(base[:-1], cumulative)\nax2.set_ylabel('# benchmarks [cumulative %]')\n\nax1.set_xlabel('execution time [min]')\nplt.yticks(np.arange(0, 0.91, 0.1))\nplt.tight_layout()\n#plt.show()\n#plt.savefig('C:\\\\Users\\\\stewue\\\\OneDrive - Wuersten\\\\Uni\\\\19_HS\\\\Masterarbeit\\\\Repo\\\\Evaluation\\\\RQ1_Results\\\\images\\\\execution_time_per_benchmark.pdf')\n\nprint(\"max: \" + str(np.max(totalTime)))\nprint(\"median: \" + str(np.median(totalTime)))\nprint(\"total: \" + str(total))\n\ns10 = totalTime[totalTime < 10]\nprint(\"<10min: \" + str(len(s10) / total))\nprint(\"<10min: \" + str(len(s10)))\n\ns30 = totalTime[totalTime < 30]\nprint(\"<30min: \" + str(len(s30) / total))\nprint(\"<30min: \" + str(len(s30)))"
] | [
[
"numpy.cumsum",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.gca",
"numpy.median",
"numpy.arange",
"numpy.max"
]
] |
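The script in the row above depends on a local CSV path. The sketch below reproduces the same cumulative-histogram pattern with synthetic runtimes (the exponential sample is a stand-in for the thesis data, not a reproduction of it) so it runs anywhere.

    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib.ticker import PercentFormatter

    rng = np.random.default_rng(0)
    total_time = rng.exponential(scale=5.0, size=2000)   # stand-in for executionTime * combinations / 60

    counts, edges = np.histogram(total_time, bins=1000, range=[0, 30],
                                 weights=np.ones(len(total_time)) / len(total_time))
    cumulative = np.cumsum(counts)

    fig, ax1 = plt.subplots()
    ax1.plot(edges[:-1], cumulative * len(total_time))   # absolute counts
    ax1.set_xlabel('execution time [min]')
    ax1.set_ylabel('# benchmarks')

    ax2 = ax1.twinx()                                    # same x-axis, cumulative share
    ax2.plot(edges[:-1], cumulative)
    ax2.yaxis.set_major_formatter(PercentFormatter(1, 0))
    ax2.set_ylabel('# benchmarks [cumulative %]')

    plt.tight_layout()
    plt.show()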
VITA-Group/BERT-Tickets | [
"4d8e0356939e7045e2f5ee908412a5026051d162"
] | [
"squad_trans.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).\"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport timeit\nimport torch.nn.utils.prune as prune\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertForQuestionAnswering,\n AlbertTokenizer,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n CamembertConfig,\n CamembertForQuestionAnswering,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n squad_convert_examples_to_features,\n BertPreTrainedModel,\n PreTrainedModel,\n PreTrainedTokenizer,\n)\nfrom transformers.data.metrics.squad_metrics import (\n compute_predictions_log_probs,\n compute_predictions_logits,\n squad_evaluate,\n)\nfrom transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (BertConfig, CamembertConfig, RobertaConfig, XLNetConfig, XLMConfig)\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n \"camembert\": (CamembertConfig, CamembertForQuestionAnswering, CamembertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),\n}\n\ndef rewind(pre_weight):\n\n recover_dict = {}\n name_list = []\n for ii in range(12):\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.query.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.key.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.value.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.output.dense.weight')\n 
name_list.append('bert.pooler.dense.weight')\n\n for key in pre_weight.keys():\n\n if 'bert' in key:\n if key in name_list:\n new_key = key+'_orig'\n else:\n new_key = key\n\n recover_dict[new_key] = pre_weight[key]\n\n return recover_dict\n\ndef see_weight_rate(model):\n\n sum_list = 0\n zero_sum = 0\n for ii in range(12):\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.query.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.query.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.key.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.key.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.value.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.value.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.output.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].intermediate.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].intermediate.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].output.dense.weight == 0))\n\n\n sum_list = sum_list+float(model.bert.pooler.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.pooler.dense.weight == 0))\n \n\n return 100*zero_sum/sum_list\n\ndef pruning_model_custom(model, mask_dict):\n\n parameters_to_prune =[]\n mask_list = []\n for ii in range(12):\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.query)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.key)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.value)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.output.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].intermediate.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].output.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight_mask'])\n\n parameters_to_prune.append(model.bert.pooler.dense)\n mask_list.append(mask_dict['bert.pooler.dense.weight_mask'])\n\n for ii in range(len(parameters_to_prune)):\n prune.CustomFromMask.apply(parameters_to_prune[ii], 'weight', mask=mask_list[ii])\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ndef set_seed_new(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\ndef train(args, train_dataset, model, tokenizer):\n record_result = []\n\n zero_rate = see_weight_rate(model)\n 
record_result.append(zero_rate)\n\n\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n # if args.max_steps > 0:\n # t_total = args.max_steps\n # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n # else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 1\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n # Added here for reproductibility\n set_seed(args)\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n\n if args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\n del inputs[\"token_type_ids\"]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n if args.version_2_with_negative:\n inputs.update({\"is_impossible\": batch[7]})\n if hasattr(model, \"config\") and hasattr(model.config, \"lang2id\"):\n inputs.update(\n {\"langs\": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}\n )\n\n outputs = model(**inputs)\n # model outputs are always tuple in transformers (see doc)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n # Log metrics\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n 
# Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n results = evaluate(args, model, tokenizer)\n record_result.append(results)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n # Save model checkpoint\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n torch.save(model,os.path.join(output_dir, \"model.pt\"))\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n \n results = evaluate(args, model, tokenizer)\n record_result.append(results)\n\n torch.save(record_result, os.path.join(args.output_dir, 'result.pt'))\n\n return global_step, tr_loss / global_step\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n all_results = []\n start_time = timeit.default_timer()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n # XLNet and XLM use more arguments for their predictions\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n # for lang_id-sensitive xlm models\n if hasattr(model, \"config\") and hasattr(model.config, \"lang2id\"):\n inputs.update(\n {\"langs\": (torch.ones(batch[0].shape, dtype=torch.int64) * 
args.lang_id).to(args.device)}\n )\n\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n\n output = [to_list(output[i]) for output in outputs]\n\n # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other \"simpler\"\n # models only use two.\n if len(output) >= 5:\n start_logits = output[0]\n start_top_index = output[1]\n end_logits = output[2]\n end_top_index = output[3]\n cls_logits = output[4]\n\n result = SquadResult(\n unique_id,\n start_logits,\n end_logits,\n start_top_index=start_top_index,\n end_top_index=end_top_index,\n cls_logits=cls_logits,\n )\n\n else:\n start_logits, end_logits = output\n result = SquadResult(unique_id, start_logits, end_logits)\n\n all_results.append(result)\n\n evalTime = timeit.default_timer() - start_time\n logger.info(\" Evaluation done in total %f secs (%f sec per example)\", evalTime, evalTime / len(dataset))\n\n # Compute predictions\n output_prediction_file = os.path.join(args.output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(args.output_dir, \"nbest_predictions_{}.json\".format(prefix))\n\n if args.version_2_with_negative:\n output_null_log_odds_file = os.path.join(args.output_dir, \"null_odds_{}.json\".format(prefix))\n else:\n output_null_log_odds_file = None\n\n # XLNet and XLM use a more complex post-processing procedure\n if args.model_type in [\"xlnet\", \"xlm\"]:\n start_n_top = model.config.start_n_top if hasattr(model, \"config\") else model.module.config.start_n_top\n end_n_top = model.config.end_n_top if hasattr(model, \"config\") else model.module.config.end_n_top\n\n predictions = compute_predictions_log_probs(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n start_n_top,\n end_n_top,\n args.version_2_with_negative,\n tokenizer,\n args.verbose_logging,\n )\n else:\n predictions = compute_predictions_logits(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n args.do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n args.verbose_logging,\n args.version_2_with_negative,\n args.null_score_diff_threshold,\n tokenizer,\n )\n\n # Compute the F1 and exact scores.\n results = squad_evaluate(examples, predictions)\n return results\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n # Load data features from cache or dataset file\n input_dir = args.data_dir if args.data_dir else \".\"\n cached_features_file = os.path.join(\n input_dir,\n \"cached_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n ),\n )\n\n # Init features and dataset from cache if it exists\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features_and_dataset = torch.load(cached_features_file)\n features, dataset, examples = (\n features_and_dataset[\"features\"],\n features_and_dataset[\"dataset\"],\n features_and_dataset[\"examples\"],\n )\n else:\n logger.info(\"Creating 
features from dataset file at %s\", input_dir)\n\n if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):\n try:\n import tensorflow_datasets as tfds\n except ImportError:\n raise ImportError(\"If not data_dir is specified, tensorflow_datasets needs to be installed.\")\n\n if args.version_2_with_negative:\n logger.warn(\"tensorflow_datasets does not handle version 2 of SQuAD.\")\n\n tfds_examples = tfds.load(\"squad\")\n examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)\n else:\n processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n if evaluate:\n examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)\n else:\n examples = processor.get_train_examples(args.data_dir, filename=args.train_file)\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n return_dataset=\"pt\",\n threads=args.threads,\n )\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save({\"features\": features, \"dataset\": dataset, \"examples\": examples}, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints and predictions will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the .json files for the task.\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--train_file\",\n default=None,\n type=str,\n help=\"The input training file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--predict_file\",\n default=None,\n type=str,\n help=\"The input evaluation file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--dir\",\n default=None,\n type=str,\n required=False,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--mask_dir\",\n default=None,\n type=str,\n required=False,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n\n parser.add_argument(\n \"--version_2_with_negative\",\n action=\"store_true\",\n help=\"If true, the SQuAD examples contain some that do not have an answer.\",\n )\n parser.add_argument(\n \"--null_score_diff_threshold\",\n type=float,\n default=0.0,\n help=\"If null_score - best_non_null is greater than the threshold predict null.\",\n )\n\n parser.add_argument(\n \"--max_seq_length\",\n default=384,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences \"\n \"longer than this will be truncated, and sequences shorter than this will be padded.\",\n )\n parser.add_argument(\n \"--doc_stride\",\n default=128,\n type=int,\n help=\"When splitting up a long document into chunks, how much stride to take between chunks.\",\n )\n parser.add_argument(\n \"--max_query_length\",\n default=64,\n type=int,\n help=\"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=20, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--n_best_size\",\n default=20,\n type=int,\n help=\"The total number of n-best predictions to generate in the nbest_predictions.json output file.\",\n )\n parser.add_argument(\n \"--max_answer_length\",\n default=30,\n type=int,\n help=\"The maximum length of an answer that can be generated. 
This is needed because the start \"\n \"and end predictions are not conditioned on one another.\",\n )\n parser.add_argument(\n \"--verbose_logging\",\n action=\"store_true\",\n help=\"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\",\n )\n parser.add_argument(\n \"--lang_id\",\n default=0,\n type=int,\n help=\"language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)\",\n )\n\n parser.add_argument(\n \"--weight_pertub\",\n default=None,\n type=str,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Whether not to use CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--threads\", type=int, default=1, help=\"multiple threads for converting example to features\")\n args = parser.parse_args()\n\n if args.doc_stride >= args.max_seq_length - args.max_query_length:\n logger.warning(\n \"WARNING - You've set a doc stride which may be superior to the document length in some \"\n \"examples. This could result in errors when building features from the examples. Please reduce the doc \"\n \"stride or increase the maximum length to ensure the features are correctly built.\"\n )\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n \n if args.dir == 'pre':\n\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n elif args.dir == 'rand':\n\n model = model_class(config=config)\n \n model.to(args.device)\n\n\n if args.weight_pertub:\n load_weight = torch.load(args.weight_pertub, map_location=args.device)\n model_dict = model.state_dict()\n model_dict.update(load_weight)\n model.load_state_dict(model_dict)\n\n\n if args.mask_dir:\n mask = torch.load(args.mask_dir, map_location=args.device)\n pruning_model_custom(model, mask)\n zero_rate = see_weight_rate(model)\n print('model 0:',zero_rate)\n\n\n if args.local_rank == 0:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.\n # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations. 
Note that running `--fp16_opt_level=\"O2\"` will\n # remove the need for this code, but it is still valid.\n if args.fp16:\n try:\n import apex\n\n apex.amp.register_half_function(torch, \"einsum\")\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # # Save the trained model and the tokenizer\n # if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # # Create output directory if needed\n # if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n # os.makedirs(args.output_dir)\n\n # logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # # They can then be reloaded using `from_pretrained()`\n # # Take care of distributed/parallel training\n # model_to_save = model.module if hasattr(model, \"module\") else model\n # model_to_save.save_pretrained(args.output_dir)\n # tokenizer.save_pretrained(args.output_dir)\n # torch.save(model,os.path.join(args.output_dir, \"model.pt\"))\n\n # # Good practice: save your training arguments together with the trained model\n # torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # # Load a trained model and vocabulary that you have fine-tuned\n # model = model_class.from_pretrained(args.output_dir) # , force_download=True)\n # model = torch.load(os.path.join(args.output_dir, \"model.pt\"))\n # tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n # model.to(args.device)\n\n # # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory\n # results = {}\n # if args.do_eval and args.local_rank in [-1, 0]:\n # if args.do_train:\n # logger.info(\"Loading checkpoints saved during training for evaluation\")\n # checkpoints = [args.output_dir]\n # if args.eval_all_checkpoints:\n # checkpoints = list(\n # os.path.dirname(c)\n # for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n # )\n # logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce model loading logs\n # else:\n # logger.info(\"Loading checkpoint %s for evaluation\", args.model_name_or_path)\n # checkpoints = [args.model_name_or_path]\n\n # logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n # for checkpoint in checkpoints:\n # # Reload the model\n # global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n # if '/' in global_step:\n # global_step = 'last'\n # # model = model_class.from_pretrained(checkpoint) # , force_download=True)\n # model = torch.load(os.path.join(checkpoint, \"model.pt\"))\n # model.to(args.device)\n\n # # Evaluate\n # result = evaluate(args, model, tokenizer, prefix=global_step)\n\n # result = dict((k + (\"_{}\".format(global_step) if global_step else \"\"), v) for k, v in result.items())\n # results.update(result)\n # for key in results.keys():\n # print(key, results[key])\n\n # logger.info(\"Results: {}\".format(results))\n\n # return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.distributed.init_process_group",
"torch.save",
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.utils.data.RandomSampler",
"torch.device",
"torch.cuda.set_device",
"torch.ones",
"torch.load",
"torch.distributed.get_world_size",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.distributed.barrier",
"torch.nn.parallel.DistributedDataParallel",
"torch.sum",
"torch.utils.data.distributed.DistributedSampler",
"torch.nn.utils.prune.CustomFromMask.apply"
]
] |
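A toy illustration of the mask-reapplication idiom used by pruning_model_custom and see_weight_rate in the row above, shown on a single nn.Linear rather than the full BERT encoder; the 50% magnitude-pruning step exists only to produce a mask to save and re-apply.

    import torch
    import torch.nn as nn
    import torch.nn.utils.prune as prune

    layer = nn.Linear(8, 8)

    # Prune 50% of the weights by magnitude, then capture the resulting mask.
    prune.l1_unstructured(layer, name="weight", amount=0.5)
    mask = layer.weight_mask.clone()
    prune.remove(layer, "weight")                 # bake the zeros into .weight

    # Later (e.g. after rewinding weights) the saved mask is re-applied, which is
    # what CustomFromMask.apply does for every BERT weight matrix in the script.
    prune.CustomFromMask.apply(layer, "weight", mask=mask)

    zero_rate = 100.0 * float(torch.sum(layer.weight == 0)) / layer.weight.nelement()
    print(f"sparsity: {zero_rate:.1f}%")          # roughly 50%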
ayushkumar63123/seqio | [
"23bcb59df59798074d7d5896a131980137c69ec8"
] | [
"seqio/loggers.py"
] | [
"# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for logging evaluation metrics and inference results.\"\"\"\n\nimport abc\nimport base64\nimport itertools\nimport json\nimport os\nimport time\nfrom typing import Any, Mapping, Optional, Sequence, Type\n\nfrom absl import logging\nimport numpy as np\nfrom seqio import metrics as metrics_lib\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\nclass Logger(abc.ABC):\n \"\"\"Abstract base class for logging.\n\n Attributes:\n output_dir: a directory to save the logging results (e.g., TensorBoard\n summary) as well as the evaluation results (e.g., \"inputs_pretokenized\",\n \"target_pretokenize\" and \"prediction\").\n \"\"\"\n\n def __init__(self, output_dir):\n self.output_dir = output_dir\n\n @abc.abstractmethod\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Logs the metrics and inferences for each task.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset.\n inferences: Mapping from inference type (\"predictions\", \"scores\") to the\n model outputs, aligned with the dataset.\n targets: The postprocessed targets, aligned with the dataset.\n \"\"\"\n ...\n\n\nclass PyLoggingLogger(Logger):\n \"\"\"A logger that writes metrics using the standard Python log.\"\"\"\n\n def __init__(self, output_dir: str, level: int = logging.INFO):\n self._level = level\n super().__init__(output_dir)\n\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n del dataset\n del inferences\n del targets\n for metric_name, metric_value in metrics.items():\n if isinstance(metric_value, metrics_lib.Scalar):\n strvalue = f\"{metric_value.value:.3f}\"\n elif isinstance(metric_value, metrics_lib.Text):\n strvalue = metric_value.textdata\n else:\n strvalue = f\"unloggable type {type(metric_value)}\"\n logging.info(\"%s/%s at step %d: %s\", task_name, metric_name, step,\n strvalue)\n\n\nclass TensorBoardLogger(Logger):\n \"\"\"A logger that writes metrics to TensorBoard summaries.\"\"\"\n\n def __init__(self, output_dir: str):\n \"\"\"TensorBoardLogger initializer.\n\n Args:\n output_dir: The base directory where all logs will be written.\n \"\"\"\n super().__init__(output_dir)\n self._summary_writers = {}\n\n def _get_summary_writer(self, summary_dir: str) -> tf.summary.SummaryWriter:\n \"\"\"Get or create a summary writer for a specific task.\n\n Args:\n summary_dir: The task we are getting the writer for.\n\n Returns:\n The summary writer associated with the directory.\n \"\"\"\n if summary_dir not in 
self._summary_writers:\n self._summary_writers[summary_dir] = tf.summary.create_file_writer(\n summary_dir, flush_millis=120)\n return self._summary_writers[summary_dir]\n\n def _write_metric(self, tag: str, value: metrics_lib.MetricValue, step: int,\n writer: tf.summary.SummaryWriter):\n \"\"\"Log a metric value to tensorboard, dispatched on value type.\"\"\"\n if isinstance(value, metrics_lib.Scalar):\n value: metrics_lib.Scalar = value\n value = float(np.array(value.value))\n with writer.as_default():\n tf.summary.scalar(name=tag, data=value, step=step)\n elif isinstance(value, metrics_lib.Image):\n value: metrics_lib.Image = value\n image = tf.convert_to_tensor(value.image)\n with writer.as_default():\n tf.summary.image(\n name=tag, data=image, step=step, max_outputs=value.max_outputs)\n elif isinstance(value, metrics_lib.Audio):\n value: metrics_lib.Audio = value\n audio = tf.convert_to_tensor(value.audiodata, dtype=tf.float32)\n with writer.as_default():\n tf.summary.audio(\n name=tag,\n data=audio,\n sample_rate=value.sample_rate,\n step=step,\n max_outputs=value.max_outputs,\n encoding=\"wav\")\n elif isinstance(value, metrics_lib.Histogram):\n value: metrics_lib.Histogram = value\n values = np.array(value.values)\n with writer.as_default():\n tf.summary.histogram(\n name=tag, data=values, step=step, buckets=value.bins)\n elif isinstance(value, metrics_lib.Text):\n value: metrics_lib.Text = value\n if not isinstance(value.textdata, (str, bytes)):\n raise ValueError(\"`textdata` should be of the type `str` or `bytes`.\")\n with writer.as_default():\n tf.summary.text(name=tag, data=tf.constant(value.textdata), step=step)\n elif isinstance(value, metrics_lib.Generic):\n with writer.as_default():\n tf.summary.write(\n tag=tag, tensor=value.tensor, metadata=value.metadata, step=step)\n else:\n raise TypeError(\n f\"Value type not understood, got '{type(value).__name__}'.\")\n\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Log metrics to tensorboard.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset, which is unused by this logger.\n inferences: The model outputs, which are unused by this logger.\n targets: The postprocessed targets, which are unused by this logger.\n \"\"\"\n del dataset\n del inferences\n del targets\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n writer = self._get_summary_writer(os.path.join(self.output_dir, task_name))\n for metric_name, metric_value in metrics.items():\n # We prefix the tag with \"eval/\" for backward compatibility.\n # TODO(adarob): Find a way to remove this or make it an option.\n self._write_metric(\n tag=f\"eval/{metric_name}\",\n value=metric_value,\n step=step,\n writer=writer)\n writer.flush()\n\n\nclass TensorBoardLoggerV1(Logger):\n \"\"\"A logger that writes metrics to TensorBoard summaries in TF1.\"\"\"\n\n def __init__(self, output_dir: str):\n \"\"\"TensorBoardLoggerV1 initializer.\n\n Args:\n output_dir: The base directory where all logs will be written.\n \"\"\"\n super().__init__(output_dir)\n self._summary_writers = {}\n\n def _get_summary_writer(self, task_name: str) -> tf.summary.SummaryWriter:\n \"\"\"Create (if needed) and return a SummaryWriter for a given task.\"\"\"\n if task_name not in self._summary_writers:\n with tf.compat.v1.Graph().as_default():\n self._summary_writers[task_name] = tf.compat.v1.summary.FileWriter(\n os.path.join(self.output_dir, task_name))\n return self._summary_writers[task_name]\n\n def __call__(self,\n task_name: str,\n step: int,\n metrics: Mapping[str, metrics_lib.Scalar],\n dataset: tf.data.Dataset,\n inferences: Mapping[str, Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Log the eval results and optionally write summaries for TensorBoard.\n\n Note:\n This is the default implementation using tensorflow v1 operations. This\n only supports logging metrics of the Scalar type.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset, which is unused by this logger.\n inferences: The model outputs, which are unused by this logger.\n targets: The postprocessed targets, which are unused by this logger.\n \"\"\"\n del dataset\n del inferences\n del targets\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n summary_writer = self._get_summary_writer(task_name)\n\n for metric_name, metric_value in metrics.items():\n if not isinstance(metric_value, metrics_lib.Scalar):\n raise ValueError(f\"Value for metric '{metric_name}' should be of \"\n f\"type 'Scalar, got '{type(metric_value).__name__}'.\")\n summary = tf.compat.v1.Summary()\n\n tag = f\"eval/{metric_name}\"\n logging.info(\"%s at step %d: %.3f\", tag, step, metric_value.value)\n\n summary.value.add(tag=tag, simple_value=metric_value.value)\n summary_writer.add_summary(summary, step)\n\n summary_writer.flush()\n\n\nclass TensorAndNumpyEncoder(json.JSONEncoder):\n \"\"\"JSON Encoder to use when encoding dicts with tensors and numpy arrays.\"\"\"\n\n def __init__(self, *args, max_ndarray_size=32, **kwargs):\n self.max_ndarray_size = max_ndarray_size\n super().__init__(*args, **kwargs)\n\n def default(self, obj):\n if isinstance(obj, tf.Tensor):\n if obj.dtype == tf.bfloat16:\n # bfloat16 not supported, convert to float32.\n obj = tf.cast(obj, tf.float32)\n obj = obj.numpy()\n\n if isinstance(obj, np.ndarray):\n obj_dtype = obj.dtype\n if str(obj.dtype) == \"bfloat16\":\n # bfloat16 not supported, convert to float32.\n obj = obj.astype(np.float32)\n if obj.size <= self.max_ndarray_size:\n return obj.tolist() # Convert arrays to lists of py-native types.\n else:\n # If the ndarray is larger than allowed, return a summary string\n # instead of the entire array.\n first_five_str = str(obj.reshape([-1])[:5].tolist())[1:-1]\n return (\n f\"{type(obj).__name__}(shape={obj.shape}, dtype={obj_dtype}); \"\n f\"first: {first_five_str} ...\")\n elif (np.issubdtype(type(obj), np.number) or\n np.issubdtype(type(obj), np.bool_)):\n return obj.item() # Convert most primitive np types to py-native types.\n elif hasattr(obj, \"dtype\") and obj.dtype == tf.bfloat16.as_numpy_dtype:\n return float(obj)\n elif isinstance(obj, bytes):\n # JSON doesn't support bytes. First, try to decode using utf-8 in case\n # it's text. Otherwise, just base64 encode the bytes.\n try:\n return obj.decode(\"utf-8\")\n except UnicodeDecodeError:\n return base64.b64encode(obj)\n\n return json.JSONEncoder.default(self, obj)\n\n\nclass JSONLogger(Logger):\n \"\"\"A logger that writes metrics and model outputs to JSONL files.\"\"\"\n\n def __init__(\n self,\n output_dir: str,\n write_n_results: Optional[int] = None,\n json_encoder_cls: Type[json.JSONEncoder] = TensorAndNumpyEncoder):\n \"\"\"JSONLogger constructor.\n\n Args:\n output_dir: The base directory where all logs will be written.\n write_n_results: number of scores/predictions to be written to the file at\n each step. If None, scores and predictions from all examples are\n written.\n json_encoder_cls: Class to use for serializing JSON to file.\n \"\"\"\n super().__init__(output_dir)\n self._write_n_results = write_n_results\n self._json_encoder_cls = json_encoder_cls\n\n def __call__(self,\n task_name: str,\n step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset,\n inferences: Mapping[str, Sequence[Any]],\n targets: Sequence[Any]) -> None:\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n metrics_fname = os.path.join(self.output_dir, f\"{task_name}-metrics.jsonl\")\n\n serializable_metrics = {}\n for metric_name, metric_value in metrics.items():\n if isinstance(metric_value, metrics_lib.Scalar):\n serializable_metrics[metric_name] = metric_value.value\n elif isinstance(metric_value, metrics_lib.Text):\n serializable_metrics[metric_name] = metric_value.textdata\n else:\n logging.warning(\n \"Skipping JSON logging of non-serializable metric '%s' of type %s.\",\n metric_name, type(metric_value))\n\n if metrics:\n logging.info(\"Appending metrics to %s\", metrics_fname)\n # We simulate an atomic append for filesystems that do not suppport\n # mode=\"a\".\n file_contents = \"\"\n if tf.io.gfile.exists(metrics_fname):\n with tf.io.gfile.GFile(metrics_fname, \"r\") as f:\n file_contents = f.read()\n with tf.io.gfile.GFile(metrics_fname + \".tmp\", \"w\") as f:\n f.write(file_contents)\n f.write(\n json.dumps({\n \"step\": step,\n **serializable_metrics\n }, cls=self._json_encoder_cls))\n f.write(\"\\n\")\n tf.io.gfile.rename(metrics_fname + \".tmp\", metrics_fname, overwrite=True)\n\n if self._write_n_results == 0:\n return\n\n write_tick = time.time()\n inferences_fname = os.path.join(self.output_dir,\n f\"{task_name}-{step:06}.jsonl\")\n logging.info(\"Writing inferences to %s\", inferences_fname)\n with tf.io.gfile.GFile(inferences_fname, \"w\") as f:\n examples_with_scores = itertools.zip_longest(\n tfds.as_numpy(dataset), inferences.get(\"predictions\", []),\n targets, inferences.get(\"scores\", []))\n if self._write_n_results:\n examples_with_scores = itertools.islice(\n examples_with_scores, 0, self._write_n_results)\n\n for inp, prediction, target, score in examples_with_scores:\n\n # tfds.as_numpy does not convert ragged tensors\n for k in inp:\n if isinstance(inp[k], tf.RaggedTensor):\n inp[k] = inp[k].numpy()\n\n json_dict = {\"input\": inp}\n\n # Only write `prediction` if it is JSON serializable.\n if prediction is not None:\n try:\n json.dumps(prediction, cls=self._json_encoder_cls)\n json_dict[\"prediction\"] = prediction\n except TypeError:\n logging.warning(\"`prediction` is not JSON serializable\",\n exc_info=True)\n\n # Only write `target` if it is JSON serializable.\n try:\n json.dumps(target, cls=self._json_encoder_cls)\n json_dict[\"target\"] = target\n except TypeError:\n logging.warning(\"`target` is not JSON serializable\", exc_info=True)\n\n if score is not None:\n json_dict[\"score\"] = score\n\n json_str = json.dumps(json_dict, cls=self._json_encoder_cls)\n f.write(json_str + \"\\n\")\n write_time = time.time() - write_tick\n logging.info(\"Writing completed in %02f seconds (%02f examples/sec).\",\n write_time,\n len(inferences) / write_time)\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.summary.scalar",
"tensorflow.summary.histogram",
"tensorflow.summary.write",
"tensorflow.io.gfile.GFile",
"tensorflow.io.gfile.rename",
"tensorflow.summary.image",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.Summary",
"tensorflow.cast",
"tensorflow.convert_to_tensor",
"tensorflow.summary.audio",
"numpy.array",
"tensorflow.constant",
"tensorflow.summary.create_file_writer"
]
] |
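The `JSONLogger` in the row above appends metrics by rewriting the whole file through a temporary copy, since some `tf.io.gfile` filesystems do not support append mode. A minimal standalone sketch of that atomic-append pattern, assuming a local path and a hypothetical `append_jsonl_atomically` helper (not part of the file above):

```python
import json

import tensorflow as tf


def append_jsonl_atomically(path: str, record: dict) -> None:
    # Read any existing contents, rewrite them plus the new record into a
    # temporary file, then swap the temporary file into place in one rename.
    contents = ""
    if tf.io.gfile.exists(path):
        with tf.io.gfile.GFile(path, "r") as f:
            contents = f.read()
    with tf.io.gfile.GFile(path + ".tmp", "w") as f:
        f.write(contents)
        f.write(json.dumps(record) + "\n")
    tf.io.gfile.rename(path + ".tmp", path, overwrite=True)


# Hypothetical usage with made-up values:
append_jsonl_atomically("/tmp/task-metrics.jsonl", {"step": 100, "accuracy": 0.91})
```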
TTrapper/tensorflow | [
"64f0ebd33a7c868da3c8f1ea15adf358c578f227"
] | [
"tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.contrib.data.python.ops import dataset_ops\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.platform import test\n\n\nclass DatasetConstructorTest(test.TestCase):\n\n def testTensorDataset(self):\n \"\"\"Test an dataset that represents a single tuple of tensors.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n\n iterator = (dataset_ops.Dataset.from_tensors(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape for c in components],\n [t.shape for t in get_next])\n\n with self.test_session() as sess:\n sess.run(init_op)\n results = sess.run(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testTensorSliceDataset(self):\n \"\"\"Test an dataset that represents the slices from a tuple of tensors.\"\"\"\n components = (\n np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(\n np.array([[12], [13], [14], [15]]), 22),\n np.array([37.0, 38.0, 39.0, 40.0])\n )\n\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape[1:] for c in components],\n [t.shape for t in get_next])\n\n with self.test_session() as sess:\n sess.run(init_op)\n for i in range(4):\n results = sess.run(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component[i], result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testTensorSliceDatasetWithDict(self):\n components = {\"foo\": [1, 2, 3], \"bar\": [[4.0], [5.0], [6.0]]}\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual(dtypes.int32, iterator.output_types[\"foo\"])\n self.assertEqual(dtypes.float32, iterator.output_types[\"bar\"])\n 
self.assertEqual((), iterator.output_shapes[\"foo\"])\n self.assertEqual((1,), iterator.output_shapes[\"bar\"])\n\n with self.test_session() as sess:\n sess.run(init_op)\n for i in range(3):\n results = sess.run(get_next)\n self.assertEqual(components[\"foo\"][i], results[\"foo\"])\n self.assertEqual(components[\"bar\"][i], results[\"bar\"])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSparseTensorSliceDataset(self):\n \"\"\"Test a dataset based on slices of a `tf.SparseTensor`.\"\"\"\n st = array_ops.sparse_placeholder(dtypes.float64)\n iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = sparse_tensor.SparseTensor(*iterator.get_next())\n\n with self.test_session() as sess:\n slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]\n\n # Test with sparse tensor in the appropriate order.\n indices = np.array(\n [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])\n values = np.array([val for s in slices for val in s])\n dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])\n sparse_feed = sparse_tensor.SparseTensorValue(indices, values,\n dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n for i, s in enumerate(slices):\n results = sess.run(get_next)\n self.assertAllEqual(s, results.values)\n expected_indices = np.array(\n [[j] for j in range(len(slices[i]))]).reshape([-1, 1])\n self.assertAllEqual(expected_indices, results.indices)\n self.assertAllEqual(dense_shape[1:], results.dense_shape)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Test with sparse tensor in the reverse order, which is not\n # currently supported.\n reverse_order_indices = indices[::-1, :]\n reverse_order_values = values[::-1]\n sparse_feed = sparse_tensor.SparseTensorValue(\n reverse_order_indices, reverse_order_values, dense_shape)\n with self.assertRaises(errors.UnimplementedError):\n sess.run(init_op, feed_dict={st: sparse_feed})\n\n # Test with an empty sparse tensor.\n empty_indices = np.empty((0, 4), dtype=np.int64)\n empty_values = np.empty((0,), dtype=np.float64)\n empty_dense_shape = [0, 4, 37, 9]\n sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,\n empty_dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # pylint: disable=g-long-lambda,unnecessary-lambda\n def testNestedStructure(self):\n components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),\n np.array([8, 9, 10]))\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.shuffle(10, 10)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.repeat(-1)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.filter(lambda x, y, z: True)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.take(5)\n 
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),\n (y[0], y[1])))\n )\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.batch(32)\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),\n nest.pack_sequence_as(dataset.output_shapes, [\n s.as_list()\n for s in nest.flatten(dataset.output_shapes)\n ]))\n\n iterator = dataset.make_one_shot_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n iterator = dataset.make_initializable_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n # Define a separate set of components with matching leading\n # dimension for the from-slices constructor.\n components_for_slices = (np.array([1, 2, 3]), (np.array(\n [4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([], ([], []), []), dataset.output_shapes)\n\n def testNestedDict(self):\n components = {\"a\": {\"aa\": 1, \"ab\": [2.0, 2.0]}, \"b\": [3, 3, 3]}\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int32, dataset.output_types[\"a\"][\"aa\"])\n self.assertEquals(dtypes.float32, dataset.output_types[\"a\"][\"ab\"])\n self.assertEquals(dtypes.int32, dataset.output_types[\"b\"])\n self.assertEquals([], dataset.output_shapes[\"a\"][\"aa\"])\n self.assertEquals([2], dataset.output_shapes[\"a\"][\"ab\"])\n self.assertEquals([3], dataset.output_shapes[\"b\"])\n\n def testNonSequenceNestedStructure(self):\n components = np.array([1, 2, 3])\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.filter(\n lambda x: math_ops.reduce_all(math_ops.equal(x, components)))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.map(lambda x: array_ops.stack([x, x]))\n self.assertEquals(dtypes.int64, 
dataset.output_types)\n self.assertEquals([2, 3], dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x: dataset_ops.Dataset.from_tensor_slices(x))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n iterator = dataset.make_one_shot_iterator()\n get_next = iterator.get_next()\n self.assertEquals(dtypes.int64, get_next.dtype)\n self.assertEquals([3], get_next.shape)\n\n def _testFromGenerator(self, generator, elem_sequence, num_repeats):\n iterator = (\n dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)\n .repeat(num_repeats)\n .prefetch(5)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n for _ in range(2): # Run twice to test reinitialization.\n sess.run(init_op)\n for _ in range(num_repeats):\n for elem in elem_sequence:\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats):\n iterator = (\n dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)\n .repeat(num_repeats)\n .prefetch(5)\n .make_one_shot_iterator())\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n for _ in range(num_repeats):\n for elem in elem_sequence:\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorUsingFunction(self):\n def generator():\n for i in range(1, 100):\n yield [i] * i\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n self._testFromGeneratorOneShot(generator, elem_sequence, 1)\n self._testFromGeneratorOneShot(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingList(self):\n generator = lambda: [[i] * i for i in range(1, 100)]\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingNdarray(self):\n generator = lambda: np.arange(100, dtype=np.int64)\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingGeneratorExpression(self):\n # NOTE(mrry): Generator *expressions* are not repeatable (or in\n # general reusable), because they eagerly evaluate the `for`\n # expression as `iter(range(1, 100))` and discard the means of\n # reconstructing `range(1, 100)`. Wrapping the generator\n # expression in a `lambda` makes it repeatable.\n generator = lambda: ([i] * i for i in range(1, 100))\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromMultipleConcurrentGenerators(self):\n num_inner_repeats = 5\n num_outer_repeats = 100\n\n def generator():\n for i in range(1, 10):\n yield ([i] * i, [i, i ** 2, i ** 3])\n input_list = list(generator())\n\n # The interleave transformation is essentially a flat map that\n # draws from multiple input datasets concurrently (in a cyclic\n # fashion). 
By placing `Datsaet.from_generator()` inside an\n # interleave, we test its behavior when multiple iterators are\n # active at the same time; by additionally prefetching inside the\n # interleave, we create the possibility of parallel (modulo GIL)\n # invocations to several iterators created by the same dataset.\n def interleave_fn(_):\n return (dataset_ops.Dataset.from_generator(\n generator, output_types=(dtypes.int64, dtypes.int64),\n output_shapes=([None], [3]))\n .repeat(num_inner_repeats).prefetch(5))\n\n iterator = (\n dataset_ops.Dataset.range(num_outer_repeats)\n .interleave(interleave_fn, cycle_length=10,\n block_length=len(input_list))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(num_inner_repeats * num_outer_repeats):\n for elem in input_list:\n val0, val1 = sess.run(get_next)\n self.assertAllEqual(elem[0], val0)\n self.assertAllEqual(elem[1], val1)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorsRunningInParallel(self):\n num_parallel_iterators = 3\n\n # Define shared state that multiple iterator instances will access to\n # demonstrate their concurrent activity.\n lock = threading.Lock()\n condition = threading.Condition(lock)\n next_ticket = [0] # GUARDED_BY(lock)\n\n def generator():\n # NOTE(mrry): We yield one element before the barrier, because\n # the current implementation of `Dataset.interleave()` must\n # fetch one element from each incoming dataset to start the\n # prefetching.\n yield 0\n\n # Define a barrier that `num_parallel_iterators` iterators must enter\n # before any can proceed. Demonstrates that multiple iterators may be\n # active at the same time.\n condition.acquire()\n ticket = next_ticket[0]\n next_ticket[0] += 1\n if ticket == num_parallel_iterators - 1:\n # The last iterator to join the barrier notifies the others.\n condition.notify_all()\n else:\n # Wait until the last iterator enters the barrier.\n while next_ticket[0] < num_parallel_iterators:\n condition.wait()\n condition.release()\n\n yield 1\n\n # As in `testFromMultipleConcurrentGenerators()`, we use a combination of\n # `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple\n # iterators to be active concurrently.\n def interleave_fn(_):\n return dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)\n\n iterator = (\n dataset_ops.Dataset.range(num_parallel_iterators)\n .interleave(\n interleave_fn, cycle_length=num_parallel_iterators, block_length=1)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n for elem in [0, 1]:\n for _ in range(num_parallel_iterators):\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorTypeError(self):\n def generator():\n yield np.array([1, 2, 3], dtype=np.int64)\n yield np.array([4, 5, 6], dtype=np.int64)\n yield \"ERROR\"\n yield np.array([7, 8, 9], dtype=np.int64)\n\n iterator = (dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[3])\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n self.assertAllEqual([1, 2, 3], sess.run(get_next))\n self.assertAllEqual([4, 5, 6], 
sess.run(get_next))\n with self.assertRaisesOpError(r\"element of type .*int64.* was expected\"):\n sess.run(get_next)\n self.assertAllEqual([7, 8, 9], sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorShapeError(self):\n def generator():\n yield np.array([1, 2, 3], dtype=np.int64)\n yield np.array([4, 5, 6], dtype=np.int64)\n yield np.array([7, 8, 9, 10], dtype=np.int64)\n yield np.array([11, 12, 13], dtype=np.int64)\n\n iterator = (dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[3])\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n self.assertAllEqual([1, 2, 3], sess.run(get_next))\n self.assertAllEqual([4, 5, 6], sess.run(get_next))\n with self.assertRaisesOpError(r\"element of shape \\(3,\\) was expected\"):\n sess.run(get_next)\n self.assertAllEqual([11, 12, 13], sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSplitPipelineFailsWithPlacementError(self):\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n\n dataset = dataset_ops.Dataset.from_tensors(0)\n\n # Define a pipeline that attempts to use variables on two\n # different devices.\n #\n # Initialize the variables before creating to iterator, to avoid the\n # placement algorithm overriding the DT_RESOURCE colocation constraints.\n with ops.device(\"/cpu:0\"):\n var_0 = resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_0.read_value())\n sess.run(var_0.initializer)\n\n with ops.device(\"/cpu:1\"):\n var_1 = resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_1.read_value())\n sess.run(var_1.initializer)\n\n iterator = dataset.make_initializable_iterator()\n\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n \"Trying to access resource located in device\"):\n sess.run(iterator.initializer)\n\n def testRestructureDataset(self):\n components = (array_ops.placeholder(dtypes.int32),\n (array_ops.placeholder(dtypes.int32, shape=[None]),\n array_ops.placeholder(dtypes.int32, shape=[20, 30])))\n dataset = dataset_ops.Dataset.from_tensors(components)\n\n i32 = dtypes.int32\n\n test_cases = [((i32, i32, i32), None),\n (((i32, i32), i32), None),\n ((i32, i32, i32), (None, None, None)),\n ((i32, i32, i32), ([17], [17], [20, 30]))]\n\n for new_types, new_shape_lists in test_cases:\n # pylint: disable=protected-access\n new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)\n # pylint: enable=protected-access\n self.assertEqual(new_types, new.output_types)\n if new_shape_lists is not None:\n for expected_shape_list, shape in zip(\n nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):\n if expected_shape_list is None:\n self.assertIs(None, shape.ndims)\n else:\n self.assertEqual(expected_shape_list, shape.as_list())\n\n fail_cases = [((i32, dtypes.int64, i32), None),\n ((i32, i32, i32, i32), None),\n ((i32, i32, i32), ((None, None), None)),\n ((i32, i32, i32), (None, None, None, None)),\n ((i32, i32, i32), (None, [None], [21, 30]))]\n\n for new_types, new_shape_lists in fail_cases:\n with self.assertRaises(ValueError):\n # pylint: disable=protected-access\n new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)\n # pylint: enable=protected-access\n\n\nif __name__ == 
\"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.data.util.nest.flatten",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.range",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_sparse_tensor_slices",
"tensorflow.contrib.data.python.ops.batching._RestructuredDataset",
"tensorflow.python.framework.ops.device",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.ops.array_ops.sparse_placeholder",
"numpy.arange",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"numpy.empty",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.ops.array_ops.stack",
"numpy.array",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_generator"
]
] |
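The test file above notes that generator expressions are not repeatable and must be wrapped in a callable before being handed to `Dataset.from_generator()`. A minimal sketch of the same idea against the current `tf.data` API (rather than the `tensorflow.contrib.data` module the test exercises), assuming TensorFlow 2.x eager execution:

```python
import numpy as np
import tensorflow as tf


def generator():
    # A generator *function* (not a generator expression) is repeatable,
    # which is the point made in the test's comment.
    for i in range(1, 5):
        yield np.array([i] * i, dtype=np.int64)


dataset = tf.data.Dataset.from_generator(
    generator,
    output_signature=tf.TensorSpec(shape=(None,), dtype=tf.int64))

# The dataset can be iterated (and repeated) as many times as needed.
for element in dataset.repeat(2):
    print(element.numpy())
```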
YWJae/CS234-Reinforcement-Learning-Winter-2019 | [
"aa95a42b847a0e752b8caaa7b0bfeffb514ab7d3"
] | [
"assignment 3/pg.py"
] | [
"# -*- coding: UTF-8 -*-\n\nimport os\nimport argparse\nimport sys\nimport logging\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport scipy.signal\nimport os\nimport time\nimport inspect\nfrom utils.general import get_logger, Progbar, export_plot\nfrom config import get_config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--env_name', required=True, type=str,\n choices=['cartpole', 'pendulum', 'cheetah'])\nparser.add_argument('--baseline', dest='use_baseline', action='store_true')\nparser.add_argument('--no-baseline', dest='use_baseline', action='store_false')\nparser.set_defaults(use_baseline=True)\n\ndef build_mlp(\n mlp_input,\n output_size,\n scope,\n n_layers,\n size,\n output_activation=None):\n \"\"\"\n Build a feed forward network (multi-layer perceptron, or mlp)\n with 'n_layers' hidden layers, each of size 'size' units.\n Use tf.nn.relu nonlinearity between layers.\n Args:\n mlp_input: the input to the multi-layer perceptron\n output_size: the output layer size\n scope: the scope of the neural network\n n_layers: the number of hidden layers of the network\n size: the size of each layer:\n output_activation: the activation of output layer\n Returns:\n The tensor output of the network\n\n TODO: Implement this function. This will be similar to the linear\n model you implemented for Assignment 2.\n \"tf.layers.dense\" and \"tf.variable_scope\" may be helpful.\n\n A network with n hidden layers has n 'linear transform + nonlinearity'\n operations followed by the final linear transform for the output layer\n (followed by the output activation, if it is not None).\n \"\"\"\n\n #######################################################\n ######### YOUR CODE HERE - 7-20 lines. ############\n with tf.variable_scope(scope) as _:\n x = mlp_input\n for _ in range(n_layers):\n x = tf.keras.layers.Dense(size, activation=tf.nn.relu)(x)\n output = tf.keras.layers.Dense(output_size, activation=output_activation)(x)\n return output # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n\nclass PG(object):\n \"\"\"\n Abstract Class for implementing a Policy Gradient Based Algorithm\n \"\"\"\n def __init__(self, env, config, logger=None):\n \"\"\"\n Initialize Policy Gradient Class\n\n Args:\n env: an OpenAI Gym environment\n config: class with hyperparameters\n logger: logger instance from the logging module\n\n You do not need to implement anything in this function. 
However,\n you will need to use self.discrete, self.observation_dim,\n self.action_dim, and self.lr in other methods.\n\n \"\"\"\n # directory for training outputs\n if not os.path.exists(config.output_path):\n os.makedirs(config.output_path)\n\n # store hyperparameters\n self.config = config\n self.logger = logger\n if logger is None:\n self.logger = get_logger(config.log_path)\n self.env = env\n\n # discrete vs continuous action space\n self.discrete = isinstance(env.action_space, gym.spaces.Discrete)\n self.observation_dim = self.env.observation_space.shape[0]\n self.action_dim = self.env.action_space.n if self.discrete else self.env.action_space.shape[0]\n\n self.lr = self.config.learning_rate\n\n # build model\n self.build()\n\n def add_placeholders_op(self):\n \"\"\"\n Add placeholders for observation, action, and advantage:\n self.observation_placeholder, type: tf.float32\n self.action_placeholder, type: depends on the self.discrete\n self.advantage_placeholder, type: tf.float32\n\n HINT: Check self.observation_dim and self.action_dim\n HINT: In the case of continuous action space, an action will be specified by\n 'self.action_dim' float32 numbers (i.e. a vector with size 'self.action_dim')\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 8-12 lines. ############\n self.observation_placeholder = tf.placeholder(tf.float32, shape=(None, self.observation_dim), name=\"observation\") # TODO\n if self.discrete:\n # \"I don't know why their can't be (None, self.action_dim)\n self.action_placeholder = tf.placeholder(tf.int32, shape=(None,), name=\"action\") # TODO\n else:\n self.action_placeholder = tf.placeholder(tf.float32, shape=(None, self.action_dim), name=\"action\") # TODO\n\n # Define a placeholder for advantages\n self.advantage_placeholder = tf.placeholder(tf.float32, shape=(None,), name=\"advantage\") # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def build_policy_network_op(self, scope=\"policy_network\"):\n \"\"\"\n Build the policy network, construct the tensorflow operation to sample\n actions from the policy network outputs, and compute the log probabilities\n of the actions taken (for computing the loss later). These operations are\n stored in self.sampled_action and self.logprob. Must handle both settings\n of self.discrete.\n\n Args:\n scope: the scope of the neural network\n\n TODO:\n Discrete case:\n action_logits: the logits for each action\n HINT: use build_mlp, check self.config for layer_size and\n n_layers\n self.sampled_action: sample from these logits\n HINT: use tf.multinomial + tf.squeeze\n self.logprob: compute the log probabilities of the taken actions\n HINT: 1. tf.nn.sparse_softmax_cross_entropy_with_logits computes\n the *negative* log probabilities of labels, given logits.\n 2. 
taken actions are different than sampled actions!\n\n Continuous case:\n To build a policy in a continuous action space domain, we will have the\n model output the means of each action dimension, and then sample from\n a multivariate normal distribution with these means and trainable standard\n deviation.\n\n That is, the action a_t ~ N( mu(o_t), sigma)\n where mu(o_t) is the network that outputs the means for each action\n dimension, and sigma is a trainable variable for the standard deviations.\n N here is a multivariate gaussian distribution with the given parameters.\n\n action_means: the predicted means for each action dimension.\n HINT: use build_mlp, check self.config for layer_size and\n n_layers\n log_std: a trainable variable for the log standard deviations.\n HINT: think about why we use log std as the trainable variable instead of std\n HINT: use tf.get_variables\n self.sampled_actions: sample from the gaussian distribution as described above\n HINT: use tf.random_normal\n HINT: use re-parametrization to obtain N(mu, sigma) from N(0, 1)\n self.lobprob: the log probabilities of the taken actions\n HINT: use tf.contrib.distributions.MultivariateNormalDiag\n\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. ############\n\n if self.discrete:\n action_logits = build_mlp(self.observation_placeholder,\n output_size=self.action_dim,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size,\n output_activation=self.config.activation) # TODO\n self.sampled_action = tf.squeeze(tf.multinomial(action_logits, 1)) # TODO\n self.logprob = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.action_placeholder,\n logits=action_logits) # TODO\n else:\n action_means = build_mlp(self.observation_placeholder,\n output_size=self.action_dim,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size,\n output_activation=self.config.activation) # TODO\n log_std = tf.get_variable(\"log_std\", shape=[1, self.action_dim]) # TODO\n self.sampled_action = tf.random_normal((1,), mean=action_means, stddev=log_std) # TODO\n self.logprob = tf.contrib.distributions.MultivariateNormalDiag(action_means, log_std) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def add_loss_op(self):\n \"\"\"\n Compute the loss, averaged for a given batch.\n\n Recall the update for REINFORCE with advantage:\n θ = θ + α ∇_θ log π_θ(a_t|s_t) A_t\n Think about how to express this update as minimizing a\n loss (so that tensorflow will do the gradient computations\n for you).\n\n You only have to reference fields of 'self' that have already\n been set in the previous methods.\n\n \"\"\"\n\n ######################################################\n ######### YOUR CODE HERE - 1-2 lines. ############\n self.loss = tf.reduce_mean(-self.logprob * self.advantage_placeholder)# TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def add_optimizer_op(self):\n \"\"\"\n Set 'self.train_op' using AdamOptimizer\n HINT: Use self.lr, and minimize self.loss\n \"\"\"\n ######################################################\n ######### YOUR CODE HERE - 1-2 lines. ############\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss) # TODO\n #######################################################\n ######### END YOUR CODE. 
############\n\n def add_baseline_op(self, scope=\"baseline\"):\n \"\"\"\n Build the baseline network within the scope.\n\n In this function we will build the baseline network.\n Use build_mlp with the same parameters as the policy network to\n get the baseline estimate. You also have to setup a target\n placeholder and an update operation so the baseline can be trained.\n\n Args:\n scope: the scope of the baseline network\n\n TODO: Set the following fields\n self.baseline\n HINT: use build_mlp, the network is the same as policy network\n check self.config for n_layers and layer_size\n HINT: tf.squeeze might be helpful\n self.baseline_target_placeholder\n self.update_baseline_op\n HINT: first construct a loss using tf.losses.mean_squared_error.\n HINT: use AdamOptimizer with self.lr\n\n \"\"\"\n ######################################################\n ######### YOUR CODE HERE - 4-8 lines. ############\n self.baseline = build_mlp(self.observation_placeholder,\n output_size=1,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size) # TODO\n self.baseline_target_placeholder = tf.placeholder(tf.float32, shape=(None, )) # TODO\n loss = tf.losses.mean_squared_error(self.baseline_target_placeholder, tf.squeeze(self.baseline))\n self.update_baseline_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def build(self):\n \"\"\"\n Build the model by adding all necessary variables.\n\n You don't have to change anything here - we are just calling\n all the operations you already defined above to build the tensorflow graph.\n \"\"\"\n\n # add placeholders\n self.add_placeholders_op()\n # create policy net\n self.build_policy_network_op()\n # add square loss\n self.add_loss_op()\n # add optmizer for the main networks\n self.add_optimizer_op()\n\n # add baseline\n if self.config.use_baseline:\n self.add_baseline_op()\n\n def initialize(self):\n \"\"\"\n Assumes the graph has been constructed (have called self.build())\n Creates a tf Session and run initializer of variables\n\n You don't have to change or use anything here.\n \"\"\"\n # create tf session\n self.sess = tf.Session()\n # tensorboard stuff\n self.add_summary()\n # initiliaze all variables\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n def add_summary(self):\n \"\"\"\n Tensorboard stuff.\n\n You don't have to change or use anything here.\n \"\"\"\n # extra placeholders to log stuff from python\n self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"avg_reward\")\n self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"max_reward\")\n self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"std_reward\")\n\n self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"eval_reward\")\n\n # extra summaries from python -> placeholders\n tf.summary.scalar(\"Avg Reward\", self.avg_reward_placeholder)\n tf.summary.scalar(\"Max Reward\", self.max_reward_placeholder)\n tf.summary.scalar(\"Std Reward\", self.std_reward_placeholder)\n tf.summary.scalar(\"Eval Reward\", self.eval_reward_placeholder)\n\n # logging\n self.merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph)\n\n def init_averages(self):\n \"\"\"\n Defines extra attributes for tensorboard.\n\n You don't have to change or use anything here.\n \"\"\"\n self.avg_reward = 0.\n self.max_reward = 0.\n 
self.std_reward = 0.\n self.eval_reward = 0.\n\n def update_averages(self, rewards, scores_eval):\n \"\"\"\n Update the averages.\n\n You don't have to change or use anything here.\n\n Args:\n rewards: deque\n scores_eval: list\n \"\"\"\n self.avg_reward = np.mean(rewards)\n self.max_reward = np.max(rewards)\n self.std_reward = np.sqrt(np.var(rewards) / len(rewards))\n\n if len(scores_eval) > 0:\n self.eval_reward = scores_eval[-1]\n\n def record_summary(self, t):\n \"\"\"\n Add summary to tensorboard\n\n You don't have to change or use anything here.\n \"\"\"\n\n fd = {\n self.avg_reward_placeholder: self.avg_reward,\n self.max_reward_placeholder: self.max_reward,\n self.std_reward_placeholder: self.std_reward,\n self.eval_reward_placeholder: self.eval_reward,\n }\n summary = self.sess.run(self.merged, feed_dict=fd)\n # tensorboard stuff\n self.file_writer.add_summary(summary, t)\n\n def sample_path(self, env, num_episodes = None):\n \"\"\"\n Sample paths (trajectories) from the environment.\n\n Args:\n num_episodes: the number of episodes to be sampled\n if none, sample one batch (size indicated by config file)\n env: open AI Gym envinronment\n\n Returns:\n paths: a list of paths. Each path in paths is a dictionary with\n path[\"observation\"] a numpy array of ordered observations in the path\n path[\"actions\"] a numpy array of the corresponding actions in the path\n path[\"reward\"] a numpy array of the corresponding rewards in the path\n total_rewards: the sum of all rewards encountered during this \"path\"\n\n You do not have to implement anything in this function, but you will need to\n understand what it returns, and it is worthwhile to look over the code\n just so you understand how we are taking actions in the environment\n and generating batches to train on.\n \"\"\"\n episode = 0\n episode_rewards = []\n paths = []\n t = 0\n\n while (num_episodes or t < self.config.batch_size):\n state = env.reset()\n states, actions, rewards = [], [], []\n episode_reward = 0\n\n for step in range(self.config.max_ep_len):\n states.append(state)\n action = self.sess.run(self.sampled_action, feed_dict={self.observation_placeholder : state[None]})\n state, reward, done, info = env.step(action)\n actions.append(action)\n rewards.append(reward)\n episode_reward += reward\n t += 1\n if (done or step == self.config.max_ep_len-1):\n episode_rewards.append(episode_reward)\n break\n if (not num_episodes) and t == self.config.batch_size:\n break\n\n path = {\"observation\" : np.array(states),\n \"reward\" : np.array(rewards),\n \"action\" : np.array(actions)}\n paths.append(path)\n episode += 1\n if num_episodes and episode >= num_episodes:\n break\n\n return paths, episode_rewards\n\n def get_returns(self, paths):\n \"\"\"\n Calculate the returns G_t for each timestep\n\n Args:\n paths: recorded sample paths. See sample_path() for details.\n\n Return:\n returns: return G_t for each timestep\n\n After acting in the environment, we record the observations, actions, and\n rewards. To get the advantages that we need for the policy update, we have\n to convert the rewards into returns, G_t, which are themselves an estimate\n of Q^π (s_t, a_t):\n\n G_t = r_t + γ r_{t+1} + γ^2 r_{t+2} + ... + γ^{T-t} r_T\n\n where T is the last timestep of the episode.\n\n TODO: compute and return G_t for each timestep. Use self.config.gamma.\n \"\"\"\n\n all_returns = []\n for path in paths:\n rewards = path[\"reward\"]\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. 
############\n returns = np.zeros_like(rewards) # TODO\n returns[-1] = rewards[-1]\n for i in reversed(range(len(rewards) - 1)):\n returns[i] = rewards[i] + self.config.gamma * returns[i + 1]\n #######################################################\n ######### END YOUR CODE. ############\n all_returns.append(returns)\n returns = np.concatenate(all_returns)\n\n return returns\n\n def calculate_advantage(self, returns, observations):\n \"\"\"\n Calculate the advantage\n\n Args:\n returns: all discounted future returns for each step\n observations: observations\n Returns:\n adv: Advantage\n\n Calculate the advantages, using baseline adjustment if necessary,\n and normalizing the advantages if necessary.\n If neither of these options are True, just return returns.\n\n TODO:\n If config.use_baseline = False and config.normalize_advantage = False,\n then the \"advantage\" is just going to be the returns (and not actually\n an advantage).\n\n if config.use_baseline, then we need to evaluate the baseline and subtract\n it from the returns to get the advantage.\n HINT: evaluate the self.baseline with self.sess.run(...)\n\n if config.normalize_advantage:\n after doing the above, normalize the advantages so that they have a mean of 0\n and standard deviation of 1.\n \"\"\"\n adv = returns\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. ############\n # modified from https://github.com/arowdy98/Stanford-CS234/blob/master/assignment3/starter_code/pg.py\n if self.config.use_baseline:\n adv -= self.sess.run(self.baseline, feed_dict={self.observation_placeholder : observations}).squeeze()# TODO\n if self.config.normalize_advantage:\n adv = (adv - adv.mean()) / (adv.std() + 1e-12) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n return adv\n\n def update_baseline(self, returns, observations):\n \"\"\"\n Update the baseline from given returns and observation.\n\n Args:\n returns: Returns from get_returns\n observations: observations\n TODO:\n apply the baseline update op with the observations and the returns.\n HINT: Run self.update_baseline_op with self.sess.run(...)\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 1-5 lines. ############\n self.sess.run(self.update_baseline_op, feed_dict={self.baseline_target_placeholder:returns,\n self.observation_placeholder:observations}) # TODO\n #######################################################\n ######### END YOUR CODE. 
############\n\n def train(self):\n \"\"\"\n Performs training\n\n You do not have to change or use anything here, but take a look\n to see how all the code you've written fits together!\n \"\"\"\n last_eval = 0\n last_record = 0\n scores_eval = []\n\n self.init_averages()\n scores_eval = [] # list of scores computed at iteration time\n\n for t in range(self.config.num_batches):\n\n # collect a minibatch of samples\n paths, total_rewards = self.sample_path(self.env)\n scores_eval = scores_eval + total_rewards\n observations = np.concatenate([path[\"observation\"] for path in paths])\n actions = np.concatenate([path[\"action\"] for path in paths])\n rewards = np.concatenate([path[\"reward\"] for path in paths])\n # compute Q-val estimates (discounted future returns) for each time step\n returns = self.get_returns(paths)\n advantages = self.calculate_advantage(returns, observations)\n\n # run training operations\n if self.config.use_baseline:\n self.update_baseline(returns, observations)\n self.sess.run(self.train_op, feed_dict={\n self.observation_placeholder : observations,\n self.action_placeholder : actions,\n self.advantage_placeholder : advantages})\n\n # tf stuff\n if (t % self.config.summary_freq == 0):\n self.update_averages(total_rewards, scores_eval)\n self.record_summary(t)\n\n # compute reward statistics for this batch and log\n avg_reward = np.mean(total_rewards)\n sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n self.logger.info(msg)\n\n if self.config.record and (t % self.config.record_freq == 0):\n self.logger.info(\"Recording...\")\n # last_record = 0\n self.record()\n\n self.logger.info(\"- Training done.\")\n export_plot(scores_eval, \"Score\", config.env_name, self.config.plot_output)\n\n def evaluate(self, env=None, num_episodes=1):\n \"\"\"\n Evaluates the return for num_episodes episodes.\n Not used right now, all evaluation statistics are computed during training\n episodes.\n \"\"\"\n if env==None: env = self.env\n paths, rewards = self.sample_path(env, num_episodes)\n avg_reward = np.mean(rewards)\n sigma_reward = np.sqrt(np.var(rewards) / len(rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n self.logger.info(msg)\n return avg_reward\n\n def record(self):\n \"\"\"\n Recreate an env and record a video for one episode\n \"\"\"\n env = gym.make(self.config.env_name)\n env = gym.wrappers.Monitor(env, self.config.record_path, video_callable=lambda x: True, resume=True)\n self.evaluate(env, 1)\n\n def run(self):\n \"\"\"\n Apply procedures of training for a PG.\n \"\"\"\n # initialize\n self.initialize()\n # record one game at the beginning\n if self.config.record:\n self.record()\n # model\n self.train()\n # record one game at the end\n if self.config.record:\n self.record()\n\nif __name__ == '__main__':\n # args = parser.parse_args()\n # config = get_config(args.env_name, args.use_baseline)\n config = get_config('cartpole', True)\n env = gym.make(config.env_name)\n # train model\n model = PG(env, config)\n model.run()\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.contrib.distributions.MultivariateNormalDiag",
"numpy.var",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.summary.FileWriter",
"tensorflow.random_normal",
"tensorflow.multinomial",
"tensorflow.global_variables_initializer",
"tensorflow.keras.layers.Dense",
"numpy.mean",
"numpy.max",
"tensorflow.Session",
"numpy.zeros_like",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"numpy.array",
"numpy.concatenate",
"tensorflow.get_variable"
]
] |
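The `get_returns` method in `pg.py` above computes the discounted return G_t = r_t + γ r_{t+1} + ... + γ^{T-t} r_T by iterating backwards over each episode. A minimal NumPy-only sketch of that recurrence, using a hypothetical `discounted_returns` helper and made-up reward values:

```python
import numpy as np


def discounted_returns(rewards, gamma=0.99):
    # G_t = r_t + gamma * G_{t+1}, accumulated backwards over one episode.
    returns = np.zeros(len(rewards), dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns


# Three steps of reward 1.0 with gamma = 0.5 give [1.75, 1.5, 1.0].
print(discounted_returns(np.array([1.0, 1.0, 1.0]), gamma=0.5))
```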
abhijeetdhupia/WCE-Classification | [
"043805fe54d14ef3d24735375df1f387c62e7896"
] | [
"utils.py"
] | [
"import torch \n\ndef calculate_topk_accuracy(y_pred, y, k = 4):\n with torch.no_grad():\n batch_size = y.shape[0]\n _, top_pred = y_pred.topk(k, 1)\n top_pred = top_pred.t()\n correct = top_pred.eq(y.view(1, -1).expand_as(top_pred))\n correct_1 = correct[:1].reshape(-1).float().sum(0, keepdim = True)\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim = True)\n acc_1 = correct_1 / batch_size\n acc_k = correct_k / batch_size\n return acc_1, acc_k\n\n\ndef evaluate(model, iterator, criterion, device):\n \n epoch_loss = 0\n epoch_acc_1 = 0\n epoch_acc_5 = 0\n \n model.eval()\n \n with torch.no_grad():\n \n for (x, y) in iterator:\n\n x = x.to(device)\n y = y.to(device)\n\n y_pred, _ = model(x)\n\n loss = criterion(y_pred, y)\n\n acc_1, acc_5 = calculate_topk_accuracy(y_pred, y)\n\n epoch_loss += loss.item()\n epoch_acc_1 += acc_1.item()\n epoch_acc_5 += acc_5.item()\n \n epoch_loss /= len(iterator)\n epoch_acc_1 /= len(iterator)\n epoch_acc_5 /= len(iterator)\n \n return epoch_loss, epoch_acc_1, epoch_acc_5\n\n#Normalization \ndef normalize_image(image):\n image_min = image.min()\n image_max = image.max()\n image.clamp_(min = image_min, max = image_max)\n image.add_(-image_min).div_(image_max - image_min + 1e-5)\n return image \n"
] | [
[
"torch.no_grad"
]
] |
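Note on the `utils.py` row above: `calculate_topk_accuracy` defaults to `k = 4` while `evaluate` stores the result under `acc_5`/`epoch_acc_5`, which is easy to misread, and `normalize_image` clamps a tensor to its own min/max, which is a no-op. A small sketch of the normalization with the dead clamp dropped (same min-max scaling, same epsilon):

```python
import torch

def normalize_image(image: torch.Tensor) -> torch.Tensor:
    # min-max scale to roughly [0, 1]; clamping to the tensor's own min/max adds nothing
    image_min = image.min()
    image_max = image.max()
    return (image - image_min) / (image_max - image_min + 1e-5)
```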
Anonymous633671/A-Comparison-on-Communication-and-Code-Dependency-Effects-on-Software-Code-Quality | [
"5a88f62513f9879178af3c5f763631b93e4f3054"
] | [
"src/main/git_log/buggy_commit.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 31 12:49:38 2018\n\n@author: suvod\n\"\"\"\n\nfrom main.git_log import git2repo\nfrom main.api import api_access\nimport pygit2\nimport re\nimport pandas as pd\nfrom datetime import datetime\nimport re, unicodedata\nfrom pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE\nimport os\nfrom main.utils.utils import utils\nimport platform\nimport threading\nfrom multiprocessing import Queue\nfrom threading import Thread\nimport numpy as np\nimport itertools\nimport pandas as pd\nimport itertools\nimport math\nfrom multiprocessing import Pool, cpu_count\nfrom os.path import dirname as up\n\n\nclass ThreadWithReturnValue(Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs={}, Verbose=None):\n Thread.__init__(self, group, target, name, args, kwargs)\n self._return = None\n def run(self):\n #print(type(self._target))\n if self._target is not None:\n self._return = self._target(*self._args,\n **self._kwargs)\n def join(self, *args):\n Thread.join(self, *args)\n return self._return\n\nclass buggy_commit_maker(object):\n \n \n def __init__(self,project_name,repo_url,repo_name):\n self.project_name = project_name\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n self.data_path = os.getcwd() + '/data/'\n else:\n self.data_path = os.getcwd() + '\\\\data\\\\'\n self.commit = self.read_files('commit')\n self.committed_files = self.read_files('committed_file')\n self.initilize_repo(repo_url,repo_name)\n self.cores = cpu_count()\n \n def initilize_repo(self,repo_url,repo_name):\n self.git_repo = git2repo.git2repo(repo_url,repo_name)\n self.repo = self.git_repo.clone_repo()\n \n \n def read_files(self,file_data):\n file_path = self.data_path + self.project_name + '_' + file_data + '.pkl'\n return pd.read_pickle(file_path)\n \n \n def isBuggyCommit(self, commit):\n res=re.search(r'\\b{bug|fix|issue|error|correct|proper|deprecat|broke|optimize|patch|solve|slow|obsolete|vulnerab|debug|perf|memory|minor|wart|better|complex|break|investigat|compile|defect|inconsist|crash|problem|resol|#}\\b',utils().stemming(commit),re.IGNORECASE)\n if res is not None:\n return True\n \n \n def buggy_commits(self,commits):\n for i in range(commits.shape[0]):\n result = self.isBuggyCommit(commits.loc[i,'message'])\n if result:\n commits.loc[i,'isBuggy'] = 1\n else:\n commits.loc[i,'isBuggy'] = 0\n return commits\n\n def get_buggy_commits(self):\n threads = []\n self.commit['isBuggy'] = pd.Series([0]*self.commit.shape[0])\n column_names = self.commit.columns.tolist()\n bug_fixed_commit = pd.DataFrame([], columns = column_names)\n commits_np = np.array_split(self.commit, self.cores)\n for i in range(self.cores):\n commits = pd.DataFrame(commits_np[i], columns = column_names)\n commits.reset_index(inplace = True, drop = True)\n t = ThreadWithReturnValue(target = self.buggy_commits, args = [commits])\n threads.append(t)\n for th in threads:\n th.start()\n for th in threads:\n response = th.join()\n bug_fixed_commit = pd.concat([bug_fixed_commit,response])\n bug_fixed_commit.reset_index(inplace = True, drop = True)\n self.commit = bug_fixed_commit\n \n# def get_buggy_commits(self):\n# self.commit['isBuggy'] = pd.Series([0]*self.commit.shape[0])\n# for i in range(self.commit.shape[0]):\n# result = self.isBuggyCommit(self.commit.loc[i,'message'])\n# if result:\n# self.commit.loc[i,'isBuggy'] = 1\n# else:\n# self.commit.loc[i,'isBuggy'] = 0\n \n\n def buggy_committer(self,buggy_diffs):\n bug_creator = []\n for value in buggy_diffs:\n 
_diff_files = buggy_diffs[value]['files']\n self.repo.head.set_target(buggy_diffs[value]['object'].parent_ids[0])\n for _value in _diff_files:\n try:\n file_path = _diff_files[_value]['file_path']\n blame = self.git_repo.get_blame(file_path)\n for _line in _diff_files[_value]['old_lines']:\n if _line != -1:\n ref = blame.for_line(_line)\n #print(_value,ref.final_committer.name)\n bug_creator.append([ref.final_committer.name, ref.orig_commit_id, 1])\n except:\n continue\n bug_creator_df = pd.DataFrame(bug_creator, columns = ['committer','commit','ob'])\n bug_creator_df = bug_creator_df.drop_duplicates()\n return bug_creator_df\n \n \n def get_buggy_committer(self):\n threads = []\n df = pd.DataFrame([])\n # To-Do this is to saperate the data into small chunks from get_diff that is the dict\n buggy_commit_df = self.commit[self.commit['isBuggy'] == 1]\n buggy_diffs = self.git_repo.get_diffs(buggy_commit_df['commit_number'].values.tolist())\n keys = list(buggy_diffs.keys())\n len_bd = len(buggy_diffs)\n sub_list_len = len_bd/self.cores\n for i in range(self.cores):\n sub_keys = keys[int(i*sub_list_len):int((i+1)*sub_list_len)]\n subdict = {x: buggy_diffs[x] for x in sub_keys if x in buggy_diffs}\n t = ThreadWithReturnValue(target = self.buggy_committer, args = [subdict])\n threads.append(t)\n for i in range(0,len(threads),self.cores):\n _threads = threads[i:i+self.cores]\n for th in _threads:\n th.start()\n for th in _threads:\n response = th.join()\n df = pd.concat([df,response])\n df.reset_index(inplace = True, drop = True)\n df.drop_duplicates(inplace = True)\n df = df.groupby( ['committer']).count()\n defect_count = []\n for key,value in df.iterrows():\n user = key\n count = value.values.tolist()[0]\n defect_count.append([user,count])\n return defect_count\n \n# def get_buggy_committer(self):\n# buggy_commit_df = self.commit[self.commit['isBuggy'] == 1]\n# buggy_diffs = self.git_repo.get_diffs(buggy_commit_df['commit_number'].values.tolist())\n# bug_creator = []\n# for value in buggy_diffs:\n# _diff_files = buggy_diffs[value]['files']\n# self.repo.head.set_target(buggy_diffs[value]['object'].parent_ids[0])\n# for _value in _diff_files:\n# try:\n# file_path = _diff_files[_value]['file_path']\n# blame = self.git_repo.get_blame(file_path)\n# for _line in _diff_files[_value]['old_lines']:\n# if _line != -1:\n# ref = blame.for_line(_line)\n# print(_value,ref.final_committer.name)\n# bug_creator.append([ref.final_committer.name, ref.orig_commit_id, 1])\n# except:\n# continue\n# bug_creator_df = pd.DataFrame(bug_creator, columns = ['committer','commit','ob'])\n# bug_creator_df = bug_creator_df.drop_duplicates()\n# df = bug_creator_df.groupby( ['committer']).count()\n# defect_count = []\n# for key,value in df.iterrows():\n# user = key\n# count = value.values.tolist()[0]\n# defect_count.append([user,count])\n# return defect_count\n \n def get_commit_count(self):\n committer_count = []\n for i in range(self.commit.shape[0]):\n commit_id = self.commit.loc[i,'commit_number']\n user = self.repo.get(commit_id).committer\n committer_count.append([user.name, commit_id, 1])\n committer_count_df = pd.DataFrame(committer_count, columns = ['committer', 'commit_id', 'ob'])\n committer_count_df = committer_count_df.drop_duplicates()\n df = committer_count_df.groupby( ['committer']).count()\n commit_count = []\n for key,value in df.iterrows():\n user = key\n count = value.values.tolist()[0]\n commit_count.append([user,count])\n return commit_count\n \n\n "
] | [
[
"pandas.read_pickle",
"pandas.Series",
"pandas.DataFrame",
"numpy.array_split",
"pandas.concat"
]
] |
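Note on the `buggy_commit.py` row above: in `isBuggyCommit`, the braces in `r'\b{bug|fix|...|#}\b'` are literal characters, not a group, so the alternation splits around them: the `\b` anchors attach only to the first (`{bug`) and last (`#}`) alternatives, which now require literal braces, while every other stem matches anywhere in the message (e.g. `fix` inside `prefix`). A sketch of the presumably intended non-capturing group with a leading word boundary; the stems are assumed to match as prefixes, and the literal `#` token is left out here:

```python
import re

BUG_STEMS = (
    "bug|fix|issue|error|correct|proper|deprecat|broke|optimize|patch|solve|"
    "slow|obsolete|vulnerab|debug|perf|memory|minor|wart|better|complex|break|"
    "investigat|compile|defect|inconsist|crash|problem|resol"
)
BUG_PATTERN = re.compile(r"\b(?:%s)" % BUG_STEMS, re.IGNORECASE)

def is_buggy_commit(message: str) -> bool:
    # explicit bool instead of the True/None return in the row above
    return BUG_PATTERN.search(message) is not None
```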
pjain310/scRNAseq_Cell_Classification | [
"46d73ff257eef9974e1e425a52b30b61e96e3ca4"
] | [
"Scripts/run_parallel_VC.py"
] | [
"import os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time as tm\r\nfrom joblib import Parallel, delayed\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom imblearn.combine import SMOTEENN\r\nimport rpy2.robjects as robjects\r\n\r\ndef CV_Classifier(cv_fold):\r\n test_ind_i = np.array(test_ind[cv_fold], dtype='int') - 1\r\n train_ind_i = np.array(train_ind[cv_fold], dtype='int') - 1\r\n\r\n train = data.iloc[train_ind_i]\r\n test = data.iloc[test_ind_i]\r\n y_train = labels.iloc[train_ind_i]\r\n y_test = labels.iloc[test_ind_i]\r\n\r\n #Feature selection\r\n if (NumGenes > 0):\r\n feat_to_use = features.iloc[0:NumGenes, cv_fold]\r\n train = train.iloc[:, feat_to_use]\r\n test = test.iloc[:, feat_to_use]\r\n\r\n print(\"Runnning SMOTE\", cv_fold)\r\n\r\n #Imbalance removal using Smote-ENN \r\n #smt = SMOTEENN(ratio='auto', random_state=42, n_jobs = -1)\r\n #train, y_train = smt.fit_resample(train_unsampled, y_train_unsampled.values.ravel())\r\n\r\n print(\"Ran SMOTE\", cv_fold)\r\n\r\n start = tm.time()\r\n print(\"Fitting\")\r\n Classifier.fit(train, y_train.values.ravel())\r\n tr_time.append(tm.time()-start)\r\n\r\n start = tm.time()\r\n print(\"Testing\")\r\n predicted = Classifier.predict(test)\r\n ts_time.append(tm.time()-start)\r\n\r\n truelab.extend(y_test.values)\r\n pred.extend(predicted)\r\n\r\n\r\ndef run_SVM(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath = \"\", NumGenes = 0):\r\n '''\r\n run baseline classifier: SVM\r\n Wrapper script to run an SVM classifier with a linear kernel on a benchmark dataset with 5-fold cross validation,\r\n outputs lists of true and predicted cell labels as csv files, as well as computation time.\r\n\r\n Parameters\r\n ----------\r\n DataPath : Data file path (.csv), cells-genes matrix with cell unique barcodes\r\n as row names and gene names as column names.\r\n LabelsPath : Cell population annotations file path (.csv).\r\n CV_RDataPath : Cross validation RData file path (.RData), obtained from Cross_Validation.R function.\r\n OutputDir : Output directory defining the path of the exported file.\r\n GeneOrderPath : Gene order file path (.csv) obtained from feature selection,\r\n defining the genes order for each cross validation fold, default is NULL.\r\n NumGenes : Number of genes used in case of feature selection (integer), default is 0.\r\n '''\r\n\r\n # read the Rdata file\r\n robjects.r['load'](CV_RDataPath)\r\n\r\n nfolds = np.array(robjects.r['n_folds'], dtype = 'int')\r\n tokeep = np.array(robjects.r['Cells_to_Keep'], dtype = 'bool')\r\n col = np.array(robjects.r['col_Index'], dtype = 'int')\r\n col = col - 1\r\n test_ind = np.array(robjects.r['Test_Idx'])\r\n train_ind = np.array(robjects.r['Train_Idx'])\r\n\r\n # read the data\r\n print(\"readind data\")\r\n data = pd.read_csv(DataPath,index_col=0,sep=',')\r\n labels = pd.read_csv(LabelsPath, header=0,index_col=None, sep=',', usecols = col)\r\n print(\"done\")\r\n\r\n\r\n labels = labels.iloc[tokeep]\r\n data = data.iloc[tokeep]\r\n\r\n # read the feature file\r\n if (NumGenes > 0):\r\n features = pd.read_csv(GeneOrderPath,header=0,index_col=None, sep=',')\r\n\r\n # folder with results\r\n os.chdir(OutputDir)\r\n\r\n # normalize data\r\n data = np.log1p(data)\r\n\r\n svm = AdaBoostClassifier(base_estimator=LinearSVC(),n_estimators=50, 
algorithm='SAMME')\r\n RF = RandomForestClassifier(n_estimators=50)\r\n LDA = LinearDiscriminantAnalysis()\r\n\r\n Classifier = VotingClassifier(estimators = [('Support Vector',svm),('Random Forest',RF),('Linear Discriminant',LDA)],n_jobs = -1,weights=[0.45,0.25,0.3])\r\n\r\n\r\n tr_time=[]\r\n ts_time=[]\r\n truelab = []\r\n pred = []\r\n\r\n Parallel(n_jobs=4)(delayed(CV_Classifier(i) for i in range(np.squeeze(nfolds))))\r\n\r\n truelab = pd.DataFrame(truelab)\r\n pred = pd.DataFrame(pred)\r\n\r\n tr_time = pd.DataFrame(tr_time)\r\n ts_time = pd.DataFrame(ts_time)\r\n\r\n if (NumGenes == 0):\r\n truelab.to_csv(\"VC_parallel_True_Labels.csv\", index = False)\r\n pred.to_csv(\"VC_parallel_Pred_Labels.csv\", index = False)\r\n tr_time.to_csv(\"VC_parallel_Training_Time.csv\", index = False)\r\n ts_time.to_csv(\"VC_parallel_Testing_Time.csv\", index = False)\r\n else:\r\n truelab.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_True_Labels.csv\", index = False)\r\n pred.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Pred_Labels.csv\", index = False)\r\n tr_time.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Training_Time.csv\", index = False)\r\n ts_time.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Testing_Time.csv\", index = False)\r\n\r\n\r\nLabelsPath = \"~/Desktop/scRNA_Cell_Typing/scRNAseq_Benchmark_datasets/Zheng/Labels.csv\"\r\nDataPath = \"~/Desktop/scRNA_Cell_Typing/scRNAseq_Benchmark_datasets/Zheng/Filtered_68K_PBMC_data.csv\"\r\nCV_RDataPath = \"~/Desktop/scRNA_Cell_Typing/CV_folds.RData\"\r\nGeneOrderPath = \"~/Desktop/scRNA_Cell_Typing/results/og_SVM_results/rank_genes_dropouts.csv\"\r\nOutputDir = \"results/top5000_VotingRegressor_results\"\r\n\r\nrun_SVM(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath, 5000)\r\n"
] | [
[
"numpy.log1p",
"numpy.squeeze",
"pandas.read_csv",
"sklearn.svm.LinearSVC",
"pandas.DataFrame",
"sklearn.ensemble.VotingClassifier",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
]
] |
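Note on the `run_parallel_VC.py` row above: `Parallel(n_jobs=4)(delayed(CV_Classifier(i) for i in range(np.squeeze(nfolds))))` hands `delayed` a generator expression rather than the function, and `CV_Classifier` appends to module-level lists that worker processes never share back while reading `data`, `labels`, `Classifier`, etc. that are local to `run_SVM`. A minimal sketch of the usual joblib idiom, using a hypothetical `cv_fold_worker` that takes its inputs explicitly and returns its results:

```python
import numpy as np
from joblib import Parallel, delayed

def cv_fold_worker(cv_fold, data, labels, train_ind, test_ind, classifier):
    # hypothetical, argument-passing version of CV_Classifier from the row above
    train_i = np.array(train_ind[cv_fold], dtype="int") - 1
    test_i = np.array(test_ind[cv_fold], dtype="int") - 1
    classifier.fit(data.iloc[train_i], labels.iloc[train_i].values.ravel())
    predicted = classifier.predict(data.iloc[test_i])
    return labels.iloc[test_i].values, predicted

# note the call shape: delayed(func)(args), one delayed call per fold
# results = Parallel(n_jobs=4)(
#     delayed(cv_fold_worker)(i, data, labels, train_ind, test_ind, Classifier)
#     for i in range(int(np.squeeze(nfolds))))
```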
jdfekete/progressivis | [
"3bc79ce229cd628ef0aa4663136a674743697b47"
] | [
"tests/test_03_csv_crash.py"
] | [
"from . import ProgressiveTest, skip, skipIf\nfrom progressivis.io import CSVLoader\nfrom progressivis.table.constant import Constant\nfrom progressivis.table.table import Table\nfrom progressivis.datasets import (get_dataset, get_dataset_bz2,\n get_dataset_gz,\n get_dataset_lzma, DATA_DIR)\nfrom progressivis.core.utils import RandomBytesIO\nfrom progressivis.stats.counter import Counter\nfrom progressivis.storage import IS_PERSISTENT\nfrom progressivis.storage import cleanup_temp_dir, init_temp_dir_if\nfrom progressivis.core import aio\n#import logging, sys\nfrom multiprocessing import Process\nimport time, os\nimport requests\nfrom requests.packages.urllib3.exceptions import ReadTimeoutError\nfrom requests.exceptions import ConnectionError\n\n\nfrom RangeHTTPServer import RangeRequestHandler\nimport shutil\nimport numpy as np\nimport pandas as pd\n\nimport http.server as http_srv\n\nBZ2 = 'csv.bz2'\nGZ = 'csv.gz'\nXZ = 'csv.xz'\n#TRAVIS = os.getenv(\"TRAVIS\")\nPORT = 8000\nHOST = 'localhost'\nSLEEP = 10\n\n#IS_PERSISTENT = False\n\ndef _close(module):\n try:\n module.parser._input._stream.close()\n except:\n pass\n\nasync def sleep_then_stop(s, t):\n await aio.sleep(t)\n await s.stop()\n #trace_after_stop(s)\n\ndef trace_after_stop(s):\n t = s.modules()['csv_loader_1']._table\n print(\"crashed when len(_table) ==\", len(t), \"last_id:\", t._last_id)\n i = t._last_id\n print(\"border row i:\", t.loc[i-1,:].to_dict())\n print(\"border row i+1:\", t.loc[i,:].to_dict())\n\ndef make_url(name, ext='csv'):\n return 'http://{host}:{port}/{name}.{ext}'.format(host=HOST,\n port=PORT,\n name=name, ext=ext)\n\ndef run_simple_server():\n _ = get_dataset('smallfile')\n _ = get_dataset('bigfile')\n _ = get_dataset_bz2('smallfile')\n _ = get_dataset_bz2('bigfile')\n _ = get_dataset_gz('smallfile')\n _ = get_dataset_gz('bigfile')\n #if six.PY3:\n # _ = get_dataset_lzma('smallfile')\n # _ = get_dataset_lzma('bigfile')\n os.chdir(DATA_DIR)\n import RangeHTTPServer.__main__\n\nBIGFILE_DF = pd.read_csv(get_dataset('bigfile'), header=None, usecols=[0])\n\nclass _HttpSrv(object):\n def __init__(self):\n _HttpSrv.start(self)\n\n def stop(self):\n if self._http_proc is not None:\n try:\n self._http_proc.terminate()\n time.sleep(SLEEP)\n except:\n pass\n\n def start(self):\n p = Process(target=run_simple_server, args=())\n p.start()\n self._http_proc = p\n time.sleep(SLEEP)\n\n def restart(self):\n self.stop()\n self.start()\n\n#IS_PERSISTENT = False\nclass ProgressiveLoadCSVCrashRoot(ProgressiveTest):\n _http_srv = None\n def setUp(self):\n super().setUp()\n #self._http_srv = None\n cleanup_temp_dir()\n init_temp_dir_if()\n #if self._http_srv is None:\n # self._http_srv = _HttpSrv()\n\n def tearDown(self):\n super().tearDown()\n #TestProgressiveLoadCSVCrash.cleanup()\n if self._http_srv is not None:\n try:\n self._http_srv.stop()\n except:\n pass\n cleanup_temp_dir()\n\n def get_tag(self):\n return id(self._http_srv)\n\n#IS_PERSISTENT = False\nclass TestProgressiveLoadCSVCrash1(ProgressiveLoadCSVCrashRoot):\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_01_read_http_csv_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile')\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 2)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module=CSVLoader(url, 
recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 1000000)\n arr1 = module.result.loc[:, 0].to_array().reshape(-1)\n arr2 = BIGFILE_DF.loc[:, 0].values\n #import pdb;pdb.set_trace()\n self.assertTrue(np.allclose(arr1, arr2))\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_01_read_http_csv_with_crash_and_counter(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile')\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 2)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n csv=CSVLoader(url, recovery=True, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n counter = Counter(scheduler=s)\n counter.input[0] = csv.output.result\n self.assertTrue(csv.result is None)\n aio.run(s.start())\n self.assertEqual(len(csv.result), 1000000)\n self.assertEqual(counter.result['counter'].loc[0], 1000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_02_read_http_csv_bz2_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile', ext=BZ2)\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 5)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module=CSVLoader(url, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 1000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_03_read_http_multi_csv_no_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n s=self.scheduler()\n module=CSVLoader([make_url('smallfile'),make_url('smallfile')], index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_04_read_http_multi_csv_bz2_no_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n s=self.scheduler()\n module=CSVLoader([make_url('smallfile', ext=BZ2)]*2, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\nclass TestProgressiveLoadCSVCrash2(ProgressiveLoadCSVCrashRoot):\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_05_read_http_multi_csv_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s = self.scheduler()\n url_list = [make_url('bigfile'),make_url('bigfile')]\n module = CSVLoader(url_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module = CSVLoader(url_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient 
storage, test skipped\")\n def test_06_read_http_multi_csv_bz2_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s = self.scheduler()\n url_list = [make_url('bigfile', ext=BZ2)]*2\n module = CSVLoader(url_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module = CSVLoader(url_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_07_read_multi_csv_file_no_crash(self):\n s = self.scheduler()\n module = CSVLoader([get_dataset('smallfile'), get_dataset('smallfile')], index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\nclass TestProgressiveLoadCSVCrash3(ProgressiveLoadCSVCrashRoot):\n def _tst_08_read_multi_csv_file_compress_no_crash(self, files):\n s=self.scheduler()\n module=CSVLoader(files, index_col=False, header=None, scheduler=s)#, save_context=False)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_08_read_multi_csv_file_bz2_no_crash(self):\n files = [get_dataset_bz2('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_08_read_multi_csv_file_gz_no_crash(self):\n files = [get_dataset_gz('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skip(\"Too slow ...\")\n def test_08_read_multi_csv_file_lzma_no_crash(self):\n files = [get_dataset_lzma('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_09_read_multi_csv_file_with_crash(self):\n s=self.scheduler()\n tag = 't9'\n file_list = [get_dataset('bigfile'), get_dataset('bigfile')]\n module=CSVLoader(file_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n _close(module)\n s=self.scheduler(clean=True)\n module=CSVLoader(file_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n def _tst_10_read_multi_csv_file_compress_with_crash(self, file_list, tag):\n s=self.scheduler()\n module=CSVLoader(file_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 4)\n aio.run_gather(s.start(), sts)\n _close(module)\n s=self.scheduler(clean=True)\n module=CSVLoader(file_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_10_read_multi_csv_file_bz2_with_crash(self):\n file_list = [get_dataset_bz2('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_1')\n\n 
@skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_10_read_multi_csv_file_gzip_with_crash(self):\n file_list = [get_dataset_gz('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_2')\n\n @skip(\"Too slow ...\")\n def test_10_read_multi_csv_file_lzma_with_crash(self):\n file_list = [get_dataset_lzma('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_3')\n\nif __name__ == '__main__':\n ProgressiveTest.main()\n"
] | [
[
"numpy.allclose"
]
] |
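Note on the progressivis crash-test row above: each test simulates a crash by racing the scheduler against a delayed stop (`aio.run_gather(s.start(), sleep_then_stop(s, 2))`), then reloads the same URL with `recovery=True` under the same `recovery_tag`. The same race can be written with plain asyncio; `scheduler` below stands in for the `s` object used in the tests:

```python
import asyncio

async def stop_after(scheduler, seconds):
    # let the CSV loader make partial progress, then stop mid-load
    await asyncio.sleep(seconds)
    await scheduler.stop()

async def run_with_simulated_crash(scheduler, seconds=2):
    # equivalent in spirit to aio.run_gather(s.start(), sleep_then_stop(s, 2))
    await asyncio.gather(scheduler.start(), stop_after(scheduler, seconds))
```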
EstelleHuang666/OpenNMT-py | [
"f7a239086d0db156535f3f5db9ed7060291485e8"
] | [
"onmt/inputters/inputter.py"
] | [
"# -*- coding: utf-8 -*-\nimport glob\nimport os\nimport codecs\nimport math\n\nfrom collections import Counter, defaultdict\nfrom itertools import chain, cycle\n\nimport torch\nimport torchtext.data\nfrom torchtext.data import Field, RawField\nfrom torchtext.vocab import Vocab\nfrom torchtext.data.utils import RandomShuffler\n\nfrom onmt.inputters.text_dataset import text_fields, TextMultiField\nfrom onmt.inputters.image_dataset import image_fields\nfrom onmt.inputters.audio_dataset import audio_fields\nfrom onmt.utils.logging import logger\n# backwards compatibility\nfrom onmt.inputters.text_dataset import _feature_tokenize # noqa: F401\nfrom onmt.inputters.image_dataset import ( # noqa: F401\n batch_img as make_img)\n\nimport gc\n\n\n# monkey-patch to make torchtext Vocab's pickleable\ndef _getstate(self):\n return dict(self.__dict__, stoi=dict(self.stoi))\n\n\ndef _setstate(self, state):\n self.__dict__.update(state)\n self.stoi = defaultdict(lambda: 0, self.stoi)\n\n\nVocab.__getstate__ = _getstate\nVocab.__setstate__ = _setstate\n\n\ndef make_src(data, vocab):\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n\ndef make_tgt(data, vocab):\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n\ndef get_fields(\n src_data_type,\n n_src_feats,\n n_tgt_feats,\n pad='<blank>',\n bos='<s>',\n eos='</s>',\n dynamic_dict=False,\n src_truncate=None,\n tgt_truncate=None\n):\n \"\"\"\n Args:\n src_data_type: type of the source input. Options are [text|img|audio].\n n_src_feats (int): the number of source features (not counting tokens)\n to create a :class:`torchtext.data.Field` for. (If\n ``src_data_type==\"text\"``, these fields are stored together\n as a ``TextMultiField``).\n n_tgt_feats (int): See above.\n pad (str): Special pad symbol. Used on src and tgt side.\n bos (str): Special beginning of sequence symbol. Only relevant\n for tgt.\n eos (str): Special end of sequence symbol. Only relevant\n for tgt.\n dynamic_dict (bool): Whether or not to include source map and\n alignment fields.\n src_truncate: Cut off src sequences beyond this (passed to\n ``src_data_type``'s data reader - see there for more details).\n tgt_truncate: Cut off tgt sequences beyond this (passed to\n :class:`TextDataReader` - see there for more details).\n\n Returns:\n A dict mapping names to fields. 
These names need to match\n the dataset example attributes.\n \"\"\"\n\n assert src_data_type in ['text', 'img', 'audio'], \\\n \"Data type not implemented\"\n assert not dynamic_dict or src_data_type == 'text', \\\n 'it is not possible to use dynamic_dict with non-text input'\n fields = {}\n\n fields_getters = {\"text\": text_fields,\n \"img\": image_fields,\n \"audio\": audio_fields}\n\n src_field_kwargs = {\"n_feats\": n_src_feats,\n \"include_lengths\": True,\n \"pad\": pad, \"bos\": None, \"eos\": None,\n \"truncate\": src_truncate,\n \"base_name\": \"src\"}\n fields[\"src\"] = fields_getters[src_data_type](**src_field_kwargs)\n\n tgt_field_kwargs = {\"n_feats\": n_tgt_feats,\n \"include_lengths\": False,\n \"pad\": pad, \"bos\": bos, \"eos\": eos,\n \"truncate\": tgt_truncate,\n \"base_name\": \"tgt\"}\n fields[\"tgt\"] = fields_getters[\"text\"](**tgt_field_kwargs)\n\n indices = Field(use_vocab=False, dtype=torch.long, sequential=False)\n fields[\"indices\"] = indices\n\n if dynamic_dict:\n src_map = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n fields[\"src_map\"] = src_map\n\n src_ex_vocab = RawField()\n fields[\"src_ex_vocab\"] = src_ex_vocab\n\n align = Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n fields[\"alignment\"] = align\n\n return fields\n\n\ndef load_old_vocab(vocab, data_type=\"text\", dynamic_dict=False):\n \"\"\"Update a legacy vocab/field format.\n\n Args:\n vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the\n format formerly saved in *.vocab.pt files. Or, text data\n not using a :class:`TextMultiField`.\n data_type (str): text, img, or audio\n dynamic_dict (bool): Used for copy attention.\n\n Returns:\n a dictionary whose keys are the field names and whose values Fields.\n \"\"\"\n\n if _old_style_vocab(vocab):\n # List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]\n # -> dict[str, Field]\n vocab = dict(vocab)\n n_src_features = sum('src_feat_' in k for k in vocab)\n n_tgt_features = sum('tgt_feat_' in k for k in vocab)\n fields = get_fields(\n data_type, n_src_features, n_tgt_features,\n dynamic_dict=dynamic_dict)\n for n, f in fields.items():\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(n, f)]\n for sub_n, sub_f in f_iter:\n if sub_n in vocab:\n sub_f.vocab = vocab[sub_n]\n return fields\n\n if _old_style_field_list(vocab): # upgrade to multifield\n # Dict[str, List[Tuple[str, Field]]]\n # doesn't change structure - don't return early.\n fields = vocab\n for base_name, vals in fields.items():\n if ((base_name == 'src' and data_type == 'text') or\n base_name == 'tgt'):\n assert not isinstance(vals[0][1], TextMultiField)\n fields[base_name] = [(base_name, TextMultiField(\n vals[0][0], vals[0][1], vals[1:]))]\n\n if _old_style_nesting(vocab):\n # Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]\n # -> dict[str, Field]\n fields = dict(list(chain.from_iterable(vocab.values())))\n\n return fields\n\n\ndef _old_style_vocab(vocab):\n \"\"\"Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).\n\n Args:\n vocab: some object loaded from a *.vocab.pt file\n\n Returns:\n Whether ``vocab`` is a list of pairs where the second object\n is a :class:`torchtext.vocab.Vocab` object.\n\n This exists because previously only the vocab objects from the fields\n were saved directly, not the fields themselves, and the fields needed to\n be reconstructed at training and translation time.\n \"\"\"\n\n return isinstance(vocab, list) and 
\\\n any(isinstance(v[1], Vocab) for v in vocab)\n\n\ndef _old_style_nesting(vocab):\n \"\"\"Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``).\"\"\"\n return isinstance(vocab, dict) and \\\n any(isinstance(v, list) for v in vocab.values())\n\n\ndef _old_style_field_list(vocab):\n \"\"\"Detect old-style text fields.\n\n Not old style vocab, old nesting, and text-type fields not using\n ``TextMultiField``.\n\n Args:\n vocab: some object loaded from a *.vocab.pt file\n\n Returns:\n Whether ``vocab`` is not an :func:`_old_style_vocab` and not\n a :class:`TextMultiField` (using an old-style text representation).\n \"\"\"\n\n # if tgt isn't using TextMultiField, then no text field is.\n return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \\\n (not isinstance(vocab['tgt'][0][1], TextMultiField))\n\n\ndef old_style_vocab(vocab):\n \"\"\"The vocab/fields need updated.\"\"\"\n return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \\\n _old_style_nesting(vocab)\n\n\ndef filter_example(ex, use_src_len=True, use_tgt_len=True,\n min_src_len=1, max_src_len=float('inf'),\n min_tgt_len=1, max_tgt_len=float('inf')):\n \"\"\"Return whether an example is an acceptable length.\n\n If used with a dataset as ``filter_pred``, use :func:`partial()`\n for all keyword arguments.\n\n Args:\n ex (torchtext.data.Example): An object with a ``src`` and ``tgt``\n property.\n use_src_len (bool): Filter based on the length of ``ex.src``.\n use_tgt_len (bool): Similar to above.\n min_src_len (int): A non-negative minimally acceptable length\n (examples of exactly this length will be included).\n min_tgt_len (int): Similar to above.\n max_src_len (int or float): A non-negative (possibly infinite)\n maximally acceptable length (examples of exactly this length\n will be included).\n max_tgt_len (int or float): Similar to above.\n \"\"\"\n\n src_len = len(ex.src[0])\n tgt_len = len(ex.tgt[0])\n return (not use_src_len or min_src_len <= src_len <= max_src_len) and \\\n (not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)\n\n\ndef _pad_vocab_to_multiple(vocab, multiple):\n vocab_size = len(vocab)\n if vocab_size % multiple == 0:\n return\n target_size = int(math.ceil(vocab_size / multiple)) * multiple\n padding_tokens = [\n \"averyunlikelytoken%d\" % i for i in range(target_size - vocab_size)]\n vocab.extend(Vocab(Counter(), specials=padding_tokens))\n return vocab\n\n\ndef _build_field_vocab(field, counter, size_multiple=1, **kwargs):\n # this is basically copy-pasted from torchtext.\n all_specials = [\n field.unk_token, field.pad_token, field.init_token, field.eos_token\n ]\n specials = [tok for tok in all_specials if tok is not None]\n field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)\n if size_multiple > 1:\n _pad_vocab_to_multiple(field.vocab, size_multiple)\n\n\ndef _load_vocab(vocab_path, name, counters, min_freq):\n # counters changes in place\n vocab = _read_vocab_file(vocab_path, name)\n vocab_size = len(vocab)\n logger.info('Loaded %s vocab has %d tokens.' 
% (name, vocab_size))\n for i, token in enumerate(vocab):\n # keep the order of tokens specified in the vocab file by\n # adding them to the counter with decreasing counting values\n counters[name][token] = vocab_size - i + min_freq\n return vocab, vocab_size\n\n\ndef _build_fv_from_multifield(multifield, counters, build_fv_args,\n size_multiple=1):\n for name, field in multifield:\n _build_field_vocab(\n field,\n counters[name],\n size_multiple=size_multiple,\n **build_fv_args[name])\n logger.info(\" * %s vocab size: %d.\" % (name, len(field.vocab)))\n\n\ndef _build_fields_vocab(fields, counters, data_type, share_vocab,\n vocab_size_multiple,\n src_vocab_size, src_words_min_frequency,\n tgt_vocab_size, tgt_words_min_frequency):\n build_fv_args = defaultdict(dict)\n build_fv_args[\"src\"] = dict(\n max_size=src_vocab_size, min_freq=src_words_min_frequency)\n build_fv_args[\"tgt\"] = dict(\n max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)\n tgt_multifield = fields[\"tgt\"]\n _build_fv_from_multifield(\n tgt_multifield,\n counters,\n build_fv_args,\n size_multiple=vocab_size_multiple if not share_vocab else 1)\n if data_type == 'text':\n src_multifield = fields[\"src\"]\n _build_fv_from_multifield(\n src_multifield,\n counters,\n build_fv_args,\n size_multiple=vocab_size_multiple if not share_vocab else 1)\n if share_vocab:\n # `tgt_vocab_size` is ignored when sharing vocabularies\n logger.info(\" * merging src and tgt vocab...\")\n src_field = src_multifield.base_field\n tgt_field = tgt_multifield.base_field\n _merge_field_vocabs(\n src_field, tgt_field, vocab_size=src_vocab_size,\n min_freq=src_words_min_frequency,\n vocab_size_multiple=vocab_size_multiple)\n logger.info(\" * merged vocab size: %d.\" % len(src_field.vocab))\n\n return fields\n\n\ndef build_vocab(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,\n vocab_size_multiple=1):\n \"\"\"Build the fields for all data sides.\n\n Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict[str, Field]): fields to build vocab for.\n data_type (str): A supported data type string.\n share_vocab (bool): share source and target vocabulary?\n src_vocab_path (str): Path to src vocabulary file.\n src_vocab_size (int): size of the source vocabulary.\n src_words_min_frequency (int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path (str): Path to tgt vocabulary file.\n tgt_vocab_size (int): size of the target vocabulary.\n tgt_words_min_frequency (int): the minimum frequency needed to\n include a target word in the vocabulary.\n vocab_size_multiple (int): ensure that the vocabulary size is a\n multiple of this value.\n\n Returns:\n Dict of Fields\n \"\"\"\n\n counters = defaultdict(Counter)\n\n if src_vocab_path:\n try:\n logger.info(\"Using existing vocabulary...\")\n vocab = torch.load(src_vocab_path)\n # return vocab to dump with standard name\n return vocab\n except torch.serialization.pickle.UnpicklingError:\n logger.info(\"Building vocab from text file...\")\n # empty train_dataset_files so that vocab is only loaded from\n # given paths in src_vocab_path, tgt_vocab_path\n train_dataset_files = []\n\n # Load vocabulary\n if src_vocab_path:\n src_vocab, src_vocab_size = _load_vocab(\n src_vocab_path, \"src\", counters,\n src_words_min_frequency)\n else:\n src_vocab = None\n\n if tgt_vocab_path:\n tgt_vocab, tgt_vocab_size = _load_vocab(\n tgt_vocab_path, 
\"tgt\", counters,\n tgt_words_min_frequency)\n else:\n tgt_vocab = None\n\n for i, path in enumerate(train_dataset_files):\n dataset = torch.load(path)\n logger.info(\" * reloading %s.\" % path)\n for ex in dataset.examples:\n for name, field in fields.items():\n try:\n f_iter = iter(field)\n except TypeError:\n f_iter = [(name, field)]\n all_data = [getattr(ex, name, None)]\n else:\n all_data = getattr(ex, name)\n for (sub_n, sub_f), fd in zip(\n f_iter, all_data):\n has_vocab = (sub_n == 'src' and src_vocab) or \\\n (sub_n == 'tgt' and tgt_vocab)\n if sub_f.sequential and not has_vocab:\n val = fd\n counters[sub_n].update(val)\n\n # Drop the none-using from memory but keep the last\n if i < len(train_dataset_files) - 1:\n dataset.examples = None\n gc.collect()\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n fields = _build_fields_vocab(\n fields, counters, data_type,\n share_vocab, vocab_size_multiple,\n src_vocab_size, src_words_min_frequency,\n tgt_vocab_size, tgt_words_min_frequency)\n\n return fields # is the return necessary?\n\n\ndef _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,\n vocab_size_multiple):\n # in the long run, shouldn't it be possible to do this by calling\n # build_vocab with both the src and tgt data?\n specials = [tgt_field.unk_token, tgt_field.pad_token,\n tgt_field.init_token, tgt_field.eos_token]\n merged = sum(\n [src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()\n )\n merged_vocab = Vocab(\n merged, specials=specials,\n max_size=vocab_size, min_freq=min_freq\n )\n if vocab_size_multiple > 1:\n _pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)\n src_field.vocab = merged_vocab\n tgt_field.vocab = merged_vocab\n assert len(src_field.vocab) == len(tgt_field.vocab)\n\n\ndef _read_vocab_file(vocab_path, tag):\n \"\"\"Loads a vocabulary from the given path.\n\n Args:\n vocab_path (str): Path to utf-8 text file containing vocabulary.\n Each token should be on a line by itself. 
Tokens must not\n contain whitespace (else only before the whitespace\n is considered).\n tag (str): Used for logging which vocab is being read.\n \"\"\"\n\n logger.info(\"Loading {} vocabulary from {}\".format(tag, vocab_path))\n\n if not os.path.exists(vocab_path):\n raise RuntimeError(\n \"{} vocabulary not found at {}\".format(tag, vocab_path))\n else:\n with codecs.open(vocab_path, 'r', 'utf-8') as f:\n return [line.strip().split()[0] for line in f if line.strip()]\n\n\ndef batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):\n \"\"\"Yield elements from data in chunks of batch_size, where each chunk size\n is a multiple of batch_size_multiple.\n\n This is an extended version of torchtext.data.batch.\n \"\"\"\n if batch_size_fn is None:\n def batch_size_fn(new, count, sofar):\n return count\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far >= batch_size:\n overflowed = 0\n if size_so_far > batch_size:\n overflowed += 1\n if batch_size_multiple > 1:\n overflowed += (\n (len(minibatch) - overflowed) % batch_size_multiple)\n if overflowed == 0:\n yield minibatch\n minibatch, size_so_far = [], 0\n else:\n yield minibatch[:-overflowed]\n minibatch = minibatch[-overflowed:]\n size_so_far = 0\n for i, ex in enumerate(minibatch):\n size_so_far = batch_size_fn(ex, i + 1, size_so_far)\n if minibatch:\n yield minibatch\n\n\ndef _pool(data, batch_size, batch_size_fn, batch_size_multiple,\n sort_key, random_shuffler, pool_factor):\n for p in torchtext.data.batch(\n data, batch_size * pool_factor,\n batch_size_fn=batch_size_fn):\n p_batch = list(batch_iter(\n sorted(p, key=sort_key),\n batch_size,\n batch_size_fn=batch_size_fn,\n batch_size_multiple=batch_size_multiple))\n for b in random_shuffler(p_batch):\n yield b\n\n\nclass OrderedIterator(torchtext.data.Iterator):\n\n def __init__(self,\n dataset,\n batch_size,\n pool_factor=1,\n batch_size_multiple=1,\n yield_raw_example=False,\n **kwargs):\n super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)\n self.batch_size_multiple = batch_size_multiple\n self.yield_raw_example = yield_raw_example\n self.dataset = dataset\n self.pool_factor = pool_factor\n\n def create_batches(self):\n if self.train:\n if self.yield_raw_example:\n self.batches = batch_iter(\n self.data(),\n 1,\n batch_size_fn=None,\n batch_size_multiple=1)\n else:\n self.batches = _pool(\n self.data(),\n self.batch_size,\n self.batch_size_fn,\n self.batch_size_multiple,\n self.sort_key,\n self.random_shuffler,\n self.pool_factor)\n else:\n self.batches = []\n for b in batch_iter(\n self.data(),\n self.batch_size,\n batch_size_fn=self.batch_size_fn,\n batch_size_multiple=self.batch_size_multiple):\n self.batches.append(sorted(b, key=self.sort_key))\n\n def __iter__(self):\n \"\"\"\n Extended version of the definition in torchtext.data.Iterator.\n Added yield_raw_example behaviour to yield a torchtext.data.Example\n instead of a torchtext.data.Batch object.\n \"\"\"\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n if self.sort_within_batch:\n # NOTE: `rnn.pack_padded_sequence` requires that a\n # minibatch be sorted by decreasing order, which\n # requires reversing relative to typical sort keys\n if self.sort:\n minibatch.reverse()\n else:\n 
minibatch.sort(key=self.sort_key, reverse=True)\n if self.yield_raw_example:\n yield minibatch[0]\n else:\n yield torchtext.data.Batch(\n minibatch,\n self.dataset,\n self.device)\n if not self.repeat:\n return\n\n\nclass MultipleDatasetIterator(object):\n \"\"\"\n This takes a list of iterable objects (DatasetLazyIter) and their\n respective weights, and yields a batch in the wanted proportions.\n \"\"\"\n def __init__(self,\n train_shards,\n fields,\n device,\n opt):\n self.index = -1\n self.iterables = []\n for shard in train_shards:\n self.iterables.append(\n build_dataset_iter(shard, fields, opt, multi=True))\n self.init_iterators = True\n self.weights = opt.data_weights\n self.batch_size = opt.batch_size\n self.batch_size_fn = max_tok_len \\\n if opt.batch_type == \"tokens\" else None\n self.batch_size_multiple = 8 if opt.model_dtype == \"fp16\" else 1\n self.device = device\n # Temporarily load one shard to retrieve sort_key for data_type\n temp_dataset = torch.load(self.iterables[0]._paths[0])\n self.sort_key = temp_dataset.sort_key\n self.random_shuffler = RandomShuffler()\n self.pool_factor = opt.pool_factor\n del temp_dataset\n\n def _iter_datasets(self):\n if self.init_iterators:\n self.iterators = [iter(iterable) for iterable in self.iterables]\n self.init_iterators = False\n for weight in self.weights:\n self.index = (self.index + 1) % len(self.iterators)\n for i in range(weight):\n yield self.iterators[self.index]\n\n def _iter_examples(self):\n for iterator in cycle(self._iter_datasets()):\n yield next(iterator)\n\n def __iter__(self):\n while True:\n for minibatch in _pool(\n self._iter_examples(),\n self.batch_size,\n self.batch_size_fn,\n self.batch_size_multiple,\n self.sort_key,\n self.random_shuffler,\n self.pool_factor):\n minibatch = sorted(minibatch, key=self.sort_key, reverse=True)\n yield torchtext.data.Batch(minibatch,\n self.iterables[0].dataset,\n self.device)\n\n\nclass DatasetLazyIter(object):\n \"\"\"Yield data from sharded dataset files.\n\n Args:\n dataset_paths: a list containing the locations of dataset files.\n fields (dict[str, Field]): fields dict for the\n datasets.\n batch_size (int): batch size.\n batch_size_fn: custom batch process function.\n device: See :class:`OrderedIterator` ``device``.\n is_train (bool): train or valid?\n \"\"\"\n\n def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,\n batch_size_multiple, device, is_train, pool_factor,\n repeat=True, num_batches_multiple=1, yield_raw_example=False):\n self._paths = dataset_paths\n self.fields = fields\n self.batch_size = batch_size\n self.batch_size_fn = batch_size_fn\n self.batch_size_multiple = batch_size_multiple\n self.device = device\n self.is_train = is_train\n self.repeat = repeat\n self.num_batches_multiple = num_batches_multiple\n self.yield_raw_example = yield_raw_example\n self.pool_factor = pool_factor\n\n def _iter_dataset(self, path):\n logger.info('Loading dataset from %s' % path)\n cur_dataset = torch.load(path)\n logger.info('number of examples: %d' % len(cur_dataset))\n cur_dataset.fields = self.fields\n cur_iter = OrderedIterator(\n dataset=cur_dataset,\n batch_size=self.batch_size,\n pool_factor=self.pool_factor,\n batch_size_multiple=self.batch_size_multiple,\n batch_size_fn=self.batch_size_fn,\n device=self.device,\n train=self.is_train,\n sort=False,\n sort_within_batch=True,\n repeat=False,\n yield_raw_example=self.yield_raw_example\n )\n for batch in cur_iter:\n self.dataset = cur_iter.dataset\n yield batch\n\n # NOTE: This is causing some issues 
for consumer/producer,\n # as we may still have some of those examples in some queue\n # cur_dataset.examples = None\n # gc.collect()\n # del cur_dataset\n # gc.collect()\n\n def __iter__(self):\n num_batches = 0\n paths = self._paths\n if self.is_train and self.repeat:\n # Cycle through the shards indefinitely.\n paths = cycle(paths)\n for path in paths:\n for batch in self._iter_dataset(path):\n yield batch\n num_batches += 1\n if self.is_train and not self.repeat and \\\n num_batches % self.num_batches_multiple != 0:\n # When the dataset is not repeated, we might need to ensure that\n # the number of returned batches is the multiple of a given value.\n # This is important for multi GPU training to ensure that all\n # workers have the same number of batches to process.\n for path in paths:\n for batch in self._iter_dataset(path):\n yield batch\n num_batches += 1\n if num_batches % self.num_batches_multiple == 0:\n return\n\n\ndef max_tok_len(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch, max_tgt_in_batch # this is a hack\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n # Src: [<bos> w1 ... wN <eos>]\n max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)\n # Tgt: [w1 ... wM <eos>]\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\ndef build_dataset_iter(corpus_type, fields, opt, is_train=True, multi=False):\n \"\"\"\n This returns user-defined train/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.\n \"\"\"\n dataset_paths = list(sorted(\n glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt')))\n if not dataset_paths:\n if is_train:\n raise ValueError('Training data %s not found' % opt.data)\n else:\n return None\n if multi:\n batch_size = 1\n batch_fn = None\n batch_size_multiple = 1\n else:\n batch_size = opt.batch_size if is_train else opt.valid_batch_size\n batch_fn = max_tok_len \\\n if is_train and opt.batch_type == \"tokens\" else None\n batch_size_multiple = 8 if opt.model_dtype == \"fp16\" else 1\n\n device = \"cuda\" if opt.gpu_ranks else \"cpu\"\n\n return DatasetLazyIter(\n dataset_paths,\n fields,\n batch_size,\n batch_fn,\n batch_size_multiple,\n device,\n is_train,\n opt.pool_factor,\n repeat=not opt.single_pass,\n num_batches_multiple=max(opt.accum_count) * opt.world_size,\n yield_raw_example=multi)\n\n\ndef build_dataset_iter_multiple(train_shards, fields, opt):\n return MultipleDatasetIterator(\n train_shards, fields, \"cuda\" if opt.gpu_ranks else \"cpu\", opt)\n"
] | [
[
"torch.load"
]
] |
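Note on the `onmt/inputters/inputter.py` row above: the module monkey-patches `Vocab.__getstate__`/`__setstate__` because `Vocab.stoi` is a `defaultdict` whose default factory is a lambda, and lambdas cannot be pickled; the patch downgrades `stoi` to a plain dict on save and restores the defaultdict on load. A self-contained sketch of the same trick on a stand-in class:

```python
import pickle
from collections import defaultdict

class TinyVocab:
    # stand-in for torchtext's Vocab: stoi maps unknown tokens to index 0
    def __init__(self, stoi):
        self.stoi = defaultdict(lambda: 0, stoi)

    def __getstate__(self):
        # drop the unpicklable lambda by serializing stoi as a plain dict
        return dict(self.__dict__, stoi=dict(self.stoi))

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.stoi = defaultdict(lambda: 0, self.stoi)

restored = pickle.loads(pickle.dumps(TinyVocab({"hello": 1})))
assert restored.stoi["hello"] == 1 and restored.stoi["unseen"] == 0
```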
lbouma/Cyclopath | [
"d09d927a1e6f9e07924007fd39e8e807cd9c0f8c"
] | [
"pyserver/bin/rpy2/robjects/tests/testNumpyConversions.py"
] | [
"import unittest\nimport rpy2.robjects as robjects\nr = robjects.r\n\ntry:\n import numpy\n has_numpy = True\n import rpy2.robjects.numpy2ri as rpyn\nexcept:\n has_numpy = False\n\n\nclass MissingNumpyDummyTestCase(unittest.TestCase):\n def testMissingNumpy(self):\n self.assertTrue(False) # numpy is missing. No tests.\n\nclass NumpyConversionsTestCase(unittest.TestCase):\n\n def setUp(self):\n robjects.conversion.py2ri = rpyn.numpy2ri\n\n def tearDown(self):\n robjects.conversion.py2ri = robjects.default_py2ri\n\n def checkHomogeneous(self, obj, mode, storage_mode):\n converted = robjects.conversion.py2ri(obj)\n self.assertEquals(r[\"mode\"](converted)[0], mode)\n self.assertEquals(r[\"storage.mode\"](converted)[0], storage_mode)\n self.assertEquals(list(obj), list(converted))\n self.assertTrue(r[\"is.array\"](converted)[0])\n\n def testVectorBoolean(self):\n b = numpy.array([True, False, True], dtype=numpy.bool_)\n self.checkHomogeneous(b, \"logical\", \"logical\")\n\n def testVectorInteger(self):\n i = numpy.array([1, 2, 3], dtype=\"i\")\n self.checkHomogeneous(i, \"numeric\", \"integer\")\n\n def testVectorFloat(self):\n f = numpy.array([1, 2, 3], dtype=\"f\")\n self.checkHomogeneous(f, \"numeric\", \"double\")\n\n def testVectorComplex(self):\n c = numpy.array([1j, 2j, 3j], dtype=numpy.complex_)\n self.checkHomogeneous(c, \"complex\", \"complex\")\n\n def testVectorCharacter(self):\n s = numpy.array([\"a\", \"b\", \"c\"], dtype=\"S\")\n self.checkHomogeneous(s, \"character\", \"character\")\n\n def testVectorUnicodeCharacter(self):\n u = numpy.array([u\"a\", u\"b\", u\"c\"], dtype=\"U\")\n self.checkHomogeneous(u, \"character\", \"character\")\n\n def testArray(self):\n\n i2d = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=\"i\")\n i2d_r = robjects.conversion.py2ri(i2d)\n\n self.assertEquals(r[\"storage.mode\"](i2d_r)[0], \"integer\")\n self.assertEquals(tuple(r[\"dim\"](i2d_r)), (2, 3))\n\n # Make sure we got the row/column swap right:\n self.assertEquals(i2d_r.rx(1, 2)[0], i2d[0, 1])\n\n f3d = numpy.arange(24, dtype=\"f\").reshape((2, 3, 4))\n f3d_r = robjects.conversion.py2ri(f3d)\n\n self.assertEquals(r[\"storage.mode\"](f3d_r)[0], \"double\")\n self.assertEquals(tuple(r[\"dim\"](f3d_r)), (2, 3, 4))\n\n # Make sure we got the row/column swap right:\n self.assertEquals(f3d_r.rx(1, 2, 3)[0], f3d[0, 1, 2])\n\n def testObjectArray(self):\n o = numpy.array([1, \"a\", 3.2], dtype=numpy.object_)\n o_r = robjects.conversion.py2ri(o)\n self.assertEquals(r[\"mode\"](o_r)[0], \"list\")\n self.assertEquals(r[\"[[\"](o_r, 1)[0], 1)\n self.assertEquals(r[\"[[\"](o_r, 2)[0], \"a\")\n self.assertEquals(r[\"[[\"](o_r, 3)[0], 3.2)\n\n def testRecordArray(self):\n rec = numpy.array([(1, 2.3), (2, -0.7), (3, 12.1)],\n dtype=[(\"count\", \"i\"), (\"value\", numpy.double)])\n rec_r = robjects.conversion.py2ri(rec)\n self.assertTrue(r[\"is.data.frame\"](rec_r)[0])\n self.assertEquals(tuple(r[\"names\"](rec_r)), (\"count\", \"value\"))\n count_r = r[\"$\"](rec_r, \"count\")\n value_r = r[\"$\"](rec_r, \"value\")\n self.assertEquals(r[\"storage.mode\"](count_r)[0], \"integer\")\n self.assertEquals(r[\"storage.mode\"](value_r)[0], \"double\")\n self.assertEquals(count_r[1], 2)\n self.assertEquals(value_r[2], 12.1)\n\n def testBadArray(self):\n u = numpy.array([1, 2, 3], dtype=numpy.uint32)\n self.assertRaises(ValueError, robjects.conversion.py2ri, u)\n\n def testAssignNumpyObject(self):\n x = numpy.arange(-10., 10., 1)\n env = robjects.Environment()\n env[\"x\"] = x\n self.assertEquals(1, len(env))\n 
self.assertTrue(isinstance(env[\"x\"], robjects.Array))\n\ndef suite():\n if has_numpy:\n return unittest.TestLoader().loadTestsFromTestCase(NumpyConversionsTestCase)\n else:\n return unittest.TestLoader().loadTestsFromTestCase(MissingNumpyDummyTestCase)\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"numpy.array",
"numpy.arange"
]
] |
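Note on the rpy2 conversion-test row above: the assertions use `assertEquals`, a deprecated alias removed in Python 3.12, and the dtype alias `numpy.complex_`, which is gone in NumPy 2.0. Under current toolchains the equivalent spellings would be, for example:

```python
import unittest
import numpy as np

class DtypeAliases(unittest.TestCase):
    def test_complex_vector(self):
        c = np.array([1j, 2j, 3j], dtype=np.complex128)  # was dtype=numpy.complex_
        self.assertEqual(c.dtype, np.complex128)          # was self.assertEquals(...)
```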
sivaprakasaman/Python_Coding_Toolbox | [
"8bbcfb43eed49f49de7321e330f4b3943586038a"
] | [
"signal_processing/timbral_inspection/resynthesize.py"
] | [
"#Andrew Sivaprakasam\n#Purdue University\n#Email: [email protected]\n\n#DESCRIPTION: Code written to isolate the magnitudes of harmonics of a\n#given f_0 for a given audiofile/stimulus.\n\n#Additional Dependencies: scipy, numpy, matplotlib\n# pip3 install scipy\n# pip3 install numpy\n# pip3 install matplotlib\n\n#May require ffmpeg on Ubuntu/Linux as well\n# sudo apt-get install ffmpeg\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\n\ndef extract_harmonics(fname, fs = 44100, f_0 = 440, n_harms = 3):\n fs, x = wavfile.read(fname)\n #x = np.array(aud[0])\n t_vect = np.arange(0,len(x))/fs\n f_vect = np.arange(1,n_harms+1)*f_0;\n #plt.plot(t_vect,x)\n #output = get_spect(x, fs, DR = 120, BW = 100, xlim = [0,0.5], ylim = [0,5000], colormap = 'magma')\n\n ## TODO: Try applying dpss to this. Might result in more accurate\n ## magnitudes?\n\n freq_time = np.multiply(np.asmatrix(f_vect).T,np.asmatrix(t_vect))\n x_sin = np.multiply(np.asmatrix(x),np.sin(2*np.pi*freq_time))\n x_cos = np.multiply(np.asmatrix(x),np.cos(2*np.pi*freq_time))\n sin_sum = np.sum(x_sin,1);\n cos_sum = np.sum(x_cos,1);\n\n mags = np.sqrt(np.multiply(sin_sum,sin_sum) + np.multiply(cos_sum,cos_sum))\n mags = np.squeeze(np.asarray(mags))/np.max(mags)\n\n phase = np.arctan(np.divide(sin_sum,cos_sum));\n phase = np.squeeze(np.asarray(phase));\n #phase = [0];\n #plt.stem(f_vect,mags)\n\n return [f_vect, mags, phase, x, fs]\n\nfrom signal_processing import pure_tone_complex, sound, magphase\nimport matplotlib.pyplot as plt\n#from playsound import playsound\n\ndef resynthesize(mags, fname = 'resynth.wav', fs_Hz = 44100, freq_Hz = [0], dur_sec = 1, phi = [0], scale = .75, tone_shift = 1, env_fxn = 1, fs = 44100, type = 'sin', play_write = True, plot = True):\n harmonics = len(mags)\n\n #This handling should be added to pure_tone_complex at some point\n if len(phi)<harmonics:\n phi = np.ones(harmonics)*phi;\n\n if len(freq_Hz) <harmonics:\n freq_Hz = np.arange(1,n_harms+1)*440;\n\n tone = pure_tone_complex(freq_Hz*tone_shift, fs, dur_sec, mags, phi, type)\n tone = tone[1]*env_fxn;\n tone = scale*tone/np.max(tone);\n\n t_vect = np.arange(0,len(tone))/fs_Hz;\n\n if plot:\n plt.figure()\n plt.plot(tone);\n plt.xlim([0,len(tone)])\n\n if play_write:\n sound(tone,fs_Hz,fname,1)\n\n return tone\n################################################################################\n\nimport numpy as np\n\ndef play_alma_mater(extract, freq_Hz, fname = 'alma_mater.wav', n_harms = 6, key = 1, tempo = 0.3, fxn = 'string', type = 'sin', short = True):\n shift_mat = [1.26/1.66, .85, .95, 1.00, 1.13, 1.26, 1.26, 1.32, 1.32, 1.32, 1, 1.13, 1.13, 1.26, 1.26/1.66, 1.26, 1.20, 1.26, 1.26, 1.13, 1.00, 1.13, 1.26, 1.26, 1.13, .85, .95, 1, .95, .85, 1.13, 1.26/1.66, 1.26/1.66, .85, .95, 1, 1.13, 1.26, 1.26, 1.26, 1.32, 1.32, 1, 1.13, 1.26, .85, .95, 1, .85, 1.26/1.66, 1, 1.26, 1.26/1.66, .85, 1.26, 1.13, 1, 1]\n dur_mat = [2, 1, 1, 1.5, .5, 1, 1, 1, .5, .5, 1, .5, .5, 1, 1, 1, 1, 2, 1, 1, 1.5, .5, 1, 1, 1, .5, .5, 1, .5, .5, 3, 1.5, .5, 1, 1, 1.5, .5, 1, .5, .5, 1, 1, 1, 1, 4, 1.5, .5, 1, 1, 1, 1, 1, 1, 1.5, .5, 1.5, .5, 3]\n scale_mat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ,1 , 1, 1, 1, 1]\n\n #Truncate by default, otherwise listen to music for a few extra seconds...\n if short:\n shift_mat = shift_mat[:6];\n dur_mat = dur_mat[:6];\n scale_mat = scale_mat[:6];\n\n fs = 44100;\n 
#Change tempo\n dur_mat = np.asarray(dur_mat)*tempo\n tone = [];\n\n for i in range(0,len(shift_mat)):\n\n t_vect = np.arange(0,dur_mat[i]*fs)/fs;\n\n if fxn == 'banjo':\n env_fxn = np.exp(-7*t_vect);\n elif fxn == 'string':\n env_fxn = (1+.25*np.sin(5*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);\n else:\n env_fxn = 1;\n\n tone_temp = resynthesize(extract[1], freq_Hz = key*freq_Hz, dur_sec = dur_mat[i], phi = extract[2], scale = scale_mat[i], tone_shift = shift_mat[i], env_fxn = env_fxn, type = type, play_write = False, plot = False)\n print(tone_temp)\n tone = np.concatenate((tone,tone_temp), axis = 0)\n\n sound(tone, fs, fname, 1)\n\n return [tone,fs];\n\n########################## IMPLEMENTATION #####################################\n\n# from signal_processing import pure_tone_complex, sound, magphase, get_spect\n# import matplotlib.pyplot as plt\n# from scipy.signal import spectrogram as sp\n# import numpy as np\n# ## TODO: Quantify Envelope, apply slepian sequences, verify magnitudes against DFT/PSD\n\n# #Can use the below line in Atom when running Hydrogen\n# #%matplotlib inline\n\n# harmonics = 7;\n# first = 0;\n# dur_sec = 1;\n# toPlay = np.array([0,1,2,3,4,5,6])\n# extract = extract_harmonics('instruments/violin_A4_normal.wav', fs = 44100, f_0 = 440, n_harms = harmonics);\n\n# fs_Hz = extract[4];\n# amp = extract[1][toPlay];\n# phase = extract[2][toPlay];\n# freq_Hz = extract[0][toPlay];\n\n# t_vect = np.arange(0,dur_sec*fs_Hz)/fs_Hz;\n# env_banj = np.exp(-9*t_vect);\n# env_string = (1+0.15*np.sin(6*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);\n\n# tone = resynthesize(amp, 'violin_all.wav', freq_Hz = freq_Hz, dur_sec = 1, phi = phase, scale = 1, tone_shift = 1, env_fxn = env_string, type = 'sin', play_write = True, plot = False)\n\n# sound(tone, fs_Hz)\n# get_spect(tone, fs_Hz, DR = 200, BW = 75, xlim = [0,1], ylim = [0,4000], colormap = 'cividis',title = 'Simulated Violin | All Harmonics');\n\n# #Play Alma Mater\n# alma_mater = play_alma_mater(extract, freq_Hz, key = 1, fxn = 'strings', type = 'sin')\n#\n# plt.figure()\n# plt.plot(np.arange(0,len(alma_mater[0]))/alma_mater[1],alma_mater[0]);\n# output = get_spect(alma_mater[0],alma_mater[1], DR = 300, BW = 200, xlim = [0.01,2], ylim = [0,5000])\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.multiply",
"numpy.divide",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.asarray",
"numpy.exp",
"numpy.arange",
"numpy.asmatrix",
"numpy.max",
"scipy.io.wavfile.read",
"numpy.sin",
"numpy.concatenate"
]
] |
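The `extract_harmonics` record above estimates each harmonic's magnitude and phase by projecting the signal onto sines and cosines at multiples of `f_0`. Below is a minimal, self-contained sketch of that projection idea, run on a synthetic tone instead of a wav file; `harmonic_mags_and_phases` is a hypothetical helper name, and `arctan2` is used in place of the record's `arctan` to avoid the quadrant ambiguity — this is an illustration, not the record's code.

```python
# Illustrative sketch only -- not a row of the dataset above.
import numpy as np

def harmonic_mags_and_phases(x, fs, f_0, n_harms):
    """Normalised magnitudes and phases of the first n_harms harmonics of f_0."""
    t = np.arange(len(x)) / fs                     # time vector in seconds
    k = np.arange(1, n_harms + 1)[:, None]         # harmonic numbers as a column
    sin_sum = (x * np.sin(2 * np.pi * k * f_0 * t)).sum(axis=1)
    cos_sum = (x * np.cos(2 * np.pi * k * f_0 * t)).sum(axis=1)
    mags = np.sqrt(sin_sum**2 + cos_sum**2)
    phases = np.arctan2(sin_sum, cos_sum)          # arctan2 resolves the quadrant
    return mags / mags.max(), phases

# Synthetic test: 440 Hz fundamental plus a quieter 2nd harmonic.
fs = 44100
t = np.arange(fs) / fs
x = np.sin(2 * np.pi * 440 * t) + 0.5 * np.sin(2 * np.pi * 880 * t)
print(harmonic_mags_and_phases(x, fs, f_0=440, n_harms=3)[0])  # ~[1.0, 0.5, ~0.0]
```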
britt0508/ExplainedKinshipCorrect | [
"e0e255ff9531af1436bb9a9fe07256e72a0061f7"
] | [
"stylegan/pretrained_example.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\n\"\"\"Minimal script for generating an image using pre-trained StyleGAN generator.\"\"\"\n\nimport os\nimport pickle\nimport numpy as np\nimport PIL.Image\nimport dnnlib\nimport dnnlib.tflib as tflib\nimport config\n\n\ndef main():\n # Initialize TensorFlow.\n tflib.init_tf()\n\n # Load pre-trained network.\n url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl\n with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:\n _G, _D, Gs = pickle.load(f)\n # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.\n # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.\n # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.\n\n # Print network details.\n Gs.print_layers()\n\n # Pick latent vector.\n rnd = np.random.RandomState(5)\n latents = rnd.randn(1, Gs.input_shape[1])\n\n # Generate image.\n fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)\n\n # Save image.\n os.makedirs(config.result_dir, exist_ok=True)\n png_filename = os.path.join(config.result_dir, 'example.png')\n PIL.Image.fromarray(images[0], 'RGB').save(png_filename)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.RandomState"
]
] |
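The only API the row above lists is `numpy.random.RandomState`, which the StyleGAN example seeds so that the latent vector (and therefore the generated image) is reproducible. A tiny sketch of that property, with the latent size of 512 taken as an assumption rather than read from the network:

```python
# Illustrative sketch only -- reproducibility of a seeded RandomState.
import numpy as np

latent_dim = 512  # assumption: stand-in for Gs.input_shape[1]
z1 = np.random.RandomState(5).randn(1, latent_dim)
z2 = np.random.RandomState(5).randn(1, latent_dim)
assert np.allclose(z1, z2)  # same seed -> same latents -> same generated image
```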
zommiommy/cache_decorator | [
"e7d71dd48890247838612533481d0b5a808c03ec"
] | [
"tests/test_npz.py"
] | [
"import numpy as np\nfrom time import sleep\nfrom shutil import rmtree\nfrom cache_decorator import Cache\nfrom .utils import standard_test_arrays\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_single(a):\n sleep(2)\n return np.array([1, 2, 3])\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_tuple(a):\n sleep(2)\n return np.array([1, 2, 3]), np.array([1, 2, 4])\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_list(a):\n sleep(2)\n return [np.array([1, 2, 3]), np.array([1, 2, 4])]\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_dict(a):\n sleep(2)\n return {\"a\":np.array([1, 2, 3]), \"b\":np.array([1, 2, 4])}\n\ndef test_npz_single():\n standard_test_arrays(cached_function_single)\n rmtree(\"./test_cache\")\n\ndef test_npz_tuple():\n standard_test_arrays(cached_function_tuple)\n rmtree(\"./test_cache\")\n\ndef test_npz_list():\n standard_test_arrays(cached_function_list)\n rmtree(\"./test_cache\")\n\ndef test_npz_dict():\n standard_test_arrays(cached_function_dict)\n rmtree(\"./test_cache\")\n"
] | [
[
"numpy.array"
]
] |
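The tests in the row above exercise `cache_decorator.Cache` with an `.npz` target for numpy return values. A minimal usage sketch of that same decorator pattern follows; it reuses the `cache_path`/`cache_dir`/`backup` keywords exactly as the tests do, assumes the `cache_decorator` package is installed, and `expensive` is a hypothetical stand-in function.

```python
# Illustrative sketch only -- typical use of the @Cache pattern tested above.
import numpy as np
from cache_decorator import Cache  # assumption: package installed as in the tests

@Cache(
    cache_path="{cache_dir}/{_hash}.npz",   # same template as in the tests above
    cache_dir="./example_cache",
    backup=False,
)
def expensive(a):
    # stand-in for a slow computation returning a numpy array
    return np.arange(a) ** 2

first = expensive(10)    # computed and written to ./example_cache
second = expensive(10)   # served from the cached .npz instead of recomputed
assert np.array_equal(first, second)
```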
vios-s/RA_FA_Cardiac | [
"8af4b82b62b53e29e96084113a5d379774c11b12"
] | [
"dice_loss.py"
] | [
"import torch\r\nfrom torch.autograd import Function\r\n\r\n\r\nclass DiceCoeff(Function):\r\n \"\"\"Dice coeff for individual examples\"\"\"\r\n\r\n def forward(self, input, target):\r\n self.save_for_backward(input, target)\r\n eps = 0.0001\r\n self.inter = torch.dot(input.view(-1), target.view(-1))\r\n self.union = torch.sum(input) + torch.sum(target) + eps\r\n\r\n t = (2 * self.inter.float() + eps) / self.union.float()\r\n return t\r\n\r\n # This function has only a single output, so it gets only one gradient\r\n def backward(self, grad_output):\r\n\r\n input, target = self.saved_variables\r\n grad_input = grad_target = None\r\n\r\n if self.needs_input_grad[0]:\r\n grad_input = grad_output * 2 * (target * self.union - self.inter) \\\r\n / (self.union * self.union)\r\n if self.needs_input_grad[1]:\r\n grad_target = None\r\n\r\n return grad_input, grad_target\r\n\r\n\r\ndef dice_coeff(input, target, device):\r\n \"\"\"Dice coeff for batches\"\"\"\r\n if input.is_cuda:\r\n s = torch.FloatTensor(1).zero_()\r\n s = s.to(device)\r\n else:\r\n s = torch.FloatTensor(1).zero_()\r\n\r\n for i, c in enumerate(zip(input, target)):\r\n s = s + DiceCoeff().forward(c[0], c[1])\r\n\r\n return s / (i + 1)"
] | [
[
"torch.sum",
"torch.FloatTensor"
]
] |
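The `DiceCoeff` record above implements the coefficient as a custom autograd `Function`. As a sanity check, the same forward computation can be restated functionally; this sketch mirrors the record's formula and `eps`, with `dice_coefficient` being a hypothetical name rather than part of that repository.

```python
# Illustrative sketch only -- functional restatement of DiceCoeff.forward above.
import torch

def dice_coefficient(pred: torch.Tensor, target: torch.Tensor, eps: float = 1e-4) -> torch.Tensor:
    inter = torch.dot(pred.reshape(-1), target.reshape(-1))   # element-wise overlap
    union = pred.sum() + target.sum() + eps
    return (2 * inter + eps) / union

pred = torch.tensor([0.8, 0.1, 0.9, 0.0])
target = torch.tensor([1.0, 0.0, 1.0, 0.0])
print(dice_coefficient(pred, target))  # close to 1 for well-overlapping masks
```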
openforcefield/openff-recharge | [
"0ea3ef986e33c3ecf05924e64fb2e1872913b093"
] | [
"openff/recharge/esp/qcarchive.py"
] | [
"import json\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple\n\nimport numpy\nfrom openff.utilities import requires_package\nfrom pydantic import ValidationError\n\nfrom openff.recharge.esp import ESPSettings, PCMSettings\nfrom openff.recharge.esp.storage import MoleculeESPRecord\nfrom openff.recharge.grids import GridGenerator, GridSettings\nfrom openff.recharge.utilities.exceptions import RechargeException\nfrom openff.recharge.utilities.openeye import molecule_to_conformers\n\nif TYPE_CHECKING:\n import qcelemental.models\n import qcelemental.models.results\n import qcportal.models\n\nQCFractalResults = List[\n Tuple[\"qcelemental.models.Molecule\", \"qcportal.models.ResultRecord\"]\n]\nQCFractalKeywords = Dict[str, \"qcportal.models.KeywordSet\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MissingQCMoleculesError(RechargeException):\n \"\"\"An exception raised when an expected set of molecules are not present\n in a QC data set.\"\"\"\n\n def __init__(self, data_set_name: str, missing_smiles: Iterable[str]):\n\n smiles_string = \"\\n\".join(missing_smiles)\n\n super(MissingQCMoleculesError, self).__init__(\n f\"The {smiles_string} SMILES patterns were not found in the \"\n f\"{data_set_name} data set.\"\n )\n\n self.data_set_name = data_set_name\n self.missing_smiles = missing_smiles\n\n\nclass MissingQCResultsError(RechargeException):\n \"\"\"An exception raised when an expected set of results are not present\n in a QC data set.\"\"\"\n\n def __init__(self, data_set_name: str, missing_ids: Iterable[str]):\n\n id_string = \"\\n\".join(missing_ids)\n\n super(MissingQCResultsError, self).__init__(\n f\"The result records associated with the following molecule ids from the \"\n f\"{data_set_name} data set could not be retrieved from QCA: {id_string}\"\n )\n\n self.data_set_name = data_set_name\n self.missing_ids = missing_ids\n\n\nclass MissingQCWaveFunctionError(RechargeException):\n \"\"\"An exception raised when a result does not store the required information about\n a computed QM wavefunction.\"\"\"\n\n def __init__(self, result_id: str):\n\n super(MissingQCWaveFunctionError, self).__init__(\n f\"The result with id={result_id} does not store the required wavefunction.\"\n f\"Make sure to use at minimum the 'orbitals_and_eigenvalues' wavefunction \"\n f\"protocol when computing the data set.\"\n )\n self.result_id = result_id\n\n\nclass InvalidPCMKeywordError(RechargeException):\n \"\"\"An exception raised when the PCM settings found in the 'pcm__input' entry of\n an entries keywords cannot be safely parsed.\"\"\"\n\n def __init__(self, input_string: str):\n\n super(InvalidPCMKeywordError, self).__init__(\n f\"The PCM settings could not be safely parsed: {input_string}\"\n )\n\n\ndef _parse_pcm_input(input_string: str) -> PCMSettings:\n \"\"\"Attempts to parse a set of PCM settings from a PSI4 keyword string.\"\"\"\n\n # Convert the string to a JSON like string.\n value = input_string.replace(\" \", \"\").replace(\"=\", \":\").replace(\"{\", \":{\")\n value = re.sub(r\"(\\d*[a-z][a-z\\d]*)\", r'\"\\1\"', value)\n value = re.sub(r'([\"\\d}])\"', r'\\1,\"', value.replace(\"\\n\", \"\"))\n value = value.replace('\"true\"', \"true\")\n value = value.replace('\"false\"', \"false\")\n\n solvent_map = {\"H2O\": \"Water\"}\n radii_map = {\"BONDI\": \"Bondi\", \"UFF\": \"UFF\", \"ALLINGER\": \"Allinger\"}\n\n try:\n # Load the string into a dictionary.\n pcm_dict = json.loads(f\"{{{value}}}\")\n\n # Validate some of the settings which 
we do not store in the settings\n # object yet.\n assert pcm_dict[\"cavity\"][\"type\"].upper() == \"GEPOL\"\n assert pcm_dict[\"cavity\"][\"mode\"].upper() == \"IMPLICIT\"\n assert numpy.isclose(pcm_dict[\"cavity\"][\"minradius\"], 52.917721067)\n assert pcm_dict[\"units\"].upper() == \"ANGSTROM\"\n assert pcm_dict[\"codata\"] == 2010\n assert pcm_dict[\"medium\"][\"nonequilibrium\"] is False\n assert pcm_dict[\"medium\"][\"matrixsymm\"] is True\n assert numpy.isclose(pcm_dict[\"medium\"][\"diagonalscaling\"], 1.07)\n assert numpy.isclose(pcm_dict[\"medium\"][\"proberadius\"], 0.52917721067)\n assert numpy.isclose(pcm_dict[\"medium\"][\"correction\"], 0.0)\n\n # noinspection PyTypeChecker\n pcm_settings = PCMSettings(\n solver=pcm_dict[\"medium\"][\"solvertype\"].upper(),\n solvent=solvent_map[pcm_dict[\"medium\"][\"solvent\"].upper()],\n radii_model=radii_map[pcm_dict[\"cavity\"][\"radiiset\"].upper()],\n radii_scaling=pcm_dict[\"cavity\"][\"scaling\"],\n cavity_area=pcm_dict[\"cavity\"][\"area\"],\n )\n\n except (AssertionError, ValidationError):\n raise InvalidPCMKeywordError(input_string)\n except Exception as e:\n raise e\n\n return pcm_settings\n\n\ndef _compare_pcm_settings(settings_a: PCMSettings, settings_b: PCMSettings) -> bool:\n \"\"\"Compares if two PCM settings are identical.\"\"\"\n\n for field in PCMSettings.__fields__:\n\n value_a = getattr(settings_a, field)\n value_b = getattr(settings_b, field)\n\n if isinstance(value_a, float) and not numpy.isclose(value_a, value_b):\n return False\n elif not isinstance(value_a, float) and value_a != value_b:\n return False\n\n return True\n\n\n@requires_package(\"cmiles\")\n@requires_package(\"qcportal\")\ndef retrieve_qcfractal_results(\n data_set_name: str,\n subset: Optional[Iterable[str]],\n method: str,\n basis: str,\n pcm_settings: Optional[PCMSettings],\n qcfractal_address: Optional[str] = None,\n error_on_missing: bool = True,\n) -> Tuple[QCFractalResults, QCFractalKeywords]:\n \"\"\"Attempt to retrieve the results for the requested data set from a QCFractal\n server.\n\n Parameters\n ----------\n data_set_name\n The name of the data set to retrieve the results from.\n subset\n The SMILES representations of the subset of molecules to retrieve from the data\n set.\n method\n The method which the results should have been computed using.\n basis\n The basis which the results should have been computed using.\n pcm_settings\n The PCM settings which the results should have been computed using.\n Use ``None`` to specify that PCM should not have been enabled.\n qcfractal_address\n An optional address to the QCFractal server instance which stores the data set.\n error_on_missing\n Whether to raise an exception when either a molecule listed in the subset\n cannot be found in the data set, or when a result record could not be found\n for one of the requested molecule in the data set.\n\n Returns\n -------\n A list of the retrieved results (alongside their corresponding molecule records)\n and a dictionary of the keywords referenced by the results entries.\n \"\"\"\n\n import cmiles\n import qcportal\n from qcelemental.models import Molecule as QCMolecule\n\n # Map the input smiles to uniform isomeric and explicit hydrogen smiles.\n subset = (\n None\n if subset is None\n else [\n cmiles.get_molecule_ids(smiles, \"openeye\", strict=False)[\n \"canonical_isomeric_explicit_hydrogen_smiles\"\n ]\n for smiles in subset\n ]\n )\n\n # Connect to the default QCA server and retrieve the data set of interest.\n if qcfractal_address is None:\n 
client = qcportal.FractalClient()\n else:\n client = qcportal.FractalClient(address=qcfractal_address)\n\n # noinspection PyTypeChecker\n collection: qcportal.collections.Dataset = client.get_collection(\n \"Dataset\", data_set_name\n )\n\n # Retrieve the ids of the molecules of interest.\n molecules = {}\n found_smiles = set()\n\n for _, molecule_row in collection.get_molecules().iterrows():\n\n qc_molecule: QCMolecule = molecule_row[\"molecule\"]\n\n # Manually map the molecule to a dictionary as CMILES expects a flat geometry\n # array.\n qc_molecule_dict = {\n \"symbols\": qc_molecule.symbols,\n \"connectivity\": qc_molecule.connectivity,\n \"geometry\": qc_molecule.geometry.flatten(),\n \"molecular_charge\": qc_molecule.molecular_charge,\n \"molecular_multiplicity\": qc_molecule.molecular_multiplicity,\n }\n\n cmiles_ids = cmiles.get_molecule_ids(qc_molecule_dict, toolkit=\"openeye\")\n molecule_smiles = cmiles_ids[\"canonical_isomeric_explicit_hydrogen_smiles\"]\n\n if subset is not None and molecule_smiles not in subset:\n continue\n\n molecules[qc_molecule.id] = qc_molecule\n found_smiles.add(molecule_smiles)\n\n molecule_ids = sorted(molecules)\n\n # Make sure the data set contains the requested subset.\n missing_smiles = (set() if subset is None else {*subset}) - found_smiles\n\n if len(missing_smiles) > 0:\n\n if error_on_missing:\n raise MissingQCMoleculesError(data_set_name, missing_smiles)\n else:\n logger.warning(\n f\"The following smiles count not be found in the {data_set_name} \"\n f\"data set: {missing_smiles}\"\n )\n\n # Retrieve the data sets results records\n results = []\n\n paginating = True\n page_index = 0\n\n while paginating:\n\n page_results = client.query_results(\n molecule=molecule_ids,\n method=method,\n basis=basis,\n limit=client.server_info[\"query_limit\"],\n skip=page_index,\n )\n\n results.extend(page_results)\n\n paginating = len(page_results) > 0\n page_index += client.server_info[\"query_limit\"]\n\n # Filter based on the PCM settings.\n keyword_ids = list({result.keywords for result in results})\n keywords: Dict[\n str,\n ] = {keyword_id: client.query_keywords(keyword_id)[0] for keyword_id in keyword_ids}\n\n if pcm_settings is None:\n matching_keywords = [\n keyword_id\n for keyword_id, keyword in keywords.items()\n if \"pcm\" not in keyword.values or keyword.values[\"pcm\"] is False\n ]\n else:\n matching_keywords = [\n keyword_id\n for keyword_id, keyword in keywords.items()\n if \"pcm\" in keyword.values\n and keyword.values[\"pcm\"] is True\n and \"pcm__input\" in keyword.values\n and _compare_pcm_settings(\n pcm_settings, _parse_pcm_input(keyword.values[\"pcm__input\"])\n )\n ]\n\n results = list(\n filter(lambda x: x.keywords is None or x.keywords in matching_keywords, results)\n )\n\n # Make sure none of the records are missing.\n result_ids = {result.molecule for result in results}\n\n missing_result_ids = {*molecule_ids} - {*result_ids}\n\n if len(missing_result_ids) > 0:\n\n if error_on_missing:\n raise MissingQCResultsError(data_set_name, missing_result_ids)\n else:\n logger.warning(\n f\"Result records could not be found for the following molecules in the \"\n f\"{data_set_name}: {missing_result_ids}\"\n )\n\n return (\n [(molecules[result.molecule], result) for result in results],\n {keyword_id: keywords[keyword_id] for keyword_id in matching_keywords},\n )\n\n\ndef reconstruct_density(\n wavefunction: \"qcelemental.models.results.WavefunctionProperties\", n_alpha: int\n) -> numpy.ndarray:\n \"\"\"Reconstructs a density matrix from 
a QCFractal wavefunction, making sure to\n order the entries in the ordering that psi4 expects (e.g. spherical, cartesian).\n\n Parameters\n ----------\n wavefunction\n The wavefunction return by QCFractal.\n n_alpha\n The number of alpha electrons in the computation.\n\n Returns\n -------\n The reconstructed density.\n \"\"\"\n\n # Reconstruct the density in CCA order\n orbitals = getattr(wavefunction, wavefunction.orbitals_a)\n density = numpy.dot(orbitals[:, :n_alpha], orbitals[:, :n_alpha].T)\n\n # Re-order the density matrix to match the ordering expected by psi4.\n angular_momenta = {\n angular_momentum\n for atom in wavefunction.basis.atom_map\n for shell in wavefunction.basis.center_data[atom].electron_shells\n for angular_momentum in shell.angular_momentum\n }\n\n spherical_maps = {\n L: numpy.array(\n list(range(L * 2 - 1, 0, -2)) + [0] + list(range(2, L * 2 + 1, 2))\n )\n for L in angular_momenta\n }\n\n # Build a flat index that we can transform the AO quantities\n ao_map = []\n counter = 0\n\n for atom in wavefunction.basis.atom_map:\n\n center = wavefunction.basis.center_data[atom]\n for shell in center.electron_shells:\n\n if shell.harmonic_type == \"cartesian\":\n ao_map.append(numpy.arange(counter, counter + shell.nfunctions()))\n\n else:\n smap = spherical_maps[shell.angular_momentum[0]]\n ao_map.append(smap + counter)\n\n counter += shell.nfunctions()\n\n ao_map = numpy.hstack(ao_map)\n\n reverse_ao_map = {map_index: i for i, map_index in enumerate(ao_map)}\n reverse_ao_map = numpy.array([reverse_ao_map[i] for i in range(len(ao_map))])\n\n reordered_density = density[reverse_ao_map[:, None], reverse_ao_map]\n return reordered_density\n\n\n@requires_package(\"psi4\")\ndef compute_esp(\n qc_molecule, density, esp_settings, grid\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n \"\"\"Computes the ESP and electric field for a particular molecule on\n a specified grid and using the specified settings.\n\n Parameters\n ----------\n qc_molecule\n The molecule to compute the ESP / electric field of.\n density\n The electron density of the molecule.\n esp_settings\n The settings to use when computing the ESP / electric field.\n grid\n The grid to evaluate the ESP and electric field on.\n\n Returns\n -------\n A tuple of the evaluated ESP with shape=(n_grid_points, 1) and the electric\n field with shape=(n_grid_points, 3)\n \"\"\"\n import psi4\n\n psi4.core.be_quiet()\n\n psi4_molecule = psi4.geometry(qc_molecule.to_string(\"psi4\", \"angstrom\"))\n psi4_molecule.reset_point_group(\"c1\")\n\n psi4_wavefunction = psi4.core.RHF(\n psi4.core.Wavefunction.build(psi4_molecule, esp_settings.basis),\n psi4.core.SuperFunctional(),\n )\n psi4_wavefunction.Da().copy(psi4.core.Matrix.from_array(density))\n\n psi4_calculator = psi4.core.ESPPropCalc(psi4_wavefunction)\n psi4_grid = psi4.core.Matrix.from_array(grid)\n\n esp = numpy.array(\n psi4_calculator.compute_esp_over_grid_in_memory(psi4_grid)\n ).reshape(-1, 1)\n\n field = numpy.array(psi4_calculator.compute_field_over_grid_in_memory(psi4_grid))\n\n return esp, field\n\n\n@requires_package(\"cmiles\")\n@requires_package(\"qcportal\")\ndef from_qcfractal_result(\n qc_result: \"qcportal.models.ResultRecord\",\n qc_molecule: \"qcelemental.models.Molecule\",\n qc_keyword_set: \"qcportal.models.KeywordSet\",\n grid_settings: GridSettings,\n) -> MoleculeESPRecord:\n \"\"\"A function which will evaluate the the ESP and electric field from a set of\n wavefunctions which have been computed by a QCFractal instance using the Psi4\n package.\n\n 
Parameters\n ----------\n qc_result\n The QCFractal result record which encodes the wavefunction\n qc_molecule\n The QC molecule corresponding to the result record.\n qc_keyword_set\n The keyword set used when computing the result record.\n grid_settings\n The settings which define the grid to evaluate the electronic properties on.\n\n Returns\n -------\n The values of the ESP and electric field stored in storable records.\n \"\"\"\n\n import cmiles.utils\n from qcelemental.models.results import WavefunctionProperties\n\n # Compute and store the ESP and electric field for each result.\n if qc_result.wavefunction is None:\n raise MissingQCWaveFunctionError(qc_result.id)\n\n # Retrieve the wavefunction and use it to reconstruct the electron density.\n wavefunction = WavefunctionProperties(\n **qc_result.get_wavefunction(\n [\"scf_eigenvalues_a\", \"scf_orbitals_a\", \"basis\", \"restricted\"]\n ),\n **qc_result.wavefunction[\"return_map\"],\n )\n\n density = reconstruct_density(wavefunction, qc_result.properties.calcinfo_nalpha)\n\n # Convert the OE molecule to a QC molecule and extract the conformer of\n # interest.\n oe_molecule = cmiles.utils.load_molecule(\n {\n \"symbols\": qc_molecule.symbols,\n \"connectivity\": qc_molecule.connectivity,\n \"geometry\": qc_molecule.geometry.flatten(),\n \"molecular_charge\": qc_molecule.molecular_charge,\n \"molecular_multiplicity\": qc_molecule.molecular_multiplicity,\n },\n toolkit=\"openeye\",\n )\n\n conformers = molecule_to_conformers(oe_molecule)\n assert len(conformers) == 1\n\n conformer = conformers[0]\n\n # Construct the grid to evaluate the ESP / electric field on.\n grid = GridGenerator.generate(oe_molecule, conformer, grid_settings)\n\n # Retrieve the ESP settings from the record.\n enable_pcm = \"pcm\" in qc_keyword_set.values\n\n esp_settings = ESPSettings(\n basis=qc_result.basis,\n method=qc_result.method,\n grid_settings=grid_settings,\n pcm_settings=(\n None\n if not enable_pcm\n else _parse_pcm_input(qc_keyword_set.values[\"pcm__input\"])\n ),\n )\n\n # Reconstruct the ESP and field from the density.\n esp, electric_field = compute_esp(qc_molecule, density, esp_settings, grid)\n\n return MoleculeESPRecord.from_oe_molecule(\n oe_molecule,\n conformer=conformer,\n grid_coordinates=grid,\n esp=esp,\n electric_field=electric_field,\n esp_settings=esp_settings,\n )\n"
] | [
[
"numpy.hstack",
"numpy.dot",
"numpy.isclose"
]
] |
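The numpy core of `reconstruct_density` in the row above is the closed-shell density construction D = C_occ · C_occᵀ from the occupied orbital coefficients. A toy-sized sketch of just that step, using random numbers as a stand-in for a real wavefunction (sizes and data are assumptions, not QCFractal output):

```python
# Illustrative sketch only -- the linear algebra at the heart of reconstruct_density.
import numpy

n_basis, n_alpha = 6, 2                                # assumption: toy sizes
rng = numpy.random.default_rng(0)
orbitals = rng.standard_normal((n_basis, n_basis))     # stand-in for scf_orbitals_a

# One-particle density matrix from the occupied columns only.
density = numpy.dot(orbitals[:, :n_alpha], orbitals[:, :n_alpha].T)

assert density.shape == (n_basis, n_basis)
assert numpy.allclose(density, density.T)              # density matrix is symmetric
```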
Xuyuanjia2014/tvm | [
"892f8305e77ad506660b851f9ce4c81be0f95d9d"
] | [
"tests/python/frontend/caffe/test_forward.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"\nCaffe testcases\n====================\nThis article is a test script to test Caffe operator with Relay.\n\"\"\"\nimport os\n\nos.environ[\"GLOG_minloglevel\"] = \"2\"\nimport sys\nimport logging\n\nlogging.basicConfig(level=logging.ERROR)\n\nimport numpy as np\nfrom google.protobuf import text_format\nimport caffe\nfrom caffe import layers as L, params as P\nfrom caffe.proto import caffe_pb2 as pb\n\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import utils, graph_executor\nfrom tvm.contrib.download import download_testdata\n\nCURRENT_DIR = os.path.join(os.path.expanduser(\"~\"), \".tvm_test_data\", \"caffe_test\")\n\n#######################################################################\n# Generic functions for TVM & Caffe\n# ------------------------------------------\n\n\ndef _create_dir(d_path):\n \"\"\"If the directory is not existed, create it\"\"\"\n if not (os.path.exists(d_path) and os.path.isdir(d_path)):\n os.makedirs(d_path)\n\n\ndef _list_to_str(ll):\n \"\"\"Convert list or tuple to str, separated by underline.\"\"\"\n if isinstance(ll, (tuple, list)):\n tmp = [str(i) for i in ll]\n return \"_\".join(tmp)\n\n\ndef _gen_filename_str(op_name, data_shape, *args, **kwargs):\n \"\"\"Combining the filename according to the op_name, shape and other args.\"\"\"\n file_dir = os.path.join(CURRENT_DIR, op_name)\n _create_dir(file_dir)\n res = op_name + \"_\"\n shape_str = _list_to_str(list(data_shape))\n res += shape_str\n for arg in args:\n if isinstance(arg, (tuple, list)):\n res += \"_\" + _list_to_str(arg)\n elif isinstance(arg, (int, float, str)):\n res += \"_\" + str(arg)\n for _, v in kwargs.items():\n if isinstance(v, (tuple, list)):\n res += \"_\" + _list_to_str(v)\n elif isinstance(v, (int, float, str)):\n res += \"_\" + str(v)\n res = res.replace(\".\", \"_\")\n res = res.replace(\"-\", \"_\")\n proto_file = os.path.join(file_dir, res + \".prototxt\")\n blob_file = os.path.join(file_dir, res + \".caffemodel\")\n solver_file = os.path.join(file_dir, res + \"_solver.prototxt\")\n\n return (proto_file, blob_file, solver_file)\n\n\ndef _save_prototxt(n_netspec, f_path):\n \"\"\"Generate .prototxt file according to caffe.NetSpec\"\"\"\n s = n_netspec.to_proto()\n with open(f_path, \"w\") as f:\n f.write(str(s))\n\n\ndef _save_solver(solver_file, proto_file, blob_file):\n \"\"\"Define a solver proto, you can change the configs.\"\"\"\n blob_file_prefix = blob_file.split(\".caffemodel\")[0]\n s = pb.SolverParameter()\n s.train_net = proto_file\n s.base_lr = 0.01\n s.momentum = 0.9\n s.weight_decay = 0.0005\n s.lr_policy = \"inv\"\n s.gamma = 0.0001\n s.power = 0.75\n s.display = 1\n s.max_iter = 100000\n s.snapshot = 
100000\n s.snapshot_prefix = blob_file_prefix\n\n with open(solver_file, \"w\") as f:\n f.write(str(s))\n\n\ndef _save_caffemodel(solver_file, blob_file):\n \"\"\"Generate .caffemodel file.\"\"\"\n solver = caffe.SGDSolver(solver_file)\n solver.net.save(blob_file)\n\n\ndef _gen_model_files(n_netspec, proto_file, blob_file, solver_file):\n _save_prototxt(n_netspec, proto_file)\n _save_solver(solver_file, proto_file, blob_file)\n _save_caffemodel(solver_file, blob_file)\n\n\ndef _siso_op(data, func, *args, **kwargs):\n \"\"\"Create single input and single output Caffe op\"\"\"\n n = caffe.NetSpec()\n n.data = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n n.output = func(n.data, *args, **kwargs)\n return n\n\n\ndef _miso_op(data_list, func, *args, **kwargs):\n \"\"\"Create multi input and single output Caffe op\"\"\"\n n = caffe.NetSpec()\n if not isinstance(data_list, (tuple, list)):\n raise TypeError(\"Need tuple or list but get {}\".format(type(data_list)))\n input_list = list()\n for idx, data in enumerate(data_list):\n n[\"data\" + str(idx)] = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n input_list.append(n[\"data\" + str(idx)])\n n.output = func(*input_list, *args, **kwargs)\n return n\n\n\ndef _simo_op(data, func, *args, **kwargs):\n \"\"\"Create single input and multi output Caffe op\"\"\"\n n = caffe.NetSpec()\n n.data = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n output_list = func(n.data, *args, **kwargs)\n for idx, out in enumerate(output_list):\n n[\"output\" + str(idx)] = out\n return n\n\n\ndef _run_caffe(data, proto_file, blob_file):\n \"\"\"Run caffe model by Caffe according to .caffemodel and .prototxt\"\"\"\n net = caffe.Net(proto_file, blob_file, caffe.TEST)\n if isinstance(data, (list, tuple)):\n for idx, d in enumerate(data):\n net.blobs[\"data\" + str(idx)].data[...] = d\n else:\n net.blobs[\"data\"].data[...] 
= data\n out = net.forward()\n\n caffe_output = list()\n for i in range(len(out.keys())):\n if \"output\" + str(i) not in out.keys():\n caffe_output.clear()\n return list(out.values())\n caffe_output.append(out[\"output\" + str(i)])\n return caffe_output\n\n\ndef _run_tvm(data, proto_file, blob_file):\n \"\"\"Run caffe model by TVM according to .caffemodel and .prototxt\"\"\"\n init_net = pb.NetParameter()\n predict_net = pb.NetParameter()\n\n # load model\n with open(proto_file, \"r\") as f:\n text_format.Merge(f.read(), predict_net)\n # load blob\n with open(blob_file, \"rb\") as f:\n init_net.ParseFromString(f.read())\n\n shape_dict = dict()\n dtype_dict = dict()\n if isinstance(data, (tuple, list)):\n for idx, d in enumerate(data):\n shape_dict[\"data\" + str(idx)] = d.shape\n dtype_dict[\"data\" + str(idx)] = \"float32\"\n else:\n shape_dict = {\"data\": data.shape}\n dtype_dict = {\"data\": \"float32\"}\n\n mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)\n\n target = \"llvm\"\n\n dev = tvm.cpu(0)\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, params=params)\n dtype = \"float32\"\n m = graph_executor.GraphModule(lib[\"default\"](dev))\n if isinstance(data, (tuple, list)):\n for idx, d in enumerate(data):\n m.set_input(\"data\" + str(idx), tvm.nd.array(d.astype(dtype)))\n else:\n m.set_input(\"data\", tvm.nd.array(data.astype(dtype)))\n # execute\n m.run()\n tvm_output = list()\n # get outputs\n for i in range(m.get_num_outputs()):\n tvm_output.append(m.get_output(i).numpy())\n return tvm_output\n\n\ndef _compare_caffe_tvm(caffe_out, tvm_out, is_network=False):\n for i in range(len(caffe_out)):\n if is_network:\n caffe_out[i] = caffe_out[i][:1]\n tvm.testing.assert_allclose(caffe_out[i], tvm_out[i], rtol=1e-5, atol=1e-5)\n\n\ndef _test_op(data, func_op, op_name, **kwargs):\n \"\"\"Single op testing pipline.\"\"\"\n shape_list = list()\n if isinstance(data, (list, tuple)):\n n = _miso_op(data, func_op, **kwargs)\n for d in data:\n shape_list.extend(list(d.shape))\n else:\n output_num = 1\n if \"ntop\" in kwargs.keys():\n output_num = kwargs[\"ntop\"]\n if output_num == 1:\n n = _siso_op(data, func_op, **kwargs)\n else:\n n = _simo_op(data, func_op, **kwargs)\n shape_list = list(data.shape)\n\n # obtain the .caffemodel file and .prototxt file\n (proto_file, blob_file, solver_file) = _gen_filename_str(op_name, shape_list, **kwargs)\n _gen_model_files(n, proto_file, blob_file, solver_file)\n # run model in Caffe\n caffe_out = _run_caffe(data, proto_file, blob_file)\n # run model in TVM\n tvm_out = _run_tvm(data, proto_file, blob_file)\n _compare_caffe_tvm(caffe_out, tvm_out)\n\n\ndef _test_network(data, proto_file, blob_file):\n # run model in Caffe\n caffe_out = _run_caffe(data, proto_file, blob_file)\n # run model in TVM\n tvm_out = _run_tvm(data, proto_file, blob_file)\n _compare_caffe_tvm(caffe_out, tvm_out, is_network=True)\n\n\n#######################################################################\n# BatchNorm\n# -----------\n\n\ndef _test_batchnorm(data, moving_average_fraction=0.999, eps=1e-5):\n \"\"\"One iteration of BatchNorm\"\"\"\n _test_op(\n data, L.BatchNorm, \"BatchNorm\", moving_average_fraction=moving_average_fraction, eps=eps\n )\n\n\ndef test_forward_BatchNorm():\n \"\"\"BatchNorm\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_batchnorm(data)\n _test_batchnorm(data, moving_average_fraction=0.88, 
eps=1e-4)\n\n\n#######################################################################\n# Concat\n# -----------\n\n\ndef _test_concat(data_list, axis=1):\n \"\"\"One iteration of Concat\"\"\"\n _test_op(data_list, L.Concat, \"Concat\", axis=axis)\n\n\ndef test_forward_Concat():\n \"\"\"Concat\"\"\"\n _test_concat([np.random.rand(1, 3, 10, 10), np.random.rand(1, 2, 10, 10)], axis=1)\n _test_concat([np.random.rand(3, 10, 10), np.random.rand(2, 10, 10)], axis=0)\n _test_concat([np.random.rand(3, 10), np.random.rand(2, 10)], axis=0)\n\n\n#######################################################################\n# Convolution\n# -----------\n\n\ndef _test_convolution(data, **kwargs):\n \"\"\"One iteration of Convolution\"\"\"\n _test_op(data, L.Convolution, \"Convolution\", **kwargs)\n\n\ndef test_forward_Convolution():\n \"\"\"Convolution\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad=0,\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=False,\n pad=[1, 2],\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad=[1, 2],\n kernel_size=[3, 5],\n stride=[2, 1],\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n np.random.rand(1, 2, 10, 10).astype(np.float32),\n num_output=20,\n bias_term=True,\n pad=[1, 2],\n kernel_size=[3, 5],\n stride=[2, 1],\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n group=2,\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad_h=1,\n pad_w=2,\n kernel_h=3,\n kernel_w=5,\n stride_h=2,\n stride_w=1,\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# Crop\n# -----------\n\n\ndef _test_crop(data, **kwargs):\n \"\"\"One iteration of Crop\"\"\"\n _test_op(data, L.Crop, \"Crop\", **kwargs)\n\n\ndef test_forward_Crop():\n \"\"\"Crop\"\"\"\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)\n _test_crop(\n [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]\n )\n _test_crop(\n [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]\n )\n _test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])\n _test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])\n\n\n#######################################################################\n# Deconvolution\n# -----------\n\n\ndef _test_deconvolution(data, **kwargs):\n \"\"\"One iteration of Deconvolution\"\"\"\n _test_op(data, L.Deconvolution, \"Deconvolution\", **kwargs)\n\n\ndef test_forward_Deconvolution():\n \"\"\"Deconvolution\"\"\"\n data = np.random.rand(1, 16, 32, 32).astype(np.float32)\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=True,\n pad=0,\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n 
bias_filler=dict(type=\"xavier\"),\n ),\n )\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=False,\n pad=[1, 2],\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n ),\n )\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=True,\n pad_h=1,\n pad_w=2,\n kernel_h=3,\n kernel_w=5,\n stride_h=2,\n stride_w=1,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n ),\n )\n\n\n#######################################################################\n# Dropout\n# -----------\n\n\ndef _test_dropout(data, **kwargs):\n \"\"\"One iteration of Dropout\"\"\"\n _test_op(data, L.Dropout, \"Dropout\", **kwargs)\n\n\ndef test_forward_Dropout():\n \"\"\"Dropout\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_dropout(data)\n _test_dropout(data, dropout_ratio=0.7)\n\n\n#######################################################################\n# Eltwise\n# -----------\n\n\ndef _test_eltwise(data_list, **kwargs):\n \"\"\"One iteration of Eltwise\"\"\"\n _test_op(data_list, L.Eltwise, \"Eltwise\", **kwargs)\n\n\ndef test_forward_Eltwise():\n \"\"\"Eltwise\"\"\"\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=0,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=1,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=2,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=1,\n coeff=[0.5, 1],\n )\n\n\n#######################################################################\n# Flatten\n# -----------\n\n\ndef _test_flatten(data, axis=1):\n \"\"\"One iteration of Flatten\"\"\"\n _test_op(data, L.Flatten, \"Flatten\", axis=axis)\n\n\ndef test_forward_Flatten():\n \"\"\"Flatten\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_flatten(data)\n _test_flatten(data, axis=1)\n\n\n#######################################################################\n# Flatten\n# -----------\n\n\ndef _test_inner_product(data, **kwargs):\n \"\"\"One iteration of InnerProduct\"\"\"\n _test_op(data, L.InnerProduct, \"InnerProduct\", **kwargs)\n\n\ndef test_forward_InnerProduct():\n \"\"\"InnerProduct\"\"\"\n data = np.random.rand(1, 3, 10, 10)\n _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type=\"xavier\"))\n _test_inner_product(\n data,\n num_output=20,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_inner_product(\n np.random.rand(20, 10).astype(np.float32),\n num_output=30,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# LRN\n# -----------\n\n\ndef _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):\n \"\"\"One iteration of LRN\"\"\"\n _test_op(data, L.LRN, \"LRN\", local_size=local_size, alpha=alpha, beta=beta, k=k)\n\n\ndef test_forward_LRN():\n \"\"\"LRN\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_lrn(data)\n _test_lrn(data, local_size=3)\n _test_lrn(data, local_size=3, alpha=2.0)\n _test_lrn(\n data,\n 
local_size=3,\n alpha=2.0,\n beta=0.5,\n )\n _test_lrn(data, local_size=3, alpha=2.0, beta=0.5, k=2.0)\n\n\n#######################################################################\n# Pooling\n# -----------\n\n\ndef _test_pooling(data, **kwargs):\n \"\"\"One iteration of Pooling.\"\"\"\n _test_op(data, L.Pooling, \"Pooling\", **kwargs)\n\n\ndef test_forward_Pooling():\n \"\"\"Pooing\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n # MAX Pooling\n _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX)\n _test_pooling(\n data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.MAX\n )\n _test_pooling(data, pool=P.Pooling.MAX, global_pooling=True)\n\n # AVE Pooing\n _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.AVE)\n _test_pooling(\n data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.AVE\n )\n _test_pooling(data, pool=P.Pooling.AVE, global_pooling=True)\n\n\n#######################################################################\n# PReLU\n# -----------\n\n\ndef _test_prelu(data, **kwargs):\n \"\"\"One iteration of PReLU.\"\"\"\n _test_op(data, L.PReLU, \"PReLU\", **kwargs)\n\n\ndef test_forward_PReLU():\n \"\"\"PReLU\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_prelu(data, filler=dict(type=\"constant\", value=0.5))\n _test_prelu(data)\n _test_prelu(np.random.rand(10, 20).astype(np.float32))\n\n\n#######################################################################\n# ReLU\n# -----------\n\n\ndef _test_relu(data, **kwargs):\n \"\"\"One iteration of ReLU.\"\"\"\n _test_op(data, L.ReLU, \"ReLU\", **kwargs)\n\n\ndef test_forward_ReLU():\n \"\"\"ReLU\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_relu(data)\n _test_relu(np.random.rand(10, 20).astype(np.float32))\n\n\n#######################################################################\n# Reshape\n# -----------\n\n\ndef _test_reshape(data, **kwargs):\n \"\"\"One iteration of Reshape.\"\"\"\n _test_op(data, L.Reshape, \"Reshape\", **kwargs)\n\n\ndef test_forward_Reshape():\n \"\"\"Reshape\"\"\"\n data = np.random.rand(1, 8, 6).astype(np.float32)\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 0, 3]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 0, -1]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [0, -1]}})\n\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 3]}, \"axis\": 2})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": 1})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": -3})\n\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 4]}, \"axis\": 1, \"num_axes\": 1})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [3, 16]}, \"axis\": 1, \"num_axes\": 2})\n\n\n#######################################################################\n# Scale\n# -----------\n\n\ndef _test_scale(data, **kwargs):\n \"\"\"One iteration of Scale.\"\"\"\n _test_op(data, L.Scale, \"Scale\", **kwargs)\n\n\ndef test_forward_Scale():\n \"\"\"Scale\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_scale(data, filler=dict(type=\"xavier\"))\n _test_scale(data, filler=dict(type=\"xavier\"), bias_term=True, bias_filler=dict(type=\"xavier\"))\n\n\n#######################################################################\n# Sigmoid\n# -----------\n\n\ndef _test_sigmoid(data, 
**kwargs):\n \"\"\"One iteration of Sigmoid.\"\"\"\n _test_op(data, L.Sigmoid, \"Sigmoid\", **kwargs)\n\n\ndef test_forward_Sigmoid():\n \"\"\"Sigmoid\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_sigmoid(data)\n\n\n#######################################################################\n# Slice\n# -----------\n\n\ndef _test_slice(data, **kwargs):\n \"\"\"One iteration of Slice\"\"\"\n _test_op(data, L.Slice, \"Slice\", **kwargs)\n\n\ndef test_forward_Slice():\n \"\"\"Slice\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_slice(data, ntop=2, slice_param=dict(axis=1, slice_point=[1]))\n _test_slice(data, ntop=2, slice_param=dict(axis=-1, slice_point=[1]))\n _test_slice(data, ntop=3, slice_param=dict(axis=2, slice_point=[1, 6]))\n _test_slice(data, ntop=3)\n\n\n#######################################################################\n# Softmax\n# -----------\n\n\ndef _test_softmax(data, **kwargs):\n \"\"\"One iteration of Softmax\"\"\"\n _test_op(data, L.Softmax, \"Softmax\", **kwargs)\n\n\ndef test_forward_Softmax():\n \"\"\"Softmax\"\"\"\n _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32))\n _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32), axis=2)\n _test_softmax(np.random.rand(10, 10).astype(np.float32), axis=0)\n _test_softmax(np.random.rand(2, 10, 10).astype(np.float32), axis=1)\n\n\n#######################################################################\n# TanH\n# -----------\n\n\ndef _test_tanh(data, **kwargs):\n \"\"\"One iteration of TanH\"\"\"\n _test_op(data, L.TanH, \"TanH\", **kwargs)\n\n\ndef test_forward_TanH():\n \"\"\"TanH\"\"\"\n _test_tanh(np.random.rand(1, 3, 10, 10).astype(np.float32))\n _test_tanh(np.random.rand(3, 10, 10).astype(np.float32))\n _test_tanh(np.random.rand(10, 10).astype(np.float32))\n _test_tanh(np.random.rand(10).astype(np.float32))\n\n\n#######################################################################\n# Embed\n# -----------\n\n\ndef _test_embed(data, **kwargs):\n \"\"\"One iteration of Embed\"\"\"\n _test_op(data, L.Embed, \"Embed\", **kwargs)\n\n\ndef test_forward_Embed():\n k = 20\n data = [i for i in range(k)]\n np.random.shuffle(data)\n # dimension is 1\n data = np.asarray(data)\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 2\n data = np.reshape(data, [4, 5])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 3\n data = np.reshape(data, [2, 2, 5])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 4\n data = np.reshape(data, [2, 2, 5, 1])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n 
weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# Mobilenetv2\n# -----------\n\n\ndef _test_mobilenetv2(data):\n \"\"\"One iteration of Mobilenetv2\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process / 58.8\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/shicai/MobileNet-Caffe/raw/\" \"master/mobilenet_v2_deploy.prototxt\"\n )\n blob_file_url = (\n \"https://github.com/shicai/MobileNet-Caffe/blob/\" \"master/mobilenet_v2.caffemodel?raw=true\"\n )\n proto_file = download_testdata(proto_file_url, \"mobilenetv2.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"mobilenetv2.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Mobilenetv2():\n \"\"\"Mobilenetv2\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_mobilenetv2(data)\n\n\n#######################################################################\n# Alexnet\n# -----------\n\n\ndef _test_alexnet(data):\n \"\"\"One iteration of Alexnet\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 227, 227))\n data_process = data - mean_val\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/BVLC/caffe/raw/master/models/\" \"bvlc_alexnet/deploy.prototxt\"\n )\n blob_file_url = \"http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel\"\n proto_file = download_testdata(proto_file_url, \"alexnet.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"alexnet.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Alexnet():\n \"\"\"Alexnet\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 227, 227)).astype(np.float32)\n _test_alexnet(data)\n\n\n#######################################################################\n# Resnet50\n# -----------\n\n\ndef _test_resnet50(data):\n \"\"\"One iteration of Resnet50\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/fernchen/CaffeModels/raw/\" \"master/resnet/ResNet-50-deploy.prototxt\"\n )\n blob_file_url = (\n \"https://github.com/fernchen/CaffeModels/raw/\" \"master/resnet/ResNet-50-model.caffemodel\"\n )\n\n proto_file = download_testdata(proto_file_url, \"resnet50.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"resnet50.caffemodel\", module=\"model\")\n\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Resnet50():\n \"\"\"Resnet50\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_resnet50(data)\n\n\n#######################################################################\n# Inceptionv4\n# -----------\n\n\ndef _test_inceptionv1(data):\n \"\"\"One iteration of Inceptionv4\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n 
mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process / 58.8\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/BVLC/caffe/raw/master/models\" \"/bvlc_googlenet/deploy.prototxt\"\n )\n blob_file_url = \"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel\"\n proto_file = download_testdata(proto_file_url, \"inceptionv1.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"inceptionv1.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Inceptionv1():\n \"\"\"Inceptionv4\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_inceptionv1(data)\n\n\nif __name__ == \"__main__\":\n # NN\n test_forward_Convolution()\n test_forward_Deconvolution()\n test_forward_Dropout()\n test_forward_LRN()\n test_forward_Pooling()\n test_forward_Scale()\n test_forward_InnerProduct()\n test_forward_BatchNorm()\n\n # Elemwise\n test_forward_Eltwise()\n\n # Activation\n test_forward_PReLU()\n test_forward_ReLU()\n test_forward_Sigmoid()\n test_forward_Softmax()\n test_forward_TanH()\n\n # Reshape\n test_forward_Reshape()\n test_forward_Flatten()\n\n # Math\n test_forward_Concat()\n test_forward_Crop()\n test_forward_Slice()\n\n # End to End\n test_forward_Mobilenetv2()\n test_forward_Alexnet()\n test_forward_Resnet50()\n test_forward_Inceptionv1()\n"
] | [
[
"numpy.tile",
"numpy.random.shuffle",
"numpy.reshape",
"numpy.asarray",
"numpy.random.rand",
"numpy.array",
"numpy.random.randint"
]
] |
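The end-to-end tests in the row above (`_test_mobilenetv2`, `_test_alexnet`, `_test_resnet50`, `_test_inceptionv1`) all repeat the same NCHW mean-subtraction preprocessing with `numpy.reshape`/`numpy.tile`. A small standalone sketch of that step follows; `preprocess` is a hypothetical helper, and no Caffe or TVM call is involved.

```python
# Illustrative sketch only -- the per-channel mean subtraction used by the tests above.
import numpy as np

def preprocess(data, mean_bgr=(103.939, 116.779, 123.68), scale=None):
    """Subtract a per-channel mean from an NCHW float batch and optionally rescale."""
    n, c, h, w = data.shape
    mean = np.reshape(np.array(mean_bgr, dtype=np.float32), (1, c, 1, 1))
    mean = np.tile(mean, (1, 1, h, w))      # tiled to mirror the tests; broadcasting would also work
    out = data - mean
    if scale is not None:
        out = out / scale
    return out.astype(np.float32)

data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
processed = preprocess(data, scale=58.8)    # matches the Mobilenetv2 test settings
print(processed.shape, processed.dtype)
```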
MasterMedo/typetest | [
"7d573c6bbf0d07ffd3b2fb4a8ee9ce783df2ac26"
] | [
"typetest/analyse/typing_speed_per_char.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom io import StringIO\nfrom collections import deque\n\nfrom typetest.utils import validate_input_file_path\n\n\n@validate_input_file_path\ndef plot(input_file, size=10000, filter_func=lambda c: True):\n \"\"\"Reads last `size` lines of `input_file` and groups them by characters.\n Removes lowest and highest 10% and boxplots the data.\n\n filter_func: function taking a `char` returning `True` if char should be\n plotted, `False` otherwise. By default plots all characters.\n \"\"\"\n with open(input_file) as f:\n q = deque(f, maxlen=size)\n\n data_frame = pd.read_csv(\n StringIO(\"\".join(q)),\n header=None,\n names=[\"char\", \"duration\", \"wpm\", \"timestamp\"],\n )\n\n grouped_data_frames = filter(\n lambda t: filter_func(t[1][\"char\"].iloc[0]),\n data_frame.groupby(\"char\"),\n )\n\n typing_speeds_in_wpm = []\n chars = []\n means = []\n for char, df in grouped_data_frames:\n if filter_func(char):\n q1 = df[\"wpm\"].quantile(0.1) # noqa\n q3 = df[\"wpm\"].quantile(0.9) # noqa\n typing_speed_in_wpm = df.query(\"@q1 <= wpm <= @q3\")[\"wpm\"]\n chars.append(char)\n typing_speeds_in_wpm.append(typing_speed_in_wpm)\n mean = typing_speed_in_wpm.mean()\n means.append(mean if mean > 0 else 0)\n\n fig, ax = plt.subplots()\n\n ax.boxplot(typing_speeds_in_wpm, labels=chars)\n mean = round(sum(means) / len(means))\n ax.axhline(y=mean, color=\"r\", linestyle=\"-\", label=f\"mean {mean} wpm\")\n\n ax.set_title(f\"typing speed per character of last {size} characters\")\n ax.set_xlabel(\"characters\")\n ax.set_ylabel(\"typing speed [wpm]\")\n ax.legend()\n\n ticks = plt.yticks()[0]\n plt.yticks(np.arange(0, ticks[-1], 10))\n\n plt.show()\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots"
]
] |
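The typetest record above groups keystrokes by character, trims each group to its 10th–90th percentile, and boxplots the result. The same pattern is shown below on a tiny synthetic frame instead of the keystroke log; the column names mirror the record, but the data and the exact plot styling are assumptions.

```python
# Illustrative sketch only -- the per-character decile-trimmed boxplot pattern above.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "char": list("abc") * 100,
    "wpm": rng.normal(loc=80, scale=15, size=300).clip(min=0),
})

chars, trimmed = [], []
for char, group in df.groupby("char"):
    lo, hi = group["wpm"].quantile(0.1), group["wpm"].quantile(0.9)
    trimmed.append(group[(group["wpm"] >= lo) & (group["wpm"] <= hi)]["wpm"])
    chars.append(char)

fig, ax = plt.subplots()
ax.boxplot(trimmed, labels=chars)     # one box per character, outer deciles removed
ax.set_xlabel("characters")
ax.set_ylabel("typing speed [wpm]")
plt.show()
```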
benjeffery/tsdate | [
"93c3dabdeb857a351bf994fc56bf5b8d18bb830d"
] | [
"tests/utility_functions.py"
] | [
"# MIT License\n#\n# Copyright (C) 2020 University of Oxford\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nA collection of utilities to edit and construct tree sequences for testing purposes\n\"\"\"\n\nimport msprime\nimport numpy as np\nimport tskit\nimport io\n\n\ndef add_grand_mrca(ts):\n \"\"\"\n Function to add a grand mrca node to a tree sequence\n \"\"\"\n grand_mrca = ts.max_root_time + 1\n tables = ts.dump_tables()\n new_node_number = tables.nodes.add_row(time=grand_mrca)\n for tree in ts.trees():\n tables.edges.add_row(\n tree.interval[0], tree.interval[1], new_node_number, tree.root)\n tables.sort()\n return tables.tree_sequence()\n\n\ndef single_tree_ts_n2():\n r\"\"\"\n Simple case where we have n = 2 and one tree. 
[] marks a sample\n 2\n / \\\n [0] [1]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 0 1\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 2 0,1\n \"\"\")\n return(tskit.load_text(nodes=nodes, edges=edges, strict=False))\n\n\ndef single_tree_ts_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n4():\n r\"\"\"\n Simple case where we have n = 4 and one tree.\n 6\n / \\\n 5 \\\n / \\ \\\n 4 \\ \\\n / \\ \\ \\\n [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 2\n 6 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 1 5 2,4\n 0 1 6 3,5\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_mutation_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef site_no_mutations():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n The single site has no derived alleles.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites, strict=False)\n\n\ndef single_tree_all_samples_one_mutation_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 1 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef gils_example_tree():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Mutations marked on each branch by *.\n 4\n / \\\n / \\\n / *\n 3 *\n / \\ *\n * * *\n * \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.1 0\n 0.2 0\n 0.3 0\n 0.4 0\n 0.5 0\n 0.6 0\n 0.7 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 0 1\n 1 0 1\n 2 1 1\n 3 2 1\n 4 2 1\n 5 2 1\n 6 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, 
edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef polytomy_tree_ts():\n r\"\"\"\n Simple case where we have n = 3 and a polytomy.\n 3\n /|\\\n / | \\\n [0][1][2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1,2\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_internal_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Node 3 is an internal sample.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts():\n r\"\"\"\n Simple case where we have n = 3 and 2 trees.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 \\ . | | \\\n / \\ \\ . | | \\\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1 4 2\n 0 0.2 4 3\n 0.2 1 4 1\n 0.2 1 5 0,4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_extra_length():\n r\"\"\"\n Simple case where we have n = 3 and 2 trees, but with extra length\n for testing keep_intervals() and delete_intervals().\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 \\ . | | \\\n / \\ \\ . | | \\\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1.5 4 2\n 0 0.2 4 3\n 0.2 1.5 4 1\n 0.2 1.5 5 0,4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_n3_non_contemporaneous():\n r\"\"\"\n Simple case where we have n = 3 and two trees with node 2 ancient.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 [2] . | |[2]\n / \\ . | |\n [0] [1] . [0] [1]\n \"\"\"\n ts = two_tree_ts()\n tables = ts.dump_tables()\n time = tables.nodes.time\n time[2] = time[3]\n tables.nodes.time = time\n return tables.tree_sequence()\n\n\ndef single_tree_ts_with_unary():\n r\"\"\"\n Simple case where we have n = 3 and some unary nodes.\n 7\n / \\\n 5 \\\n | \\\n 4 6\n | |\n 3 |\n / \\ |\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n 6 0 2\n 7 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 6 2\n 0 1 4 3\n 0 1 5 4\n 0 1 7 5,6\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_with_unary_n3():\n r\"\"\"\n Simple case where we have n = 3 and node 5 is an internal, unary node in the first\n tree. In the second tree, node t is the root, but still unary.\n 6 . 5\n / \\ . |\n 4 5 . 4\n | | . / \\\n 3 | . 3 \\\n / \\ | . / \\ \\\n [0] [1] [2] . 
[0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n 6 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 2 3 0,1\n 0 1 5 2\n 0 2 4 3\n 0 1 6 4,5\n 1 2 4 2\n 1 2 5 4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_mutation_ts():\n r\"\"\"\n Simple case where we have n = 3, 2 trees, three mutations.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n x \\ . | | \\\n x \\ . x | \\\n / | . | | |\n 3 | . | | |\n / \\ | . | | |\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1 4 2\n 0 0.2 4 3\n 0.2 1 4 1\n 0.2 1 5 0,4\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.1 0\n 0.15 0\n 0.8 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 3 1\n 1 3 1\n 2 0 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef two_tree_two_mrcas():\n r\"\"\"\n Simple case where we have n = 4, 2 trees, one mutation.\n 6 |\n / \\ | 7\n / \\ | / \\\n / \\ | / x\n / \\ | / \\\n / \\ | / \\\n 4 5 | 4 5\n / \\ / \\ | / \\ / \\\n / \\ / \\ | / \\ / \\\n [0] [1] [2] [3] | [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 1\n 6 0 3\n 7 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 1 5 2,3\n 0 0.3 6 4\n 0 0.3 6 5\n 0.3 1 7 4\n 0.3 1 7 5\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 5 1\n \"\"\")\n\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef loopy_tree():\n r\"\"\"\n Simple case where we have n = 3, 2 trees, three mutations.\n . 7\n . / \\\n . / |\n . / |\n 6 . / 6\n / \\ . / / \\\n / 5 . / / |\n / / \\ . / / |\n / | \\ . | | |\n / | \\ . | | |\n | 4 | . | 4 |\n | / \\ | . | / \\ |\n [0] [1] [2] [3] . [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 2\n 6 0 3\n 7 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 0.2 5 2,4\n 0 0.2 6 5\n 0 1 6 3\n 0.2 1 6 4\n 0.2 1 7 2\n 0.2 1 7 6\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n3_sample_as_parent():\n r\"\"\"\n Simple case where we have n = 3 and one tree. Node 3 is a sample.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n2_dangling():\n r\"\"\"\n Simple case where we have n = 2 and one tree. Node 0 is dangling.\n 4\n / \\\n 3 \\\n / \\ \\\n 0 [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 0 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_n2_part_dangling():\n r\"\"\"\n Simple case where we have n = 2 and two trees. 
Node 0 is dangling in the first tree.\n 4 4\n / \\ / \\\n 3 \\ 3 \\\n / \\ \\ \\ \\\n 0 \\ \\ 0 \\\n \\ \\ \\ \\\n [1] [2] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 0 0.5\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0\n 0 0.5 3 1\n 0.5 1 0 1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_2mutations_multiallelic_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site is multiallelic.\n 4\n x \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n 0 3 2\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef single_tree_ts_2mutations_singletons_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site has two singleton mutations.\n 4\n / \\\n 3 x\n / x \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 1 1\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef single_tree_ts_2mutations_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site has two mutations with different times.\n 4\n x \\\n 3 \\\n / x \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 3 1\n 0 1 0\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef ts_w_data_desert(gap_start, gap_end, length):\n \"\"\"\n Inside/Outside algorithm has been observed to give overflow/underflow when\n attempting to date tree sequences with large regions without data. Test\n that preprocess_ts removes regions of a specified size that have no data.\n \"\"\"\n ts = msprime.simulate(\n 100, mutation_rate=10, recombination_rate=1, length=length)\n tables = ts.dump_tables()\n sites = tables.sites.position[:]\n tables.delete_sites(np.where(np.logical_and(sites > gap_start, sites < gap_end))[0])\n deleted_ts = tables.tree_sequence()\n return deleted_ts\n\n\ndef truncate_ts_samples(ts, average_span, random_seed, min_span=5):\n \"\"\"\n Create a tree sequence that has sample nodes which have been truncated\n so that they span only a small region of the genome. 
The length of the\n truncated spans is given by a poisson distribution whose mean is average_span\n but which cannot go below a fixed min_span, or above the sequence_length\n\n Samples are truncated by removing the edges that connect them to the rest\n of the tree.\n \"\"\"\n\n np.random.seed(random_seed)\n # Make a list of (left,right) tuples giving the new limits of each sample\n # Keyed by sample ID.\n # for simplicity, we pick lengths from a poisson distribution of av 300 bp\n span = np.random.poisson(average_span, ts.num_samples)\n span = np.maximum(span, min_span)\n span = np.minimum(span, ts.sequence_length)\n start = np.random.uniform(0, ts.sequence_length-span)\n to_slice = {id: (a, b) for id, a, b in zip(ts.samples(), start, start + span)}\n\n tables = ts.dump_tables()\n tables.edges.clear()\n for e in ts.tables.edges:\n if e.child not in to_slice:\n left, right = e.left, e.right\n else:\n if e.right <= to_slice[e.child][0] or e.left >= to_slice[e.child][1]:\n continue # this edge is outside the focal region\n else:\n left = max(e.left, to_slice[e.child][0])\n right = min(e.right, to_slice[e.child][1])\n tables.edges.add_row(left, right, e.parent, e.child)\n # Remove mutations above isolated nodes\n mutations = tables.mutations\n keep_mutations = np.ones((mutations.num_rows, ), dtype=bool)\n positions = tables.sites.position[:]\n for i, m in enumerate(mutations):\n if m.node in to_slice:\n if not(to_slice[m.node][0] <= positions[m.site] < to_slice[m.node][1]):\n keep_mutations[i] = False\n new_ds, new_ds_offset = tskit.tables.keep_with_offset(\n keep_mutations, mutations.derived_state, mutations.derived_state_offset)\n new_md, new_md_offset = tskit.tables.keep_with_offset(\n keep_mutations, mutations.metadata, mutations.metadata_offset)\n mutations_map = np.append(np.cumsum(keep_mutations) - 1, [-1])\n mutations_map = mutations_map.astype(mutations.parent.dtype)\n # parent -1 always maps to parent -1\n tables.mutations.set_columns(\n site=mutations.site[keep_mutations],\n node=mutations.node[keep_mutations],\n derived_state=new_ds,\n derived_state_offset=new_ds_offset,\n parent=mutations_map[mutations.parent[keep_mutations]],\n metadata=new_md,\n metadata_offset=new_md_offset)\n return tables.tree_sequence().simplify(\n filter_populations=False,\n filter_individuals=False,\n filter_sites=False,\n keep_unary=True)\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.cumsum",
"numpy.logical_and",
"numpy.random.seed",
"numpy.random.poisson",
"numpy.maximum",
"numpy.minimum"
]
] |
xjarvik/onnxmltools | [
"e4fbdc09814ceedc7655d85b6c4203ca21d8433a"
] | [
"tests/sparkml/test_decision_tree_classifier.py"
] | [
"# SPDX-License-Identifier: Apache-2.0\n\nimport sys\nimport inspect\nimport unittest\nfrom distutils.version import StrictVersion\n\nimport onnx\nimport pandas\nimport numpy\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.classification import DecisionTreeClassifier\nfrom pyspark.ml.linalg import VectorUDT, SparseVector, Vectors\n\nfrom onnxmltools import convert_sparkml\nfrom onnxmltools.convert.common.data_types import StringTensorType, FloatTensorType\nfrom tests.sparkml.sparkml_test_utils import save_data_models, compare_results, run_onnx_model\nfrom tests.sparkml import SparkMlTestCase\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\n\n\nclass TestSparkmDecisionTreeClassifier(SparkMlTestCase):\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n @unittest.skipIf(StrictVersion(onnx.__version__) <= StrictVersion('1.3'), 'Need Greater Opset 9')\n def test_tree_pipeline(self):\n import os\n this_script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n input_path = os.path.join(this_script_dir, \"data\", \"sample_libsvm_data.txt\")\n original_data = self.spark.read.format(\"libsvm\").load(input_path)\n #\n # truncate the features\n #\n feature_count = 5\n self.spark.udf.register(\"truncateFeatures\",\n lambda x: SparseVector(feature_count, range(0,feature_count), x.toArray()[125:130]),\n VectorUDT())\n data = original_data.selectExpr(\"cast(label as string) as label\", \"truncateFeatures(features) as features\")\n label_indexer = StringIndexer(inputCol=\"label\", outputCol=\"indexedLabel\", handleInvalid='error')\n feature_indexer = VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\",\n maxCategories=10, handleInvalid='error')\n\n dt = DecisionTreeClassifier(labelCol=\"indexedLabel\", featuresCol=\"indexedFeatures\")\n pipeline = Pipeline(stages=[label_indexer, feature_indexer, dt])\n model = pipeline.fit(data)\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Pipeline', [\n ('label', StringTensorType([1, 1])),\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n self.assertTrue(model_onnx is not None)\n # run the model\n predicted = model.transform(data.limit(1))\n data_np = {\n 'label': data.limit(1).toPandas().label.values,\n 'features': data.limit(1).toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n }\n expected = [\n predicted.toPandas().indexedLabel.values.astype(numpy.int64),\n predicted.toPandas().prediction.values.astype(numpy.int64),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreePipeline\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['indexedLabel', 'prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_one_class_classification(self):\n features = [[0., 1.], [1., 1.], [2., 0.]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [1, 1, 1]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 1\n 
model_onnx = convert_sparkml(model, 'Sparkml Decision Tree One Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeBinaryClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_binary_classification(self):\n features = [[0, 1], [1, 1], [2, 0]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [0, 1, 0]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 2\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Binary Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeBinaryClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_multiple_classification(self):\n features = [[0, 1], [1, 1], [2, 0], [0.5, 0.5], [1.1, 1.1], [2.1, 0.1]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [0, 1, 2, 1, 1, 2]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 2\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Multi Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeMultiClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n"
] | [
[
"numpy.array"
]
] |