repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
apurvak/tapas | [
"2987658c3b65c5ab6e698d6c57823dc30d3d0f96",
"7884280be78d2f58ad9c125504d710ef89f49f9a"
] | [
"tapas/experiments/table_retriever_experiment.py",
"tapas/utils/create_data_test.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Table retriever experiment.\"\"\"\n\nimport csv\nimport functools\nimport os\nimport traceback\nfrom typing import Text, Optional\n\nfrom absl import app\nfrom absl import flags\nfrom tapas.models import table_retriever_model\nfrom tapas.scripts import eval_table_retriever_utils\nfrom tapas.utils import experiment_utils # pylint: disable=unused-import\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"data_format\", \"tfrecord\", \"The input data format.\")\n\nflags.DEFINE_multi_string(\n \"input_file_train\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_multi_string(\n \"input_file_eval\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_multi_string(\n \"input_file_predict\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"prediction_output_dir\", None,\n \"If not none or empty writes predictions to this directory. Otherwise \"\n \"writes predictions to model_dir.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded. Must match data generation.\")\n\nflags.DEFINE_integer(\"minutes_to_sleep_before_predictions\", 5,\n \"Time in minutes to sleep before starting to predict.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_string(\n \"eval_name\", \"default\",\n \"Name of the current evaluation set. Will be used in Tensorboard.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_float(\n \"grad_clipping\", None, \"If not None, gradients greater in absolute value\"\n \"than this number are clipped.\")\n\nflags.DEFINE_integer(\n \"down_projection_dim\", 0, \"Representation dimension of the query/table\"\n \"after down projection. If smaller than 1, no projection occurs.\")\n\nflags.DEFINE_bool(\n \"init_from_single_encoder\", True, \"If true, expects to load\"\n \"a checkpoint of a single encoder, that would be used to\"\n \"initialize both encoders.\")\n\nflags.DEFINE_integer(\"max_query_length\", 128,\n \"The query is capped to this length.\")\n\nflags.DEFINE_string(\n \"compression_type\",\n \"\",\n \"Compression to use when reading tfrecords. '' for no compression.\",\n)\n\nflags.DEFINE_integer(\n \"evaluated_checkpoint_step\", None,\n \"The step for a specific model checkpoint to be evaluated. 
If None, then\"\n \"all checkpoints are used.\")\n\nflags.DEFINE_string(\n \"evaluated_checkpoint_metric\", None,\n \"The metric used to chose a model checkpoint to be evaluated. If None, then\"\n \"all checkpoints are used.\")\n\nflags.DEFINE_bool(\"use_out_of_core_negatives\", False,\n \"If true, use all the negatives when\"\n \"using many TPU cores.\")\n\nflags.DEFINE_bool(\"mask_repeated_tables\", False,\n \"If true, mask tables that are repeated within a batch.\")\n\nflags.DEFINE_bool(\"mask_repeated_questions\", False,\n \"If true, mask questions that are repeated within a batch.\")\n\nflags.DEFINE_bool(\n \"ignore_table_content\", False,\n \"If true, use only the table headers to represent the table.\")\n\nflags.DEFINE_bool(\n \"use_mined_negatives\", False,\n \"If true, use mined negatives that should be given as\"\n \"additional table features.\")\n\nflags.DEFINE_list(\"disabled_features\", [],\n \"Features that should be disabled (for ablation studies).\")\n\n\ndef _get_test_input_fn(name, input_file):\n \"\"\"Gets input_fn for eval/predict modes.\"\"\"\n if input_file is None:\n return None\n input_fn = functools.partial(\n table_retriever_model.input_fn,\n name=name,\n file_patterns=input_file,\n data_format=FLAGS.data_format,\n is_training=False,\n max_seq_length=FLAGS.max_seq_length,\n compression_type=FLAGS.compression_type,\n use_mined_negatives=FLAGS.use_mined_negatives,\n include_id=True)\n return input_fn\n\n\ndef _predict_and_export_metrics(\n mode,\n input_fn,\n checkpoint_path,\n step,\n estimator,\n output_dir,\n):\n \"\"\"Exports model predictions and calculates precision@k.\"\"\"\n tf.logging.info(\"Running predictor for step %d.\", step)\n result = estimator.predict(input_fn=input_fn, checkpoint_path=checkpoint_path)\n output_predict_file = os.path.join(output_dir, f\"{mode}_results_{step}.tsv\")\n write_predictions(result, output_predict_file)\n\n # Compute precision@k.\n if (not FLAGS.evaluated_checkpoint_step or\n not FLAGS.evaluated_checkpoint_metric):\n p_at_k = eval_table_retriever_utils.eval_precision_at_k(\n query_prediction_files=output_predict_file,\n table_prediction_files=output_predict_file,\n make_tables_unique=True)\n experiment_utils.save_metrics(output_dir, mode, step, p_at_k)\n\n\ndef write_predictions(predictions,\n output_predict_file):\n \"\"\"Writes predictions to an output TSV file.\n\n Predictions header: [query_id, query_rep, table_id, table_rep]\n Args:\n predictions: model predictions\n output_predict_file: Path for wrinting the predicitons.\n \"\"\"\n with tf.io.gfile.GFile(output_predict_file, \"w\") as write_file:\n header = [\n \"query_id\",\n \"query_rep\",\n \"table_id\",\n \"table_rep\",\n ]\n writer = csv.DictWriter(write_file, fieldnames=header, delimiter=\"\\t\")\n writer.writeheader()\n\n for prediction in predictions:\n query_id = prediction[\"query_id\"]\n table_id = prediction[\"table_id\"]\n query_rep = prediction[\"query_rep\"]\n table_rep = prediction[\"table_rep\"]\n\n prediction_to_write = {\n \"query_id\": query_id[0].decode(\"utf-8\"),\n \"query_rep\": query_rep.tolist(),\n \"table_id\": table_id[0].decode(\"utf-8\"),\n \"table_rep\": table_rep.tolist(),\n }\n writer.writerow(prediction_to_write)\n\n\ndef main(_):\n bert_config = experiment_utils.bert_config_from_flags()\n total_steps = experiment_utils.num_train_steps()\n retriever_config = table_retriever_model.RetrieverConfig(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=total_steps,\n 
num_warmup_steps=experiment_utils.num_warmup_steps(),\n use_tpu=FLAGS.use_tpu,\n grad_clipping=FLAGS.grad_clipping,\n down_projection_dim=FLAGS.down_projection_dim,\n init_from_single_encoder=FLAGS.init_from_single_encoder,\n max_query_length=FLAGS.max_query_length,\n mask_repeated_tables=FLAGS.mask_repeated_tables,\n mask_repeated_questions=FLAGS.mask_repeated_questions,\n use_out_of_core_negatives=FLAGS.use_out_of_core_negatives,\n ignore_table_content=FLAGS.ignore_table_content,\n disabled_features=FLAGS.disabled_features,\n use_mined_negatives=FLAGS.use_mined_negatives,\n )\n\n model_fn = table_retriever_model.model_fn_builder(retriever_config)\n estimator = experiment_utils.build_estimator(model_fn)\n\n if FLAGS.do_train:\n tf.io.gfile.makedirs(FLAGS.model_dir)\n bert_config.to_json_file(os.path.join(FLAGS.model_dir, \"bert_config.json\"))\n retriever_config.to_json_file(\n os.path.join(FLAGS.model_dir, \"tapas_config.json\"))\n train_input_fn = functools.partial(\n table_retriever_model.input_fn,\n name=\"train\",\n file_patterns=FLAGS.input_file_train,\n data_format=FLAGS.data_format,\n is_training=True,\n max_seq_length=FLAGS.max_seq_length,\n compression_type=FLAGS.compression_type,\n use_mined_negatives=FLAGS.use_mined_negatives,\n include_id=False)\n estimator.train(input_fn=train_input_fn, max_steps=total_steps)\n\n eval_input_fn = _get_test_input_fn(\"eval\", FLAGS.input_file_eval)\n if FLAGS.do_eval:\n if eval_input_fn is None:\n raise ValueError(\"No input_file_eval specified!\")\n for _, checkpoint in experiment_utils.iterate_checkpoints(\n model_dir=estimator.model_dir,\n total_steps=total_steps,\n marker_file_prefix=os.path.join(estimator.model_dir,\n f\"eval_{FLAGS.eval_name}\"),\n minutes_to_sleep=FLAGS.minutes_to_sleep_before_predictions):\n tf.logging.info(\"Running eval: %s\", FLAGS.eval_name)\n try:\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=FLAGS.num_eval_steps,\n name=FLAGS.eval_name,\n checkpoint_path=checkpoint)\n tf.logging.info(\"Eval result:\\n%s\", result)\n except (ValueError, tf.errors.NotFoundError):\n tf.logging.error(\"Error getting predictions for checkpoint %s: %s\",\n checkpoint, traceback.format_exc())\n\n if FLAGS.do_predict:\n predict_input_fn = _get_test_input_fn(\"predict\", FLAGS.input_file_predict)\n if FLAGS.prediction_output_dir:\n prediction_output_dir = FLAGS.prediction_output_dir\n tf.io.gfile.makedirs(prediction_output_dir)\n else:\n prediction_output_dir = estimator.model_dir\n\n marker_file_prefix = os.path.join(prediction_output_dir, \"predict\")\n # When two separate jobs are launched we don't want conflicting markers.\n if predict_input_fn is not None:\n marker_file_prefix += \"_test\"\n if eval_input_fn is not None:\n marker_file_prefix += \"_dev\"\n\n single_step = FLAGS.evaluated_checkpoint_step\n if FLAGS.evaluated_checkpoint_metric:\n single_step = experiment_utils.get_best_step_for_metric(\n estimator.model_dir, FLAGS.evaluated_checkpoint_metric)\n for current_step, checkpoint in experiment_utils.iterate_checkpoints(\n model_dir=estimator.model_dir,\n total_steps=total_steps,\n marker_file_prefix=marker_file_prefix,\n single_step=single_step):\n try:\n if predict_input_fn is not None:\n _predict_and_export_metrics(\n mode=\"predict\",\n input_fn=predict_input_fn,\n checkpoint_path=checkpoint,\n step=current_step,\n estimator=estimator,\n output_dir=prediction_output_dir)\n\n if eval_input_fn is not None:\n _predict_and_export_metrics(\n mode=\"eval\",\n input_fn=eval_input_fn,\n 
checkpoint_path=checkpoint,\n step=current_step,\n estimator=estimator,\n output_dir=prediction_output_dir)\n except (ValueError, tf.errors.NotFoundError):\n tf.logging.error(\"Error getting predictions for checkpoint %s: %s\",\n checkpoint, traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport string\nimport tempfile\n\nfrom absl import flags\nfrom absl import logging\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport apache_beam as beam\nimport mock\nfrom tapas.protos import interaction_pb2\nfrom tapas.retrieval import tf_example_utils as retrieval_utils\nfrom tapas.utils import beam_runner\nfrom tapas.utils import create_data\nfrom tapas.utils import tf_example_utils\nimport tensorflow.compat.v1 as tf\n\nfrom google.protobuf import text_format\n\nFLAGS = flags.FLAGS\nTEST_PATH = 'tapas/utils/testdata/'\n\n_ImplType = create_data.ConverterImplType\n_InputFormat = create_data.InputFormat\n\n_PretrainConfig = tf_example_utils.PretrainConversionConfig\n_ClassifierConfig = tf_example_utils.ClassifierConversionConfig\n\n_RESERVED_SYMBOLS = ('[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', '[EMPTY]')\n\n\ndef _read_examples(filepath):\n examples = []\n for value in tf.python_io.tf_record_iterator(filepath):\n example = tf.train.Example()\n example.ParseFromString(value)\n examples.append(example)\n return examples\n\n\ndef _set_mock_read(mock_read, output):\n\n def dummy_read(file_pattern, coder, validate):\n del file_pattern, coder, validate # Unused.\n return beam.Create(output)\n\n mock_read.side_effect = dummy_read\n\n\nclass CreatePretrainingDataTest(parameterized.TestCase):\n\n def setUp(self):\n super(CreatePretrainingDataTest, self).setUp()\n\n self._test_dir = TEST_PATH\n self._temp_dir = tempfile.TemporaryDirectory()\n self._vocab_path = os.path.join(self._temp_dir.name, 'vocab.txt')\n self._output_path = self._temp_dir.name\n\n def tearDown(self):\n super(CreatePretrainingDataTest, self).tearDown()\n self._temp_dir.cleanup()\n\n def _create_vocab(self, vocab):\n with tf.gfile.Open(self._vocab_path, 'w') as input_handle:\n input_handle.write('\\n'.join(vocab))\n\n @parameterized.parameters(\n (beam_runner.RunnerType.DIRECT, True),\n (beam_runner.RunnerType.DIRECT, False),\n )\n def test_end_to_end(self, runner_type, always_continue_cells):\n\n self._create_vocab(list(_RESERVED_SYMBOLS) + ['released'])\n\n pipeline = create_data.build_pretraining_pipeline(\n input_file=os.path.join(self._test_dir, 'pretrain_interactions.txtpb'),\n output_suffix='.tfrecord',\n output_dir=self._output_path,\n config=tf_example_utils.PretrainConversionConfig(\n vocab_file=self._vocab_path,\n max_seq_length=10,\n max_predictions_per_seq=10,\n random_seed=5,\n masked_lm_prob=0.5,\n max_column_id=3,\n max_row_id=3,\n min_question_length=1,\n max_question_length=4,\n always_continue_cells=always_continue_cells,\n strip_column_names=False),\n dupe_factor=2,\n min_num_columns=0,\n min_num_rows=0,\n num_corpus_bins=2,\n )\n\n beam_runner.run_type(pipeline, runner_type).wait_until_finish()\n\n for name in ['train', 'test']:\n examples = _read_examples(\n os.path.join(self._output_path, f'{name}.tfrecord'))\n 
self.assertNotEmpty(examples)\n\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_end_to_end_multiple_interactions(self, mock_read):\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'interaction_01.pbtxt')) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n\n interactions = []\n for trial in range(100):\n table_id = f'table_id_{trial}'\n new_interaction = interaction_pb2.Interaction()\n new_interaction.CopyFrom(interaction)\n new_interaction.table.table_id = table_id\n new_interaction.id = table_id\n interactions.append(new_interaction)\n\n _set_mock_read(mock_read, interactions)\n\n self._create_vocab(\n list(_RESERVED_SYMBOLS) + list(string.ascii_lowercase) +\n ['##' + letter for letter in string.ascii_lowercase])\n\n pipeline = create_data.build_pretraining_pipeline(\n input_file='input.tfrecord',\n output_suffix='.tfrecord',\n output_dir=self._output_path,\n config=_PretrainConfig(\n vocab_file=self._vocab_path,\n max_seq_length=40,\n max_predictions_per_seq=10,\n random_seed=5,\n masked_lm_prob=0.5,\n max_column_id=5,\n max_row_id=5,\n min_question_length=5,\n max_question_length=10,\n always_continue_cells=True,\n strip_column_names=False,\n ),\n dupe_factor=1,\n min_num_columns=0,\n min_num_rows=0,\n num_random_table_bins=10,\n num_corpus_bins=100000, # High number sends all examples to train set.\n add_random_table=True,\n )\n\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n\n counters = {\n metric_result.key.metric.name: metric_result.committed\n for metric_result in result.metrics().query()['counters']\n }\n\n self.assertEqual(\n counters, {\n 'Examples': 100,\n 'Examples with tables': 100,\n 'Interactions': 100,\n 'Interactions without random interaction': 11,\n 'Question Length: < inf': 31,\n 'Question Length: <= 10': 53,\n 'Question Length: <= 7': 16,\n 'Real Table Size: <= 8': 100,\n 'Trimmed Table Size: <= 8': 100,\n 'Column Sizes: <= 8': 100,\n 'Row Sizes: <= 8': 100,\n 'Table Token Sizes: <= 8': 100,\n 'Inputs': 100,\n })\n\n output = _read_examples(os.path.join(self._output_path, 'train.tfrecord'))\n self.assertLen(output, 100)\n\n\nCOMP_INTERACTION = \"\"\"\n id: \"nt-3533-1\"\n table: {\n columns: {\n text: \"Attendance\"\n }\n rows: {\n cells: {\n text: \"76,194\"\n }\n }\n rows: {\n cells: {\n text: \"76,333\"\n }\n }\n table_id: \"table_csv/204_250.csv\"\n }\n questions: {\n id: \"nt-3533-1_1\"\n original_text:\n \"of these games, which ones had an attendance of greater than 70,000?\"\n answer: {\n answer_coordinates: {\n row_index: 0\n column_index: 0\n }\n }\n }\"\"\"\n\n\nclass CreateClassifierDataTest(parameterized.TestCase):\n\n def setUp(self):\n super(CreateClassifierDataTest, self).setUp()\n\n self._test_dir = TEST_PATH\n self._temp_dir = tempfile.TemporaryDirectory()\n self._vocab_path = os.path.join(self._temp_dir.name, 'vocab.txt')\n self._output_path = os.path.join(self._temp_dir.name, 'output.tfrecord')\n self._create_vocab(list(_RESERVED_SYMBOLS) + list(string.ascii_lowercase))\n\n def tearDown(self):\n super(CreateClassifierDataTest, self).tearDown()\n self._temp_dir.cleanup()\n\n def _create_vocab(self, vocab):\n with tf.gfile.Open(self._vocab_path, 'w') as input_handle:\n input_handle.write('\\n'.join(vocab))\n\n @parameterized.parameters((1,), (0,), (2,), (0.9,), (1.1,), (0.5,))\n def test_get_samples(self, rate):\n keys = map(str, range(100))\n if rate == int(rate):\n for key in keys:\n 
self.assertEqual(create_data._get_samples(key, rate), rate)\n else:\n self.assertAlmostEqual(\n sum(create_data._get_samples(key, rate) for key in keys) / 100,\n rate,\n places=1,\n )\n\n @parameterized.parameters((None, None, None, {\n 'Conversion success': 2,\n 'Input question': 2,\n 'Question Ids added': 2,\n 'Table Ids added': 1,\n 'Relation Set Index: 1': 1,\n 'Relation Set Index: 2': 2,\n 'Relation Set Index: 4': 15,\n 'Found answers: <= 1': 3,\n 'Example emitted': 2,\n }), (None, 1, None, {\n 'Input question': 2,\n 'Conversion error': 2,\n 'Too many columns': 2,\n 'Question Ids added': 2,\n 'Table Ids added': 1,\n }), (None, None, 1, {\n 'Input question': 2,\n 'Conversion error': 2,\n 'Too many rows': 2,\n 'Question Ids added': 2,\n 'Table Ids added': 1,\n }), (1, None, None, {\n 'Input question': 2,\n 'Conversion error': 2,\n 'Sequence too long': 2,\n 'Question Ids added': 2,\n 'Table Ids added': 1,\n }))\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_gracefully_handle_big_examples(self, max_seq_length, max_column_id,\n max_row_id, expected_counters,\n mock_read):\n\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'interaction_02.pbtxt')) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n\n _set_mock_read(mock_read, [interaction])\n\n pipeline = create_data.build_classifier_pipeline(\n input_files=['input.tfrecord'],\n output_files=[self._output_path],\n config=_ClassifierConfig(\n vocab_file=self._vocab_path,\n max_seq_length=60 if max_seq_length is None else max_seq_length,\n max_column_id=5 if max_column_id is None else max_column_id,\n max_row_id=10 if max_row_id is None else max_row_id,\n strip_column_names=False,\n add_aggregation_candidates=False,\n ))\n\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n\n self.assertEqual(\n {\n metric_result.key.metric.name: metric_result.committed\n for metric_result in result.metrics().query()['counters']\n }, expected_counters)\n\n if max_seq_length is None and max_column_id is None and max_row_id is None:\n output = _read_examples(self._output_path)\n\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'tf_example_02.pbtxt')) as input_file:\n expected_example = text_format.ParseLines(input_file,\n tf.train.Example())\n with tf.gfile.Open(\n os.path.join(self._test_dir,\n 'tf_example_02_conv.pbtxt')) as input_file:\n expected_conversational_example = text_format.ParseLines(\n input_file, tf.train.Example())\n\n self.assertLen(output, 2)\n\n actual_example = output[0]\n del actual_example.features.feature['column_ranks']\n del actual_example.features.feature['inv_column_ranks']\n del actual_example.features.feature['numeric_relations']\n del actual_example.features.feature['numeric_values']\n del actual_example.features.feature['numeric_values_scale']\n del actual_example.features.feature['question_id_ints']\n # assertEqual struggles with NaNs inside protos\n del actual_example.features.feature['answer']\n\n self.assertEqual(actual_example, expected_example)\n\n actual_example = output[1]\n del actual_example.features.feature['column_ranks']\n del actual_example.features.feature['inv_column_ranks']\n del actual_example.features.feature['numeric_relations']\n del actual_example.features.feature['numeric_values']\n del actual_example.features.feature['numeric_values_scale']\n del actual_example.features.feature['question_id_ints']\n # assertEqual struggles with NaNs inside protos\n del 
actual_example.features.feature['answer']\n\n self.assertEqual(actual_example, expected_conversational_example)\n\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_numeric_relations(self, mock_read):\n input_file = 'interaction_00.pbtxt'\n expected_counters = {\n 'Conversion success': 1,\n 'Example emitted': 1,\n 'Input question': 1,\n 'Relation Set Index: 2': 5,\n 'Relation Set Index: 4': 13,\n 'Found answers: <= 4': 1,\n }\n\n with tf.gfile.Open(os.path.join(self._test_dir, input_file)) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n\n _set_mock_read(mock_read, [interaction])\n\n max_seq_length = 512\n\n pipeline = create_data.build_classifier_pipeline(\n input_files=['input.tfrecord'],\n output_files=[self._output_path],\n config=_ClassifierConfig(\n vocab_file=os.path.join(self._test_dir, 'vocab.txt'),\n max_seq_length=max_seq_length,\n max_column_id=512,\n max_row_id=512,\n strip_column_names=False,\n add_aggregation_candidates=False,\n ))\n\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n\n self.assertEqual(\n {\n metric_result.key.metric.name: metric_result.committed\n for metric_result in result.metrics().query()['counters']\n }, expected_counters)\n\n output = _read_examples(self._output_path)\n\n self.assertLen(output, 1)\n actual_example = output[0]\n\n self.assertIn('numeric_relations', actual_example.features.feature.keys())\n relations = actual_example.features.feature[\n 'numeric_relations'].int64_list.value\n\n with tf.gfile.Open(os.path.join(self._test_dir, 'vocab.txt')) as vocab_file:\n vocab = [line.strip() for line in vocab_file]\n inputs = actual_example.features.feature['input_ids'].int64_list.value\n pairs = [(vocab[input_id], relation)\n for (input_id, relation) in zip(inputs, relations)\n if input_id > 0]\n logging.info('pairs: %s', pairs)\n self.assertSequenceEqual(pairs,\n [('[CLS]', 0), ('which', 0), ('cities', 0),\n ('had', 0), ('less', 0), ('than', 0), ('2', 0),\n (',', 0), ('000', 0), ('pass', 0), ('##en', 0),\n ('##ge', 0), ('##rs', 0), ('?', 0), ('[SEP]', 0),\n ('ran', 0), ('##k', 0), ('city', 0), ('pass', 0),\n ('##en', 0), ('##ge', 0), ('##rs', 0), ('ran', 0),\n ('##ki', 0), ('##ng', 0), ('air', 0), ('##li', 0),\n ('##ne', 0),\n ('1', 4), ('united', 0), ('states', 0), (',', 0),\n ('los', 0), ('angeles', 0), ('14', 2), (',', 2),\n ('7', 2), ('##4', 2), ('##9', 2), ('[EMPTY]', 0),\n ('al', 0), ('##as', 0), ('##ka', 0), ('air', 0),\n ('##li', 0), ('##ne', 0), ('##s', 0), ('2', 4),\n ('united', 0), ('states', 0), (',', 0), ('h', 0),\n ('##ous', 0), ('##ton', 0), ('5', 2), (',', 2),\n ('4', 2), ('##6', 2), ('##5', 2), ('[EMPTY]', 0),\n ('united', 0), ('e', 0), ('##x', 0), ('##p', 0),\n ('##re', 0), ('##s', 0), ('##s', 0), ('3', 4),\n ('canada', 0), (',', 0), ('c', 0), ('##al', 0),\n ('##ga', 0), ('##ry', 0), ('3', 2), (',', 2),\n ('7', 2), ('##6', 2), ('##1', 2), ('[EMPTY]', 0),\n ('air', 0), ('t', 0), ('##ra', 0), ('##ns', 0),\n ('##a', 0), ('##t', 0), (',', 0), ('west', 0),\n ('##j', 0), ('##et', 0), ('4', 4), ('canada', 0),\n (',', 0), ('s', 0), ('##as', 0), ('##ka', 0),\n ('##to', 0), ('##on', 0), ('2', 2), (',', 2),\n ('28', 2), ('##2', 2), ('4', 0), ('[EMPTY]', 0),\n ('5', 4), ('canada', 0), (',', 0), ('van', 0),\n ('##co', 0), ('##u', 0), ('##ve', 0), ('##r', 0),\n ('2', 2), (',', 2), ('10', 2), ('##3', 2),\n ('[EMPTY]', 0), ('air', 0), ('t', 0), ('##ra', 0),\n ('##ns', 0), ('##a', 0), ('##t', 0), ('6', 4),\n ('united', 0), ('states', 
0), (',', 0), ('p', 0),\n ('##h', 0), ('##o', 0), ('##en', 0), ('##i', 0),\n ('##x', 0), ('1', 4), (',', 4), ('8', 4),\n ('##2', 4), ('##9', 4), ('1', 0), ('us', 0),\n ('air', 0), ('##w', 0), ('##a', 0), ('##y', 0),\n ('##s', 0), ('7', 4), ('canada', 0), (',', 0),\n ('to', 0), ('##ro', 0), ('##nt', 0), ('##o', 0),\n ('1', 4), (',', 4), ('20', 4), ('##2', 4),\n ('1', 0), ('air', 0), ('t', 0), ('##ra', 0),\n ('##ns', 0), ('##a', 0), ('##t', 0), (',', 0),\n ('can', 0), ('##j', 0), ('##et', 0), ('8', 4),\n ('canada', 0), (',', 0), ('ed', 0), ('##m', 0),\n ('##on', 0), ('##ton', 0), ('11', 4), ('##0', 4),\n ('[EMPTY]', 0), ('[EMPTY]', 0), ('9', 4),\n ('united', 0), ('states', 0), (',', 0), ('o', 0),\n ('##a', 0), ('##k', 0), ('##land', 0), ('10', 4),\n ('##7', 4), ('[EMPTY]', 0), ('[EMPTY]', 0)])\n\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_candidates(self, mock_read):\n\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'interaction_03.pbtxt')) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n\n _set_mock_read(mock_read, [interaction])\n max_seq_length = 15\n\n tf_example_utils._MAX_NUM_ROWS = 4\n tf_example_utils._MAX_NUM_CANDIDATES = 10\n\n pipeline = create_data.build_classifier_pipeline(\n input_files=['input.tfrecord'],\n output_files=[self._output_path],\n config=_ClassifierConfig(\n vocab_file=os.path.join(self._test_dir, 'vocab.txt'),\n max_seq_length=max_seq_length,\n max_column_id=4,\n max_row_id=4,\n strip_column_names=False,\n add_aggregation_candidates=True,\n ),\n )\n\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n\n output = _read_examples(self._output_path)\n\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'tf_example_03.pbtxt')) as input_file:\n expected_example = text_format.ParseLines(input_file, tf.train.Example())\n\n actual_example = output[0]\n logging.info('%s', actual_example)\n # assertEqual struggles with NaNs inside protos\n del actual_example.features.feature['numeric_values']\n self.assertEqual(actual_example, expected_example)\n\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_tfrecord_io(self, mock_read):\n \"\"\"Reads from TFRecord and writes to TFRecord.\"\"\"\n\n with tf.gfile.Open(os.path.join(self._test_dir,\n 'interaction_03.pbtxt')) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n\n def dummy_read(file_pattern, coder, validate):\n del file_pattern, coder, validate # Unused.\n return beam.Create([interaction])\n\n mock_read.side_effect = dummy_read\n max_seq_length = 15\n\n pipeline = create_data.build_classifier_pipeline(\n input_files=['input.tfrecord'],\n output_files=[self._output_path],\n config=_ClassifierConfig(\n vocab_file=os.path.join(self._test_dir, 'vocab.txt'),\n max_seq_length=max_seq_length,\n max_column_id=4,\n max_row_id=4,\n strip_column_names=False,\n add_aggregation_candidates=False,\n ))\n\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n\n output = []\n for value in tf.python_io.tf_record_iterator(self._output_path):\n example = tf.train.Example()\n example.ParseFromString(value)\n output.append(example)\n\n self.assertLen(output, 1)\n sid = output[0].features.feature['segment_ids']\n self.assertLen(sid.int64_list.value, max_seq_length)\n\n\nclass CreateRetrievalDataTest(parameterized.TestCase):\n\n def setUp(self):\n super(CreateRetrievalDataTest, self).setUp()\n\n self._test_dir = 
'tapas/retrieval/testdata'\n self._temp_dir = tempfile.TemporaryDirectory()\n self._vocab_path = os.path.join(self._temp_dir.name, 'vocab.txt')\n self._output_path = os.path.join(self._temp_dir.name, 'output.tfrecord')\n\n def tearDown(self):\n super(CreateRetrievalDataTest, self).tearDown()\n self._temp_dir.cleanup()\n\n def _create_vocab(\n self,\n vocab,\n ):\n vocab_path = self._vocab_path\n with tf.gfile.Open(vocab_path, 'w') as input_handle:\n input_handle.write('\\n'.join(vocab))\n\n @parameterized.parameters(\n (_InputFormat.INTERACTION, _ImplType.PYTHON),\n (_InputFormat.TABLE, _ImplType.PYTHON),\n )\n @mock.patch.object(beam.io, 'ReadFromTFRecord')\n def test_end_to_end(self, input_format, impl, mock_read):\n self._create_vocab(list(_RESERVED_SYMBOLS))\n\n with tf.gfile.Open(\n os.path.join(self._test_dir,\n 'retrieval_interaction.pbtxt')) as input_file:\n interaction = text_format.ParseLines(input_file,\n interaction_pb2.Interaction())\n if input_format == _InputFormat.INTERACTION:\n samples = [interaction]\n elif input_format == _InputFormat.TABLE:\n samples = [interaction.table]\n else:\n raise ValueError(f'Unknown format: {input_format}')\n\n _set_mock_read(mock_read, samples)\n\n pipeline = create_data.build_retrieval_pipeline(\n input_files=['input.tfrecord'],\n input_format=input_format,\n output_files=[self._output_path],\n config=retrieval_utils.RetrievalConversionConfig(\n vocab_file=self._vocab_path,\n max_seq_length=15,\n max_column_id=5,\n max_row_id=5,\n strip_column_names=False),\n converter_impl=impl,\n )\n result = beam.runners.direct.direct_runner.DirectRunner().run(pipeline)\n result.wait_until_finish()\n counters = {\n metric_result.key.metric.name: metric_result.committed\n for metric_result in result.metrics().query()['counters']\n }\n\n if input_format == _InputFormat.INTERACTION:\n self.assertEqual(counters, {\n 'Input question': 1,\n 'Conversion success': 1,\n })\n else:\n self.assertEqual(\n counters, {\n 'Input question': 1,\n 'Conversion success': 1,\n 'Fake Questions added for table only example': 1,\n })\n\n output = _read_examples(self._output_path)\n self.assertLen(output, 1)\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"tensorflow.compat.v1.io.gfile.makedirs",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.io.gfile.GFile",
"tensorflow.compat.v1.disable_v2_behavior"
],
[
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.compat.v1.train.Example",
"tensorflow.compat.v1.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zmxdream/Paddle | [
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c",
"04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c"
] | [
"python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py",
"python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py",
"python/paddle/fluid/tests/unittests/ir/inference/test_fc_gru_fuse_pass.py",
"python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py",
"python/paddle/fluid/dygraph/amp/loss_scaler.py",
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_build_strategy.py",
"python/paddle/fluid/tests/unittests/collective_global_scatter_dygraph.py",
"python/paddle/fluid/tests/unittests/dygraph_sharding_stage2_offload.py",
"python/paddle/fluid/tests/unittests/test_normal.py",
"python/paddle/fluid/tests/unittests/test_translated_layer.py",
"python/paddle/fluid/tests/unittests/test_retain_graph.py",
"python/paddle/fluid/tests/unittests/test_gradient_clip.py",
"python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py",
"python/paddle/text/datasets/movielens.py",
"python/paddle/fluid/tests/unittests/test_rnn_op.py",
"python/paddle/fluid/tests/unittests/test_compiled_program.py",
"python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py",
"python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py",
"python/paddle/fluid/tests/unittests/parallel_dygraph_se_resnext.py",
"python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py",
"python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py",
"python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py",
"python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_transpose_reshape_fuse_pass.py",
"python/paddle/fluid/tests/unittests/test_lstm_op.py",
"python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py",
"python/paddle/fluid/tests/unittests/test_crop_tensor_op.py",
"python/paddle/fluid/tests/unittests/test_complex_trace_layer.py",
"python/paddle/fluid/tests/unittests/test_regularizer.py",
"python/paddle/fluid/tests/unittests/test_rmsprop_op.py",
"python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py",
"python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py",
"python/paddle/fluid/tests/unittests/test_is_integer.py",
"python/paddle/fluid/tests/unittests/test_where_index.py",
"python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py",
"python/paddle/fluid/tests/unittests/check_nan_inf_base.py",
"python/paddle/fluid/tests/unittests/npu/test_huber_loss_op_npu.py",
"python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py",
"python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py",
"python/paddle/fluid/tests/unittests/sequence/test_sequence_scatter_op.py",
"python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py",
"python/paddle/fluid/tests/unittests/test_dyn_rnn.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, skip_check_grad_ci\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\n\n\[email protected]_if_not_cpu()\nclass TestStack2DOneDNNOp(OpTest):\n def initDefaultParameters(self):\n self.num_inputs = 4\n self.input_dim = (2, 2)\n self.axis = 1\n self.dtype = np.float32\n\n def initParameters(self):\n pass\n\n def getInputNames(self):\n input_names = []\n for i in range(self.num_inputs):\n input_names.append('x{}'.format(i))\n return input_names\n\n def setUp(self):\n self.initDefaultParameters()\n self.initParameters()\n self.op_type = 'stack'\n self.op_inputs = []\n\n for i in range(self.num_inputs):\n self.op_inputs.append(\n np.random.random(size=self.input_dim).astype(np.float32))\n\n input_list = []\n input_names = self.getInputNames()\n for i in range(self.num_inputs):\n input_list.append((input_names[i], self.op_inputs[i]))\n\n self.inputs = {'X': input_list}\n self.outputs = {'Y': np.stack(self.op_inputs, axis=self.axis)}\n self.attrs = {'axis': self.axis, 'use_mkldnn': True}\n\n def test_check_output(self):\n self.check_output_with_place(core.CPUPlace())\n\n # JUST FOR CI TO PASS, GRAD IS NOT IMPLEMENTED YET\n def test_check_grad(self):\n pass\n\n\nclass TestStack1DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (100)\n self.axis = 0\n\n\nclass TestStack1DAxis1OneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (100)\n self.axis = 1\n\n\nclass TestStack2DAxisLastOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (13, 24)\n self.num_inputs = 5\n self.axis = -1\n\n\nclass TestStack3DAxisNegativeOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (10, 128, 128)\n self.axis = -2\n\n\nclass TestStack3DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (10, 128, 128)\n self.num_inputs = 3\n self.axis = 1\n\n\nclass TestStack4DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (2, 2, 2, 2)\n self.num_inputs = 3\n self.axis = 4\n\n\nclass TestStack5DOneDNNOp(TestStack2DOneDNNOp):\n def initParameters(self):\n self.input_dim = (2, 3, 4, 5, 6)\n self.num_inputs = 6\n self.axis = 0\n\n\nif __name__ == \"__main__\":\n paddle.enable_static()\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid as fluid\nimport unittest\nimport numpy as np\nimport os\nimport six\nfrom paddle.fluid.reader import keep_data_loader_order\n\nkeep_data_loader_order(False)\n\n\ndef create_reader(shape, batch_number):\n def __impl__():\n idx = 0\n for _ in six.moves.range(batch_number):\n yield np.ones(shape).astype('float32') * idx,\n idx += 1\n\n return __impl__\n\n\nclass DataLoaderKeepOrderTestBase(unittest.TestCase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 10000\n\n def setUp(self):\n self.epoch_num = 3\n self.batch_num = 40\n self.shape = [3, 4, 5]\n self.initParameters()\n\n def clear_visited(self):\n self.visited = set()\n\n def build_network(self, places):\n input_data = fluid.data(shape=self.shape, dtype='float32', name=\"input\")\n loader = fluid.io.DataLoader.from_generator(\n capacity=16, feed_list=[input_data], iterable=self.iterable)\n\n fc = fluid.layers.fc(input_data, size=10)\n loss = fluid.layers.reduce_mean(fc)\n\n loader.set_batch_generator(\n create_reader(self.shape, self.batch_num),\n places=places if loader.iterable else None)\n\n return input_data, loss, loader\n\n def assertInputData(self, batch_id, input_data, dev_cnt,\n check_visited=True):\n if isinstance(input_data, list):\n self.assertTrue(len(input_data), dev_cnt)\n start_val = dev_cnt * batch_id\n for each_input_dict in input_data:\n input_tensor = np.array(each_input_dict[\"input\"])\n self.assertEqual(self.shape, list(input_tensor.shape))\n\n num = input_tensor.flatten()[0]\n equal = (input_tensor == num).all()\n self.assertTrue(equal)\n if check_visited:\n self.assertTrue(num not in self.visited)\n self.visited.add(num)\n\n start_val += 1\n else:\n self.assertEqual(\n list(input_data.shape),\n [self.shape[0] * dev_cnt] + self.shape[1:])\n start_val = dev_cnt * batch_id\n for idx in six.moves.range(dev_cnt):\n data_part = input_data[idx * self.shape[0]:(idx + 1) *\n self.shape[0], :]\n num = data_part.flatten()[0]\n self.assertTrue((data_part == num).all())\n if check_visited:\n self.assertTrue(num not in self.visited)\n self.visited.add(num)\n\n start_val += 1\n\n def get_places(self):\n place_list = [fluid.cpu_places(1), fluid.cpu_places(4)]\n if fluid.is_compiled_with_cuda():\n if os.name == \"nt\":\n place_list.extend([fluid.cuda_places(0)])\n else:\n place_list.extend(\n [fluid.cuda_places(0), fluid.cuda_places([0, 1])])\n return place_list\n\n def test_main(self):\n for p in self.get_places():\n use_compiled_program_list = [True] if len(p) > 1 else [False, True]\n for use_compiled_program in use_compiled_program_list:\n self.run_main_with_place(p, use_compiled_program)\n\n def run_main_with_place(self, places, use_compiled_program=True):\n with fluid.scope_guard(fluid.Scope()):\n with fluid.program_guard(fluid.Program(), fluid.Program()):\n input_data, loss, loader = self.build_network(places)\n fetch_list = [input_data]\n\n exe = 
fluid.Executor(places[0])\n exe.run(fluid.default_startup_program())\n\n dev_cnt = len(places)\n if dev_cnt > 1:\n self.assertTrue(use_compiled_program)\n\n main_program = fluid.default_main_program()\n if use_compiled_program:\n main_program = fluid.CompiledProgram(\n main_program).with_data_parallel(\n loss_name=loss.name, places=places)\n\n max_batch_num = min(self.break_num,\n int(self.batch_num / dev_cnt))\n\n if loader.iterable:\n early_break = False\n for epoch_id in six.moves.range(self.epoch_num):\n early_break = False\n self.clear_visited()\n batch_id = 0\n for data in loader():\n if batch_id >= self.break_num:\n early_break = True\n break\n self.assertInputData(\n batch_id, data, dev_cnt, check_visited=False)\n fetch_val, = exe.run(program=main_program,\n feed=data,\n fetch_list=fetch_list)\n self.assertInputData(batch_id, fetch_val, dev_cnt)\n batch_id += 1\n\n if dev_cnt == 1:\n self.assertEqual(batch_id, max_batch_num)\n else:\n self.assertLessEqual(batch_id, max_batch_num)\n\n if early_break:\n loader._reset()\n else:\n for epoch_id in six.moves.range(self.epoch_num):\n batch_id = 0\n self.clear_visited()\n loader.start()\n try:\n while True:\n if batch_id >= self.break_num:\n loader.reset()\n break\n fetch_val, = exe.run(program=main_program,\n fetch_list=fetch_list)\n self.assertInputData(batch_id, fetch_val,\n dev_cnt)\n batch_id += 1\n except fluid.core.EOFException:\n loader.reset()\n\n if dev_cnt == 1:\n self.assertEqual(batch_id, max_batch_num)\n else:\n self.assertLessEqual(batch_id, max_batch_num)\n\n\nclass IterableDataLoaderKeepOrderTest2(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 10000\n\n\nclass IterableDataLoaderKeepOrderTest3(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 2\n\n\nclass IterableDataLoaderKeepOrderTest4(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 2\n\n\nclass IterableDataLoaderKeepOrderTest5(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = False\n self.break_num = 0\n\n\nclass IterableDataLoaderKeepOrderTest6(DataLoaderKeepOrderTestBase):\n def initParameters(self):\n self.iterable = True\n self.break_num = 0\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\n\n\nclass FcGruFusePassTest(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n dict_dim, emb_dim = 128, 64\n data = fluid.data(\n name='step_data', shape=[None], dtype='int64', lod_level=1)\n emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])\n hidden_dim = 512\n x = fluid.layers.fc(input=emb, size=hidden_dim * 3)\n hidden = fluid.layers.dynamic_gru(\n input=x,\n size=hidden_dim,\n bias_attr=True,\n origin_mode=False,\n is_reverse=True)\n\n batch = 16\n lod_tensor = fluid.LoDTensor()\n lod_tensor.set(np.random.randint(\n 0, dict_dim, size=[batch]).astype(\"int64\"),\n fluid.CPUPlace())\n lod_tensor.set_lod([[0, batch]])\n self.feeds = {\"step_data\": lod_tensor}\n self.fetch_list = [hidden]\n\n def test_check_output(self):\n use_gpu = False\n self.check_output_with_option(use_gpu)\n self.assertTrue(PassVersionChecker.IsCompatible('fc_gru_fuse_pass'))\n\n\nclass MulGruFusePassTest(InferencePassTest):\n def setUp(self):\n with fluid.program_guard(self.main_program, self.startup_program):\n dict_dim, emb_dim = 128, 64\n data = fluid.data(\n name='step_data', shape=[None], dtype='int64', lod_level=1)\n emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])\n hidden_dim = 512\n x = fluid.layers.fc(input=emb, size=hidden_dim * 3, bias_attr=False)\n hidden = fluid.layers.dynamic_gru(\n input=x,\n size=hidden_dim,\n bias_attr=True,\n origin_mode=False,\n is_reverse=True)\n\n batch = 16\n lod_tensor = fluid.LoDTensor()\n lod_tensor.set(np.random.randint(\n 0, dict_dim, size=[batch]).astype(\"int64\"),\n fluid.CPUPlace())\n lod_tensor.set_lod([[0, batch]])\n self.feeds = {\"step_data\": lod_tensor}\n self.fetch_list = [hidden]\n\n def test_check_output(self):\n use_gpu = False\n self.check_output_with_option(use_gpu)\n self.assertTrue(PassVersionChecker.IsCompatible('mul_gru_fuse_pass'))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\n\n\ndef modified_huber_loss_forward(val):\n if val < -1:\n return -4. * val\n elif val < 1:\n return (1. - val) * (1. - val)\n else:\n return 0.\n\n\nclass TestModifiedHuberLossOp(OpTest):\n def setUp(self):\n self.op_type = 'modified_huber_loss'\n samples_num = 100\n\n x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32')\n y_np = np.random.choice([0, 1], samples_num).reshape(\n (samples_num, 1)).astype('float32')\n product_res = x_np * (2. * y_np - 1.)\n # keep away from the junction of piecewise function\n for pos, val in np.ndenumerate(product_res):\n while abs(val - 1.) < 0.05:\n x_np[pos] = np.random.uniform(-2., 2.)\n y_np[pos] = np.random.choice([0, 1])\n product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1)\n val = product_res[pos]\n\n self.inputs = {'X': x_np, 'Y': y_np}\n loss = np.vectorize(modified_huber_loss_forward)(product_res)\n\n self.outputs = {\n 'IntermediateVal': product_res.astype('float32'),\n 'Out': loss.reshape((samples_num, 1)).astype('float32')\n }\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom paddle.fluid import core\nfrom paddle.fluid.dygraph import to_variable\nfrom paddle.fluid.framework import _varbase_creator, _dygraph_tracer, dygraph_only\nfrom paddle.fluid.data_feeder import check_type\nfrom ...wrapped_decorator import signature_safe_contextmanager, wrap_decorator\nimport warnings\nimport numpy as np\nfrom paddle import _C_ops\nfrom collections import defaultdict\nfrom enum import Enum\n\n__all__ = ['AmpScaler', 'OptimizerState']\n\n\nclass OptimizerState(Enum):\n INIT = 0\n UNSCALED = 1\n STEPPED = 2\n\n\ndef _refresh_optimizer_state():\n return {\"state\": OptimizerState.INIT}\n\n\nclass AmpScaler(object):\n \"\"\"\n :api_attr: imperative\n\n AmpScaler is used for Auto-Mixed-Precision training/inferring in imperative\n mode. It controls the scaling of loss, helps avoiding numerical overflow.\n The object of this class has seventeen methods `scale()`, `unscale_()`, `minimize()` and `get`/`set` api of parameters.\n\n `scale()` is used to multiply the loss by a scale ratio.\n `unscale_()` is used to unscale the gradients of parameters, multiplies the gradients of parameters by 1/(scale ratio)\n `minimize()` is similar as `optimizer.minimize()`, performs parameters updating, and it will update the loss_scaling.\n\n Commonly, it is used together with `amp_guard` to achieve Auto-Mixed-Precision in \n imperative mode.\n\n Args:\n enable(bool, optional): Enable loss scaling or not. Default is True.\n init_loss_scaling (float, optional): The initial loss scaling factor. Default is 2**15.\n incr_ratio(float, optional): The multiplier to use when increasing the loss \n scaling. Default is 2.0.\n decr_ratio(float, optional): The less-than-one-multiplier to use when decreasing \n the loss scaling. Default is 0.5.\n incr_every_n_steps(int, optional): Increases loss scaling every n consecutive \n steps with finite gradients. Default is 1000.\n decr_every_n_nan_or_inf(int, optional): Decreases loss scaling every n \n accumulated steps with nan or inf gradients. Default is 2.\n use_dynamic_loss_scaling(bool, optional): Whether to use dynamic loss scaling. If False, fixed loss_scaling is used. If True, the loss scaling is updated dynamicly. Default is True.\n Returns:\n An AmpScaler object.\n\n Examples:\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')\n with fluid.dygraph.guard():\n model = fluid.dygraph.Conv2D(3, 2, 3)\n optimizer = fluid.optimizer.SGDOptimizer(\n learning_rate=0.01, parameter_list=model.parameters())\n scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)\n data = fluid.dygraph.to_variable(data)\n with fluid.dygraph.amp_guard():\n conv = model(data)\n loss = fluid.layers.reduce_mean(conv)\n scaled = scaler.scale(loss)\n scaled.backward()\n scaler.minimize(optimizer, scaled) \n \"\"\"\n\n @dygraph_only\n def __init__(self,\n enable=True,\n init_loss_scaling=2.**15,\n incr_ratio=2.0,\n decr_ratio=0.5,\n incr_every_n_steps=1000,\n decr_every_n_nan_or_inf=1,\n use_dynamic_loss_scaling=True):\n\n tracer = _dygraph_tracer()\n if not tracer:\n raise ValueError(\n \"current_tracer is None, maybe it is not in imperative mode.\")\n\n if enable and not (tracer._expected_place.is_gpu_place() or\n tracer._expected_place.is_xpu_place()):\n warnings.warn(\n 'AmpScaler can only be enabled on CUDAPlace and XPUPlace, current place is %s, so it makes no effect.'\n % tracer._expected_place)\n enable = False\n\n self._enable = enable\n\n if self._enable:\n assert incr_ratio > 1.0, \"The incr_ratio must be > 1.0.\"\n assert decr_ratio < 1.0, \"The decr_ratio must be < 1.0.\"\n\n self._init_loss_scaling = init_loss_scaling\n self._incr_ratio = incr_ratio\n self._decr_ratio = decr_ratio\n self._incr_every_n_steps = incr_every_n_steps\n self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf\n self._incr_count = 0\n self._decr_count = 0\n self._use_dynamic_loss_scaling = use_dynamic_loss_scaling\n\n self._found_inf = to_variable(np.array([0]).astype(np.bool))\n self._temp_found_inf_fp16 = to_variable(\n np.array([0]).astype(np.bool))\n self._temp_found_inf_fp32 = to_variable(\n np.array([0]).astype(np.bool))\n self._scale = to_variable(\n np.array([self._init_loss_scaling]).astype(np.float32))\n self._cache_founf_inf = None\n self._optimizer_states = defaultdict(_refresh_optimizer_state)\n\n def scale(self, var):\n \"\"\"\n Multiplies a variable(Tensor) by the scale factor and returns scaled outputs. \n If this instance of :class:`AmpScaler` is not enabled, output are returned unmodified.\n\n Args:\n var (Variable): The variable to scale.\n Returns:\n The scaled variable or original variable.\n \n Examples:\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')\n with fluid.dygraph.guard():\n model = fluid.dygraph.Conv2D(3, 2, 3)\n optimizer = fluid.optimizer.SGDOptimizer(\n learning_rate=0.01, parameter_list=model.parameters())\n scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)\n data = fluid.dygraph.to_variable(data)\n with fluid.dygraph.amp_guard():\n conv = model(data)\n loss = fluid.layers.reduce_mean(conv)\n scaled = scaler.scale(loss)\n scaled.backward()\n scaler.minimize(optimizer, scaled) \n \"\"\"\n check_type(var, \"var\", core.VarBase, 'AmpScaler.scale()')\n\n if not self._enable:\n return var\n\n return var * self._scale\n\n def minimize(self, optimizer, *args, **kwargs):\n \"\"\"\n This function is similar as `Optimizer.minimize()`, which performs parameters updating.\n \n If the scaled gradients of parameters contains NAN or INF, the parameters updating is skipped.\n Otherwise, if `unscale_()` has not been called, it first unscales the scaled gradients of parameters, then updates the parameters.\n\n Finally, the loss scaling ratio is updated.\n\n Args:\n optimizer(Optimizer): The optimizer used to update parameters.\n args: Arguments, which will be forward to `optimizer.minimize()`.\n kwargs: Keyword arguments, which will be forward to `Optimizer.minimize()`.\n\n Examples:\n\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')\n with fluid.dygraph.guard():\n model = fluid.dygraph.Conv2D(3, 2, 3)\n optimizer = fluid.optimizer.SGDOptimizer(\n learning_rate=0.01, parameter_list=model.parameters())\n scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)\n data = fluid.dygraph.to_variable(data)\n with fluid.dygraph.amp_guard():\n conv = model(data)\n loss = fluid.layers.reduce_mean(conv)\n scaled = scaler.scale(loss)\n scaled.backward()\n scaler.minimize(optimizer, scaled) \n \"\"\"\n if not self._enable:\n return optimizer.minimize(*args, **kwargs)\n\n optimizer_state = self._optimizer_states[id(optimizer)]\n\n # unscale the grad\n if optimizer_state[\"state\"] is OptimizerState.INIT:\n self._unscale(optimizer)\n\n optimize_ops, params_grads = (None, None)\n\n if self._found_inf:\n self._cache_founf_inf = True\n else:\n optimize_ops, params_grads = optimizer.minimize(*args, **kwargs)\n self._cache_founf_inf = False\n\n if self._use_dynamic_loss_scaling:\n # uopdate the scale\n self._update()\n\n self._optimizer_states = defaultdict(_refresh_optimizer_state)\n\n return optimize_ops, params_grads\n\n def _unscale(self, optimizer):\n \"\"\"\n Unscale the gradients of parameters, multiplies the gradients of parameters by 1/(loss scaling ratio). 
\n If this instance of :class:`GradScaler` is not enabled, output are returned unmodified.\n Args:\n optimizer(Optimizer): The optimizer used to update parameters.\n Returns:\n The unscaled parameters or original parameters.\n \"\"\"\n if not self._enable:\n return\n\n optimizer_state = self._optimizer_states[id(optimizer)]\n\n if optimizer_state[\"state\"] is OptimizerState.UNSCALED:\n raise RuntimeError(\n \"unscale_() has already been called on this optimizer since the last update().\"\n )\n elif optimizer_state[\"state\"] is OptimizerState.STEPPED:\n raise RuntimeError(\"unscale_() is being called after step().\")\n\n if getattr(optimizer, '_param_groups', None) and isinstance(\n optimizer._param_groups[0], dict):\n param_grads = []\n param_grads_fp16 = []\n param_grads_fp32 = []\n for group in optimizer._param_groups:\n for param in group['params']:\n if param._grad_ivar() is not None:\n param_grads.append(param._grad_ivar())\n if param._grad_ivar(\n ).dtype == core.VarDesc.VarType.FP16:\n param_grads_fp16.append(param._grad_ivar())\n else:\n param_grads_fp32.append(param._grad_ivar())\n else:\n param_grads = [\n param._grad_ivar() for param in optimizer._parameter_list\n if param._grad_ivar() is not None\n ]\n param_grads_fp16 = [\n param._grad_ivar() for param in optimizer._parameter_list\n if (param._grad_ivar() is not None\n ) and (param._grad_ivar().dtype == core.VarDesc.VarType.FP16\n )\n ]\n param_grads_fp32 = [\n param._grad_ivar() for param in optimizer._parameter_list\n if (param._grad_ivar() is not None\n ) and (param._grad_ivar().dtype == core.VarDesc.VarType.FP32\n )\n ]\n if len(param_grads_fp16):\n _C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,\n param_grads_fp16,\n self._temp_found_inf_fp16)\n if len(param_grads_fp32):\n _C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,\n param_grads_fp32,\n self._temp_found_inf_fp32)\n if len(param_grads_fp16) and len(param_grads_fp32):\n self._found_inf = self._temp_found_inf_fp16 or self._temp_found_inf_fp32\n elif len(param_grads_fp16):\n self._found_inf = self._temp_found_inf_fp16\n else:\n self._found_inf = self._temp_found_inf_fp32\n\n optimizer_state[\"state\"] = OptimizerState.UNSCALED\n\n def _update(self):\n \"\"\"\n Updates the loss_scaling.\n \"\"\"\n if not self._enable:\n return\n\n if self._cache_founf_inf:\n self._incr_count = 0\n self._decr_count = self._decr_count + 1\n if self._decr_count == self._decr_every_n_nan_or_inf:\n print(\n 'Found inf or nan, current scale is: {}, decrease to: {}*{}'.\n format(\n float(self._scale),\n float(self._scale), float(self._decr_ratio)))\n self._scale = self._scale * self._decr_ratio\n self._decr_count = 0\n else:\n self._decr_count = 0\n self._incr_count = self._incr_count + 1\n if self._incr_count == self._incr_every_n_steps:\n self._scale = self._scale * self._incr_ratio\n self._incr_count = 0\n\n return\n\n def is_enable(self):\n \"\"\"\n Enable loss scaling or not.\n\n Returns:\n bool: enable loss scaling return True else return False.\n \"\"\"\n return self._enable\n\n def is_use_dynamic_loss_scaling(self):\n \"\"\"\n Whether to use dynamic loss scaling.\n\n Returns:\n bool: if fixed loss_scaling is used return False, if the loss scaling is updated dynamicly return true.\n \"\"\"\n return self._use_dynamic_loss_scaling\n\n def get_init_loss_scaling(self):\n \"\"\"\n Return the initial loss scaling factor.\n\n Reurns:\n float: the initial loss scaling factor.\n \"\"\"\n return self._init_loss_scaling\n\n def set_init_loss_scaling(self, 
new_init_loss_scaling):\n \"\"\"\n Set the initial loss scaling factor by `new_init_loss_scaling`.\n\n Args:\n new_init_loss_scaling(int): The new_init_loss_scaling used to update initial loss scaling factor.s\n \"\"\"\n self._init_loss_scaling = new_init_loss_scaling\n self._scale = to_variable(\n np.array([self._init_loss_scaling]).astype(np.float32))\n\n def get_incr_ratio(self):\n \"\"\"\n Return the multiplier to use when increasing the loss scaling.\n\n Reurns:\n float: the multiplier to use when increasing the loss scaling.\n \"\"\"\n return self._incr_ratio\n\n def set_incr_ratio(self, new_incr_ratio):\n \"\"\"\n Set the multiplier to use when increasing the loss scaling by `new_incr_ratio`, `new_incr_ratio` should > 1.0.\n\n Args:\n new_incr_ratio(float): The new_incr_ratio used to update the multiplier to use when increasing the loss scaling.\n \"\"\"\n assert new_incr_ratio > 1.0, \"The new_incr_ratio must be > 1.0.\"\n self._incr_ratio = new_incr_ratio\n\n def get_decr_ratio(self):\n \"\"\"\n Get the less-than-one-multiplier to use when decreasing the loss scaling.\n\n Reurns:\n float: the less-than-one-multiplier to use when decreasing the loss scaling.\n \"\"\"\n return self._decr_ratio\n\n def set_decr_ratio(self, new_decr_ratio):\n \"\"\"\n Set the less-than-one-multiplier to use when decreasing the loss scaling by `new_incr_ratio`, `new_decr_ratio` should < 1.0.\n\n Args:\n new_decr_ratio(float): The new_decr_ratio used to update the less-than-one-multiplier to use when decreasing the loss scaling.\n \"\"\"\n assert new_decr_ratio < 1.0, \"The new_decr_ratio must be < 1.0.\"\n self._decr_ratio = new_decr_ratio\n\n def get_incr_every_n_steps(self):\n \"\"\"\n Return the num `n`, `n` represent increases loss scaling every `n` consecutive steps with finite gradients.\n\n Reurns:\n int: the num `n`, `n` represent increases loss scaling every `n` consecutive steps with finite gradients.\n \"\"\"\n return self._incr_every_n_steps\n\n def set_incr_every_n_steps(self, new_incr_every_n_steps):\n \"\"\"\n Set the num `n` by `new_incr_every_n_steps`, `n` represent increases loss scaling every `n` consecutive steps with finite gradients.\n\n Args:\n new_incr_every_n_steps(int): The new_incr_every_n_steps used to update the num `n`, `n` represent increases loss scaling every `n` consecutive steps with finite gradients.\n \"\"\"\n self._incr_every_n_steps = new_incr_every_n_steps\n\n def get_decr_every_n_nan_or_inf(self):\n \"\"\"\n Return the num `n`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.\n\n Reurns:\n int: the num `n`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.\n \"\"\"\n return self._decr_every_n_nan_or_inf\n\n def set_decr_every_n_nan_or_inf(self, new_decr_every_n_nan_or_inf):\n \"\"\"\n Set the num `n` by `new_decr_every_n_nan_or_inf`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.\n\n Args:\n new_decr_every_n_nan_or_inf(int): The new_decr_every_n_nan_or_inf used to update the num `n`, `n` represent decreases loss scaling every `n` accumulated steps with nan or inf gradients.\n \"\"\"\n self._decr_every_n_nan_or_inf = new_decr_every_n_nan_or_inf\n\n def state_dict(self):\n \"\"\"\n Returns the state of the scaler as a `dict`, If this instance is not enabled, returns an empty dict.\n\n Reurns:\n A dict of scaler includes:\n scale (tensor): The loss scaling factor.\n incr_ratio(float): The multiplier to use when increasing the loss 
scaling.\n decr_ratio(float): The less-than-one-multiplier to use when decreasing the loss scaling.\n incr_every_n_steps(int): Increases loss scaling every n consecutive steps with finite gradients.\n decr_every_n_nan_or_inf(int): Decreases loss scaling every n accumulated steps with nan or inf gradients.\n incr_count(int): The number of recent consecutive unskipped steps.\n decr_count(int): The number of recent consecutive skipped steps.\n use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling. If False, fixed loss_scaling is used. If True, the loss scaling is updated dynamicly. Default is True.\n \"\"\"\n return {\n \"scale\": self._scale.numpy(),\n \"incr_ratio\": self._incr_ratio,\n \"decr_ratio\": self._decr_ratio,\n \"incr_every_n_steps\": self._incr_every_n_steps,\n \"decr_every_n_nan_or_inf\": self._decr_every_n_nan_or_inf,\n \"incr_count\": self._incr_count,\n \"decr_count\": self._decr_count,\n \"use_dynamic_loss_scaling\": self._use_dynamic_loss_scaling\n } if self._enable else {}\n\n def load_state_dict(self, state_dict):\n \"\"\"\n Loads the scaler state.\n \n Args:\n state_dict(dict): scaler state. Should be an object returned from a call to `AmpScaler.state_dict()`.\n \"\"\"\n if not self._enable:\n return\n\n if len(state_dict) == 0:\n raise RuntimeError(\n \"The input state dict is empty, possibly because it was saved \"\n \"from a disabled instance of GradScaler.\")\n\n self._init_loss_scaling = state_dict[\"scale\"][0]\n self._scale = to_variable(\n np.array([self._init_loss_scaling]).astype(np.float32))\n self._incr_ratio = state_dict[\"incr_ratio\"]\n self._decr_ratio = state_dict[\"decr_ratio\"]\n self._incr_every_n_steps = state_dict[\"incr_every_n_steps\"]\n self._decr_every_n_nan_or_inf = state_dict[\"decr_every_n_nan_or_inf\"]\n self._incr_count = state_dict[\"incr_count\"]\n self._decr_count = state_dict[\"decr_count\"]\n self._use_dynamic_loss_scaling = state_dict[\"use_dynamic_loss_scaling\"]\n",
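To make the `state_dict()` / `load_state_dict()` round trip above concrete, here is a minimal sketch assuming a GPU build (on CPU the scaler is disabled, `state_dict()` returns an empty dict, and `load_state_dict()` is a no-op); it reuses only the `fluid.dygraph.AmpScaler` API shown in the docstrings above.

.. code-block:: python

    # Minimal sketch of saving and restoring AmpScaler state.
    # Assumes a CUDA-enabled build; on CPU the scaler is disabled.
    import numpy as np
    import paddle.fluid as fluid

    data = np.random.uniform(-1, 1, [4, 3, 8, 8]).astype('float32')
    with fluid.dygraph.guard():
        model = fluid.dygraph.Conv2D(3, 2, 3)
        optimizer = fluid.optimizer.SGDOptimizer(
            learning_rate=0.01, parameter_list=model.parameters())
        scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)
        with fluid.dygraph.amp_guard():
            loss = fluid.layers.reduce_mean(
                model(fluid.dygraph.to_variable(data)))
        scaled = scaler.scale(loss)
        scaled.backward()
        scaler.minimize(optimizer, scaled)

        state = scaler.state_dict()        # scale, counters, ratios
        new_scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1)
        new_scaler.load_state_dict(state)  # resumes with the saved scale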
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport paddle\nimport unittest\nimport numpy as np\nfrom paddle.jit import ProgramTranslator\n\nfrom test_resnet import ResNet, train, predict_dygraph_jit\nfrom test_resnet import predict_dygraph, predict_static, predict_analysis_inference\n\nprogram_translator = ProgramTranslator()\n\n\nclass TestResnetWithPass(unittest.TestCase):\n def setUp(self):\n self.build_strategy = paddle.static.BuildStrategy()\n self.build_strategy.fuse_elewise_add_act_ops = True\n self.build_strategy.fuse_bn_act_ops = True\n self.build_strategy.fuse_bn_add_act_ops = True\n self.build_strategy.enable_addto = True\n # NOTE: for enable_addto\n paddle.fluid.set_flags({\"FLAGS_max_inplace_grad_add\": 8})\n\n def train(self, to_static):\n program_translator.enable(to_static)\n\n return train(to_static, self.build_strategy)\n\n def verify_predict(self):\n image = np.random.random([1, 3, 224, 224]).astype('float32')\n dy_pre = predict_dygraph(image)\n st_pre = predict_static(image)\n dy_jit_pre = predict_dygraph_jit(image)\n predictor_pre = predict_analysis_inference(image)\n self.assertTrue(\n np.allclose(dy_pre, st_pre),\n msg=\"dy_pre:\\n {}\\n, st_pre: \\n{}.\".format(dy_pre, st_pre))\n self.assertTrue(\n np.allclose(dy_jit_pre, st_pre),\n msg=\"dy_jit_pre:\\n {}\\n, st_pre: \\n{}.\".format(dy_jit_pre, st_pre))\n self.assertTrue(\n np.allclose(predictor_pre, st_pre),\n msg=\"predictor_pre:\\n {}\\n, st_pre: \\n{}.\".format(predictor_pre,\n st_pre))\n\n def test_resnet(self):\n static_loss = self.train(to_static=True)\n dygraph_loss = self.train(to_static=False)\n self.assertTrue(\n np.allclose(static_loss, dygraph_loss),\n msg=\"static_loss: {} \\n dygraph_loss: {}\".format(static_loss,\n dygraph_loss))\n self.verify_predict()\n\n def test_in_static_mode_mkldnn(self):\n paddle.fluid.set_flags({'FLAGS_use_mkldnn': True})\n try:\n if paddle.fluid.core.is_compiled_with_mkldnn():\n train(True, self.build_strategy)\n finally:\n paddle.fluid.set_flags({'FLAGS_use_mkldnn': False})\n\n\nclass TestError(unittest.TestCase):\n def test_type_error(self):\n def foo(x):\n out = x + 1\n return out\n\n with self.assertRaises(TypeError):\n static_foo = paddle.jit.to_static(foo, build_strategy=\"x\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport sys\nimport paddle\nimport paddle.fluid as fluid\nimport unittest\nimport paddle.fluid.layers as layers\nfrom test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main\n\n\nclass TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase):\n def __init__(self):\n self.global_ring_id = 0\n\n def get_model(self, main_prog, startup_program, rank, indata=None):\n with fluid.program_guard(main_prog, startup_program):\n seed = os.getpid()\n np.random.seed(seed)\n in_feat = 2\n n_expert = 2\n world_size = 2\n tot_expert = n_expert * world_size\n local_expert_count = np.random.randint(\n 1, 4, size=tot_expert).astype(\"int\")\n fwd_expert_count = sum(local_expert_count)\n local_input_buf = np.random.rand(fwd_expert_count,\n in_feat).astype(\"float32\")\n local_expert_count = paddle.to_tensor(local_expert_count)\n local_input_buf = paddle.to_tensor(local_input_buf)\n global_expert_count = []\n paddle.distributed.alltoall(\n paddle.split(\n local_expert_count, 2, axis=0),\n global_expert_count)\n global_expert_count = paddle.concat(global_expert_count, axis=0)\n local_input_buf.stop_gradient = False\n output = paddle.distributed.utils.global_scatter(\n local_input_buf, local_expert_count, global_expert_count)\n output.stop_gradient = False\n c = output * output\n c.backward()\n return [output.numpy(), local_input_buf.grad.numpy()]\n\n\nif __name__ == \"__main__\":\n runtime_main(TestCollectiveGlobalScatterAPI, \"global_scatter\")\n",
"# -*- coding: UTF-8 -*-\n\n# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport argparse\nimport ast\nimport time\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.nn import Linear\nfrom paddle.distributed import fleet\nfrom paddle.fluid.dygraph import nn\n\nfrom paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2\nfrom paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2\nfrom paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler\n\nfrom dygraph_sharding_stage2 import MLP, reader_decorator, optimizer_setting\n\nseed = 2021\nepoch = 2\nbatch_size = 32\nlinear_size = 1000\n\nnp.random.seed(seed)\npaddle.seed(seed)\n\n\ndef train_mlp(model, offload=False):\n optimizer = optimizer_setting(model=model, use_pure_fp16=True)\n\n model = paddle.amp.decorate(models=model, level='O2', save_dtype='float32')\n scaler = paddle.amp.GradScaler(init_loss_scaling=32768)\n scaler = ShardingScaler(scaler)\n\n optimizer = ShardingOptimizerStage2(\n params=model.parameters(), optim=optimizer, offload=offload)\n model = ShardingStage2(\n model, optimizer, buffer_max_size=2**21, accumulate_grads=True)\n\n train_reader = paddle.batch(\n reader_decorator(linear_size), batch_size=batch_size, drop_last=True)\n\n train_loader = paddle.io.DataLoader.from_generator(\n capacity=32,\n use_double_buffer=True,\n iterable=True,\n return_list=True,\n use_multiprocess=True)\n train_loader.set_sample_list_generator(train_reader)\n\n for eop in range(epoch):\n model.train()\n\n for batch_id, data in enumerate(train_loader()):\n img, label = data\n label.stop_gradient = True\n img.stop_gradient = True\n\n with paddle.amp.auto_cast(True, level='O2'):\n out = model(img)\n loss = paddle.nn.functional.cross_entropy(\n input=out, label=label)\n\n avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32))\n scaler.scale(avg_loss).backward()\n\n scaler.step(optimizer)\n scaler.update()\n optimizer.clear_grad()\n\n for dtype in optimizer.param_storages:\n for dst_rank, param_storage in optimizer.param_storages[dtype].items():\n param_storage.to(device=\"gpu\", dtype=dtype)\n\n return model.parameters()\n\n\ndef test_sharding_stage2_offload():\n mlp = MLP(linear_size)\n mlp_offload = MLP(linear_size)\n mlp_offload.set_state_dict(mlp.state_dict())\n\n mlp_params = train_mlp(mlp, offload=False)\n mlp_offload_params = train_mlp(mlp_offload, offload=True)\n\n for i in range(len(mlp_params)):\n for j in range(len(mlp_offload_params)):\n if mlp_params[i].name == mlp_offload_params[j].name:\n np.testing.assert_allclose(\n mlp_params[i].numpy(),\n mlp_offload_params[j].numpy(),\n rtol=1e-6)\n return\n\n\nif __name__ == '__main__':\n test_sharding_stage2_offload()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport paddle\nimport copy\n\nnp.random.seed(10)\npaddle.seed(10)\n\n\nclass TestNormalAPI(unittest.TestCase):\n def setUp(self):\n self.mean = 1.0\n self.std = 0.0\n self.shape = None\n self.repeat_num = 2000\n self.set_attrs()\n self.dtype = self.get_dtype()\n self.place=paddle.CUDAPlace(0) \\\n if paddle.fluid.core.is_compiled_with_cuda() \\\n else paddle.CPUPlace()\n\n def set_attrs(self):\n self.shape = [8, 12]\n\n def get_shape(self):\n if isinstance(self.mean, np.ndarray):\n shape = self.mean.shape\n elif isinstance(self.std, np.ndarray):\n shape = self.std.shape\n else:\n shape = self.shape\n return list(shape)\n\n def get_dtype(self):\n if isinstance(self.mean, np.ndarray):\n return self.mean.dtype\n elif isinstance(self.std, np.ndarray):\n return self.std.dtype\n else:\n return 'float32'\n\n def static_api(self):\n shape = self.get_shape()\n ret_all_shape = copy.deepcopy(shape)\n ret_all_shape.insert(0, self.repeat_num)\n ret_all = np.zeros(ret_all_shape, self.dtype)\n if isinstance(self.mean, np.ndarray) \\\n and isinstance(self.std, np.ndarray):\n with paddle.static.program_guard(paddle.static.Program()):\n mean = paddle.fluid.data('Mean', self.mean.shape,\n self.mean.dtype)\n std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)\n out = paddle.normal(mean, std, self.shape)\n\n exe = paddle.static.Executor(self.place)\n for i in range(self.repeat_num):\n ret = exe.run(feed={\n 'Mean': self.mean,\n 'Std': self.std.reshape(shape)\n },\n fetch_list=[out])\n ret_all[i] = ret[0]\n return ret_all\n elif isinstance(self.mean, np.ndarray):\n with paddle.static.program_guard(paddle.static.Program()):\n mean = paddle.fluid.data('Mean', self.mean.shape,\n self.mean.dtype)\n out = paddle.normal(mean, self.std, self.shape)\n\n exe = paddle.static.Executor(self.place)\n for i in range(self.repeat_num):\n ret = exe.run(feed={'Mean': self.mean}, fetch_list=[out])\n ret_all[i] = ret[0]\n return ret_all\n elif isinstance(self.std, np.ndarray):\n with paddle.static.program_guard(paddle.static.Program()):\n std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)\n out = paddle.normal(self.mean, std, self.shape)\n\n exe = paddle.static.Executor(self.place)\n for i in range(self.repeat_num):\n ret = exe.run(feed={'Std': self.std}, fetch_list=[out])\n ret_all[i] = ret[0]\n return ret_all\n else:\n with paddle.static.program_guard(paddle.static.Program()):\n out = paddle.normal(self.mean, self.std, self.shape)\n\n exe = paddle.static.Executor(self.place)\n for i in range(self.repeat_num):\n ret = exe.run(fetch_list=[out])\n ret_all[i] = ret[0]\n return ret_all\n\n def dygraph_api(self):\n paddle.disable_static(self.place)\n shape = self.get_shape()\n ret_all_shape = copy.deepcopy(shape)\n ret_all_shape.insert(0, self.repeat_num)\n ret_all = np.zeros(ret_all_shape, self.dtype)\n\n mean = paddle.to_tensor(self.mean) \\\n if 
isinstance(self.mean, np.ndarray) else self.mean\n std = paddle.to_tensor(self.std) \\\n if isinstance(self.std, np.ndarray) else self.std\n for i in range(self.repeat_num):\n out = paddle.normal(mean, std, self.shape)\n ret_all[i] = out.numpy()\n paddle.enable_static()\n return ret_all\n\n def test_api(self):\n ret_static = self.static_api()\n ret_dygraph = self.dygraph_api()\n for ret in [ret_static, ret_dygraph]:\n shape_ref = self.get_shape()\n self.assertEqual(shape_ref, list(ret[0].shape))\n\n ret = ret.flatten().reshape([self.repeat_num, -1])\n mean = np.mean(ret, axis=0)\n std = np.std(ret, axis=0)\n mean_ref=self.mean.reshape([1, -1]) \\\n if isinstance(self.mean, np.ndarray) else self.mean\n std_ref=self.std.reshape([1, -1]) \\\n if isinstance(self.std, np.ndarray) else self.std\n self.assertTrue(np.allclose(mean_ref, mean, 0.2, 0.2))\n self.assertTrue(np.allclose(std_ref, std, 0.2, 0.2))\n\n\nclass TestNormalAPI_mean_is_tensor(TestNormalAPI):\n def set_attrs(self):\n self.mean = np.random.uniform(-2, -1, [2, 3, 4, 5]).astype('float64')\n\n\nclass TestNormalAPI_std_is_tensor(TestNormalAPI):\n def set_attrs(self):\n self.std = np.random.uniform(0.7, 1, [2, 3, 17]).astype('float64')\n\n\nclass TestNormalAPI_mean_std_are_tensor(TestNormalAPI):\n def set_attrs(self):\n self.mean = np.random.uniform(1, 2, [1, 100]).astype('float64')\n self.std = np.random.uniform(0.5, 1, [1, 100]).astype('float64')\n\n\nclass TestNormalAPI_mean_std_are_tensor_with_different_dtype(TestNormalAPI):\n def set_attrs(self):\n self.mean = np.random.uniform(1, 2, [100]).astype('float64')\n self.std = np.random.uniform(1, 2, [100]).astype('float32')\n\n\nclass TestNormalAlias(unittest.TestCase):\n def test_alias(self):\n paddle.disable_static()\n shape = [1, 2, 3]\n out1 = paddle.normal(shape=shape)\n out2 = paddle.tensor.normal(shape=shape)\n out3 = paddle.tensor.random.normal(shape=shape)\n paddle.enable_static()\n\n\nclass TestNormalErrors(unittest.TestCase):\n def test_errors(self):\n with paddle.static.program_guard(paddle.static.Program()):\n mean = [1, 2, 3]\n self.assertRaises(TypeError, paddle.normal, mean)\n\n std = [1, 2, 3]\n self.assertRaises(TypeError, paddle.normal, std=std)\n\n mean = paddle.fluid.data('Mean', [100], 'int32')\n self.assertRaises(TypeError, paddle.normal, mean)\n\n std = paddle.fluid.data('Std', [100], 'int32')\n self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)\n\n self.assertRaises(TypeError, paddle.normal, shape=1)\n\n self.assertRaises(TypeError, paddle.normal, shape=[1.0])\n\n shape = paddle.fluid.data('Shape', [100], 'float32')\n self.assertRaises(TypeError, paddle.normal, shape=shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
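As a quick illustration of the `paddle.normal` call patterns those tests cover, a small dygraph sketch:

.. code-block:: python

    # Sketch of the paddle.normal variants exercised above:
    # scalar mean/std with an explicit shape, and a tensor mean.
    import numpy as np
    import paddle

    paddle.disable_static()

    x = paddle.normal(mean=1.0, std=2.0, shape=[8, 12])   # shape from `shape`

    mean = paddle.to_tensor(
        np.random.uniform(-2, -1, [2, 3]).astype('float64'))
    y = paddle.normal(mean=mean, std=1.0)                  # shape from `mean`

    print(x.shape, y.shape)   # [8, 12] [2, 3]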
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.optimizer as opt\n\nBATCH_SIZE = 16\nBATCH_NUM = 4\nEPOCH_NUM = 4\nSEED = 10\n\nIMAGE_SIZE = 784\nCLASS_NUM = 10\n\n\n# define a random dataset\nclass RandomDataset(paddle.io.Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n np.random.seed(SEED)\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n\nclass LinearNet(nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n self._dropout = paddle.nn.Dropout(p=0.5)\n\n @paddle.jit.to_static(input_spec=[\n paddle.static.InputSpec(\n shape=[None, IMAGE_SIZE], dtype='float32', name='x')\n ])\n def forward(self, x):\n return self._linear(x)\n\n\ndef train(layer, loader, loss_fn, opt):\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = layer(image)\n loss = loss_fn(out, label)\n loss.backward()\n opt.step()\n opt.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(epoch_id, batch_id,\n np.mean(loss.numpy())))\n return loss\n\n\nclass TestTranslatedLayer(unittest.TestCase):\n def setUp(self):\n # enable dygraph mode\n place = paddle.CPUPlace()\n paddle.disable_static(place)\n\n # config seed\n paddle.seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n\n # create network\n self.layer = LinearNet()\n self.loss_fn = nn.CrossEntropyLoss()\n self.sgd = opt.SGD(learning_rate=0.001,\n parameters=self.layer.parameters())\n\n # create data loader\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n self.loader = paddle.io.DataLoader(\n dataset,\n places=place,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=0)\n\n # train\n train(self.layer, self.loader, self.loss_fn, self.sgd)\n\n # save\n self.model_path = \"linear.example.model\"\n paddle.jit.save(self.layer, self.model_path)\n\n def test_inference_and_fine_tuning(self):\n self.load_and_inference()\n self.load_and_fine_tuning()\n\n def load_and_inference(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n # inference\n x = paddle.randn([1, IMAGE_SIZE], 'float32')\n\n self.layer.eval()\n orig_pred = self.layer(x)\n\n translated_layer.eval()\n pred = translated_layer(x)\n\n self.assertTrue(np.array_equal(orig_pred.numpy(), pred.numpy()))\n\n def load_and_fine_tuning(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n # train original layer continue\n self.layer.train()\n orig_loss = train(self.layer, self.loader, self.loss_fn, self.sgd)\n\n # fine-tuning\n translated_layer.train()\n sgd = opt.SGD(learning_rate=0.001,\n 
parameters=translated_layer.parameters())\n loss = train(translated_layer, self.loader, self.loss_fn, sgd)\n\n self.assertTrue(\n np.array_equal(orig_loss.numpy(), loss.numpy()),\n msg=\"original loss:\\n{}\\nnew loss:\\n{}\\n\".format(orig_loss.numpy(),\n loss.numpy()))\n\n def test_get_program(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n program = translated_layer.program()\n self.assertTrue(isinstance(program, paddle.static.Program))\n\n def test_get_program_method_not_exists(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n with self.assertRaises(ValueError):\n program = translated_layer.program('not_exists')\n\n def test_get_input_spec(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n expect_spec = [\n paddle.static.InputSpec(\n shape=[None, IMAGE_SIZE], dtype='float32', name='x')\n ]\n actual_spec = translated_layer._input_spec()\n\n for spec_x, spec_y in zip(expect_spec, actual_spec):\n self.assertEqual(spec_x, spec_y)\n\n def test_get_output_spec(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n\n expect_spec = [\n paddle.static.InputSpec(\n shape=[None, CLASS_NUM],\n dtype='float32',\n name='translated_layer/scale_0.tmp_1')\n ]\n actual_spec = translated_layer._output_spec()\n\n for spec_x, spec_y in zip(expect_spec, actual_spec):\n self.assertEqual(spec_x, spec_y)\n\n def test_layer_state(self):\n # load\n translated_layer = paddle.jit.load(self.model_path)\n translated_layer.eval()\n self.assertEqual(translated_layer.training, False)\n for layer in translated_layer.sublayers():\n print(\"123\")\n self.assertEqual(layer.training, False)\n\n translated_layer.train()\n self.assertEqual(translated_layer.training, True)\n for layer in translated_layer.sublayers():\n self.assertEqual(layer.training, True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
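The TranslatedLayer tests above rely on the `paddle.jit.save` / `paddle.jit.load` round trip; a condensed sketch of that flow is below (the path name mirrors the test and is otherwise arbitrary, and it assumes an undecorated Layer can be saved by supplying `input_spec`, as the test's `LinearNet` does via its decorator).

.. code-block:: python

    # Sketch of the save/load round trip behind TranslatedLayer.
    # Passing input_spec lets jit.save trace the Layer's forward.
    import paddle

    layer = paddle.nn.Linear(784, 10)
    spec = [paddle.static.InputSpec(shape=[None, 784], dtype='float32', name='x')]
    paddle.jit.save(layer, "linear.example.model", input_spec=spec)

    translated = paddle.jit.load("linear.example.model")
    translated.eval()
    pred = translated(paddle.randn([1, 784], 'float32'))
    print(pred.shape)   # [1, 10]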
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport unittest\n\npaddle.disable_static()\nSEED = 2020\nnp.random.seed(SEED)\npaddle.seed(SEED)\n\n\nclass Generator(fluid.dygraph.Layer):\n def __init__(self):\n super(Generator, self).__init__()\n self.conv1 = paddle.nn.Conv2D(3, 3, 3, padding=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = fluid.layers.tanh(x)\n return x\n\n\nclass Discriminator(fluid.dygraph.Layer):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.convd = paddle.nn.Conv2D(6, 3, 1)\n\n def forward(self, x):\n x = self.convd(x)\n return x\n\n\nclass TestRetainGraph(unittest.TestCase):\n def cal_gradient_penalty(self,\n netD,\n real_data,\n fake_data,\n edge_data=None,\n type='mixed',\n constant=1.0,\n lambda_gp=10.0):\n if lambda_gp > 0.0:\n if type == 'real':\n interpolatesv = real_data\n elif type == 'fake':\n interpolatesv = fake_data\n elif type == 'mixed':\n alpha = paddle.rand((real_data.shape[0], 1))\n alpha = paddle.expand(alpha, [\n real_data.shape[0],\n np.prod(real_data.shape) // real_data.shape[0]\n ])\n alpha = paddle.reshape(alpha, real_data.shape)\n interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)\n else:\n raise NotImplementedError('{} not implemented'.format(type))\n interpolatesv.stop_gradient = False\n real_data.stop_gradient = True\n fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)\n disc_interpolates = netD(fake_AB)\n\n outs = paddle.fluid.layers.fill_constant(\n disc_interpolates.shape, disc_interpolates.dtype, 1.0)\n gradients = paddle.grad(\n outputs=disc_interpolates,\n inputs=fake_AB,\n grad_outputs=outs,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)\n\n gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])\n\n gradient_penalty = paddle.mean((paddle.norm(gradients + 1e-16, 2, 1)\n - constant)**\n 2) * lambda_gp # added eps\n return gradient_penalty, gradients\n else:\n return 0.0, None\n\n def run_retain(self, need_retain):\n g = Generator()\n d = Discriminator()\n\n optim_g = paddle.optimizer.Adam(parameters=g.parameters())\n optim_d = paddle.optimizer.Adam(parameters=d.parameters())\n\n gan_criterion = paddle.nn.MSELoss()\n l1_criterion = paddle.nn.L1Loss()\n\n A = np.random.rand(2, 3, 32, 32).astype('float32')\n B = np.random.rand(2, 3, 32, 32).astype('float32')\n\n realA = paddle.to_tensor(A)\n realB = paddle.to_tensor(B)\n fakeB = g(realA)\n\n optim_d.clear_gradients()\n fake_AB = paddle.concat((realA, fakeB), 1)\n G_pred_fake = d(fake_AB.detach())\n\n false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape,\n 'float32', 0.0)\n\n G_gradient_penalty, _ = self.cal_gradient_penalty(\n d, realA, fakeB, lambda_gp=10.0)\n loss_d = gan_criterion(G_pred_fake, false_target) + G_gradient_penalty\n\n loss_d.backward(retain_graph=need_retain)\n optim_d.minimize(loss_d)\n\n optim_g.clear_gradients()\n fake_AB = 
paddle.concat((realA, fakeB), 1)\n G_pred_fake = d(fake_AB)\n true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape,\n 'float32', 1.0)\n loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,\n true_target)\n\n loss_g.backward()\n optim_g.minimize(loss_g)\n\n def test_retain(self):\n self.run_retain(need_retain=True)\n self.assertRaises(RuntimeError, self.run_retain, need_retain=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
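A stripped-down sketch of the retain_graph behaviour the test above asserts: a second backward pass through the same graph is only possible if the first pass kept the graph alive (this is an illustrative reduction, not the GAN setup used in the test).

.. code-block:: python

    # Sketch: backward twice through one graph needs retain_graph=True
    # on the first call; without it the second call raises RuntimeError.
    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
    y = (x * x).sum()

    y.backward(retain_graph=True)   # keep the graph for another pass
    y.backward()                    # allowed; gradients accumulate
    print(x.grad)                   # 2*x accumulated twice -> 4*x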
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nimport six\nfrom fake_reader import fake_imdb_reader\nfrom paddle.fluid.clip import _allow_pure_fp16_global_norm_clip\n\npaddle.enable_static()\n\n\ndef bow_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2):\n \"\"\"\n BOW net\n This model is from https://github.com/PaddlePaddle/models:\n fluid/PaddleNLP/text_classification/nets.py\n \"\"\"\n emb = fluid.layers.embedding(\n input=data, is_sparse=True, size=[dict_dim, emb_dim])\n bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')\n bow_tanh = fluid.layers.tanh(bow)\n fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act=\"tanh\")\n fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act=\"tanh\")\n prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n\n return avg_cost\n\n\nclass TestGradientClip(unittest.TestCase):\n def setUp(self):\n self.word_dict_len = 5147\n self.BATCH_SIZE = 2\n reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)\n self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)\n self.clip_gradient = lambda x: None\n self.init()\n\n def init(self):\n pass\n\n def get_places(self):\n places = [fluid.CPUPlace()]\n if core.is_compiled_with_cuda():\n places.append(fluid.CUDAPlace(0))\n return places\n\n def check_clip_result(self, out, out_clip):\n pass\n\n def check_gradient_clip(self, place, dtype='float32'):\n prog = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(\n main_program=prog, startup_program=startup_program):\n image = fluid.data(name=\"a\", shape=[-1, 784], dtype='float32')\n label = fluid.data(name=\"b\", shape=[-1, 1], dtype='int64')\n if dtype != 'float32':\n image_cast = paddle.cast(image, dtype)\n hidden = fluid.layers.fc(input=image_cast, size=32, act='relu')\n else:\n hidden = fluid.layers.fc(input=image, size=32, act='relu')\n predict = fluid.layers.fc(input=hidden, size=10, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=predict, label=label)\n avg_cost = fluid.layers.mean(cost)\n\n prog_clip = prog.clone()\n avg_cost_clip = prog_clip.block(0).var(avg_cost.name)\n\n p_g = fluid.backward.append_backward(loss=avg_cost)\n p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)\n\n p_g = sorted(p_g, key=lambda x: x[0].name)\n p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name)\n with fluid.program_guard(\n main_program=prog_clip, startup_program=startup_program):\n p_g_clip = self.clip_gradient(p_g_clip)\n\n grad_list = [elem[1] for elem in p_g]\n grad_clip_list = [elem[1] for elem in p_g_clip]\n\n train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=3)\n exe = 
fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[image, label], place=place)\n exe.run(startup_program)\n\n data = next(train_reader())\n out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)\n out_clip = exe.run(prog_clip,\n feed=feeder.feed(data),\n fetch_list=grad_clip_list)\n self.check_clip_result(out, out_clip)\n\n def check_sparse_gradient_clip(self, place):\n prog = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(\n main_program=prog, startup_program=startup_program):\n data = fluid.data(\n name=\"words\", shape=[-1, 1], dtype=\"int64\", lod_level=1)\n label = fluid.data(name=\"label\", shape=[-1, 1], dtype=\"int64\")\n cost = bow_net(data, label, self.word_dict_len)\n\n self.backward_and_optimize(cost)\n\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n exe.run(startup_program)\n\n data = next(self.train_data())\n val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]\n self.assertEqual((1, ), val.shape)\n self.assertFalse(np.isnan(val))\n\n def backward_and_optimize(self, cost):\n pass\n\n\nclass TestGradientClipByGlobalNorm(TestGradientClip):\n def init(self):\n self.clip_norm = 0.2\n\n def check_clip_result(self, out, out_clip):\n global_norm = 0\n for v in out:\n global_norm += np.sum(np.square(v))\n global_norm = np.sqrt(global_norm)\n scale = self.clip_norm / np.maximum(self.clip_norm, global_norm)\n res = []\n for i in range(len(out)):\n out[i] = scale * out[i]\n\n for u, v in zip(out, out_clip):\n self.assertTrue(\n np.allclose(\n a=u, b=v, rtol=1e-5, atol=1e-8),\n \"gradient clip by global norm has wrong results!, \\nu={}\\nv={}\\ndiff={}\".\n format(u, v, u - v))\n\n # test whether the ouput is right when use 'set_gradient_clip'\n def test_old_gradient_clip(self):\n def func(params_grads):\n clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)\n fluid.clip.set_gradient_clip(clip)\n return fluid.clip.append_gradient_clip_ops(params_grads)\n\n self.clip_gradient = func\n self.check_gradient_clip(fluid.CPUPlace())\n\n # test whether the ouput is right when use grad_clip\n def test_new_gradient_clip(self):\n def func(params_grads):\n clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)\n return clip(params_grads)\n\n self.clip_gradient = func\n self.check_gradient_clip(fluid.CPUPlace())\n\n # test whether the ouput is right when use grad_clip under float64\n def test_new_gradient_clip_fp64(self):\n def func(params_grads):\n clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)\n return clip(params_grads)\n\n self.clip_gradient = func\n self.check_gradient_clip(fluid.CPUPlace(), \"float64\")\n\n # invoke 'set_gradient_clip' in a wrong order\n def test_wrong_API_order(self):\n def backward_func(cost):\n clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)\n fluid.clip.set_gradient_clip(clip)\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01,\n grad_clip=clip)\n # if 'set_gradient_clip' and 'optimize(grad_clip)' together, 'set_gradient_clip' will be ineffective\n sgd_optimizer.minimize(cost)\n # 'set_gradient_clip' must before 'minimize', otherwise, 'set_gradient_clip' will be ineffective\n fluid.clip.set_gradient_clip(clip)\n\n self.backward_and_optimize = backward_func\n for place in self.get_places():\n self.check_sparse_gradient_clip(place)\n\n # raise typeError\n def test_tpyeError(self):\n # the type of optimizer(grad_clip=) must be an instance of GradientClipBase's derived class\n with 
self.assertRaises(TypeError):\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1,\n grad_clip=\"test\")\n\n # if grad is None or not need clip\n def test_none_grad_fp32(self):\n ops = self._test_none_grad_helper(\"float32\")\n self.assertListEqual(ops, [\n 'squared_l2_norm', 'squared_l2_norm', 'sum', 'sqrt',\n 'fill_constant', 'elementwise_max', 'elementwise_div',\n 'elementwise_mul', 'elementwise_mul'\n ])\n\n def test_none_grad_fp16(self):\n ops = self._test_none_grad_helper(\"float16\")\n self.assertListEqual(ops, [\n 'square', 'reduce_sum', 'square', 'reduce_sum', 'sum', 'cast',\n 'sqrt', 'fill_constant', 'elementwise_max', 'elementwise_div',\n 'cast', 'elementwise_mul', 'cast', 'elementwise_mul'\n ])\n\n def _test_none_grad_helper(self, dtype):\n prog = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(\n main_program=prog, startup_program=startup_program):\n clip = fluid.clip.GradientClipByGlobalNorm(self.clip_norm)\n x = fluid.default_main_program().global_block().create_parameter(\n name=\"x\", shape=[2, 3], dtype=dtype)\n y = fluid.default_main_program().global_block().create_parameter(\n name=\"y\", shape=[2, 3], dtype=dtype)\n\n # (x, None) should not be returned\n params_grads = [(x, None), (x, y), (y, x)]\n params_grads = clip(params_grads)\n self.assertTrue(\n len(params_grads) == 2,\n \"ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!\"\n )\n\n ops = [op.type for op in x.block.ops]\n return ops\n\n\nclass TestGradientClipByNorm(TestGradientClip):\n def init(self):\n self.clip_norm = 0.2\n\n def check_clip_result(self, out, out_clip):\n for u, v in zip(out, out_clip):\n norm = np.sqrt(np.sum(np.power(u, 2)))\n scale = self.clip_norm / np.maximum(self.clip_norm, norm)\n u = u * scale\n self.assertTrue(\n np.allclose(\n a=u, b=v, rtol=1e-5, atol=1e-8),\n \"gradient clip by norm has wrong results!\")\n\n # test whether the ouput is right when use grad_clip\n def test_gradient_clip(self):\n def func(params_grads):\n clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)\n return clip(params_grads)\n\n self.clip_gradient = func\n self.check_gradient_clip(fluid.CPUPlace())\n\n # if grad is None or not need clip\n def test_none_grad(self):\n clip = fluid.clip.GradientClipByNorm(self.clip_norm)\n x = fluid.default_main_program().global_block().create_parameter(\n name=\"x\", shape=[2, 3], dtype=\"float32\", need_clip=False)\n y = fluid.default_main_program().global_block().create_parameter(\n name=\"y\", shape=[2, 3], dtype=\"float32\", need_clip=False)\n\n # (x, None) should not be returned\n params_grads = [(x, None), (x, y)]\n params_grads = clip(params_grads)\n self.assertTrue(\n len(clip(params_grads)) == 1,\n \"ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!\"\n )\n self.assertTrue(\n params_grads[0][1].name == 'y',\n \"ClipGradByNorm: grad should not be clipped when filtered out!\")\n\n\nclass TestGradientClipByValue(TestGradientClip):\n def init(self):\n self.max = 0.2\n self.min = 0.1\n\n def check_clip_result(self, out, out_clip):\n for i, v in enumerate(out):\n out[i] = np.clip(v, self.min, self.max)\n for u, v in zip(out, out_clip):\n u = np.clip(u, self.min, self.max)\n self.assertTrue(\n np.allclose(\n a=u, b=v, rtol=1e-6, atol=1e-8),\n \"gradient clip by value has wrong results!\")\n\n # test whether the ouput is right when use grad_clip\n def test_gradient_clip(self):\n def func(params_grads):\n clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)\n 
return clip(params_grads)\n\n self.clip_gradient = func\n self.check_gradient_clip(fluid.CPUPlace())\n\n # if grad is None or not need clip\n def test_none_grad(self):\n clip = fluid.clip.GradientClipByValue(self.max, self.min)\n x = fluid.default_main_program().global_block().create_parameter(\n name=\"x\", shape=[2, 3], dtype=\"float32\", need_clip=False)\n y = fluid.default_main_program().global_block().create_parameter(\n name=\"y\", shape=[2, 3], dtype=\"float32\", need_clip=False)\n\n # (x, None) should not be returned\n params_grads = [(x, None), (x, y)]\n params_grads = clip(params_grads)\n self.assertTrue(\n len(clip(params_grads)) == 1,\n \"ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!\"\n )\n self.assertTrue(\n params_grads[0][1].name == 'y',\n \"ClipGradByValue: grad should not be clipped when filtered out!\")\n\n\nclass TestDygraphGradientClip(unittest.TestCase):\n def test_gradient_clip(self):\n with fluid.dygraph.guard():\n linear = fluid.dygraph.Linear(5, 5)\n inputs = fluid.layers.uniform_random(\n [16, 5], min=-10, max=10).astype('float32')\n out = linear(fluid.dygraph.to_variable(inputs))\n loss = fluid.layers.reduce_mean(out)\n loss.backward()\n sgd_optimizer = fluid.optimizer.SGD(\n learning_rate=0.0,\n parameter_list=linear.parameters(),\n grad_clip=fluid.clip.GradientClipByGlobalNorm(0.1))\n self.check_clip_result(loss, sgd_optimizer)\n\n def check_clip_result(self, loss, optimizer):\n pass\n\n\nclass TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):\n def setUp(self):\n self.clip_norm = 0.8\n self.clip1 = fluid.clip.GradientClipByGlobalNorm(\n clip_norm=self.clip_norm)\n self.clip2 = fluid.clip.GradientClipByGlobalNorm(\n clip_norm=self.clip_norm)\n\n def check_clip_result(self, loss, optimizer):\n # if grad is None\n x = fluid.dygraph.to_variable(\n np.array([2, 3]).astype(\"float32\"), name=\"x\")\n y = fluid.dygraph.to_variable(\n np.array([3, 4]).astype(\"float32\"), name=\"y\")\n assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2\n # get params and grads from network\n opt, params_grads = optimizer.minimize(loss)\n _, grads = zip(*params_grads)\n params_grads = self.clip2(params_grads)\n _, grads_clip = zip(*params_grads)\n\n global_norm = 0\n for u in grads:\n u = u.numpy()\n global_norm += np.sum(np.power(u, 2))\n global_norm = np.sqrt(global_norm)\n\n global_norm_clip = 0\n for v in grads_clip:\n v = v.numpy()\n global_norm_clip += np.sum(np.power(v, 2))\n global_norm_clip = np.sqrt(global_norm_clip)\n\n a = np.minimum(global_norm, self.clip_norm)\n b = global_norm_clip\n self.assertTrue(\n np.isclose(\n a=a, b=b, rtol=1e-6, atol=1e-8),\n \"gradient clip by global norm has wrong results, expetcd:%f, but recieved:%f\"\n % (a, b))\n\n\nclass TestDygraphGradientClipByNorm(TestDygraphGradientClip):\n def setUp(self):\n self.clip_norm = 0.8\n self.clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)\n\n def check_clip_result(self, loss, optimizer):\n # if grad is None\n x = fluid.dygraph.to_variable(np.array([2, 3]).astype(\"float32\"))\n assert len(self.clip([(x, None)])) == 0\n # get params and grads from network\n self.clip([(fluid.dygraph.to_variable(np.array([2, 3])), None)])\n opt, params_grads = optimizer.minimize(loss)\n _, grads = zip(*params_grads)\n params_grads = self.clip(params_grads)\n _, grads_clip = zip(*params_grads)\n\n for u, v in zip(grads, grads_clip):\n u = u.numpy()\n v = v.numpy()\n a = np.sqrt(np.sum(np.power(u, 2)))\n a = np.minimum(a, self.clip_norm)\n b = 
np.sqrt(np.sum(np.power(v, 2)))\n self.assertTrue(\n np.isclose(\n a=a, b=b, rtol=1e-6, atol=1e-8),\n \"gradient clip by norm has wrong results, expetcd:%f, but recieved:%f\"\n % (a, b))\n\n\nclass TestDygraphGradientClipByValue(TestDygraphGradientClip):\n def setUp(self):\n self.max = 0.2\n self.min = 0.1\n self.clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)\n\n def check_clip_result(self, loss, optimizer):\n # if grad is None\n x = fluid.dygraph.to_variable(np.array([2, 3]).astype(\"float32\"))\n assert len(self.clip([(x, None)])) == 0\n # get params and grads from network\n opt, params_grads = optimizer.minimize(loss)\n _, grads = zip(*params_grads)\n params_grads = self.clip(params_grads)\n _, grads_clip = zip(*params_grads)\n for u, v in zip(grads, grads_clip):\n u = np.clip(u.numpy(), self.min, self.max)\n v = v.numpy()\n self.assertTrue(\n np.allclose(\n a=u, b=v, rtol=1e-6, atol=1e-8),\n \"gradient clip by value has wrong results!\")\n\n\nclass SimpleNet(paddle.nn.Layer):\n def __init__(self):\n super(SimpleNet, self).__init__()\n self.linear = paddle.nn.Linear(5, 5)\n self.batch_norm = paddle.nn.BatchNorm(5)\n\n def forward(self, x):\n x = self.linear(x)\n x = self.batch_norm(x)\n return x\n\n\nclass TestDygraphGradientClipFP16(unittest.TestCase):\n def test_gradient_clip(self):\n if fluid.core.is_compiled_with_cuda():\n with fluid.dygraph.guard():\n paddle.seed(10)\n model = SimpleNet()\n sgd_optimizer = paddle.optimizer.SGD(\n learning_rate=0.0, parameters=model.parameters())\n model, sgd_optimizer = paddle.amp.decorate(\n models=model, optimizers=sgd_optimizer, level='O2')\n scaler = paddle.amp.GradScaler(init_loss_scaling=1024)\n inputs = fluid.layers.uniform_random(\n [1, 5], min=-10, max=10).astype('float32')\n with paddle.amp.auto_cast(level='O2'):\n out = model(fluid.dygraph.to_variable(inputs))\n loss = fluid.layers.reduce_mean(out)\n scaled = scaler.scale(loss)\n scaled.backward()\n scaler.unscale_(sgd_optimizer)\n # before clip\n params_grads = []\n for param in model.parameters():\n if param.stop_gradient:\n continue\n if param._grad_ivar() is not None:\n params_grads.append((param, param._grad_ivar()))\n _, grads = zip(*params_grads)\n # clip grads\n clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=0.8)\n params_grads = clip(params_grads)\n _, grads_clip = zip(*params_grads)\n # param update \n scaler.step(sgd_optimizer)\n scaler.update()\n\n global_norm = 0\n for u in grads:\n u = u.numpy()\n global_norm += np.sum(np.power(u, 2))\n global_norm = np.sqrt(global_norm)\n global_norm_clip = 0\n for v in grads_clip:\n v = v.numpy()\n global_norm_clip += np.sum(np.power(v, 2))\n global_norm_clip = np.sqrt(global_norm_clip)\n\n a = np.minimum(global_norm, 0.8)\n b = global_norm_clip\n self.assertTrue(\n np.isclose(\n a=a, b=b, rtol=1e-3, atol=1e-8),\n \"gradient clip by global norm has wrong results, expetcd:%f, but recieved:%f\"\n % (a, b))\n\n\nclass TestDygraphGradientClipFP64(unittest.TestCase):\n def test_gradient_clip(self):\n with fluid.dygraph.guard():\n inputs = fluid.layers.uniform_random(\n [16, 5], min=-10, max=10).astype('float64')\n linear = fluid.dygraph.Linear(5, 5, dtype=\"float64\")\n out = linear(fluid.dygraph.to_variable(inputs))\n loss = fluid.layers.reduce_mean(out)\n loss.backward()\n # before clip\n params_grads = []\n for param in linear.parameters():\n if param.stop_gradient:\n continue\n if param._grad_ivar() is not None:\n params_grads.append((param, param._grad_ivar()))\n _, grads = zip(*params_grads)\n # clip grads\n clip 
= fluid.clip.GradientClipByGlobalNorm(clip_norm=0.1)\n params_grads = clip(params_grads)\n _, grads_clip = zip(*params_grads)\n\n global_norm = 0\n for u in grads:\n u = u.numpy()\n global_norm += np.sum(np.power(u, 2))\n global_norm = np.sqrt(global_norm)\n\n global_norm_clip = 0\n for v in grads_clip:\n v = v.numpy()\n print(v)\n global_norm_clip += np.sum(np.power(v, 2))\n global_norm_clip = np.sqrt(global_norm_clip)\n print(global_norm_clip)\n\n a = np.minimum(global_norm, 0.1)\n b = global_norm_clip\n\n self.assertTrue(\n np.isclose(\n a=a, b=b, rtol=1e-6, atol=1e-8),\n \"gradient clip by global norm has wrong results, expetcd:%f, but recieved:%f\"\n % (a, b))\n\n\nclass TestPureFP16ClipGradByGlobalNorm(unittest.TestCase):\n def check_main(self, expected_has_cast_op):\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n with paddle.static.program_guard(main_prog, startup_prog):\n names = [\"p0\", \"p1\"]\n shapes = [[2, 3], [4, 5]]\n\n param_and_grads = []\n main_block = main_prog.global_block()\n for name, shape in zip(names, shapes):\n p = main_block.create_parameter(\n name=name, shape=shape, dtype='float16')\n g = main_block.create_parameter(\n name=p.name + '@GRAD', shape=p.shape, dtype=p.dtype)\n param_and_grads.append((p, g))\n\n clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)\n clip(param_and_grads)\n actual_has_cast = any(op.type == 'cast' for op in main_block.ops)\n self.assertEqual(actual_has_cast, expected_has_cast_op)\n\n def test_main(self):\n self.check_main(True)\n _allow_pure_fp16_global_norm_clip(True)\n self.check_main(False)\n _allow_pure_fp16_global_norm_clip(False)\n self.check_main(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
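For reference, the user-facing path these clipping tests ultimately guard is attaching a clip object to the optimizer; a minimal sketch:

.. code-block:: python

    # Sketch: attach ClipGradByGlobalNorm to an optimizer via grad_clip,
    # so gradients are clipped to a global norm of 0.8 before each update.
    import paddle

    linear = paddle.nn.Linear(5, 5)
    clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.8)
    sgd = paddle.optimizer.SGD(learning_rate=0.1,
                               parameters=linear.parameters(),
                               grad_clip=clip)

    loss = paddle.mean(linear(paddle.rand([16, 5])))
    loss.backward()
    sgd.step()
    sgd.clear_grad()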
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nfrom test_lstm_op import lstm, ACTIVATION\n\n\ndef fc(x, w, b):\n return np.dot(x, w) + b\n\n\ndef fused_embedded_fc_lstm(\n ids, # T x 1\n lod, # 1 x N\n embeddings=None, # Dict_size x M\n wx=None, # M x 4D\n bx=None, # 1 x 4D\n h0=None, # N x D\n c0=None, # N x D\n w_h=None, # D x 4D\n w_b=None, # 1 x 4D\n w_c=None, # 1 x 3D\n is_reverse=False,\n act_gate=None,\n act_cell=None,\n act_cand=None):\n # Make a lookup for embeddings and pass result into lstm reference\n T = ids.shape[0]\n M = embeddings.shape[1]\n x = embeddings[ids].reshape([T, M])\n return lstm(\n fc(x, wx, bx), lod, h0, c0, w_h, w_b, w_c, is_reverse, act_gate,\n act_cell, act_cand)\n\n\nclass TestFusionLSTMOp(OpTest):\n def set_conf(self):\n pass\n\n def setUp(self):\n self.op_type = 'fused_embedding_fc_lstm'\n self.lod = [[2, 3, 5, 4]]\n self.M = 8 # Embedding size\n self.D = 16 # Hidden size \n self.dict_size = 18\n self.has_initial_state = False\n self.use_peepholes = False\n self.is_reverse = False\n self.act_gate = 'sigmoid'\n self.act_cell = 'tanh'\n self.act_cand = 'tanh'\n self.set_conf()\n\n T = sum(self.lod[0])\n bs = len(self.lod[0])\n\n # this is the weight of fc\n wx = np.random.normal(size=(self.M, 4 * self.D)).astype('float32')\n # this is the bias of fc\n bx = np.random.normal(size=(1, 4 * self.D)).astype('float32')\n\n if self.use_peepholes:\n b = np.random.normal(size=(1, 7 * self.D)).astype('float32')\n else:\n b = np.random.normal(size=(1, 4 * self.D)).astype('float32')\n w_b = np.copy(b[:, 0:4 * self.D])\n w_c = b[:, 4 * self.D:] if self.use_peepholes else None\n\n # low is 0 , high is voc_size - 1\n ids = np.random.randint(\n low=0, high=self.dict_size - 1, size=(T, 1)).astype(\"int64\")\n # embeddings as they were trained , so each entry is of M size\n embeddings = np.random.random(\n (self.dict_size, self.M)).astype(\"float32\")\n\n # multiply embeddings via Weights\n fc_embeddings = np.dot(embeddings, wx)\n\n # bias should be manually added into the bias of this fused embedding fc LSTM\n b[0, 0:4 * self.D] += bx[0, :]\n combined_biases = b[:, 0:4 * self.D]\n # So let broadcast it , so they can be added\n ones = np.ones([self.dict_size, 1])\n broadcasted_biases = np.dot(ones, combined_biases)\n # Sum biases with Wx*embeddings\n fc_embeddings += broadcasted_biases\n\n if self.has_initial_state:\n h0 = np.random.normal(size=(bs, self.D)).astype('float32')\n c0 = np.random.normal(size=(bs, self.D)).astype('float32')\n else:\n h0 = np.zeros((bs, self.D)).astype('float32')\n c0 = np.zeros((bs, self.D)).astype('float32')\n\n wh = np.random.normal(size=(self.D, 4 * self.D)).astype('float32')\n\n h, c = fused_embedded_fc_lstm(\n ids, self.lod, embeddings, wx, bx, h0, c0, wh, w_b, w_c,\n self.is_reverse, ACTIVATION[self.act_gate],\n ACTIVATION[self.act_cell], ACTIVATION[self.act_cand])\n\n 
self.inputs = {\n 'Ids': (ids, self.lod),\n 'Embeddings': fc_embeddings,\n 'WeightH': wh,\n 'Bias': b\n }\n\n if self.has_initial_state:\n self.inputs['H0'] = h0\n self.inputs['C0'] = c0\n\n self.outputs = {\n 'Hidden': (h, self.lod),\n 'Cell': (c, self.lod),\n }\n self.attrs = {\n 'use_peepholes': self.use_peepholes,\n 'is_reverse': self.is_reverse,\n 'gate_activation': self.act_gate,\n 'cell_activation': self.act_cell,\n 'candidate_activation': self.act_cand\n }\n\n def test_check_output(self):\n for use_seq in {True, False}:\n self.attrs['use_seq'] = use_seq\n self.check_output(check_dygraph=False)\n\n\nclass TestFusionLSTMOpInit(TestFusionLSTMOp):\n def set_conf(self):\n self.has_initial_state = True\n\n\nclass TestFusionLSTMOpReverse(TestFusionLSTMOp):\n def set_conf(self):\n self.is_reverse = True\n\n\nclass TestFusionLSTMOpInitReverse(TestFusionLSTMOp):\n def set_conf(self):\n self.has_initial_state = True\n self.is_reverse = True\n\n\nclass TestFusionLSTMOpMD1(TestFusionLSTMOp):\n def set_conf(self):\n self.M = 36\n self.D = 8\n\n\nclass TestFusionLSTMOpMD2(TestFusionLSTMOp):\n def set_conf(self):\n self.M = 8\n self.D = 8\n\n\nclass TestFusionLSTMOpMD3(TestFusionLSTMOp):\n def set_conf(self):\n self.M = 15\n self.D = 3\n\n\nclass TestFusionLSTMOpBS1(TestFusionLSTMOp):\n def set_conf(self):\n self.lod = [[3]]\n self.D = 16\n\n\nclass TestFusionLSTMOpPeepholes(TestFusionLSTMOp):\n def set_conf(self):\n self.use_peepholes = True\n\n\nclass TestFusionLSTMOpPeepholesInit(TestFusionLSTMOp):\n def set_conf(self):\n self.use_peepholes = True\n self.has_initial_state = True\n\n\nclass TestFusionLSTMOpPeepholesReverse(TestFusionLSTMOp):\n def set_conf(self):\n self.use_peepholes = True\n self.is_reverse = True\n\n\nclass TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp):\n def set_conf(self):\n self.use_peepholes = True\n self.has_initial_state = True\n self.is_reverse = True\n\n\nclass TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp):\n def set_conf(self):\n self.use_peepholes = True\n self.lod = [[2]]\n self.D = 8\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport zipfile\nimport re\nimport random\nimport functools\nimport six\n\nimport paddle\nfrom paddle.io import Dataset\nimport paddle.compat as cpt\nfrom paddle.dataset.common import _check_exists_and_download\n\n__all__ = []\n\nage_table = [1, 18, 25, 35, 45, 50, 56]\n\nURL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'\nMD5 = 'c4d9eecfca2ab87c1945afe126590906'\n\n\nclass MovieInfo(object):\n \"\"\"\n Movie id, title and categories information are stored in MovieInfo.\n \"\"\"\n\n def __init__(self, index, categories, title):\n self.index = int(index)\n self.categories = categories\n self.title = title\n\n def value(self, categories_dict, movie_title_dict):\n \"\"\"\n Get information from a movie.\n \"\"\"\n return [[self.index], [categories_dict[c] for c in self.categories],\n [movie_title_dict[w.lower()] for w in self.title.split()]]\n\n def __str__(self):\n return \"<MovieInfo id(%d), title(%s), categories(%s)>\" % (\n self.index, self.title, self.categories)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass UserInfo(object):\n \"\"\"\n User id, gender, age, and job information are stored in UserInfo.\n \"\"\"\n\n def __init__(self, index, gender, age, job_id):\n self.index = int(index)\n self.is_male = gender == 'M'\n self.age = age_table.index(int(age))\n self.job_id = int(job_id)\n\n def value(self):\n \"\"\"\n Get information from a user.\n \"\"\"\n return [[self.index], [0 if self.is_male else 1], [self.age],\n [self.job_id]]\n\n def __str__(self):\n return \"<UserInfo id(%d), gender(%s), age(%d), job(%d)>\" % (\n self.index, \"M\"\n if self.is_male else \"F\", age_table[self.age], self.job_id)\n\n def __repr__(self):\n return str(self)\n\n\nclass Movielens(Dataset):\n \"\"\"\n Implementation of `Movielens 1-M <https://grouplens.org/datasets/movielens/1m/>`_ dataset.\n\n Args:\n data_file(str): path to data tar file, can be set None if\n :attr:`download` is True. Default None\n mode(str): 'train' or 'test' mode. Default 'train'.\n test_ratio(float): split ratio for test sample. Default 0.1.\n rand_seed(int): random seed. Default 0.\n download(bool): whether to download dataset automatically if\n :attr:`data_file` is not set. Default True\n\n Returns:\n Dataset: instance of Movielens 1-M dataset\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n from paddle.text.datasets import Movielens\n\n class SimpleNet(paddle.nn.Layer):\n def __init__(self):\n super(SimpleNet, self).__init__()\n\n def forward(self, category, title, rating):\n return paddle.sum(category), paddle.sum(title), paddle.sum(rating)\n\n\n movielens = Movielens(mode='train')\n\n for i in range(10):\n category, title, rating = movielens[i][-3:]\n category = paddle.to_tensor(category)\n title = paddle.to_tensor(title)\n rating = paddle.to_tensor(rating)\n\n model = SimpleNet()\n category, title, rating = model(category, title, rating)\n print(category.numpy().shape, title.numpy().shape, rating.numpy().shape)\n\n \"\"\"\n\n def __init__(self,\n data_file=None,\n mode='train',\n test_ratio=0.1,\n rand_seed=0,\n download=True):\n assert mode.lower() in ['train', 'test'], \\\n \"mode should be 'train', 'test', but got {}\".format(mode)\n self.mode = mode.lower()\n\n self.data_file = data_file\n if self.data_file is None:\n assert download, \"data_file is not set and downloading automatically is disabled\"\n self.data_file = _check_exists_and_download(data_file, URL, MD5,\n 'sentiment', download)\n\n self.test_ratio = test_ratio\n self.rand_seed = rand_seed\n\n np.random.seed(rand_seed)\n self._load_meta_info()\n self._load_data()\n\n def _load_meta_info(self):\n pattern = re.compile(r'^(.*)\\((\\d+)\\)$')\n self.movie_info = dict()\n self.movie_title_dict = dict()\n self.categories_dict = dict()\n self.user_info = dict()\n with zipfile.ZipFile(self.data_file) as package:\n for info in package.infolist():\n assert isinstance(info, zipfile.ZipInfo)\n title_word_set = set()\n categories_set = set()\n with package.open('ml-1m/movies.dat') as movie_file:\n for i, line in enumerate(movie_file):\n line = cpt.to_text(line, encoding='latin')\n movie_id, title, categories = line.strip().split('::')\n categories = categories.split('|')\n for c in categories:\n categories_set.add(c)\n title = pattern.match(title).group(1)\n self.movie_info[int(movie_id)] = MovieInfo(\n index=movie_id, categories=categories, title=title)\n for w in title.split():\n title_word_set.add(w.lower())\n\n for i, w in enumerate(title_word_set):\n self.movie_title_dict[w] = i\n\n for i, c in enumerate(categories_set):\n self.categories_dict[c] = i\n\n with package.open('ml-1m/users.dat') as user_file:\n for line in user_file:\n line = cpt.to_text(line, encoding='latin')\n uid, gender, age, job, _ = line.strip().split(\"::\")\n self.user_info[int(uid)] = UserInfo(\n index=uid, gender=gender, age=age, job_id=job)\n\n def _load_data(self):\n self.data = []\n is_test = self.mode == 'test'\n with zipfile.ZipFile(self.data_file) as package:\n with package.open('ml-1m/ratings.dat') as rating:\n for line in rating:\n line = cpt.to_text(line, encoding='latin')\n if (np.random.random() < self.test_ratio) == is_test:\n uid, mov_id, rating, _ = line.strip().split(\"::\")\n uid = int(uid)\n mov_id = int(mov_id)\n rating = float(rating) * 2 - 5.0\n\n mov = self.movie_info[mov_id]\n usr = self.user_info[uid]\n self.data.append(usr.value() + \\\n mov.value(self.categories_dict, self.movie_title_dict) + \\\n [[rating]])\n\n def __getitem__(self, idx):\n data = self.data[idx]\n return tuple([np.array(d) for d in data])\n\n def __len__(self):\n return len(self.data)\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nfrom __future__ import print_function\r\n\r\nimport unittest\r\nimport numpy as np\r\nimport math\r\nimport paddle.fluid.core as core\r\nimport paddle\r\nimport paddle.fluid as fluid\r\nimport paddle.fluid.layers as layers\r\nimport random\r\nimport sys\r\n\r\nfrom op_test import OpTest\r\nsys.path.append(\"./rnn\")\r\nfrom rnn_numpy import SimpleRNN, LSTM, GRU\r\nfrom convert import get_params_for_net\r\n\r\nrandom.seed(2)\r\nnp.set_printoptions(threshold=np.inf)\r\npaddle.enable_static()\r\n\r\n\r\nclass TestRNNOp(OpTest):\r\n def get_weight_names(self):\r\n weight_names = []\r\n for i in range(self.num_layers):\r\n for j in range(0, 2 * self.direction_num):\r\n weight_names.append(\"{}.weight_{}\".format(i, j))\r\n for i in range(self.num_layers):\r\n for j in range(0, 2 * self.direction_num):\r\n weight_names.append(\"{}.bias_{}\".format(i, j))\r\n return weight_names\r\n\r\n def setUp(self):\r\n self.op_type = \"rnn\"\r\n self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64\r\n self.sequence_length = None if core.is_compiled_with_rocm(\r\n ) else np.array(\r\n [12, 11, 10, 9, 8], dtype=np.int32)\r\n self.num_layers = 1\r\n self.is_bidirec = False\r\n self.mode = \"LSTM\"\r\n self.is_test = False\r\n self.dropout = 0.0\r\n self.set_attrs()\r\n\r\n self.direction_num = 2 if self.is_bidirec else 1\r\n direction = \"bidirectional\" if self.is_bidirec else \"forward\"\r\n seq_length = 12\r\n batch_size = 5\r\n input_size = 3\r\n hidden_size = 2\r\n\r\n input = np.random.uniform(\r\n low=-0.1, high=0.1,\r\n size=(seq_length, batch_size, input_size)).astype(self.dtype)\r\n if self.sequence_length is not None:\r\n input[11][1:][:] = 0\r\n input[10][2:][:] = 0\r\n input[9][3:][:] = 0\r\n input[8][4:][:] = 0\r\n\r\n rnn1 = LSTM(\r\n input_size,\r\n hidden_size,\r\n num_layers=self.num_layers,\r\n time_major=True,\r\n direction=direction,\r\n dropout=self.dropout,\r\n dtype=self.dtype)\r\n\r\n flat_w = get_params_for_net(rnn1)\r\n output, (last_hidden, last_cell) = rnn1(\r\n input, sequence_length=self.sequence_length)\r\n\r\n if core.is_compiled_with_rocm():\r\n\r\n def rocm_rnn_get_place():\r\n places = [core.CUDAPlace(0)]\r\n return places\r\n\r\n self._get_places = rocm_rnn_get_place\r\n\r\n init_h = np.zeros((self.num_layers * self.direction_num, batch_size,\r\n hidden_size)).astype(self.dtype)\r\n init_c = np.zeros((self.num_layers * self.direction_num, batch_size,\r\n hidden_size)).astype(self.dtype)\r\n state_out = np.ndarray((300)).astype(\"uint8\")\r\n\r\n self.inputs = {\r\n 'Input': input,\r\n 'WeightList': flat_w,\r\n 'PreState': [('init_h', init_h), ('init_c', init_c)],\r\n 'SequenceLength': self.sequence_length\r\n }\r\n if self.sequence_length is None:\r\n self.inputs = {\r\n 'Input': input,\r\n 'WeightList': flat_w,\r\n 'PreState': [('init_h', init_h), ('init_c', init_c)],\r\n }\r\n self.attrs = {\r\n 'dropout_prob': 
self.dropout,\r\n 'is_bidirec': self.is_bidirec,\r\n 'input_size': input_size,\r\n 'hidden_size': hidden_size,\r\n 'num_layers': self.num_layers,\r\n 'mode': self.mode,\r\n 'is_test': self.is_test\r\n }\r\n self.outputs = {\r\n 'Out': output,\r\n \"State\": [('last_hidden', last_hidden), ('last_cell', last_cell)],\r\n 'Reserve': np.ndarray((400)).astype(\"uint8\"),\r\n 'DropoutState': state_out\r\n }\r\n\r\n def test_output(self):\r\n self.check_output(no_check_set=['Reserve', 'DropoutState'])\r\n\r\n def set_attrs(self):\r\n pass\r\n\r\n def test_grad(self):\r\n if not self.is_test:\r\n var_name_list = self.get_weight_names()\r\n grad_check_list = ['Input', 'init_h', 'init_c']\r\n grad_check_list.extend(var_name_list)\r\n self.check_grad(\r\n set(grad_check_list), ['Out', 'last_hidden', 'last_cell'])\r\n\r\n\r\nclass TestRNNOp1(TestRNNOp):\r\n def set_attrs(self):\r\n self.sequence_length = None\r\n\r\n\r\nclass TestRNNOp2(TestRNNOp):\r\n def set_attrs(self):\r\n self.sequence_length = None\r\n self.is_bidirec = True\r\n\r\n\r\nclass TestRNNOp3(TestRNNOp):\r\n def set_attrs(self):\r\n self.is_test = True\r\n self.sequence_length = None\r\n\r\n\r\nclass TestRNNOp4(TestRNNOp):\r\n def set_attrs(self):\r\n self.is_test = True\r\n self.sequence_length = None\r\n self.is_bidirec = True\r\n\r\n\r\nclass TestRNNOp5(TestRNNOp):\r\n def set_attrs(self):\r\n self.num_layers = 2\r\n\r\n\r\nclass TestRNNOp6(TestRNNOp):\r\n def set_attrs(self):\r\n self.num_layers = 2\r\n self.is_bidirec = True\r\n\r\n\r\nclass TestRNNOp7(TestRNNOp):\r\n def set_attrs(self):\r\n self.num_layers = 2\r\n self.is_bidirec = True\r\n self.is_test = True\r\n\r\n\r\nclass TestRNNOp8(TestRNNOp):\r\n def set_attrs(self):\r\n self.num_layers = 2\r\n self.is_bidirec = True\r\n self.sequence_length = None\r\n\r\n\r\nclass TestRNNOp9(TestRNNOp):\r\n def set_attrs(self):\r\n self.num_layers = 3\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n",
"# copyright (c) 2020 paddlepaddle authors. all rights reserved.\n#\n# licensed under the apache license, version 2.0 (the \"license\");\n# you may not use this file except in compliance with the license.\n# you may obtain a copy of the license at\n#\n# http://www.apache.org/licenses/license-2.0\n#\n# unless required by applicable law or agreed to in writing, software\n# distributed under the license is distributed on an \"as is\" basis,\n# without warranties or conditions of any kind, either express or implied.\n# see the license for the specific language governing permissions and\n# limitations under the license.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import core\nfrom test_imperative_base import new_program_scope\nfrom simple_nets import simple_fc_net\n\n\nclass TestCompiledProgram(unittest.TestCase):\n def setUp(self):\n self.seed = 100\n self.img = np.random.random(size=(16, 784)).astype('float32')\n self.label = np.random.randint(\n low=0, high=10, size=[16, 1], dtype=np.int64)\n with new_program_scope():\n paddle.seed(self.seed)\n paddle.framework.random._manual_program_seed(self.seed)\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n loss = simple_fc_net()\n exe.run(fluid.default_startup_program())\n\n loss_data, = exe.run(fluid.default_main_program(),\n feed={\"image\": self.img,\n \"label\": self.label},\n fetch_list=[loss.name])\n self.loss = loss_data[0]\n\n def test_compiled_program_base(self):\n with new_program_scope():\n paddle.seed(self.seed)\n paddle.framework.random._manual_program_seed(self.seed)\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n loss = simple_fc_net()\n exe.run(fluid.default_startup_program())\n compiled_prog = fluid.CompiledProgram(fluid.default_main_program())\n\n loss_data, = exe.run(compiled_prog,\n feed={\"image\": self.img,\n \"label\": self.label},\n fetch_list=[loss.name])\n self.assertTrue(np.array_equal(loss_data[0], self.loss))\n\n def test_compiled_program_with_data_parallel(self):\n with new_program_scope():\n paddle.seed(self.seed)\n paddle.framework.random._manual_program_seed(self.seed)\n place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n loss = simple_fc_net()\n exe.run(fluid.default_startup_program())\n compiled_prog = fluid.CompiledProgram(fluid.default_main_program(\n )).with_data_parallel(\n loss_name=loss.name, places=[place])\n\n loss_data, = exe.run(compiled_prog,\n feed={\"image\": self.img,\n \"label\": self.label},\n fetch_list=[loss.name])\n self.assertTrue(np.array_equal(loss_data[0], self.loss))\n\n\nclass TestCompiledProgramError(unittest.TestCase):\n def test_program_or_graph_error(self):\n self.assertRaises(TypeError, fluid.CompiledProgram, \"program\")\n\n def build_simple_model(self):\n img = fluid.layers.data(\n name='image', shape=[1, 28, 28], dtype='float32')\n label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n prediction = fluid.layers.fc(input=img, size=10, act='softmax')\n loss = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_loss = fluid.layers.mean(loss)\n\n def compile_program_not_compiled(self):\n with fluid.program_guard(fluid.Program()):\n # build model\n self.build_simple_model()\n # compile program\n program = fluid.default_main_program()\n compiled_program = 
fluid.CompiledProgram(\n program).with_data_parallel()\n return compiled_program\n\n def compile_program(self):\n with fluid.program_guard(fluid.Program()):\n # build model\n self.build_simple_model()\n # compile program\n program = fluid.default_main_program()\n compiled_program = fluid.CompiledProgram(program)\n scope = fluid.global_scope()\n place = fluid.CPUPlace()\n compiled_program._compile(scope, place)\n return compiled_program, scope, place\n\n def test_compile_scope_error(self):\n compiled_program, _, place = self.compile_program()\n new_scope = core.Scope()\n with self.assertRaises(ValueError):\n compiled_program._compile(new_scope, place)\n\n def test_compile_place_error(self):\n # need create different place\n if core.is_compiled_with_cuda():\n compiled_program, scope, _ = self.compile_program()\n new_place = fluid.CUDAPlace(0)\n with self.assertRaises(ValueError):\n compiled_program._compile(scope, new_place)\n\n def test_share_vars_from_error_no_parallel(self):\n with fluid.program_guard(fluid.Program()):\n source_program, _, _ = self.compile_program()\n self.build_simple_model()\n # compile program\n program = fluid.default_main_program()\n compiled_program = fluid.CompiledProgram(\n program).with_data_parallel(share_vars_from=source_program)\n scope = fluid.global_scope()\n place = fluid.CPUPlace()\n with self.assertRaises(ValueError):\n compiled_program._compile(scope, place)\n\n def test_share_vars_from_error_no_executor(self):\n with fluid.program_guard(fluid.Program()):\n source_program = self.compile_program_not_compiled()\n self.build_simple_model()\n # compile program\n program = fluid.default_main_program()\n compiled_program = fluid.CompiledProgram(\n program).with_data_parallel(share_vars_from=source_program)\n scope = fluid.global_scope()\n place = fluid.CPUPlace()\n with self.assertRaises(ValueError):\n compiled_program._compile(scope, place)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest\nfrom functools import reduce\nfrom operator import mul\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom test_layer_norm_op import _reference_layer_norm_naive, _reference_layer_norm_grad\n\npaddle.enable_static()\n\nSEED = 2021\nEPOCH = 100\n\nfrom op_test import _set_use_system_allocator\n\n_set_use_system_allocator(False)\n\n\nclass TestLayerNormOp(unittest.TestCase):\n def setUp(self):\n self.use_cudnn = True\n self.set_npu()\n self.init_dtype()\n\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n\n def init_dtype(self):\n self.dtype = np.float32\n self.atol = 1e-4\n\n def __assert_close(self, tensor, np_array, msg, atol=1e-4):\n self.assertTrue(\n np.allclose(\n np.array(tensor).astype(np_array.dtype), np_array, atol=atol),\n msg)\n\n def check_forward_backward(self,\n shape,\n begin_norm_axis,\n has_scale=True,\n has_bias=True,\n y_grad_scale=1.0,\n use_mkldnn=False):\n def test_with_place(place,\n shape,\n begin_norm_axis,\n use_mkldnn=use_mkldnn):\n # attr\n epsilon = 0.00001\n x_shape = shape\n D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)\n scale_shape = [D]\n\n np.random.seed(123)\n x = np.random.random_sample(x_shape).astype(self.dtype)\n scale = np.random.random_sample(scale_shape).astype(\n np.float32) if has_scale else None\n bias = np.random.random_sample(scale_shape).astype(\n np.float32) if has_bias else None\n y_grad = (np.random.random_sample(x_shape) *\n y_grad_scale).astype(self.dtype)\n\n # reference forward & backward\n y, mean, variance = _reference_layer_norm_naive(\n x, scale, bias, epsilon, begin_norm_axis)\n x_grad, scale_grad, bias_grad = _reference_layer_norm_grad(\n x, y_grad, scale, bias, mean, variance, begin_norm_axis)\n\n var_dict = locals()\n var_dict['y@GRAD'] = y_grad\n var_names = ['x', 'mean', 'variance', 'y', 'y@GRAD']\n if has_scale:\n var_names += ['scale']\n if has_bias:\n var_names += ['bias']\n ground_truth = {name: var_dict[name] for name in var_names}\n\n program = fluid.Program()\n with fluid.program_guard(program):\n block = program.global_block()\n for name in ground_truth:\n block.create_var(\n name=name,\n dtype=self.dtype,\n shape=ground_truth[name].shape)\n inputs = {\"X\": block.var('x')}\n fetch_list = [\n 'y',\n 'mean',\n 'variance',\n 'x@GRAD',\n ]\n if has_scale:\n inputs[\"Scale\"] = block.var('scale')\n fetch_list += ['scale@GRAD']\n if has_bias:\n inputs[\"Bias\"] = block.var('bias')\n fetch_list += ['bias@GRAD']\n layer_norm_op = block.append_op(\n type=\"layer_norm\",\n inputs=inputs,\n outputs={\n \"Y\": block.var('y'),\n \"Mean\": block.var('mean'), # share the same memory\n \"Variance\":\n block.var('variance'), # share the same memory\n },\n attrs={\n \"epsilon\": epsilon,\n 
\"begin_norm_axis\": begin_norm_axis,\n \"use_mkldnn\": use_mkldnn\n })\n # generate backward op_desc\n grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(\n layer_norm_op.desc, set(), [])\n grad_op_desc = grad_op_desc_list[0]\n new_op_desc = block.desc.append_op()\n new_op_desc.copy_from(grad_op_desc)\n for var_name in grad_op_desc.output_arg_names():\n block.desc.var(var_name.encode(\"ascii\"))\n grad_op_desc.infer_var_type(block.desc)\n grad_op_desc.infer_shape(block.desc)\n for arg in grad_op_desc.output_arg_names():\n grad_var = block.desc.find_var(arg.encode(\"ascii\"))\n grad_var.set_dtype(core.VarDesc.VarType.FP32)\n\n program._sync_with_cpp()\n exe = fluid.Executor(place)\n out = exe.run(program,\n feed={\n name: var_dict[name]\n for name in ['x', 'scale', 'bias', 'y@GRAD']\n },\n fetch_list=fetch_list)\n self.__assert_close(y, out[0], \"y\", self.atol)\n self.__assert_close(mean, out[1], \"mean\")\n self.__assert_close(variance, out[2], \"variance\", 1e-3)\n self.__assert_close(x_grad, out[3], \"x_grad\", 1e-2)\n if has_scale:\n self.__assert_close(scale_grad,\n out[fetch_list.index('scale@GRAD')],\n \"scale_grad\", 1e-2)\n if has_bias:\n self.__assert_close(bias_grad,\n out[fetch_list.index('bias@GRAD')],\n \"bias_grad\", self.atol)\n\n test_with_place(self.place, shape, begin_norm_axis)\n\n def test_check_forward_backward_with_scale_and_bias(self):\n self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1)\n self.check_forward_backward(\n shape=[2, 3, 4, 5],\n begin_norm_axis=1,\n has_scale=False,\n has_bias=True)\n self.check_forward_backward(\n shape=[2, 3, 4, 5],\n begin_norm_axis=1,\n has_scale=True,\n has_bias=False)\n self.check_forward_backward(\n shape=[2, 3, 4, 5],\n begin_norm_axis=1,\n has_scale=False,\n has_bias=False)\n self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3)\n\n\nclass TestLayerNormOpFP16(TestLayerNormOp):\n def init_dtype(self):\n self.dtype = np.float16\n self.atol = 1e-2\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport unittest\nimport numpy as np\n\nimport paddle.fluid.core as core\nfrom op_test import OpTest, check_out_dtype\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\nimport paddle.nn.functional as F\n\n\ndef adaptive_start_index(index, input_size, output_size):\n return int(np.floor(index * input_size / output_size))\n\n\ndef adaptive_end_index(index, input_size, output_size):\n return int(np.ceil((index + 1) * input_size / output_size))\n\n\ndef adaptive_pool2d_forward(x, output_size, data_format='NCHW',\n pool_type=\"max\"):\n\n N = x.shape[0]\n C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \\\n else [x.shape[3], x.shape[1], x.shape[2]]\n\n if (isinstance(output_size, int) or output_size == None):\n H_out = output_size\n W_out = output_size\n output_size = [H_out, W_out]\n else:\n H_out, W_out = output_size\n\n if output_size[0] == None:\n output_size[0] = H\n H_out = H\n if output_size[1] == None:\n output_size[1] = W\n W_out = W\n\n out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \\\n else np.zeros((N, H_out, W_out, C))\n\n for i in range(H_out):\n in_h_start = adaptive_start_index(i, H, output_size[0])\n in_h_end = adaptive_end_index(i, H, output_size[0])\n\n for j in range(W_out):\n in_w_start = adaptive_start_index(j, W, output_size[1])\n in_w_end = adaptive_end_index(j, W, output_size[1])\n\n if data_format == 'NCHW':\n x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end]\n if pool_type == 'avg':\n field_size = (\n (in_h_end - in_h_start) * (in_w_end - in_w_start))\n out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size\n elif pool_type == 'max':\n out[:, :, i, j] = np.max(x_masked, axis=(2, 3))\n elif data_format == 'NHWC':\n x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :]\n if pool_type == 'avg':\n field_size = (\n (in_h_end - in_h_start) * (in_w_end - in_w_start))\n out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size\n elif pool_type == 'max':\n out[:, i, j, :] = np.max(x_masked, axis=(1, 2))\n return out\n\n\nclass TestAdaptiveMaxPool2DAPI(unittest.TestCase):\n def setUp(self):\n self.x_np = np.random.random([2, 3, 7, 7]).astype(\"float32\")\n self.res_1_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[3, 3], pool_type=\"max\")\n\n self.res_2_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=5, pool_type=\"max\")\n\n self.res_3_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[2, 5], pool_type=\"max\")\n \"\"\"\n self.res_4_np = adaptive_pool2d_forward(\n x=self.x_np,\n output_size=[3, 3],\n pool_type=\"max\",\n data_format=\"NHWC\")\n \"\"\"\n self.res_5_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[None, 3], pool_type=\"max\")\n\n def test_static_graph(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else 
[False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n paddle.enable_static()\n x = paddle.fluid.data(name=\"x\", shape=[2, 3, 7, 7], dtype=\"float32\")\n\n out_1 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, output_size=[3, 3])\n\n out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)\n\n out_3 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, output_size=[2, 5])\n\n #out_4 = paddle.nn.functional.adaptive_max_pool2d(\n # x=x, output_size=[3, 3], data_format=\"NHWC\")\n\n out_5 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, output_size=[None, 3])\n\n exe = paddle.static.Executor(place=place)\n [res_1, res_2, res_3, res_5] = exe.run(\n fluid.default_main_program(),\n feed={\"x\": self.x_np},\n fetch_list=[out_1, out_2, out_3, out_5])\n\n assert np.allclose(res_1, self.res_1_np)\n\n assert np.allclose(res_2, self.res_2_np)\n\n assert np.allclose(res_3, self.res_3_np)\n\n #assert np.allclose(res_4, self.res_4_np)\n\n assert np.allclose(res_5, self.res_5_np)\n\n def test_dynamic_graph(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n paddle.disable_static(place=place)\n x = paddle.to_tensor(self.x_np)\n\n out_1 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, return_mask=False, output_size=[3, 3])\n\n out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)\n\n out_3 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, output_size=[2, 5])\n\n #out_4 = paddle.nn.functional.adaptive_max_pool2d(\n # x=x, output_size=[3, 3], data_format=\"NHWC\")\n\n out_5 = paddle.nn.functional.adaptive_max_pool2d(\n x=x, output_size=[None, 3])\n\n assert np.allclose(out_1.numpy(), self.res_1_np)\n\n assert np.allclose(out_2.numpy(), self.res_2_np)\n\n assert np.allclose(out_3.numpy(), self.res_3_np)\n\n #assert np.allclose(out_4.numpy(), self.res_4_np)\n\n assert np.allclose(out_5.numpy(), self.res_5_np)\n\n\nclass TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):\n def setUp(self):\n self.x_np = np.random.random([2, 3, 7, 7]).astype(\"float32\")\n self.res_1_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[3, 3], pool_type=\"max\")\n\n self.res_2_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=5, pool_type=\"max\")\n\n self.res_3_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[2, 5], pool_type=\"max\")\n\n #self.res_4_np = adaptive_pool2d_forward(\n # x=self.x_np,\n # output_size=[3, 3],\n # pool_type=\"max\",\n # data_format=\"NHWC\")\n\n self.res_5_np = adaptive_pool2d_forward(\n x=self.x_np, output_size=[None, 3], pool_type=\"max\")\n\n def test_static_graph(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n paddle.enable_static()\n x = paddle.fluid.data(name=\"x\", shape=[2, 3, 7, 7], dtype=\"float32\")\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])\n out_1 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)\n out_2 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])\n out_3 = adaptive_max_pool(x=x)\n\n # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(\n # output_size=[3, 3], data_format=\"NHWC\")\n # out_4 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(\n output_size=[None, 3])\n out_5 = adaptive_max_pool(x=x)\n\n exe = paddle.static.Executor(place=place)\n [res_1, res_2, 
res_3, res_5] = exe.run(\n fluid.default_main_program(),\n feed={\"x\": self.x_np},\n fetch_list=[out_1, out_2, out_3, out_5])\n\n assert np.allclose(res_1, self.res_1_np)\n\n assert np.allclose(res_2, self.res_2_np)\n\n assert np.allclose(res_3, self.res_3_np)\n\n #assert np.allclose(res_4, self.res_4_np)\n\n assert np.allclose(res_5, self.res_5_np)\n\n def test_dynamic_graph(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n paddle.disable_static(place=place)\n x = paddle.to_tensor(self.x_np)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])\n out_1 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)\n out_2 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])\n out_3 = adaptive_max_pool(x=x)\n\n #adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(\n # output_size=[3, 3], data_format=\"NHWC\")\n #out_4 = adaptive_max_pool(x=x)\n\n adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(\n output_size=[None, 3])\n out_5 = adaptive_max_pool(x=x)\n\n assert np.allclose(out_1.numpy(), self.res_1_np)\n\n assert np.allclose(out_2.numpy(), self.res_2_np)\n\n assert np.allclose(out_3.numpy(), self.res_3_np)\n\n #assert np.allclose(out_4.numpy(), self.res_4_np)\n\n assert np.allclose(out_5.numpy(), self.res_5_np)\n\n\nclass TestOutDtype(unittest.TestCase):\n def test_max_pool(self):\n api_fn = F.adaptive_max_pool2d\n shape = [1, 3, 32, 32]\n check_out_dtype(\n api_fn,\n in_specs=[(shape, )],\n expect_dtypes=['float32', 'float64'],\n output_size=16)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport contextlib\nimport unittest\nimport numpy as np\nimport six\nimport pickle\nimport sys\n\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.dygraph as dygraph\nfrom paddle.fluid import core\nfrom paddle.fluid.optimizer import SGDOptimizer\nfrom paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm\nfrom paddle.fluid.dygraph.base import to_variable\nfrom paddle.fluid.layer_helper import LayerHelper\nimport math\nfrom test_dist_base import runtime_main, TestParallelDyGraphRunnerBase\n\nbatch_size = 64\nmomentum_rate = 0.9\nl2_decay = 1.2e-4\n\ntrain_parameters = {\n \"input_size\": [3, 224, 224],\n \"input_mean\": [0.485, 0.456, 0.406],\n \"input_std\": [0.229, 0.224, 0.225],\n \"learning_strategy\": {\n \"name\": \"cosine_decay\",\n \"batch_size\": batch_size,\n \"epochs\": [40, 80, 100],\n \"steps\": [0.1, 0.01, 0.001, 0.0001]\n },\n \"batch_size\": batch_size,\n \"lr\": 0.0125,\n \"total_images\": 6149,\n \"num_epochs\": 200\n}\n\n\ndef optimizer_setting(params, parameter_list=None):\n ls = params[\"learning_strategy\"]\n if \"total_images\" not in params:\n total_images = 6149\n else:\n total_images = params[\"total_images\"]\n\n batch_size = ls[\"batch_size\"]\n step = int(math.ceil(float(total_images) / batch_size))\n bd = [step * e for e in ls[\"epochs\"]]\n lr = params[\"lr\"]\n num_epochs = params[\"num_epochs\"]\n if fluid.in_dygraph_mode():\n optimizer = fluid.optimizer.Momentum(\n learning_rate=fluid.layers.cosine_decay(\n learning_rate=lr, step_each_epoch=step, epochs=num_epochs),\n momentum=momentum_rate,\n regularization=fluid.regularizer.L2Decay(l2_decay),\n parameter_list=parameter_list)\n else:\n optimizer = fluid.optimizer.Momentum(\n learning_rate=fluid.layers.cosine_decay(\n learning_rate=lr, step_each_epoch=step, epochs=num_epochs),\n momentum=momentum_rate,\n regularization=fluid.regularizer.L2Decay(l2_decay))\n\n return optimizer\n\n\nclass ConvBNLayer(fluid.dygraph.Layer):\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n stride=1,\n groups=1,\n act=None):\n super(ConvBNLayer, self).__init__()\n\n self._conv = Conv2D(\n num_channels=num_channels,\n num_filters=num_filters,\n filter_size=filter_size,\n stride=stride,\n padding=(filter_size - 1) // 2,\n groups=groups,\n act=None,\n bias_attr=False)\n\n # disable BatchNorm in multi-card. 
disable LayerNorm because of complex input_shape\n # self._batch_norm = BatchNorm(num_filters, act=act)\n\n def forward(self, inputs):\n y = self._conv(inputs)\n # y = self._batch_norm(y)\n\n return y\n\n\nclass SqueezeExcitation(fluid.dygraph.Layer):\n def __init__(self, num_channels, reduction_ratio):\n\n super(SqueezeExcitation, self).__init__()\n self._num_channels = num_channels\n self._pool = Pool2D(pool_size=0, pool_type='avg', global_pooling=True)\n stdv = 1.0 / math.sqrt(num_channels * 1.0)\n self._squeeze = Linear(\n num_channels,\n num_channels // reduction_ratio,\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Uniform(-stdv, stdv)),\n act='relu')\n stdv = 1.0 / math.sqrt(num_channels / 16.0 * 1.0)\n self._excitation = Linear(\n num_channels // reduction_ratio,\n num_channels,\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Uniform(-stdv, stdv)),\n act='sigmoid')\n\n def forward(self, input):\n y = self._pool(input)\n y = fluid.layers.reshape(y, shape=[-1, self._num_channels])\n y = self._squeeze(y)\n y = self._excitation(y)\n y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)\n return y\n\n\nclass BottleneckBlock(fluid.dygraph.Layer):\n def __init__(self,\n num_channels,\n num_filters,\n stride,\n cardinality,\n reduction_ratio,\n shortcut=True):\n super(BottleneckBlock, self).__init__()\n\n self.conv0 = ConvBNLayer(\n num_channels=num_channels,\n num_filters=num_filters,\n filter_size=1,\n act=\"relu\")\n self.conv1 = ConvBNLayer(\n num_channels=num_filters,\n num_filters=num_filters,\n filter_size=3,\n stride=stride,\n groups=cardinality,\n act=\"relu\")\n self.conv2 = ConvBNLayer(\n num_channels=num_filters,\n num_filters=num_filters * 2,\n filter_size=1,\n act=None)\n\n self.scale = SqueezeExcitation(\n num_channels=num_filters * 2, reduction_ratio=reduction_ratio)\n\n if not shortcut:\n self.short = ConvBNLayer(\n num_channels=num_channels,\n num_filters=num_filters * 2,\n filter_size=1,\n stride=stride)\n\n self.shortcut = shortcut\n\n self._num_channels_out = num_filters * 2\n\n def forward(self, inputs):\n y = self.conv0(inputs)\n conv1 = self.conv1(y)\n conv2 = self.conv2(conv1)\n scale = self.scale(conv2)\n\n if self.shortcut:\n short = inputs\n else:\n short = self.short(inputs)\n\n y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')\n return y\n\n\nclass SeResNeXt(fluid.dygraph.Layer):\n def __init__(self, layers=50, class_dim=102):\n super(SeResNeXt, self).__init__()\n\n self.layers = layers\n supported_layers = [50, 101, 152]\n assert layers in supported_layers, \\\n \"supported layers are {} but input layer is {}\".format(supported_layers, layers)\n\n if layers == 50:\n cardinality = 32\n reduction_ratio = 16\n depth = [3, 4, 6, 3]\n num_filters = [128, 256, 512, 1024]\n self.conv0 = ConvBNLayer(\n num_channels=3,\n num_filters=64,\n filter_size=7,\n stride=2,\n act='relu')\n self.pool = Pool2D(\n pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n elif layers == 101:\n cardinality = 32\n reduction_ratio = 16\n depth = [3, 4, 23, 3]\n num_filters = [128, 256, 512, 1024]\n self.conv0 = ConvBNLayer(\n num_channels=3,\n num_filters=64,\n filter_size=7,\n stride=2,\n act='relu')\n self.pool = Pool2D(\n pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n elif layers == 152:\n cardinality = 64\n reduction_ratio = 16\n depth = [3, 8, 36, 3]\n num_filters = [128, 256, 512, 1024]\n self.conv0 = ConvBNLayer(\n num_channels=3,\n num_filters=64,\n filter_size=3,\n stride=2,\n act='relu')\n self.conv1 = 
ConvBNLayer(\n num_channels=64,\n num_filters=64,\n filter_size=3,\n stride=1,\n act='relu')\n self.conv2 = ConvBNLayer(\n num_channels=64,\n num_filters=128,\n filter_size=3,\n stride=1,\n act='relu')\n self.pool = Pool2D(\n pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n\n self.bottleneck_block_list = []\n num_channels = 64\n for block in range(len(depth)):\n shortcut = False\n for i in range(depth[block]):\n bottleneck_block = self.add_sublayer(\n 'bb_%d_%d' % (block, i),\n BottleneckBlock(\n num_channels=num_channels,\n num_filters=num_filters[block],\n stride=2 if i == 0 and block != 0 else 1,\n cardinality=cardinality,\n reduction_ratio=reduction_ratio,\n shortcut=shortcut))\n num_channels = bottleneck_block._num_channels_out\n self.bottleneck_block_list.append(bottleneck_block)\n shortcut = True\n\n self.pool2d_avg = Pool2D(\n pool_size=7, pool_type='avg', global_pooling=True)\n stdv = 1.0 / math.sqrt(2048 * 1.0)\n\n self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1\n\n self.out = Linear(\n self.pool2d_avg_output,\n class_dim,\n param_attr=fluid.param_attr.ParamAttr(\n initializer=fluid.initializer.Uniform(-stdv, stdv)))\n\n def forward(self, inputs):\n if self.layers == 50 or self.layers == 101:\n y = self.conv0(inputs)\n y = self.pool(y)\n elif self.layers == 152:\n y = self.conv0(inputs)\n y = self.conv1(inputs)\n y = self.conv2(inputs)\n y = self.pool(y)\n\n for bottleneck_block in self.bottleneck_block_list:\n y = bottleneck_block(y)\n y = self.pool2d_avg(y)\n y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])\n y = self.out(y)\n return y\n\n\nclass TestSeResNeXt(TestParallelDyGraphRunnerBase):\n def get_model(self):\n model = SeResNeXt()\n train_reader = paddle.batch(\n paddle.dataset.flowers.test(use_xmap=False),\n batch_size=train_parameters[\"batch_size\"],\n drop_last=True)\n optimizer = optimizer_setting(\n train_parameters, parameter_list=model.parameters())\n return model, train_reader, optimizer\n\n def run_one_loop(self, model, opt, data):\n bs = len(data)\n dy_x_data = np.array([x[0].reshape(3, 224, 224)\n for x in data]).astype('float32')\n dy_x_data = dy_x_data / 255.0\n y_data = np.array([x[1] for x in data]).astype('int64').reshape(bs, 1)\n img = to_variable(dy_x_data)\n label = to_variable(y_data)\n label.stop_gradient = True\n\n out = model(img)\n softmax_out = fluid.layers.softmax(out, use_cudnn=False)\n loss = fluid.layers.cross_entropy(input=softmax_out, label=label)\n avg_loss = fluid.layers.mean(x=loss)\n return avg_loss\n\n\nif __name__ == \"__main__\":\n runtime_main(TestSeResNeXt)\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\n\npaddle.enable_static()\n\n\ndef sigmoid_array(x):\n return 1 / (1 + np.exp(-x))\n\n\[email protected](not paddle.is_compiled_with_npu(),\n \"core is not compiled with NPU\")\nclass TestLogLossOp(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = 'log_loss'\n self.place = paddle.NPUPlace(0)\n\n self.init_dtype()\n\n self.set_inputs()\n self.set_attrs()\n self.set_outputs()\n\n def set_inputs(self):\n samples_num = 100\n x = np.random.random((samples_num, 1)).astype(self.dtype)\n predicted = sigmoid_array(x)\n labels = np.random.randint(0, 2, (samples_num, 1)).astype(self.dtype)\n self.inputs = {'Predicted': predicted, 'Labels': labels}\n\n def set_attrs(self):\n epsilon = 1e-7\n self.attrs = {'epsilon': epsilon}\n\n def set_outputs(self):\n epsilon = self.attrs['epsilon']\n labels = self.inputs['Labels']\n predicted = self.inputs['Predicted']\n loss = -labels * np.log(predicted + epsilon) - (\n 1 - labels) * np.log(1 - predicted + epsilon)\n self.outputs = {'Loss': loss}\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad_with_place(self.place, ['Predicted'], 'Loss')\n\n\[email protected](not paddle.is_compiled_with_npu(),\n \"core is not compiled with NPU\")\nclass TestLogLossOpError(unittest.TestCase):\n def test_errors(self):\n with fluid.program_guard(fluid.Program()):\n\n def test_x_type():\n input_data = np.random.random(100, 1).astype(\"float32\")\n fluid.layers.log_loss(input_data)\n\n self.assertRaises(TypeError, test_x_type)\n\n def test_x_dtype():\n x2 = fluid.layers.data(name='x2', shape=[100, 1], dtype='int32')\n fluid.layers.log_loss(x2)\n\n self.assertRaises(TypeError, test_x_dtype)\n\n def test_label_type():\n input_data = np.random.random(100, 1).astype(\"float32\")\n fluid.layers.log_loss(input_data)\n\n self.assertRaises(TypeError, test_label_type)\n\n def test_label_dtype():\n x2 = fluid.layers.data(name='x2', shape=[100, 1], dtype='int32')\n fluid.layers.log_loss(x2)\n\n self.assertRaises(TypeError, test_label_dtype)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid import Program, program_guard\n\n\nclass TestTensorArrayToTensorError(unittest.TestCase):\n \"\"\"Tensor_array_to_tensor error message enhance\"\"\"\n\n def test_errors(self):\n with program_guard(Program()):\n input_data = numpy.random.random((2, 4)).astype(\"float32\")\n\n def test_Variable():\n fluid.layers.tensor_array_to_tensor(input=input_data)\n\n self.assertRaises(TypeError, test_Variable)\n\n def test_list_Variable():\n fluid.layers.tensor_array_to_tensor(input=[input_data])\n\n self.assertRaises(TypeError, test_list_Variable)\n\n\nclass TestLoDTensorArrayConcat(unittest.TestCase):\n \"\"\"Test case for concat mode of tensor_array_to_tensor.\"\"\"\n\n def setUp(self):\n self.op_type = \"tensor_array_to_tensor\"\n self.attrs = {\"axis\": 0}\n self.outputs = [\"Out\"]\n\n def test_get_set(self):\n scope = core.Scope()\n program = fluid.Program()\n block = program.global_block()\n\n input_arr = block.create_var(\n name=\"tmp_lod_tensor_array\",\n type=core.VarDesc.VarType.LOD_TENSOR_ARRAY)\n input_arr.persistable = True\n input_arr_var = scope.var('tmp_lod_tensor_array')\n input_tensor_array = input_arr_var.get_lod_tensor_array()\n self.assertEqual(0, len(input_tensor_array))\n\n cpu = core.CPUPlace()\n for i in range(10):\n t = core.LoDTensor()\n if i == 0:\n t.set(numpy.array([[i], [i]], dtype='float32'), cpu)\n else:\n t.set(numpy.array([[i]], dtype='float32'), cpu)\n input_tensor_array.append(t)\n\n self.assertEqual(10, len(input_tensor_array))\n\n random_grad = numpy.random.random_sample([11]).astype(numpy.float32)\n\n y_out = block.create_var(name=\"Out\")\n y_out.persistable = True\n y_out_index = block.create_var(name=\"OutIndex\")\n y_out_index.persistable = True\n\n y_grad_arr = block.create_var(\n name='Out@GRAD', dtype='float32', shape=[11])\n y_grad_arr.persistable = True\n y_grad = scope.var('Out@GRAD')\n y_grad_tensor = y_grad.get_tensor()\n y_grad_tensor.set(random_grad, cpu)\n\n op = block.append_op(\n type=self.op_type,\n inputs={\"X\": input_arr},\n outputs={\"Out\": y_out,\n \"OutIndex\": y_out_index},\n attrs=self.attrs)\n\n out_grad = block.create_var(\n name=\"tmp_lod_tensor_array@GRAD\",\n type=core.VarDesc.VarType.LOD_TENSOR_ARRAY)\n out_grad.persistable = True\n\n grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(op.desc,\n set(), [])\n grad_op_desc = grad_op_desc_list[0]\n new_op_desc = block.desc.append_op()\n new_op_desc.copy_from(grad_op_desc)\n for var_name in grad_op_desc.output_arg_names():\n block.desc.var(var_name.encode(\"ascii\"))\n\n grad_op_desc.infer_var_type(block.desc)\n grad_op_desc.infer_shape(block.desc)\n for arg in grad_op_desc.output_arg_names():\n grad_var = 
block.desc.find_var(arg.encode(\"ascii\"))\n grad_var.set_dtype(core.VarDesc.VarType.FP32)\n\n fetch_list = []\n fetch_list.append(block.var('Out'))\n fetch_list.append(block.var('OutIndex'))\n\n exe = fluid.Executor(fluid.CPUPlace())\n out = exe.run(program, fetch_list=fetch_list, scope=scope)\n #print (\"index: \", numpy.array(out[1]))\n\n # test forward\n tensor_res = numpy.array(out[0])\n tensor_res_out_idx = numpy.array(out[1])\n tensor_gt = numpy.array(\n [0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float32')\n\n self.assertEqual(len(tensor_res), len(tensor_gt))\n self.assertEqual(len(tensor_res_out_idx), 10)\n\n for i in range(len(tensor_res)):\n self.assertEqual(tensor_res[i], tensor_gt[i])\n\n for i in range(len(tensor_res_out_idx)):\n if i == 0:\n self.assertEqual(tensor_res_out_idx[i], 2)\n else:\n self.assertEqual(tensor_res_out_idx[i], 1)\n\n # test backward\n grad_tensor = scope.var('tmp_lod_tensor_array@GRAD')\n grad_tensor_array = grad_tensor.get_lod_tensor_array()\n\n self.assertEqual(10, len(grad_tensor_array))\n\n for i in range(len(grad_tensor_array)):\n if i == 0:\n self.assertEqual(\n numpy.array(grad_tensor_array[i])[0],\n numpy.array(random_grad[i]))\n self.assertEqual(\n numpy.array(grad_tensor_array[i])[1],\n numpy.array(random_grad[i + 1]))\n if i == 1:\n self.assertEqual(\n numpy.array(grad_tensor_array[i]),\n numpy.array(random_grad[i + 1]))\n\n\nclass TestLoDTensorArrayStack(unittest.TestCase):\n \"\"\"Test case for stack mode of tensor_array_to_tensor.\"\"\"\n\n def setUp(self):\n self.op_type = \"tensor_array_to_tensor\"\n self.attrs = {\"axis\": 1, \"use_stack\": True}\n self.inputs = [\n numpy.random.rand(2, 3, 4).astype(\"float32\"),\n numpy.random.rand(2, 3, 4).astype(\"float32\"),\n numpy.random.rand(2, 3, 4).astype(\"float32\")\n ]\n self.outputs = [\n numpy.stack(\n self.inputs, axis=self.attrs[\"axis\"]), numpy.array(\n [x.shape[self.attrs[\"axis\"]] for x in self.inputs],\n dtype=\"int32\")\n ]\n self.input_grads = [numpy.ones_like(x) for x in self.inputs]\n self.set_program()\n for var in self.program.list_vars():\n # to avoid scope clearing after execution\n var.persistable = True\n\n def set_program(self):\n self.program = fluid.Program()\n with fluid.program_guard(self.program):\n self.array = array = fluid.layers.create_array(dtype='float32')\n idx = fluid.layers.fill_constant(shape=[1], dtype=\"int64\", value=0)\n for i, x in enumerate(self.inputs):\n x = fluid.layers.assign(x)\n fluid.layers.array_write(x, idx + i, array)\n output, output_index = fluid.layers.tensor_array_to_tensor(\n input=array, **self.attrs)\n loss = fluid.layers.reduce_sum(output)\n fluid.backward.append_backward(loss)\n self.output_vars = [output, output_index]\n\n def run_check(self, executor, scope):\n executor.run(self.program, scope=scope)\n for i, output in enumerate(self.outputs):\n numpy.allclose(\n numpy.array(scope.var(self.output_vars[i].name).get_tensor()),\n output,\n atol=0)\n tensor_array_grad = scope.var(self.array.name).get_lod_tensor_array()\n for i, input_grad in enumerate(self.input_grads):\n numpy.allclose(\n numpy.array(tensor_array_grad[i]), input_grad, atol=0)\n\n def test_cpu(self):\n scope = core.Scope()\n place = core.CPUPlace()\n executor = fluid.Executor(place)\n self.run_check(executor, scope)\n\n def test_gpu(self):\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n scope = core.Scope()\n executor = fluid.Executor(place)\n self.run_check(executor, scope)\n\n\nclass TestTensorArrayToTensorAPI(unittest.TestCase):\n def 
_test_case(self, inp1, inp2):\n x0 = fluid.layers.assign(inp1)\n x0.stop_gradient = False\n x1 = fluid.layers.assign(inp2)\n x1.stop_gradient = False\n i = fluid.layers.fill_constant(shape=[1], dtype=\"int64\", value=0)\n array = fluid.layers.create_array(dtype='float32')\n fluid.layers.array_write(x0, i, array)\n fluid.layers.array_write(x1, i + 1, array)\n output_stack, output_index_stack = fluid.layers.tensor_array_to_tensor(\n input=array, axis=1, use_stack=True)\n output_concat, output_index_concat = fluid.layers.tensor_array_to_tensor(\n input=array, axis=1, use_stack=False)\n return output_stack, output_index_stack, output_concat, output_index_concat\n\n def test_case(self):\n inp0 = numpy.random.rand(2, 3, 4).astype(\"float32\")\n inp1 = numpy.random.rand(2, 3, 4).astype(\"float32\")\n\n _outs_static = self._test_case(inp0, inp1)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n outs_static = exe.run(fetch_list=list(_outs_static))\n\n with fluid.dygraph.guard(place):\n outs_dynamic = self._test_case(inp0, inp1)\n\n for s, d in zip(outs_static, outs_dynamic):\n self.assertTrue(numpy.array_equal(s, d.numpy()))\n\n def test_while_loop_case(self):\n with fluid.dygraph.guard():\n zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)\n i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)\n ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)\n array = fluid.layers.create_array(dtype='float32')\n inp0 = numpy.random.rand(2, 3, 4).astype(\"float32\")\n x0 = fluid.layers.assign(inp0)\n fluid.layers.array_write(x0, zero, array)\n\n def cond(i, end, array):\n return fluid.layers.less_than(i, end)\n\n def body(i, end, array):\n prev = fluid.layers.array_read(array, i - 1)\n fluid.layers.array_write(prev, i, array)\n return i + 1, end, array\n\n _, _, array = fluid.layers.while_loop(cond, body, [i, ten, array])\n\n self.assertTrue(fluid.layers.array_length(array), 10)\n last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9)\n self.assertTrue(\n numpy.array_equal(\n fluid.layers.array_read(array, last).numpy(), inp0))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nimport paddle.fluid.layers as layers\nimport os\n\nfrom paddle.fluid import ParamAttr\nfrom paddle.fluid.contrib.layers import basic_lstm\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid.layers.control_flow import StaticRNN as PaddingRNN\n\nos.environ[\"CPU_NUM\"] = \"1\"\n\n\nclass RNNConfig(object):\n def __init__(self, model_type, rnn_model):\n self.model_type = model_type\n self.rnn_model = rnn_model\n\n self.vocab_size = 10000\n if self.model_type == \"test\":\n self.num_layers = 1\n self.batch_size = 2\n self.hidden_size = 10\n self.num_steps = 3\n self.init_scale = 0.1\n self.max_grad_norm = 5.0\n self.epoch_start_decay = 1\n self.max_epoch = 1\n self.dropout = 0.0\n self.lr_decay = 0.5\n self.base_learning_rate = 1.0\n elif self.model_type == \"small\":\n self.num_layers = 2\n self.batch_size = 20\n self.hidden_size = 200\n self.num_steps = 20\n self.init_scale = 0.1\n self.max_grad_norm = 5.0\n self.epoch_start_decay = 4\n self.max_epoch = 13\n self.dropout = 0.0\n self.lr_decay = 0.5\n self.base_learning_rate = 1.0\n elif self.model_type == \"medium\":\n self.num_layers = 2\n self.batch_size = 20\n self.hidden_size = 650\n self.num_steps = 35\n self.init_scale = 0.05\n self.max_grad_norm = 5.0\n self.epoch_start_decay = 6\n self.max_epoch = 39\n self.dropout = 0.5\n self.lr_decay = 0.8\n self.base_learning_rate = 1.0\n elif self.model_type == \"large\":\n self.num_layers = 2\n self.batch_size = 20\n self.hidden_size = 1500\n self.num_steps = 35\n self.init_scale = 0.04\n self.max_grad_norm = 10.0\n self.epoch_start_decay = 14\n self.max_epoch = 55\n self.dropout = 0.65\n self.lr_decay = 1.0 / 1.15\n self.base_learning_rate = 1.0\n else:\n raise ValueError('Unsupported model_type.')\n\n if rnn_model not in ('static', 'padding', 'cudnn', 'basic_lstm'):\n raise ValueError('Unsupported rnn_model.')\n\n self.batch_size = 12\n self.max_epoch = 3\n self.random_seed = 123\n\n\n# Fake data reader for test\nclass Reader(object):\n def get_data_iter(self, rnn_config):\n for i in range(rnn_config.max_epoch):\n x = np.zeros(\n shape=(rnn_config.batch_size, rnn_config.num_steps),\n dtype='int64')\n y = np.ones(\n shape=(rnn_config.batch_size, rnn_config.num_steps),\n dtype='int64')\n yield (x, y)\n\n\n# Model from PaddleNLP/models/language_model/lm_model.py in Paddle Models repo\ndef lm_model(hidden_size,\n vocab_size,\n batch_size,\n num_layers=2,\n num_steps=20,\n init_scale=0.1,\n dropout=None,\n rnn_model='static'):\n def padding_rnn(input_embedding, len=3, init_hidden=None, init_cell=None):\n weight_1_arr = []\n weight_2_arr = []\n bias_arr = []\n hidden_array = []\n cell_array = []\n mask_array = []\n for i in range(num_layers):\n 
weight_1 = layers.create_parameter(\n [hidden_size * 2, hidden_size * 4],\n dtype=\"float32\",\n name=\"fc_weight1_\" + str(i),\n default_initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale))\n weight_1_arr.append(weight_1)\n bias_1 = layers.create_parameter(\n [hidden_size * 4],\n dtype=\"float32\",\n name=\"fc_bias1_\" + str(i),\n default_initializer=fluid.initializer.Constant(0.0))\n bias_arr.append(bias_1)\n\n pre_hidden = layers.slice(\n init_hidden, axes=[0], starts=[i], ends=[i + 1])\n pre_cell = layers.slice(\n init_cell, axes=[0], starts=[i], ends=[i + 1])\n pre_hidden = layers.reshape(pre_hidden, shape=[-1, hidden_size])\n pre_cell = layers.reshape(pre_cell, shape=[-1, hidden_size])\n hidden_array.append(pre_hidden)\n cell_array.append(pre_cell)\n\n input_embedding = layers.transpose(input_embedding, perm=[1, 0, 2])\n rnn = PaddingRNN()\n\n with rnn.step():\n input = rnn.step_input(input_embedding)\n for k in range(num_layers):\n pre_hidden = rnn.memory(init=hidden_array[k])\n pre_cell = rnn.memory(init=cell_array[k])\n weight_1 = weight_1_arr[k]\n bias = bias_arr[k]\n\n nn = layers.concat([input, pre_hidden], 1)\n gate_input = layers.matmul(x=nn, y=weight_1)\n\n gate_input = layers.elementwise_add(gate_input, bias)\n i = layers.slice(\n gate_input, axes=[1], starts=[0], ends=[hidden_size])\n j = layers.slice(\n gate_input,\n axes=[1],\n starts=[hidden_size],\n ends=[hidden_size * 2])\n f = layers.slice(\n gate_input,\n axes=[1],\n starts=[hidden_size * 2],\n ends=[hidden_size * 3])\n o = layers.slice(\n gate_input,\n axes=[1],\n starts=[hidden_size * 3],\n ends=[hidden_size * 4])\n\n c = pre_cell * layers.sigmoid(f) + layers.sigmoid(\n i) * layers.tanh(j)\n m = layers.tanh(c) * layers.sigmoid(o)\n\n rnn.update_memory(pre_hidden, m)\n rnn.update_memory(pre_cell, c)\n\n rnn.step_output(m)\n rnn.step_output(c)\n\n input = m\n\n if dropout != None and dropout > 0.0:\n input = layers.dropout(\n input,\n dropout_prob=dropout,\n dropout_implementation='upscale_in_train')\n\n rnn.step_output(input)\n rnnout = rnn()\n\n last_hidden_array = []\n last_cell_array = []\n real_res = rnnout[-1]\n for i in range(num_layers):\n m = rnnout[i * 2]\n c = rnnout[i * 2 + 1]\n m.stop_gradient = True\n c.stop_gradient = True\n last_h = layers.slice(\n m, axes=[0], starts=[num_steps - 1], ends=[num_steps])\n last_hidden_array.append(last_h)\n last_c = layers.slice(\n c, axes=[0], starts=[num_steps - 1], ends=[num_steps])\n last_cell_array.append(last_c)\n real_res = layers.transpose(x=real_res, perm=[1, 0, 2])\n last_hidden = layers.concat(last_hidden_array, 0)\n last_cell = layers.concat(last_cell_array, 0)\n\n return real_res, last_hidden, last_cell\n\n def encoder_static(input_embedding, len=3, init_hidden=None,\n init_cell=None):\n\n weight_1_arr = []\n weight_2_arr = []\n bias_arr = []\n hidden_array = []\n cell_array = []\n mask_array = []\n for i in range(num_layers):\n weight_1 = layers.create_parameter(\n [hidden_size * 2, hidden_size * 4],\n dtype=\"float32\",\n name=\"fc_weight1_\" + str(i),\n default_initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale))\n weight_1_arr.append(weight_1)\n bias_1 = layers.create_parameter(\n [hidden_size * 4],\n dtype=\"float32\",\n name=\"fc_bias1_\" + str(i),\n default_initializer=fluid.initializer.Constant(0.0))\n bias_arr.append(bias_1)\n\n pre_hidden = layers.slice(\n init_hidden, axes=[0], starts=[i], ends=[i + 1])\n pre_cell = layers.slice(\n init_cell, axes=[0], starts=[i], ends=[i + 
1])\n pre_hidden = layers.reshape(\n pre_hidden, shape=[-1, hidden_size], inplace=True)\n pre_cell = layers.reshape(\n pre_cell, shape=[-1, hidden_size], inplace=True)\n hidden_array.append(pre_hidden)\n cell_array.append(pre_cell)\n\n res = []\n sliced_inputs = layers.split(\n input_embedding, num_or_sections=len, dim=1)\n\n for index in range(len):\n input = sliced_inputs[index]\n input = layers.reshape(input, shape=[-1, hidden_size], inplace=True)\n for k in range(num_layers):\n pre_hidden = hidden_array[k]\n pre_cell = cell_array[k]\n weight_1 = weight_1_arr[k]\n bias = bias_arr[k]\n\n nn = layers.concat([input, pre_hidden], 1)\n gate_input = layers.matmul(x=nn, y=weight_1)\n\n gate_input = layers.elementwise_add(gate_input, bias)\n i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)\n\n c = pre_cell * layers.sigmoid(f) + layers.sigmoid(\n i) * layers.tanh(j)\n m = layers.tanh(c) * layers.sigmoid(o)\n\n hidden_array[k] = m\n cell_array[k] = c\n input = m\n\n if dropout != None and dropout > 0.0:\n input = layers.dropout(\n input,\n dropout_prob=dropout,\n dropout_implementation='upscale_in_train')\n\n res.append(input)\n\n last_hidden = layers.concat(hidden_array, 1)\n last_hidden = layers.reshape(\n last_hidden, shape=[-1, num_layers, hidden_size], inplace=True)\n last_hidden = layers.transpose(x=last_hidden, perm=[1, 0, 2])\n\n last_cell = layers.concat(cell_array, 1)\n last_cell = layers.reshape(\n last_cell, shape=[-1, num_layers, hidden_size])\n last_cell = layers.transpose(x=last_cell, perm=[1, 0, 2])\n\n real_res = layers.concat(res, 0)\n real_res = layers.reshape(\n real_res, shape=[len, -1, hidden_size], inplace=True)\n real_res = layers.transpose(x=real_res, perm=[1, 0, 2])\n\n return real_res, last_hidden, last_cell\n\n batch_size_each = batch_size\n x = layers.data(\n name=\"x\",\n shape=[batch_size_each, num_steps, 1],\n dtype='int64',\n append_batch_size=False)\n y = layers.data(\n name=\"y\",\n shape=[batch_size_each * num_steps, 1],\n dtype='int64',\n append_batch_size=False)\n\n init_hidden = layers.data(\n name=\"init_hidden\",\n shape=[num_layers, batch_size_each, hidden_size],\n dtype='float32',\n append_batch_size=False)\n init_cell = layers.data(\n name=\"init_cell\",\n shape=[num_layers, batch_size_each, hidden_size],\n dtype='float32',\n append_batch_size=False)\n\n init_cell.persistable = True\n init_hidden.persistable = True\n\n init_hidden_reshape = layers.reshape(\n init_hidden, shape=[num_layers, -1, hidden_size])\n init_cell_reshape = layers.reshape(\n init_cell, shape=[num_layers, -1, hidden_size])\n\n x_emb = layers.embedding(\n input=x,\n size=[vocab_size, hidden_size],\n dtype='float32',\n is_sparse=False,\n param_attr=fluid.ParamAttr(\n name='embedding_para',\n initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale)))\n\n x_emb = layers.reshape(\n x_emb, shape=[-1, num_steps, hidden_size], inplace=True)\n if dropout != None and dropout > 0.0:\n x_emb = layers.dropout(\n x_emb,\n dropout_prob=dropout,\n dropout_implementation='upscale_in_train')\n\n if rnn_model == \"padding\":\n rnn_out, last_hidden, last_cell = padding_rnn(\n x_emb,\n len=num_steps,\n init_hidden=init_hidden_reshape,\n init_cell=init_cell_reshape)\n elif rnn_model == \"static\":\n rnn_out, last_hidden, last_cell = encoder_static(\n x_emb,\n len=num_steps,\n init_hidden=init_hidden_reshape,\n init_cell=init_cell_reshape)\n elif rnn_model == \"cudnn\":\n x_emb = layers.transpose(x_emb, perm=[1, 0, 2])\n rnn_out, last_hidden, last_cell = 
layers.lstm(\n x_emb,\n init_hidden_reshape,\n init_cell_reshape,\n num_steps,\n hidden_size,\n num_layers,\n is_bidirec=False,\n default_initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale))\n rnn_out = layers.transpose(rnn_out, perm=[1, 0, 2])\n elif rnn_model == \"basic_lstm\":\n rnn_out, last_hidden, last_cell = basic_lstm( x_emb, init_hidden, init_cell, hidden_size, \\\n num_layers=num_layers, batch_first=True, dropout_prob=dropout, \\\n param_attr = ParamAttr( initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale) ), \\\n bias_attr = ParamAttr( initializer = fluid.initializer.Constant(0.0) ), \\\n forget_bias = 0.0)\n else:\n print(\"type not support\")\n return\n\n rnn_out = layers.reshape(\n rnn_out, shape=[-1, num_steps, hidden_size], inplace=True)\n\n softmax_weight = layers.create_parameter(\n [hidden_size, vocab_size],\n dtype=\"float32\",\n name=\"softmax_weight\",\n default_initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale))\n softmax_bias = layers.create_parameter(\n [vocab_size],\n dtype=\"float32\",\n name='softmax_bias',\n default_initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale))\n\n projection = layers.matmul(rnn_out, softmax_weight)\n projection = layers.elementwise_add(projection, softmax_bias)\n projection = layers.reshape(\n projection, shape=[-1, vocab_size], inplace=True)\n\n loss = layers.softmax_with_cross_entropy(\n logits=projection, label=y, soft_label=False)\n\n loss = layers.reshape(loss, shape=[-1, num_steps], inplace=True)\n loss = layers.reduce_mean(loss, dim=[0])\n loss = layers.reduce_sum(loss)\n\n loss.persistable = True\n last_cell.persistable = True\n last_hidden.persistable = True\n\n # This will feed last_hidden, last_cell to init_hidden, init_cell, which\n # can be used directly in next batch. 
This can avoid the fetching of\n # last_hidden and last_cell and feeding of init_hidden and init_cell in\n # each training step.\n layers.assign(input=last_cell, output=init_cell)\n layers.assign(input=last_hidden, output=init_hidden)\n\n feeding_list = ['x', 'y', 'init_hidden', 'init_cell']\n return loss, last_hidden, last_cell, feeding_list\n\n\nclass PaddingRNNTestBase(unittest.TestCase):\n def setUp(self):\n self.reader = Reader()\n self.device_count = 1\n\n # The default exec_strategy used for PaddingRNN.\n # You can change it in set_customed_config.\n self.exec_strategy = fluid.ExecutionStrategy()\n self.exec_strategy.num_threads = self.device_count\n self.exec_strategy.num_iteration_per_drop_scope = 100\n\n # The default build_strategy used for PaddingRNN.\n # You can change it in set_customed_config.\n self.build_strategy = fluid.BuildStrategy()\n self.build_strategy.enable_inplace = True\n self.build_strategy.memory_optimize = False\n self.build_strategy.fuse_all_optimizer_ops = True\n\n # CPU executor is used for PaddingRNN default.\n # You can change to CUDA executor in set_customed_config.\n self.exe = Executor(fluid.CPUPlace())\n\n def set_customed_config(self):\n # This function will be called before training.\n # You can override the function to set your own config.\n pass\n\n def _prepare_program(self, config, parallel=True):\n paddle.seed(config.random_seed)\n self.main_program = fluid.Program()\n self.startup_program = fluid.Program()\n with fluid.program_guard(self.main_program, self.startup_program):\n with fluid.unique_name.guard():\n res_vars = lm_model(\n config.hidden_size,\n config.vocab_size,\n config.batch_size,\n num_layers=config.num_layers,\n num_steps=config.num_steps,\n init_scale=config.init_scale,\n dropout=config.dropout,\n rnn_model=config.rnn_model)\n self.loss, self.last_hidden, self.last_cell, self.feed_order = res_vars\n\n fluid.clip.set_gradient_clip(\n clip=fluid.clip.GradientClipByGlobalNorm(\n clip_norm=config.max_grad_norm))\n\n self.learning_rate = fluid.layers.create_global_var(\n name=\"learning_rate\",\n shape=[1],\n value=1.0,\n dtype='float32',\n persistable=True)\n\n optimizer = fluid.optimizer.SGD(\n learning_rate=self.learning_rate)\n optimizer.minimize(self.loss)\n\n self.exe.run(self.startup_program)\n\n if parallel:\n self.train_program = fluid.compiler.CompiledProgram(\n self.main_program).with_data_parallel(\n loss_name=self.loss.name,\n build_strategy=self.build_strategy,\n exec_strategy=self.exec_strategy)\n else:\n self.train_program = self.main_program\n\n def _generate_init_data(self):\n init_hidden = np.zeros(\n (self.config.num_layers, self.config.batch_size,\n self.config.hidden_size),\n dtype='float32')\n init_cell = np.zeros(\n (self.config.num_layers, self.config.batch_size,\n self.config.hidden_size),\n dtype='float32')\n return init_hidden, init_cell\n\n def _generate_new_lr(self, epoch_id=0, device_count=1):\n new_lr = self.config.base_learning_rate * (self.config.lr_decay**max(\n epoch_id + 1 - self.config.epoch_start_decay, 0.0))\n lr = np.ones((self.device_count), dtype='float32') * new_lr\n return lr\n\n def _prepare_input(self,\n batch,\n init_hidden=None,\n init_cell=None,\n epoch_id=0,\n with_lr=True,\n device_count=1):\n x, y = batch\n x = x.reshape((-1, self.config.num_steps, 1))\n y = y.reshape((-1, 1))\n\n res = {}\n res['x'] = x\n res['y'] = y\n if init_hidden is not None:\n res['init_hidden'] = init_hidden\n if init_cell is not None:\n res['init_cell'] = init_cell\n if with_lr:\n res['learning_rate'] = 
self._generate_new_lr(epoch_id, device_count)\n return res\n\n def _train_an_epoch(self, epoch_id, use_program_cache=True):\n train_data_iter = self.reader.get_data_iter(self.config)\n\n total_loss = 0\n iters = 0\n\n init_hidden, init_cell = self._generate_init_data()\n ppl = np.zeros(shape=(0))\n for batch_id, batch in enumerate(train_data_iter):\n input_data_feed = self._prepare_input(\n batch,\n init_hidden=init_hidden,\n init_cell=init_cell,\n epoch_id=epoch_id,\n with_lr=True,\n device_count=self.device_count)\n\n fetch_outs = self.exe.run(self.train_program,\n feed=input_data_feed,\n fetch_list=[\n self.loss.name, \"learning_rate\",\n self.last_hidden.name,\n self.last_cell.name\n ],\n use_program_cache=use_program_cache)\n\n cost_train = np.array(fetch_outs[0])\n lr = np.array(fetch_outs[1])\n init_hidden = np.array(fetch_outs[2])\n init_cell = np.array(fetch_outs[3])\n\n total_loss += cost_train\n iters += self.config.num_steps\n\n batch_ppl = np.exp(total_loss / iters)\n ppl = np.append(ppl, batch_ppl)\n return ppl\n\n def train(self, config, parallel=True, use_program_cache=True):\n self.set_customed_config()\n\n self.config = config\n self._prepare_program(config, parallel)\n ppl = np.zeros(shape=(0, config.batch_size))\n for epoch_id in range(config.max_epoch):\n train_ppl = self._train_an_epoch(epoch_id, use_program_cache)\n ppl = np.append(ppl, train_ppl)\n return ppl\n\n def compare_padding_static_mode(self, parallel=True,\n use_program_cache=True):\n '''\n Test that train ppl of padding mode is same to that of static mode \n '''\n config = RNNConfig('test', 'padding')\n with fluid.scope_guard(fluid.Scope()):\n padding_rnn_ppl = self.train(config, parallel, use_program_cache)\n config = RNNConfig('test', 'static')\n with fluid.scope_guard(fluid.Scope()):\n static_rnn_ppl = self.train(config, parallel, use_program_cache)\n self.assertTrue(\n np.isclose(\n padding_rnn_ppl, static_rnn_ppl, rtol=0.001).all())\n\n\nclass EagerDeletionPaddingRNNTest(PaddingRNNTestBase):\n def test_padding_mode_no_eager_deletion(self):\n '''\n Test that train ppl of padding mode is same to that of static mode without eager deletion\n '''\n fluid.core._set_eager_deletion_mode(-1.0, 1.0, True)\n # When parallel is True, use_program_cache does not make a difference.\n self.compare_padding_static_mode(parallel=True, use_program_cache=True)\n\n def test_padding_mode_eager_deletion(self):\n '''\n Test that train ppl of padding mode is same to that of static mode under eager deletion\n '''\n fluid.core._set_eager_deletion_mode(0.0, 1.0, True)\n # When parallel is True, use_program_cache does not make a difference.\n self.compare_padding_static_mode(parallel=True, use_program_cache=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom auto_scan_test import PassAutoScanTest, SkipReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig\nimport numpy as np\nimport paddle.inference as paddle_infer\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\n\n\nclass TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n if program_config.inputs[\"input_data1\"].shape[\n -4] != 1 and program_config.inputs[\"input_data2\"].shape[\n -4] != 1:\n if program_config.inputs[\"input_data1\"].shape[\n -4] != program_config.inputs[\"input_data2\"].shape[-4]:\n return False\n\n if program_config.inputs[\"input_data1\"].shape[\n -3] != 1 and program_config.inputs[\"input_data2\"].shape[\n -3] != 1:\n if program_config.inputs[\"input_data1\"].shape[\n -3] != program_config.inputs[\"input_data2\"].shape[-3]:\n return False\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n # If the problem has been fixed, the judgment \n # needs to be deleted!!!\n if 0 in attrs[2]['shape']:\n return False\n\n return True\n\n def sample_program_config(self, draw):\n transpose_X = draw(st.booleans())\n transpose_Y = draw(st.booleans())\n axis = draw(st.sampled_from([[0, 2, 1, 3]]))\n shape = draw(st.sampled_from([[0, -1, 128], [-1, 1, 64], [1, -1, 32]]))\n batch_size1 = draw(st.integers(min_value=1, max_value=4))\n batch_size2 = draw(st.integers(min_value=1, max_value=4))\n channel1 = draw(st.sampled_from([1, 16, 32, 64]))\n channel2 = draw(st.sampled_from([1, 16, 32, 64]))\n input_dim = draw(st.sampled_from([16, 32, 64]))\n\n def generate_input(type):\n if transpose_X and transpose_Y:\n shape_x = [batch_size1, channel1, input_dim, 32]\n shape_y = [batch_size2, channel2, 64, input_dim]\n elif transpose_X:\n shape_x = [batch_size1, channel1, input_dim, 32]\n shape_y = [batch_size2, channel2, input_dim, 64]\n elif transpose_Y:\n shape_x = [batch_size1, channel1, 32, input_dim]\n shape_y = [batch_size2, channel2, 8, input_dim]\n else:\n shape_x = [batch_size1, channel1, 32, input_dim]\n shape_y = [batch_size2, channel2, input_dim, 16]\n\n if type == \"x\":\n return np.random.random(shape_x).astype(np.float32)\n else:\n return np.random.random(shape_y).astype(np.float32)\n\n matmul_op = OpConfig(\n type=\"matmul_v2\",\n inputs={\"X\": [\"input_data1\"],\n \"Y\": [\"input_data2\"]},\n outputs={\"Out\": [\"matmul_output\"]},\n attrs={\n \"trans_x\": transpose_X,\n \"trans_y\": transpose_Y,\n \"fused_reshape_X\": [],\n \"fused_reshape_Y\": [],\n \"fused_transpose_X\": [],\n \"fused_transpose_Y\": [],\n \"fused_reshape_Out\": [],\n \"fused_transpose_Out\": []\n })\n\n transpose2_op = OpConfig(\n type=\"transpose2\",\n inputs={\"X\": 
[\"matmul_output\"]},\n outputs={\n \"Out\": [\"transpose2_output\"],\n \"XShape\": [\"transpose2_xshape\"]\n },\n attrs={'axis': axis})\n\n reshape2_op = OpConfig(\n type=\"reshape2\",\n inputs={\"X\": [\"transpose2_output\"]},\n outputs={\n \"Out\": [\"reshape2_output\"],\n \"XShape\": [\"reshape2_xshape\"]\n },\n attrs={'shape': shape})\n\n model_net = [matmul_op, transpose2_op, reshape2_op]\n\n program_config = ProgramConfig(\n ops=model_net,\n weights={},\n inputs={\n \"input_data1\":\n TensorConfig(data_gen=partial(generate_input, \"x\")),\n \"input_data2\":\n TensorConfig(data_gen=partial(generate_input, \"y\"))\n },\n outputs=[\"reshape2_output\"])\n\n return program_config\n\n def sample_predictor_configs(self, program_config):\n # map_matmul_v2_to_matmul_pass will affect the type of final fused op \n fused_op = \"matmul_v2\"\n input1_dim1 = program_config.inputs[\"input_data1\"].shape[0]\n input2_dim1 = program_config.inputs[\"input_data2\"].shape[0]\n input1_dim2 = program_config.inputs[\"input_data1\"].shape[1]\n input2_dim2 = program_config.inputs[\"input_data2\"].shape[1]\n if input1_dim1 == input2_dim1 and input1_dim2 == input2_dim2:\n fused_op = \"matmul\"\n\n config = self.create_inference_config(use_mkldnn=True)\n yield config, [fused_op], (1e-5, 1e-5)\n\n def test(self):\n self.run_and_statis(\n quant=False, passes=[\"matmul_v2_transpose_reshape_fuse_pass\"])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest, skip_check_grad_ci\nfrom paddle import fluid\nfrom paddle.fluid.layers import lstm as LSTM\nfrom paddle.fluid.layers import fill_constant\nfrom paddle.fluid.framework import program_guard, Program\n\nSIGMOID_THRESHOLD_MIN = -40.0\nSIGMOID_THRESHOLD_MAX = 13.0\nEXP_MAX_INPUT = 40.0\n\n\ndef identity(x):\n return x\n\n\ndef sigmoid(x):\n y = np.copy(x)\n y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN\n y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX\n return 1. / (1. + np.exp(-y))\n\n\ndef tanh(x):\n y = -2. * x\n y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT\n return (2. / (1. + np.exp(y))) - 1.\n\n\ndef relu(x):\n return np.maximum(x, 0)\n\n\nACTIVATION = {\n 'identity': identity,\n 'sigmoid': sigmoid,\n 'tanh': tanh,\n 'relu': relu\n}\n\n\ndef lstm(\n input, # T x 4D\n lod, # 1 x N\n h0=None, # N x D\n c0=None, # N x D\n w_h=None, # D x 4D\n w_b=None, # 1 x 4D\n w_c=None, # 1 x 3D\n is_reverse=False,\n act_gate=None,\n act_cell=None,\n act_cand=None):\n def _step(x, w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand):\n g = np.dot(h_pre, w_h) # 1 x 4D\n g = g + x\n g = np.reshape(g, (1, g.size))\n c, g_i, g_f, g_o = np.split(g, 4, axis=1)\n if w_c is None:\n g_i = act_gate(g_i) # 1 x D\n g_f = act_gate(g_f) # 1 x D\n else:\n w_ic, w_fc, w_oc = np.split(w_c, 3, axis=1)\n g_i = act_gate(g_i + w_ic * c_pre) # 1 x D\n g_f = act_gate(g_f + w_fc * c_pre) # 1 x D\n c = g_f * c_pre + g_i * act_cand(c) # 1 x D\n\n if w_c is None:\n g_o = act_gate(g_o) # 1 x D\n else:\n _, _, w_oc = np.split(w_c, 3, axis=1)\n g_o = act_gate(g_o + w_oc * c) # 1 x D\n h = g_o * act_cell(c)\n return h, c\n\n def _reverse(x, offset):\n y = np.zeros_like(x)\n for i in range(len(offset) - 1):\n b, e = offset[i], offset[i + 1]\n y[b:e, :] = np.flip(x[b:e, :], 0)\n return y\n\n offset = [0]\n for l in lod[0]:\n offset.append(offset[-1] + l)\n batch_size = len(lod[0])\n hidden = []\n cell = []\n input = _reverse(input, offset) if is_reverse else input\n if w_b is not None:\n input = input + np.tile(w_b, (offset[-1], 1))\n for i in range(batch_size):\n # compute one sequence\n seq_len = lod[0][i]\n x = input[offset[i]:offset[i + 1], :]\n h_pre = h0[i] # 1 x D\n c_pre = c0[i] # 1 x D\n for j in range(seq_len):\n # compute one step\n h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate,\n act_cell, act_cand)\n hidden.append(h_pre.flatten())\n cell.append(c_pre.flatten())\n\n hidden = np.array(hidden).astype('float64')\n cell = np.array(cell).astype('float64')\n\n hidden = _reverse(hidden, offset) if is_reverse else hidden\n cell = _reverse(cell, offset) if is_reverse else cell\n\n assert hidden.shape == (input.shape[0], input.shape[1] / 4)\n assert cell.shape == (input.shape[0], input.shape[1] / 4)\n return hidden, cell\n\n\nclass LstmUnitTestError(unittest.TestCase):\n def test_errors(self):\n 
with program_guard(Program(), Program()):\n batch_size = 20\n seq_len = 100\n dropout_prob = 0.2\n hidden_size = 150\n num_layers = 1\n input = fluid.data(\n name='input',\n shape=[batch_size, seq_len, hidden_size],\n dtype='float32')\n pre_hidden = fill_constant([num_layers, batch_size, hidden_size],\n 'float32', 0.0)\n pre_cell = fill_constant([num_layers, batch_size, hidden_size],\n 'float32', 0.0)\n\n np_input = np.random.uniform(\n -0.1, 0.1, (batch_size, seq_len, hidden_size)).astype('float64')\n np_pre_hidden = np.random.uniform(\n -0.1, 0.1,\n (num_layers, batch_size, hidden_size)).astype('float64')\n np_pre_cell = np.random.uniform(\n -0.1, 0.1,\n (num_layers, batch_size, hidden_size)).astype('float64')\n\n def test_input_Variable():\n LSTM(np_input, pre_hidden, pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_input_Variable)\n\n def test_pre_hidden_Variable():\n LSTM(np_input, np_pre_hidden, pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_pre_hidden_Variable)\n\n def test_pre_cell_Variable():\n LSTM(np_input, pre_hidden, np_pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_pre_cell_Variable)\n\n def test_input_type():\n error_input = fluid.data(\n name='error_input',\n shape=[None, hidden_size * 3],\n dtype='int32')\n LSTM(error_input, pre_hidden, pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_input_type)\n\n def test_pre_hidden_type():\n error_pre_hidden = fluid.data(\n name='error_pre_hidden',\n shape=[None, hidden_size],\n dtype='int32')\n LSTM(input, error_pre_hidden, pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_pre_hidden_type)\n\n def test_pre_cell_type():\n error_pre_cell = fluid.data(\n name='error_pre_cell',\n shape=[None, hidden_size],\n dtype='int32')\n LSTM(input, pre_hidden, error_pre_cell, \\\n seq_len, hidden_size, num_layers, \\\n dropout_prob=dropout_prob)\n\n self.assertRaises(TypeError, test_pre_cell_type)\n\n\nclass TestLstmOp(OpTest):\n def set_is_test(self):\n self.is_test = False\n\n def set_lod(self):\n self.lod = [[2, 3, 2]]\n\n def set_argument(self):\n self.set_is_test()\n self.set_lod()\n self.D = 16\n\n self.act_gate = 'sigmoid'\n self.act_cell = 'tanh'\n self.act_cand = 'tanh'\n\n self.has_initial_state = False\n self.is_reverse = False\n self.use_peepholes = True\n\n def setUp(self):\n self.set_argument()\n self.op_type = 'lstm'\n T = sum(self.lod[0])\n N = len(self.lod[0])\n\n x = np.random.normal(size=(T, 4 * self.D)).astype('float64')\n if self.has_initial_state:\n h0 = np.random.normal(size=(N, self.D)).astype('float64')\n c0 = np.random.normal(size=(N, self.D)).astype('float64')\n else:\n h0 = np.zeros((N, self.D)).astype('float64')\n c0 = np.zeros((N, self.D)).astype('float64')\n w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64')\n if self.use_peepholes:\n b = np.random.normal(size=(1, 7 * self.D)).astype('float64')\n else:\n b = np.random.normal(size=(1, 4 * self.D)).astype('float64')\n\n w_b = b[:, 0:4 * self.D]\n w_c = b[:, 4 * self.D:] if self.use_peepholes else None\n h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse,\n ACTIVATION[self.act_gate], ACTIVATION[self.act_cell],\n ACTIVATION[self.act_cand])\n\n self.inputs = {'Input': (x, self.lod), 'Weight': w}\n\n 
self.inputs['Bias'] = b\n\n if self.has_initial_state:\n self.inputs['H0'] = h0\n self.inputs['C0'] = c0\n\n self.outputs = {\n 'Hidden': (h, self.lod),\n 'Cell': (c, self.lod),\n }\n self.attrs = {\n 'use_peepholes': self.use_peepholes,\n 'is_reverse': self.is_reverse,\n 'gate_activation': self.act_gate,\n 'cell_activation': self.act_cell,\n 'candidate_activation': self.act_cand,\n 'is_test': self.is_test\n }\n\n def test_check_output(self):\n self.check_output(atol=1e-8, check_dygraph=False)\n\n def test_check_grad(self):\n # TODO(qingqing) remove folowing lines after the check_grad is refined.\n N = len(self.lod[0])\n self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n self.outputs['BatchCellPreAct'] = np.zeros(\n (N, self.D)).astype('float64')\n self.check_grad(\n ['Input', 'Weight', 'Bias'], ['Hidden'],\n max_relative_error=5e-4,\n check_dygraph=False)\n\n\nclass TestLstmOpCase1(TestLstmOp):\n def set_lod(self):\n self.lod = [[0, 3, 2]]\n\n\nclass TestLstmOpCase2(TestLstmOp):\n def set_lod(self):\n self.lod = [[0, 3, 0]]\n\n\nclass TestLstmOpCase3(TestLstmOp):\n def set_lod(self):\n self.lod = [[2, 0, 4]]\n\n\nclass TestLstmOpInference(TestLstmOp):\n def set_is_test(self):\n self.is_test = True\n\n # avoid checking gradient\n def test_check_grad(self):\n pass\n\n\nclass TestLstmOpError(unittest.TestCase):\n def test_errors(self):\n with program_guard(Program(), Program()):\n\n def test_Variable():\n input_data = np.random.random((1, 2048)).astype(\"float32\")\n fluid.layers.dynamic_lstm(\n input=input_data, size=2048, use_peepholes=False)\n\n self.assertRaises(TypeError, test_Variable)\n\n def test_h_0():\n in_data = fluid.data(\n name=\"input\", shape=[None, 2048], dtype=\"float32\")\n h = fluid.data(name=\"h\", shape=[None, 512], dtype=\"int32\")\n c = fluid.data(name=\"c\", shape=[None, 512], dtype=\"float32\")\n fluid.layers.dynamic_lstm(\n input=in_data, size=2048, use_peepholes=False, h_0=h, c_0=c)\n\n self.assertRaises(TypeError, test_h_0)\n\n def test_c_0():\n in_data_ = fluid.data(\n name=\"input_\", shape=[None, 2048], dtype=\"float32\")\n h_ = fluid.data(name=\"h_\", shape=[None, 512], dtype=\"float32\")\n c_ = fluid.data(name=\"c_\", shape=[None, 512], dtype=\"int32\")\n fluid.layers.dynamic_lstm(\n input=in_data_,\n size=2048,\n use_peepholes=False,\n h_0=h_,\n c_0=c_)\n\n self.assertRaises(TypeError, test_c_0)\n\n\n# class TestLstmOpHasInitial(TestLstmOp):\n# def set_argument(self):\n# self.lod = [[2, 3, 2]]\n# self.D = 16\n\n# self.act_gate = 'sigmoid'\n# self.act_cell = 'tanh'\n# self.act_cand = 'tanh'\n\n# self.has_initial_state = True\n# self.is_reverse = True\n# self.use_peepholes = True\n\n# def test_check_grad(self):\n# # TODO(qingqing) remove folowing lines after the check_grad is refined.\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],\n# max_relative_error=5e-4)\n\n# def test_check_grad_ingore_bias(self):\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Input', 'Weight'], ['Hidden'],\n# max_relative_error=5e-4,\n# no_grad_set=set('Bias'))\n\n# def test_check_grad_ingore_weight(self):\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * 
self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Input', 'Bias'], ['Hidden'],\n# max_relative_error=5e-4,\n# no_grad_set=set('Weight'))\n\n# def test_check_grad_ingore_input(self):\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Weight', 'Bias'], ['Hidden'],\n# max_relative_error=5e-4,\n# no_grad_set=set('Input'))\n\n# def test_check_grad_ingore_h0(self):\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],\n# max_relative_error=5e-4,\n# no_grad_set=set('H0'))\n\n# def test_check_grad_ingore_c0(self):\n# N = len(self.lod[0])\n# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')\n# self.outputs['BatchCellPreAct'] = np.zeros(\n# (N, self.D)).astype('float64')\n# self.check_grad(\n# ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],\n# max_relative_error=5e-4,\n# no_grad_set=set('C0'))\n\n# class TestLstmOpRerverse(TestLstmOp):\n# def set_argument(self):\n# self.lod = [[2, 3, 2]]\n# self.D = 16\n\n# self.act_gate = 'sigmoid'\n# self.act_cell = 'tanh'\n# self.act_cand = 'tanh'\n\n# self.has_initial_state = False\n# self.is_reverse = True\n# self.use_peepholes = True\n\n# class TestLstmOpNotUsePeepholes(TestLstmOp):\n# def set_argument(self):\n# self.lod = [[2, 3, 2]]\n# self.D = 16\n\n# self.act_gate = 'sigmoid'\n# self.act_cell = 'tanh'\n# self.act_cand = 'tanh'\n\n# self.has_initial_state = False\n# self.is_reverse = True\n# self.use_peepholes = False\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.compiler as compiler\nimport paddle.optimizer\nimport paddle.static\nfrom paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,\n np_dtype_to_fluid_str)\n\npaddle.enable_static()\n\n\[email protected](not paddle.is_compiled_with_ipu(),\n \"core is not compiled with IPU\")\nclass TestBase(IPUOpTest):\n def setUp(self):\n self.set_atol()\n self.set_training()\n self.set_feed()\n self.set_feed_attr()\n self.set_attrs()\n\n def set_feed(self):\n self.feed = {\n \"x\": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),\n }\n\n def set_feed_attr(self):\n self.feed_shape = [x.shape for x in self.feed.values()]\n self.feed_list = list(self.feed.keys())\n self.feed_dtype = [\n np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()\n ]\n\n def set_attrs(self):\n self.attrs = {\n \"pool_size\": 3,\n \"pool_type\": 'max',\n \"pool_stride\": 1,\n \"pool_padding\": 0,\n \"global_pooling\": False,\n \"ceil_mode\": False,\n \"exclusive\": True,\n \"data_format\": 'NCHW',\n }\n\n def _test_base(self, run_ipu=True):\n scope = fluid.core.Scope()\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n SEED = self.SEED\n main_prog.random_seed = SEED\n startup_prog.random_seed = SEED\n\n with fluid.scope_guard(scope):\n with paddle.static.program_guard(main_prog, startup_prog):\n x = paddle.static.data(\n name=self.feed_list[0],\n shape=self.feed_shape[0],\n dtype=self.feed_dtype[0])\n out = paddle.fluid.layers.pool2d(x, **self.attrs)\n\n fetch_list = [out.name]\n\n if run_ipu:\n place = paddle.IPUPlace()\n else:\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(startup_prog)\n\n if run_ipu:\n feed_list = self.feed_list\n ipu_strategy = compiler.get_ipu_strategy()\n ipu_strategy.is_training = self.is_training\n program = compiler.IPUCompiledProgram(\n main_prog,\n ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)\n else:\n program = main_prog\n\n result = exe.run(program, feed=self.feed, fetch_list=fetch_list)\n return result[0]\n\n def test_base(self):\n res0 = self._test_base(False)\n res1 = self._test_base(True)\n\n self.assertTrue(\n np.allclose(\n res0.flatten(), res1.flatten(), atol=self.atol))\n\n self.assertTrue(res0.shape == res1.shape)\n\n\nclass TestCase1(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_size'] = 3\n\n\nclass TestCase1_2(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_size'] = [3, 1]\n\n\nclass TestCase2(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_stride'] = 2\n\n\nclass TestCase2_2(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_stride'] = [2, 1]\n\n\nclass TestCase3(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_padding'] = [1, 1]\n\n\nclass 
TestCase3_2(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_padding'] = [1, 1, 2, 2]\n\n\[email protected]('auto_pad is not currently supported')\nclass TestCase3_3(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_padding'] = 'VALID'\n\n\[email protected]('auto_pad is not currently supported')\nclass TestCase3_4(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['pool_padding'] = 'SAME'\n\n\nclass TestCase4(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['global_pooling'] = True\n\n\nclass TestCase5(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['ceil_mode'] = True\n\n\nclass TestCase6(TestBase):\n def set_attrs(self):\n super().set_attrs()\n self.attrs['exclusive'] = False\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle.fluid as fluid\n\n\ndef crop(data, offsets, crop_shape):\n def indexOf(shape, index):\n result = []\n for dim in reversed(shape):\n result.append(index % dim)\n index = index / dim\n return result[::-1]\n\n result = []\n for i, value in enumerate(data.flatten()):\n index = indexOf(data.shape, i)\n selected = True\n if len(index) == len(offsets):\n for j, offset in enumerate(offsets):\n selected = selected and index[j] >= offset and index[\n j] < crop_shape[j] + offset\n if selected:\n result.append(value)\n return np.array(result).reshape(crop_shape)\n\n\nclass TestCropTensorOp(OpTest):\n def setUp(self):\n self.op_type = \"crop_tensor\"\n self.shape_by_input = False\n self.offset_by_input = False\n self.unk_dim_idx = -1\n self.attrs = {}\n self.initTestCase()\n\n if self.shape_by_input:\n self.inputs = {\n 'X': np.random.random(self.x_shape).astype(\"float64\"),\n 'Shape': np.array(self.crop_shape).astype(\"int32\")\n }\n else:\n self.attrs['shape'] = self.crop_shape\n self.inputs = {\n 'X': np.random.random(self.x_shape).astype(\"float64\"),\n }\n if self.offset_by_input:\n self.inputs['Offsets'] = np.array(self.offsets).astype('int32')\n else:\n self.attrs['offsets'] = self.offsets\n\n crop_shape = [val for val in self.crop_shape]\n for i in range(len(self.crop_shape)):\n if self.crop_shape[i] == -1:\n crop_shape[i] = self.x_shape[i] - self.offsets[i]\n self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}\n\n def initTestCase(self):\n self.x_shape = (10, 10)\n self.crop_shape = [2, 2]\n self.offsets = [1, 2]\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad_normal(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestCase1(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (100)\n self.crop_shape = [64]\n self.offsets = [13]\n\n\nclass TestCase2(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (12, 24)\n self.crop_shape = [-1, 8]\n self.offsets = [0, 0]\n\n\nclass TestCase3(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (4, 8, 16)\n self.crop_shape = [2, 2, 3]\n self.offsets = [1, 5, 3]\n self.shape_by_input = True\n\n\nclass TestCase4(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (8, 3, 6, 6)\n self.crop_shape = [-1, 3, -1, 4]\n self.offsets = [0, 0, 1, 0]\n self.shape_by_input = True\n\n\nclass TestCase5(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (2, 4, 5, 8, 8)\n self.crop_shape = [1, 1, 2, 4, 4]\n self.offsets = [1, 0, 0, 2, 2]\n self.offset_by_input = True\n\n\nclass TestCase6(TestCropTensorOp):\n def initTestCase(self):\n self.x_shape = (2, 2, 4, 4, 4, 2)\n self.crop_shape = [1, 1, 4, 2, 2, 2]\n self.offsets = [0, 0, 0, 0, 0, 0]\n self.shape_by_input = True\n self.offset_by_input = True\n\n\nclass 
TestCropTensorOpTensorAttr(OpTest):\n def setUp(self):\n self.op_type = \"crop_tensor\"\n self.OffsetsTensor = False\n self.ShapeTensor = True\n self.attrs = {}\n self.initTestCase()\n\n if self.ShapeTensor:\n shape_tensor = []\n for index, ele in enumerate(self.crop_shape):\n shape_tensor.append((\"x\" + str(index), np.ones(\n (1)).astype('int32') * ele))\n self.inputs = {\n 'X': np.random.random(self.x_shape).astype(\"float64\"),\n 'ShapeTensor': shape_tensor\n }\n self.attrs['shape'] = self.shape_attr\n\n if self.OffsetsTensor:\n offsets_tensor = []\n for index, ele in enumerate(self.offsets):\n offsets_tensor.append((\"x\" + str(index), np.ones(\n (1)).astype('int32') * ele))\n self.inputs = {\n 'X': np.random.random(self.x_shape).astype(\"float64\"),\n 'OffsetsTensor': offsets_tensor\n }\n self.attrs['offsets'] = self.offsets_attr\n\n self.attrs['shape'] = self.crop_shape\n self.attrs['offsets'] = self.offsets\n crop_shape = [val for val in self.crop_shape]\n for i in range(len(self.crop_shape)):\n if self.crop_shape[i] == -1:\n crop_shape[i] = self.x_shape[i] - self.offsets[i]\n self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}\n\n def initTestCase(self):\n self.x_shape = (10, 10)\n self.crop_shape = (2, 2)\n self.offsets = [1, 2]\n self.shape_attr = [0, 0]\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad_normal(self):\n self.check_grad([\"X\"], \"Out\")\n\n\nclass TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):\n def initTestCase(self):\n self.x_shape = (16, 8, 32)\n self.crop_shape = [-1, -1, 3]\n self.offsets = [1, 5, 3]\n self.shape_attr = [-1, -1, 3]\n\n\nclass TestCropTensorOpTensorAttrCase2(TestCropTensorOpTensorAttr):\n def initTestCase(self):\n self.x_shape = (4, 8, 16, 8)\n self.crop_shape = [2, 2, 3, 4]\n self.offsets = [1, 5, 3, 0]\n self.shape_attr = [0, 0, 3, 4]\n\n\nclass TestCropTensorOpTensorAttrCase3(TestCropTensorOpTensorAttr):\n def initTestCase(self):\n self.x_shape = (16, 8, 32)\n self.crop_shape = [2, 2, 3]\n self.offsets = [1, 5, 3]\n self.offsets_attr = [-1, -1, 3]\n self.ShapeTensor = False\n self.OffsetsTensor = True\n\n\nclass TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr):\n def initTestCase(self):\n self.x_shape = (16, 8, 32)\n self.crop_shape = [2, 2, 3]\n self.shape_attr = [0, 2, 3]\n self.offsets = [1, 5, 3]\n self.offsets_attr = [-1, -1, 3]\n self.OffsetsTensor = True\n\n\nclass TestCropTensorException(unittest.TestCase):\n def test_exception(self):\n input1 = fluid.data(name=\"input1\", shape=[2, 3, 6, 6], dtype=\"float32\")\n input2 = fluid.data(name=\"input2\", shape=[2, 3, 6, 6], dtype=\"float16\")\n dim = fluid.data(name='dim', shape=[1], dtype='int32')\n offset = fluid.data(name='offset', shape=[1], dtype='int32')\n\n def attr_shape_type():\n out = fluid.layers.crop_tensor(input1, shape=3)\n\n def attr_shape_dtype():\n out = fluid.layers.crop_tensor(input1, shape=[2, 2.0, 3, 3])\n\n def attr_shape_value1():\n out = fluid.layers.crop_tensor(input1, shape=[2, -2, dim, 3])\n\n def attr_shape_value2():\n out = fluid.layers.crop_tensor(input1, shape=[2, 0, dim, 3])\n\n def attr_offsets_type():\n out = fluid.layers.crop_tensor(\n input1, shape=[2, 2, 3, 3], offsets=0)\n\n def attr_offsets_dtype():\n out = fluid.layers.crop_tensor(\n input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0])\n\n def attr_offsets_value():\n out = fluid.layers.crop_tensor(\n input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0])\n\n def input_dtype():\n out = fluid.layers.crop_tensor(input2, 
shape=[2, 2, 3, 3])\n\n self.assertRaises(TypeError, attr_shape_type)\n self.assertRaises(TypeError, attr_shape_dtype)\n self.assertRaises(ValueError, attr_shape_value1)\n self.assertRaises(ValueError, attr_shape_value2)\n self.assertRaises(TypeError, attr_offsets_type)\n self.assertRaises(TypeError, attr_offsets_dtype)\n self.assertRaises(ValueError, attr_offsets_value)\n self.assertRaises(TypeError, input_dtype)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport paddle\nfrom numpy.random import random as rand\nfrom paddle import tensor\nimport paddle.fluid as fluid\nimport paddle.fluid.dygraph as dg\n\n\nclass TestComplexTraceLayer(unittest.TestCase):\n def setUp(self):\n self._dtypes = [\"float32\", \"float64\"]\n self._places = [fluid.CPUPlace()]\n if fluid.core.is_compiled_with_cuda():\n self._places.append(fluid.CUDAPlace(0))\n\n def test_basic_api(self):\n for dtype in self._dtypes:\n input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(\n [2, 20, 2, 3]).astype(dtype)\n for place in self._places:\n with dg.guard(place):\n var_x = dg.to_variable(input)\n result = tensor.trace(\n var_x, offset=1, axis1=0, axis2=2).numpy()\n target = np.trace(input, offset=1, axis1=0, axis2=2)\n self.assertTrue(np.allclose(result, target))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nfrom functools import partial\nimport contextlib\nimport numpy as np\nimport random\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nimport paddle.fluid.framework as framework\nimport paddle.fluid.optimizer as optimizer\nimport paddle.fluid.regularizer as regularizer\nfrom paddle.fluid.backward import append_backward\n\n\nclass TestL2DecayRegularizer(unittest.TestCase):\n def test_l2decay_regularizer(self):\n paddle.enable_static()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n regularizer=regularizer.L2DecayRegularizer(0.5))\n self.assertTrue(mul_x.regularizer is not None)\n self.assertTrue(\n isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer))\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n count_ops = len(block.ops)\n optimizer = paddle.optimizer.Adam()\n params_grads = optimizer.append_regularization_ops(params_grads)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(block.ops), count_ops + 2)\n self.assertEqual(block.ops[-1].type, 'sum')\n self.assertEqual(block.ops[-2].type, 'scale')\n\n\nclass TestL1DecayRegularizer(unittest.TestCase):\n def test_l2decay_regularizer(self):\n paddle.enable_static()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n regularizer=regularizer.L1DecayRegularizer(0.5))\n self.assertTrue(mul_x.regularizer is not None)\n self.assertTrue(\n isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer))\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n count_ops = len(block.ops)\n optimizer = 
paddle.optimizer.Adam()\n params_grads = optimizer.append_regularization_ops(params_grads)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(block.ops), count_ops + 3)\n self.assertEqual(block.ops[-1].type, 'sum')\n self.assertEqual(block.ops[-2].type, 'scale')\n self.assertEqual(block.ops[-3].type, 'sign')\n\n\ndef bow_net(data,\n label,\n dict_dim,\n is_sparse=False,\n emb_dim=8,\n hid_dim=8,\n hid_dim2=6,\n class_dim=2):\n \"\"\"\n BOW net\n This model is from https://github.com/PaddlePaddle/models:\n fluid/PaddleNLP/text_classification/nets.py\n \"\"\"\n emb = fluid.layers.embedding(\n input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim])\n bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')\n bow_tanh = fluid.layers.tanh(bow)\n fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act=\"tanh\")\n fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act=\"tanh\")\n prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n return avg_cost\n\n\nclass TestRegularizer(unittest.TestCase):\n def setUp(self):\n self.word_len = 1500\n self.train_data = [[(random.sample(range(1000), 10), [0])]\n for _ in range(2)]\n\n def get_places(self):\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda():\n places.append(core.CUDAPlace(0))\n return places\n\n @contextlib.contextmanager\n def scope_prog_guard(self, main_prog, startup_prog):\n scope = fluid.core.Scope()\n with fluid.unique_name.guard():\n with fluid.scope_guard(scope):\n with fluid.program_guard(main_prog, startup_prog):\n yield\n\n def run_program(self, place, feed_list):\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=feed_list, place=place)\n exe.run(fluid.default_startup_program())\n\n main_prog = fluid.default_main_program()\n param_list = [var.name for var in main_prog.block(0).all_parameters()]\n\n param_sum = []\n for data in self.train_data:\n out = exe.run(main_prog,\n feed=feeder.feed(data),\n fetch_list=param_list)\n p_sum = 0\n for v in out:\n p_sum += np.sum(np.abs(v))\n param_sum.append(p_sum)\n return param_sum\n\n def check_l2decay_regularizer(self, place, model):\n paddle.seed(1)\n paddle.framework.random._manual_program_seed(1)\n main_prog = fluid.framework.Program()\n startup_prog = fluid.framework.Program()\n with self.scope_prog_guard(\n main_prog=main_prog, startup_prog=startup_prog):\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n avg_cost = model(data, label, self.word_len)\n\n optimizer = fluid.optimizer.Adagrad(\n learning_rate=0.1,\n regularization=fluid.regularizer.L2Decay(1.0))\n optimizer.minimize(avg_cost)\n param_sum = self.run_program(place, [data, label])\n return param_sum\n\n def check_l2decay(self, place, model):\n paddle.seed(1)\n paddle.framework.random._manual_program_seed(1)\n main_prog = fluid.framework.Program()\n startup_prog = fluid.framework.Program()\n\n with self.scope_prog_guard(\n main_prog=main_prog, startup_prog=startup_prog):\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n avg_cost_l2 = model(data, label, self.word_len)\n\n param_list = fluid.default_main_program().block(0).all_parameters()\n para_sum = []\n for para in param_list:\n para_mul = fluid.layers.square(x=para)\n 
para_sum.append(fluid.layers.reduce_sum(input=para_mul))\n avg_cost_l2 += fluid.layers.sums(para_sum) * .5\n\n optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)\n optimizer.minimize(avg_cost_l2)\n param_sum = self.run_program(place, [data, label])\n return param_sum\n\n def test_l2(self):\n for place in self.get_places():\n dense_sparse_p_sum = []\n for sparse in [True, False]:\n model = partial(bow_net, is_sparse=sparse)\n framework_l2 = self.check_l2decay_regularizer(place, model)\n l2 = self.check_l2decay(place, model)\n assert len(l2) == len(framework_l2)\n for i in range(len(l2)):\n assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)\n dense_sparse_p_sum.append(framework_l2)\n\n assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])\n for i in range(len(dense_sparse_p_sum[0])):\n assert np.isclose(\n a=dense_sparse_p_sum[0][i],\n b=dense_sparse_p_sum[1][i],\n rtol=5e-5)\n\n def test_repeated_regularization(self):\n l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)\n l2 = fluid.regularizer.L2Decay(regularization_coeff=0.01)\n fc_param_attr = fluid.ParamAttr(regularizer=l1)\n with fluid.program_guard(fluid.Program(), fluid.Program()):\n x = fluid.layers.uniform_random([2, 2, 3])\n out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)\n loss = fluid.layers.reduce_sum(out)\n sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)\n sgd.minimize(loss)\n with fluid.dygraph.guard():\n input = fluid.dygraph.to_variable(\n np.random.randn(3, 2).astype('float32'))\n paddle.seed(1)\n paddle.framework.random._manual_program_seed(1)\n\n linear1 = fluid.dygraph.Linear(\n 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr)\n linear2 = fluid.dygraph.Linear(\n 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr)\n\n loss1 = linear1(input)\n loss1.backward()\n # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr\n\n fluid.optimizer.SGD(parameter_list=linear1.parameters(),\n learning_rate=1e-2,\n regularization=l2).minimize(loss1)\n # only set l1 in fluid.ParamAttr\n loss2 = linear2(input)\n loss2.backward()\n fluid.optimizer.SGD(parameter_list=linear2.parameters(),\n learning_rate=1e-2).minimize(loss2)\n # they should both be applied by l1, and keep the same\n self.assertTrue(\n np.allclose(linear1.weight.numpy(), linear2.weight.numpy()),\n \"weight should use the regularization in fluid.ParamAttr!\")\n self.assertTrue(\n np.allclose(linear1.bias.numpy(), linear2.bias.numpy()),\n \"bias should use the regularization in fluid.ParamAttr!\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\n\nimport numpy as np\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\nimport paddle.fluid as fluid\nimport paddle\n\n\ndef create_selected_rows_and_tensor(scope, place, height, row_num,\n embedding_size):\n sr = scope.var(\"@selected_rows@\").get_selected_rows()\n tensor = scope.var(\"grad\").get_tensor()\n\n rows = np.random.random_integers(\n low=0, high=height - 1, size=[row_num, ]).astype('int64')\n sr_val = np.random.random(size=[row_num, embedding_size]).astype('float32')\n\n sr.set_height(height)\n sr.set_rows(rows)\n sr.get_tensor().set(sr_val, place)\n\n tensor_val = np.zeros(shape=[height, embedding_size], dtype='float32')\n for i in range(row_num):\n row = rows[i]\n tensor_val[row, :] = tensor_val[row, :] + sr_val[i, :]\n\n tensor.set(tensor_val, place)\n return tensor_val, sr_val\n\n\nclass TestBase(unittest.TestCase):\n def setup(self,\n place,\n is_sparse,\n centered,\n size,\n row_num=None,\n epsilon=1e-6):\n np.random.seed(5) # fix seed\n\n self.scope = fluid.global_scope()\n self.place = place\n\n self.param_name = \"param\"\n self.param = np.random.random(size).astype(\"float32\")\n\n self.mean_square_name = \"mean_square\"\n self.mean_square = np.random.uniform(\n low=1, high=2, size=size).astype(\"float32\")\n\n self.mean_grad_name = \"mean_grad\"\n self.mean_grad = np.random.random(size).astype(\"float32\")\n\n self.lr_name = \"lr\"\n self.learning_rate = np.array([0.01]).astype(\"float32\")\n\n self.grad_name = \"grad\"\n\n self.is_sparse = is_sparse\n if self.is_sparse:\n self.grad_sr_name = \"@selected_rows@\"\n self.grad, self.grad_sr = create_selected_rows_and_tensor(\n self.scope, place, size[0], row_num, size[1])\n else:\n self.grad = np.random.random(size).astype(\"float32\")\n grad_tensor = self.scope.var(self.grad_name).get_tensor()\n grad_tensor.set(self.grad, place)\n\n self.moment_name = \"moment\"\n self.moment = np.random.uniform(\n low=0, high=1, size=size).astype(\"float32\")\n\n self.epsilon = epsilon\n self.decay = 0.9\n self.momentum = 0.1\n self.centered = centered\n\n self.ms_out = self.decay * self.mean_square + (1 - self.decay\n ) * self.grad * self.grad\n if centered:\n self.mg_out = self.decay * self.mean_grad + (1 - self.decay\n ) * self.grad\n self.moment_out = self.momentum * self.moment + \\\n self.learning_rate * self.grad / np.sqrt(self.ms_out - np.square(self.mg_out) + self.epsilon)\n else:\n self.moment_out = self.momentum * self.moment + \\\n self.learning_rate * self.grad / np.sqrt(self.ms_out + self.epsilon)\n\n self.param_out = self.param - self.moment_out\n\n # create and initialize Param Variable\n self.param_tensor = self.scope.var(self.param_name).get_tensor()\n self.param_tensor.set(self.param, place)\n\n self.mean_square_tensor = self.scope.var(\n self.mean_square_name).get_tensor()\n self.mean_square_tensor.set(self.mean_square, 
place)\n\n lr = self.scope.var(self.lr_name).get_tensor()\n lr.set(self.learning_rate, place)\n\n self.moment_tensor = self.scope.var(self.moment_name).get_tensor()\n self.moment_tensor.set(self.moment, place)\n\n if self.centered:\n self.mean_grad_tensor = self.scope.var(\n self.mean_grad_name).get_tensor()\n self.mean_grad_tensor.set(self.mean_grad, place)\n\n def check(self, actual_t, expect_t, place, out_name, atol=1e-5):\n self.assertTrue(\n np.allclose(\n actual_t, expect_t, atol=atol),\n \"Output (\" + out_name + \") has diff at \" + str(place) + \"\\nExpect \"\n + str(expect_t) + \"\\n\" + \"But Got\" + str(actual_t))\n\n\nclass TestRmspropOp(TestBase):\n def check_with_place(self,\n place,\n is_sparse,\n centered,\n size,\n row_num=None,\n epsilon=1e-6):\n self.setup(place, is_sparse, centered, size, row_num, epsilon)\n self.run_and_check()\n\n def run_and_check(self):\n grad_name = self.grad_sr_name if self.is_sparse else self.grad_name\n\n kwargs = {\n 'Param': self.param_name,\n 'Grad': grad_name,\n 'MeanSquare': self.mean_square_name,\n 'Moment': self.moment_name,\n 'LearningRate': self.lr_name,\n 'ParamOut': self.param_name,\n 'MeanSquareOut': self.mean_square_name,\n 'MomentOut': self.moment_name,\n 'epsilon': self.epsilon,\n 'decay': self.decay,\n 'momentum': self.momentum,\n 'centered': self.centered\n }\n\n if self.centered:\n kwargs['MeanGrad'] = self.mean_grad_name\n kwargs['MeanGradOut'] = self.mean_grad_name\n\n rmsprop_op = Operator('rmsprop', **kwargs)\n atol = 1e-6\n\n rmsprop_op.run(self.scope, self.place)\n\n self.check(\n np.array(self.mean_square_tensor),\n self.ms_out,\n self.place,\n self.mean_square_name,\n atol=atol)\n self.check(\n np.array(self.moment_tensor),\n self.moment_out,\n self.place,\n self.moment_name,\n atol=atol)\n self.check(\n np.array(self.param_tensor),\n self.param_out,\n self.place,\n self.param_name,\n atol=atol)\n\n if self.centered:\n self.check(\n np.array(self.mean_grad_tensor), self.mg_out, self.place,\n self.mean_grad_name)\n\n def test_rmsprop(self):\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda():\n places.append(core.CUDAPlace(0))\n\n size = (128, 320)\n for place in places:\n for centered in [False, True]:\n with fluid.scope_guard(core.Scope()):\n self.check_with_place(\n place, is_sparse=False, centered=centered, size=size)\n\n with fluid.scope_guard(core.Scope()):\n self.check_with_place(\n place,\n is_sparse=True,\n centered=centered,\n row_num=512,\n size=size)\n\n with fluid.scope_guard(core.Scope()):\n self.check_with_place(\n place,\n is_sparse=True,\n centered=centered,\n row_num=60,\n size=size)\n\n\nclass TestRMSPropV2(unittest.TestCase):\n def test_rmsprop_dygraph(self):\n paddle.disable_static()\n value = np.arange(26).reshape(2, 13).astype(\"float32\")\n a = paddle.to_tensor(value)\n linear = paddle.nn.Linear(13, 5)\n # This can be any optimizer supported by dygraph.\n adam = paddle.optimizer.RMSProp(\n learning_rate=0.01,\n parameters=linear.parameters(),\n weight_decay=0.01)\n out = linear(a)\n out.backward()\n adam.step()\n adam.clear_gradients()\n\n def test_rmsprop(self):\n paddle.enable_static()\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n rms_optimizer = 
paddle.optimizer.RMSProp(learning_rate=0.1)\n rms_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n def test_raise_error(self):\n self.assertRaises(ValueError, paddle.optimizer.RMSProp, None)\n self.assertRaises(\n ValueError, paddle.optimizer.RMSProp, learning_rate=0.1, rho=None)\n self.assertRaises(\n ValueError,\n paddle.optimizer.RMSProp,\n learning_rate=0.1,\n epsilon=None)\n self.assertRaises(\n ValueError,\n paddle.optimizer.RMSProp,\n learning_rate=0.1,\n momentum=None)\n\n def test_rmsprop_op_invalid_input(self):\n paddle.disable_static()\n linear = paddle.nn.Linear(10, 10)\n with self.assertRaises(ValueError):\n adam = paddle.optimizer.RMSProp(\n 0.1, epsilon=-1, parameters=linear.parameters())\n with self.assertRaises(ValueError):\n adam = paddle.optimizer.RMSProp(\n 0.1, momentum=-1, parameters=linear.parameters())\n with self.assertRaises(ValueError):\n adam = paddle.optimizer.RMSProp(\n 0.1, rho=-1, parameters=linear.parameters())\n\n\nclass TestRMSPropV2Group(TestRMSPropV2):\n def test_rmsprop_dygraph(self):\n paddle.disable_static()\n value = np.arange(26).reshape(2, 13).astype(\"float32\")\n a = paddle.to_tensor(value)\n linear_1 = paddle.nn.Linear(13, 5)\n linear_2 = paddle.nn.Linear(5, 3)\n # This can be any optimizer supported by dygraph.\n adam = paddle.optimizer.RMSProp(\n learning_rate=0.01,\n parameters=[{\n 'params': linear_1.parameters()\n }, {\n 'params': linear_2.parameters(),\n 'weight_decay': 0.001\n }],\n weight_decay=0.01)\n out = linear_1(a)\n out = linear_2(out)\n out.backward()\n adam.step()\n adam.clear_gradients()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport numpy as np\n\nos.environ[str(\"FLAGS_check_nan_inf\")] = str(\"1\")\nos.environ[str(\"GLOG_vmodule\")] = str(\"nan_inf_utils_detail=10\")\n\nimport paddle\nimport paddle.nn as nn\n\nnp.random.seed(0)\n\n\ndef generator():\n batch_size = 5\n for i in range(5):\n curr_train_x = np.random.randint(\n batch_size, size=(batch_size, 3)).astype(\"float32\")\n if i >= 2:\n curr_train_x[0, :] = np.nan\n curr_train_x[-1, :] = np.inf\n res = []\n for i in range(batch_size):\n y = i % 3\n res.append([y])\n y_label = np.array(res).astype('int64')\n yield [curr_train_x, y_label]\n\n\nclass TestLayer(nn.Layer):\n def __init__(self):\n super(TestLayer, self).__init__()\n self.linear1 = nn.Linear(3, 400)\n self.linear2 = nn.Linear(400, 400)\n self.linear3 = nn.Linear(400, 3)\n\n def forward(self, x):\n x = self.linear1(x)\n x = nn.functional.sigmoid(x)\n x = self.linear2(x)\n x = nn.functional.sigmoid(x)\n x = self.linear3(x)\n x = nn.functional.softmax(x)\n\n return x\n\n\ndef check(use_cuda):\n paddle.set_device('gpu' if use_cuda else 'cpu')\n\n net = TestLayer()\n sgd = paddle.optimizer.SGD(learning_rate=0.05, parameters=net.parameters())\n\n for step, (x, y) in enumerate(generator()):\n x = paddle.to_tensor(x)\n y = paddle.to_tensor(y)\n\n zero = paddle.zeros(shape=[1], dtype='int64')\n fp16_zero = paddle.cast(zero, dtype='float16')\n\n y = y + zero\n\n y_pred = net(x)\n\n cost = nn.functional.cross_entropy(y_pred, y, use_softmax=False)\n avg_cost = paddle.mean(cost)\n\n acc_top1 = paddle.metric.accuracy(input=y_pred, label=y, k=1)\n\n print('iter={:.0f}, cost={}, acc1={}'.format(\n step, avg_cost.numpy(), acc_top1.numpy()))\n\n sgd.step()\n sgd.clear_grad()\n\n\nif __name__ == '__main__':\n if paddle.is_compiled_with_cuda():\n try:\n check(use_cuda=True)\n assert False\n except Exception as e:\n print(e)\n print(type(e))\n # Note. Enforce in cuda kernel may not catch in paddle, and\n # Exception type will be RuntimeError\n assert type(e) == OSError or type(e) == RuntimeError\n try:\n check(use_cuda=False)\n assert False\n except Exception as e:\n print(e)\n print(type(e))\n assert type(e) == RuntimeError\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport unittest\nimport numpy as np\nfrom op_test import OpTest, skip_check_grad_ci\nimport paddle.fluid as fluid\n\n\nclass TestElementwisePowOp(OpTest):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(1, 2, [20, 5]).astype(\"float64\"),\n 'Y': np.random.uniform(1, 2, [20, 5]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad_normal(self):\n self.check_grad(['X', 'Y'], 'Out')\n\n\nclass TestElementwisePowOp_big_shape_1(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(1, 2, [10, 10]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [10, 10]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\nclass TestElementwisePowOp_big_shape_2(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(1, 2, [10, 10]).astype(\"float64\"),\n 'Y': np.random.uniform(0.2, 2, [10, 10]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\n@skip_check_grad_ci(\n reason=\"[skip shape check] Use y_shape(1) to test broadcast.\")\nclass TestElementwisePowOp_scalar(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float64),\n 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64)\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\nclass TestElementwisePowOp_tensor(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [100]).astype(\"float64\"),\n 'Y': np.random.uniform(1, 3, [100]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\nclass TestElementwisePowOp_broadcast_0(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [100]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\nclass TestElementwisePowOp_broadcast_1(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [100]).astype(\"float64\")\n }\n self.attrs = {'axis': 1}\n self.outputs = {\n 'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))\n }\n\n\nclass TestElementwisePowOp_broadcast_2(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': 
np.random.uniform(0.1, 1, [100, 3, 1]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [100]).astype(\"float64\")\n }\n self.attrs = {'axis': 0}\n self.outputs = {\n 'Out':\n np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))\n }\n\n\nclass TestElementwisePowOp_broadcast_3(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [20, 5]).astype(\"float64\")\n }\n self.attrs = {'axis': 1}\n self.outputs = {\n 'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5,\n 1))\n }\n\n\nclass TestElementwisePowOp_broadcast_4(TestElementwisePowOp):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {\n 'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(\"float64\"),\n 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(\"float64\")\n }\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n\nclass TestElementwisePowOpInt(OpTest):\n def setUp(self):\n self.op_type = \"elementwise_pow\"\n self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])}\n self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestElementwisePowGradOpInt(unittest.TestCase):\n def setUp(self):\n self.x = np.asarray([1, 3, 6])\n self.y = np.asarray([1, 1, 1])\n self.res = self.x**self.y\n # dout = 1\n self.grad_res = np.asarray([1, 1, 1])\n # dx = dout * y * pow(x, y-1)\n self.grad_x = self.grad_res * self.y * (self.x\n **(self.y - 1)).astype(\"int\")\n # dy = dout * log(x) * pow(x, y)\n self.grad_y = (self.grad_res * np.log(self.x) *\n (self.x**self.y)).astype(\"int\")\n print(self.grad_res, self.grad_x, self.grad_y)\n\n def test_grad(self):\n places = [fluid.CPUPlace()]\n if fluid.is_compiled_with_cuda():\n places.append(fluid.CUDAPlace(0))\n for place in places:\n with fluid.dygraph.guard(place):\n x = fluid.dygraph.to_variable(self.x, zero_copy=False)\n y = fluid.dygraph.to_variable(self.y, zero_copy=False)\n print(x, y)\n x.stop_gradient = False\n y.stop_gradient = False\n res = x**y\n res.backward()\n self.assertTrue(np.array_equal(res.gradient(), self.grad_res))\n self.assertTrue(np.array_equal(x.gradient(), self.grad_x))\n self.assertTrue(np.array_equal(y.gradient(), self.grad_y))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport numpy as np\nimport unittest\n\n\nclass TestIsInteger(unittest.TestCase):\n def test_for_integer(self):\n x = paddle.arange(10)\n self.assertTrue(paddle.is_integer(x))\n\n def test_for_floating_point(self):\n x = paddle.randn([2, 3])\n self.assertFalse(paddle.is_integer(x))\n\n def test_for_complex(self):\n x = paddle.randn([2, 3]) + 1j * paddle.randn([2, 3])\n self.assertFalse(paddle.is_integer(x))\n\n def test_for_exception(self):\n with self.assertRaises(TypeError):\n paddle.is_integer(np.array([1, 2]))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\n\n\nclass TestWhereIndexOp(OpTest):\n def setUp(self):\n self.op_type = \"where_index\"\n self.init_config()\n\n def test_check_output(self):\n self.check_output()\n\n def init_config(self):\n self.inputs = {'Condition': np.array([True, False, True]), }\n\n self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}\n\n\nclass TestAllFalse(unittest.TestCase):\n def setUp(self):\n self.op_type = \"where_index\"\n self.init_config()\n\n def check_with_place(self, place):\n scope = core.Scope()\n condition = scope.var('Condition').get_tensor()\n condition.set(self.cond_data, place)\n\n out = scope.var(\"Out\").get_tensor()\n out.set(np.full(self.shape, 0).astype('int64'), place)\n\n op = Operator(\"where_index\", Condition=\"Condition\", Out=\"Out\")\n op.run(scope, place)\n\n out_array = np.array(out)\n self.assertTrue((out_array == self.out_data).all())\n\n def init_config(self):\n self.cond_data = np.array([False, False, False])\n self.shape = (3, 1)\n self.out_data = np.array([], dtype='int64')\n\n def test_all_false(self):\n self.check_with_place(core.CPUPlace())\n\n if core.is_compiled_with_cuda():\n self.check_with_place(core.CUDAPlace(0))\n\n\nclass TestRank2(TestWhereIndexOp):\n def init_config(self):\n self.inputs = {'Condition': np.array([[True, False], [False, True]]), }\n\n self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}\n\n\nclass TestRank3(TestWhereIndexOp):\n def init_config(self):\n self.inputs = {\n 'Condition': np.array([[[True, False], [False, True]],\n [[False, True], [True, False]],\n [[False, False], [False, True]]]),\n }\n\n self.outputs = {\n 'Out': np.array(\n [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],\n dtype='int64')\n }\n\n\nclass TestWhereOpError(unittest.TestCase):\n def test_api(self):\n with program_guard(Program(), Program()):\n cond = fluid.layers.data(name='cond', shape=[4], dtype='bool')\n result = fluid.layers.where(cond)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n cond_i = np.array([True, False, False, False]).astype(\"bool\")\n out = exe.run(fluid.default_main_program(), feed={'cond': cond_i})\n\n\nclass TestWhereRaiseError(unittest.TestCase):\n def test_errors(self):\n def test_type():\n fluid.layers.where([10])\n\n self.assertRaises(TypeError, test_type)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons\nfrom program_config import TensorConfig, ProgramConfig\nimport unittest\nimport numpy as np\nimport paddle.inference as paddle_infer\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\n\n\nclass TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n return True\n\n def sample_program_configs(self):\n def generate_input(shape):\n return np.random.random(shape).astype(np.float32)\n\n def generate_weight():\n return np.random.randn(32).astype(np.float32)\n\n for batch in [1, 2, 4]:\n for shape in [[32], [batch, 32], [batch, 32, 32],\n [batch, 32, 16, 32]]:\n for op_type in [\"elementwise_add\", \"elementwise_mul\"]:\n for axis in [len(shape) - 1, -1]:\n self.dims = len(shape)\n dics = [{\"axis\": axis}]\n ops_config = [{\n \"op_type\": op_type,\n \"op_inputs\": {\n \"X\": [\"input_data\"],\n \"Y\": [\"weight\"]\n },\n \"op_outputs\": {\n \"Out\": [\"output_data\"]\n },\n \"op_attrs\": dics[0]\n }]\n ops = self.generate_op_config(ops_config)\n\n program_config = ProgramConfig(\n ops=ops,\n weights={\n \"weight\":\n TensorConfig(data_gen=partial(generate_weight))\n },\n inputs={\n \"input_data\": TensorConfig(\n data_gen=partial(generate_input, shape)),\n },\n outputs=[\"output_data\"])\n\n yield program_config\n\n def sample_predictor_configs(\n self, program_config) -> (paddle_infer.Config, List[int], float):\n def generate_dynamic_shape(attrs):\n # The input.dims[1] must be equal to the weight's length.\n if self.dims == 1:\n self.dynamic_shape.min_input_shape = {\"input_data\": [4]}\n self.dynamic_shape.max_input_shape = {\"input_data\": [256]}\n self.dynamic_shape.opt_input_shape = {\"input_data\": [16]}\n elif self.dims == 2:\n self.dynamic_shape.min_input_shape = {\"input_data\": [1, 32]}\n self.dynamic_shape.max_input_shape = {\"input_data\": [4, 32]}\n self.dynamic_shape.opt_input_shape = {\"input_data\": [2, 32]}\n elif self.dims == 3:\n self.dynamic_shape.min_input_shape = {\"input_data\": [1, 32, 4]}\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [4, 32, 256]\n }\n self.dynamic_shape.opt_input_shape = {\"input_data\": [2, 32, 16]}\n elif self.dims == 4:\n self.dynamic_shape.min_input_shape = {\n \"input_data\": [1, 32, 4, 4]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [4, 32, 128, 256]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data\": [2, 32, 32, 16]\n }\n\n def clear_dynamic_shape():\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n def generate_trt_nodes_num(attrs, dynamic_shape):\n if self.dims == 1:\n return 0, 3\n return 1, 2\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n # for static_shape\n 
clear_dynamic_shape()\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), 1e-5\n\n # for dynamic_shape\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(attrs,\n True), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(attrs,\n True), 1e-5\n\n def add_skip_trt_case(self):\n def teller1(program_config, predictor_config):\n if self.dims == 2 and len(self.dynamic_shape.max_input_shape) == 0:\n return True\n return False\n\n self.add_skip_case(\n teller1, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output shape are not equal between gpu and tensorrt when input dim is 2.\"\n )\n\n def teller2(program_config, predictor_config):\n if self.dims == 3:\n return True\n return False\n\n self.add_skip_case(\n teller2, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output has diff between gpu and tensorrt when input dim is 3.\")\n\n def teller3(program_config, predictor_config):\n if self.dims == 4:\n return True\n return False\n\n self.add_skip_case(\n teller3, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output has diff between gpu and tensorrt when input dim is 4.\")\n\n def test(self):\n self.add_skip_trt_case()\n self.run_test()\n\n\nclass TrtConvertElementwiseTest_two_input_without_broadcast(\n TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n inputs = program_config.inputs\n if len(inputs['input_data1'].shape) == 1:\n return False\n\n return True\n\n def sample_program_configs(self):\n def generate_input(shape):\n return np.random.random(shape).astype(np.float32)\n\n for shape in [[4], [4, 32], [2, 64, 32], [1, 8, 16, 32]]:\n for op_type in [\"elementwise_add\", \"elementwise_mul\"]:\n for axis in [0, -1]:\n self.dims = len(shape)\n dics = [{\"axis\": axis}]\n ops_config = [{\n \"op_type\": op_type,\n \"op_inputs\": {\n \"X\": [\"input_data1\"],\n \"Y\": [\"input_data2\"]\n },\n \"op_outputs\": {\n \"Out\": [\"output_data\"]\n },\n \"op_attrs\": dics[0]\n }]\n ops = self.generate_op_config(ops_config)\n\n program_config = ProgramConfig(\n ops=ops,\n weights={},\n inputs={\n \"input_data1\": TensorConfig(\n data_gen=partial(generate_input, shape)),\n \"input_data2\": TensorConfig(\n data_gen=partial(generate_input, shape))\n },\n outputs=[\"output_data\"])\n\n yield program_config\n\n def sample_predictor_configs(\n self, program_config) -> (paddle_infer.Config, List[int], float):\n def generate_dynamic_shape(attrs):\n if self.dims == 1:\n self.dynamic_shape.min_input_shape = {\n \"input_data1\": [1],\n \"input_data2\": [1]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data1\": [128],\n \"input_data2\": [128]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data1\": [32],\n \"input_data2\": [32]\n }\n elif self.dims == 2:\n self.dynamic_shape.min_input_shape = {\n \"input_data1\": [1, 4],\n \"input_data2\": [1, 4]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data1\": [128, 256],\n \"input_data2\": [128, 256]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data1\": [32, 64],\n \"input_data2\": [32, 64]\n }\n elif self.dims == 3:\n self.dynamic_shape.min_input_shape = {\n \"input_data1\": [1, 4, 4],\n \"input_data2\": 
[1, 4, 4]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data1\": [128, 128, 256],\n \"input_data2\": [128, 128, 256]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data1\": [2, 64, 64],\n \"input_data2\": [2, 64, 64]\n }\n elif self.dims == 4:\n self.dynamic_shape.min_input_shape = {\n \"input_data1\": [1, 4, 4, 4],\n \"input_data2\": [1, 4, 4, 4]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data1\": [8, 128, 64, 128],\n \"input_data2\": [8, 128, 64, 128]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data1\": [2, 64, 32, 32],\n \"input_data2\": [2, 64, 32, 32]\n }\n\n def clear_dynamic_shape():\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n # for static_shape\n clear_dynamic_shape()\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), (1, 3), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), (1, 3), 1e-5\n\n # for dynamic_shape\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), (1, 3), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), (1, 3), 1e-5\n\n def add_skip_trt_case(self):\n def teller1(program_config, predictor_config):\n if self.dims == 2:\n return True\n return False\n\n self.add_skip_case(\n teller1, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output shape are not equal between gpu and tensorrt when input dim is 2.\"\n )\n\n def test(self):\n self.add_skip_trt_case()\n self.run_test()\n\n\nclass TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n inputs = program_config.inputs\n if len(inputs['input_data1'].shape) != len(inputs['input_data2'].shape):\n return False\n\n return True\n\n def sample_program_configs(self):\n def generate_input(shape):\n return np.random.random(shape).astype(np.float32)\n\n input1_shape_list = [[4, 32], [2, 4, 32], [4, 2, 4, 32]]\n input2_shape1_list = [[32], [4, 32], [2, 4, 32]]\n input2_shape2_list = [[4, 1], [2, 4, 1], [4, 2, 4, 1]]\n input2_shape3_list = [[32], [2, 1, 1], [4, 2, 1, 32]]\n input2_shape4_list = [[32], [4, 32], [4, 1, 4, 32]]\n input2_shape5_list = [[32], [2, 1, 32], [4, 1, 1, 32]]\n input2_shape6_list = [[1, 32], [1, 32], [1, 1, 1, 32]]\n input2_shape_list = [\n input2_shape1_list, input2_shape2_list, input2_shape3_list,\n input2_shape4_list, input2_shape5_list, input2_shape6_list\n ]\n axis1_list = [[-1], [1, -1], [1, -1]]\n axis2_list = [[-1], [0], [0]]\n axis3_list = [[-1], [0], [0]]\n axis4_list = [[-1], [-1], [0]]\n axis5_list = [[-1, 1], [-1, 0], [-1, 0]]\n axis6_list = [[-1, 0], [-1, 1], [-1, 0]]\n axis_list = [\n axis1_list, axis2_list, axis3_list, axis4_list, axis5_list,\n axis6_list\n ]\n\n for i in range(3):\n input1_shape = input1_shape_list[i]\n for j in range(6):\n input2_shape = input2_shape_list[j][i]\n for op_type in [\"elementwise_add\", \"elementwise_mul\"]:\n for axis in axis_list[j][i]:\n self.shape1 = input1_shape\n self.shape2 = input2_shape\n dics = [{\"axis\": axis}]\n ops_config = [{\n \"op_type\": op_type,\n \"op_inputs\": {\n \"X\": [\"input_data1\"],\n \"Y\": [\"input_data2\"]\n },\n \"op_outputs\": {\n \"Out\": [\"output_data\"]\n },\n \"op_attrs\": dics[0]\n }]\n 
ops = self.generate_op_config(ops_config)\n\n program_config = ProgramConfig(\n ops=ops,\n weights={},\n inputs={\n \"input_data1\": TensorConfig(data_gen=partial(\n generate_input, input1_shape)),\n \"input_data2\": TensorConfig(data_gen=partial(\n generate_input, input2_shape))\n },\n outputs=[\"output_data\"])\n\n yield program_config\n\n def sample_predictor_configs(\n self, program_config) -> (paddle_infer.Config, List[int], float):\n def generate_dynamic_shape(attrs):\n max_shape = [[128], [128, 128], [128, 128, 128],\n [128, 128, 128, 128]]\n min_shape = [[1], [1, 1], [1, 1, 1], [1, 1, 1, 1]]\n opt_shape = [[32], [32, 32], [32, 32, 32], [32, 32, 32, 32]]\n\n self.dynamic_shape.min_input_shape = {\n \"input_data1\": min_shape[len(self.shape1) - 1],\n \"input_data2\": min_shape[len(self.shape2) - 1]\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data1\": max_shape[len(self.shape1) - 1],\n \"input_data2\": max_shape[len(self.shape2) - 1]\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data1\": opt_shape[len(self.shape1) - 1],\n \"input_data2\": opt_shape[len(self.shape2) - 1]\n }\n\n def clear_dynamic_shape():\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n # for static_shape\n clear_dynamic_shape()\n if self.shape1[0] == self.shape2[0]:\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), (1, 3), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), (1, 3), 1e-5\n\n # for dynamic_shape\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), (1, 3), 1e-5\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), (1, 3), 1e-5\n\n def add_skip_trt_case(self):\n def teller1(program_config, predictor_config):\n if len(self.shape1) == 2:\n return True\n return False\n\n self.add_skip_case(\n teller1, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"The output shape are not equal between gpu and tensorrt when input dim is 2.\"\n )\n\n def test(self):\n self.add_skip_trt_case()\n self.run_test()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport numpy as np\n\nos.environ[str(\"FLAGS_check_nan_inf\")] = str(\"1\")\nos.environ[str(\"GLOG_vmodule\")] = str(\"nan_inf_utils_detail=10\")\n\nimport paddle.fluid.core as core\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.compat as cpt\n\npaddle.enable_static()\n\nnp.random.seed(0)\n\n\ndef generator():\n batch_size = 5\n for i in range(5):\n curr_train_x = np.random.randint(\n batch_size, size=(batch_size, 3)).astype(\"float32\")\n if i >= 2:\n curr_train_x[0, :] = np.nan\n curr_train_x[-1, :] = np.inf\n res = []\n for i in range(batch_size):\n y = i % 3\n res.append([y])\n y_label = np.array(res).astype('int64')\n yield [curr_train_x, y_label]\n\n\ndef net():\n x = fluid.layers.data(name=\"x\", shape=[3], dtype='float32')\n y = fluid.layers.data(name=\"y\", shape=[1], dtype='int64')\n\n # test int64 value\n zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)\n\n # test float16 value\n fp16_zero = fluid.layers.cast(zero, dtype='float16')\n\n y = y + zero\n\n hidden = x\n\n for i in range(2):\n hidden = fluid.layers.fc(input=hidden, size=400, act=\"sigmoid\")\n\n hidden = fluid.layers.fc(input=hidden, size=3, act=None)\n cost, y_predict = fluid.layers.softmax_with_cross_entropy(\n hidden, y, return_softmax=True)\n acc_top1 = fluid.layers.accuracy(input=y_predict, label=y, k=1)\n avg_cost = fluid.layers.mean(cost)\n\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.05)\n sgd_optimizer.minimize(avg_cost)\n return y_predict, avg_cost, acc_top1\n\n\ndef check(use_cuda):\n main = fluid.Program()\n startup = fluid.Program()\n scope = fluid.core.Scope()\n\n with fluid.scope_guard(scope):\n with fluid.program_guard(main, startup):\n y_predict, avg_cost, acc_top1 = net()\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup)\n\n step = 0.0\n for train_data, y_label in generator():\n outs = exe.run(\n main,\n feed={'x': train_data,\n 'y': y_label},\n fetch_list=[y_predict.name, avg_cost.name, acc_top1.name])\n step += 1\n print('iter={:.0f},cost={},acc1={}'.format(step, outs[1][0],\n outs[2][0]))\n\n\nif __name__ == '__main__':\n if core.is_compiled_with_cuda():\n try:\n check(use_cuda=True)\n assert False\n except Exception as e:\n print(e)\n print(type(e))\n # Note. Enforce in cuda kernel may not catch in paddle, and\n # Exception type will be RuntimeError\n assert type(e) == OSError or type(e) == RuntimeError\n try:\n check(use_cuda=False)\n assert False\n except Exception as e:\n print(e)\n print(type(e))\n assert type(e) == RuntimeError\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler, Program, program_guard\n\npaddle.enable_static()\n\n\ndef huber_loss_forward(val, delta):\n abs_val = abs(val)\n if abs_val <= delta:\n return 0.5 * val * val\n else:\n return delta * (abs_val - 0.5 * delta)\n\n\[email protected](not paddle.is_compiled_with_npu(),\n \"core is not compiled with NPU\")\nclass TestHuberLossOp(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = 'huber_loss'\n self.place = paddle.NPUPlace(0)\n\n self.init_dtype()\n\n self.set_inputs()\n self.set_attrs()\n self.set_outputs()\n\n def set_inputs(self):\n shape = self.set_shape()\n x = np.random.uniform(0, 1., shape).astype(self.dtype)\n y = np.random.uniform(0, 1., shape).astype(self.dtype)\n self.inputs = {\n 'X': OpTest.np_dtype_to_fluid_dtype(x),\n 'Y': OpTest.np_dtype_to_fluid_dtype(y)\n }\n\n def set_attrs(self):\n self.attrs = {'delta': 0.5}\n\n def set_outputs(self):\n delta = self.attrs['delta']\n shape = self.set_shape()\n residual = self.inputs['Y'] - self.inputs['X']\n loss = np.vectorize(huber_loss_forward)(residual,\n delta).astype(self.dtype)\n self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}\n\n def set_shape(self):\n return (100, 1)\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad_normal(self):\n if self.dtype == np.float16:\n return\n self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')\n\n def test_check_grad_ingore_x(self):\n if self.dtype == np.float16:\n return\n self.check_grad_with_place(\n self.place, ['Y'],\n 'Out',\n max_relative_error=0.008,\n no_grad_set=set(\"residual\"))\n\n def test_check_grad_ingore_y(self):\n if self.dtype == np.float16:\n return\n self.check_grad_with_place(\n self.place, ['X'],\n 'Out',\n max_relative_error=0.008,\n no_grad_set=set('residual'))\n\n\ndef TestHuberLossOp1(TestHuberLossOp):\n def set_shape(self):\n return (64)\n\n\ndef TestHuberLossOp2(TestHuberLossOp):\n def set_shape(self):\n return (6, 6)\n\n\ndef TestHuberLossOp3(TestHuberLossOp):\n def set_shape(self):\n return (6, 6, 1)\n\n\ndef TestHuberLossOpFP16(TestHuberLossOp):\n def init_dtype(self):\n self.dtype = np.float16\n\n\[email protected](not paddle.is_compiled_with_npu(),\n \"core is not compiled with NPU\")\nclass TestHuberLossOpError(unittest.TestCase):\n def test_errors(self):\n with program_guard(Program(), Program()):\n # the input and label must be Variable\n xw = np.random.random((6, 6)).astype(\"float32\")\n xr = fluid.data(name='xr', shape=[None, 6], dtype=\"float32\")\n lw = np.random.random((6, 6)).astype(\"float32\")\n lr = fluid.data(name='lr', shape=[None, 6], 
dtype=\"float32\")\n delta = 1.0\n self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw, delta)\n self.assertRaises(TypeError, fluid.layers.huber_loss, xw, lr, delta)\n\n # the dtype of input and label must be float32 or float64\n xw2 = fluid.data(name='xw2', shape=[None, 6], dtype=\"int32\")\n lw2 = fluid.data(name='lw2', shape=[None, 6], dtype=\"int32\")\n self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr,\n delta)\n self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2,\n delta)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nimport random\nimport numpy as np\nimport six\nimport paddle.fluid as fluid\nimport paddle\nimport warnings\nfrom paddle.fluid.framework import IrGraph\nfrom paddle.fluid.contrib.slim.quantization import QuantizationTransformPass\nfrom paddle.fluid.contrib.slim.quantization import QuantizationFreezePass\nfrom paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass\nfrom paddle.fluid.contrib.slim.quantization import OutScaleForInferencePass\nfrom paddle.fluid.contrib.slim.quantization import AddQuantDequantPass\nfrom paddle.fluid import (core, Program, Variable, program_guard, layers)\nfrom paddle.fluid.io import prepend_feed_ops, append_fetch_ops\nfrom inference_pass_test import InferencePassTest\nfrom paddle.fluid.core import create_paddle_predictor\nfrom paddle.fluid.core import AnalysisConfig\n\n\nclass QuantDequantTest(unittest.TestCase):\n def __init__(self, methodName='runTest'):\n super(QuantDequantTest, self).__init__(methodName)\n paddle.enable_static()\n self.main_program = fluid.Program()\n self.startup_program = fluid.Program()\n self.test_main_program = fluid.Program()\n self.test_startup_program = fluid.Program()\n self.feeds = None\n self.fetch_list = None\n self.enable_mkldnn = False\n self.enable_mkldnn_bfloat16 = False\n self.enable_trt = False\n self.enable_tensorrt_oss = True\n self.trt_parameters = None\n self.dynamic_shape_params = None\n self.enable_lite = False\n self.lite_parameters = None\n self.path = \"./inference_pass/\" + self.__class__.__name__ + \"/\"\n self.data = None\n self.label = None\n self.result = None\n np.random.seed(1)\n random.seed(1)\n\n # from Paddle release2.1 \n def _normalize_program(self, program, feed_vars, fetch_vars):\n if not isinstance(program, Program):\n raise TypeError(\n \"program type must be `fluid.Program`, but received `%s`\" %\n type(program))\n if not isinstance(feed_vars, list):\n feed_vars = [feed_vars]\n if not all(isinstance(v, Variable) for v in feed_vars):\n raise TypeError(\n \"feed_vars type must be a Variable or a list of Variable.\")\n if not isinstance(fetch_vars, list):\n fetch_vars = [fetch_vars]\n if not all(isinstance(v, Variable) for v in fetch_vars):\n raise TypeError(\n \"fetch_vars type must be a Variable or a list of Variable.\")\n\n # remind users to set auc_states to 0 if auc op were found.\n for op in program.global_block().ops:\n # clear device of Op\n device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(\n )\n op._set_attr(device_attr_name, \"\")\n if op.type == 'auc':\n warnings.warn(\"Be sure that you have set auc states to 0 \"\n \"before saving inference model.\")\n break\n\n # serialize program\n copy_program = program.clone()\n global_block = copy_program.global_block()\n remove_op_idx = []\n for i, op in enumerate(global_block.ops):\n op.desc.set_is_target(False)\n if op.type == \"feed\" or op.type == \"fetch\":\n 
remove_op_idx.append(i)\n for idx in remove_op_idx[::-1]:\n global_block._remove_op(idx)\n copy_program.desc.flush()\n\n feed_var_names = [var.name for var in feed_vars]\n copy_program = copy_program._prune_with_input(\n feeded_var_names=feed_var_names, targets=fetch_vars)\n copy_program = copy_program._inference_optimize(prune_read_op=True)\n fetch_var_names = [var.name for var in fetch_vars]\n prepend_feed_ops(copy_program, feed_var_names)\n append_fetch_ops(copy_program, fetch_var_names)\n copy_program.desc._set_version()\n return copy_program\n\n def _save_models(self, dirname, feeded_var_names, target_vars, executor,\n program, scope):\n with fluid.scope_guard(scope):\n fluid.io.save_inference_model(\n dirname,\n feeded_var_names,\n target_vars,\n executor,\n program,\n clip_extra=True)\n\n def _get_paddle_outs(self, feed, fetch_list, executor, program, scope):\n '''\n Return PaddlePaddle outputs. \n '''\n with fluid.scope_guard(scope):\n outs = executor.run(program=program,\n feed=feed,\n fetch_list=fetch_list,\n return_numpy=True)\n return outs\n\n def _get_inference_outs(self, config):\n '''\n Return AnalysisPredictor outputs. \n '''\n predictor = create_paddle_predictor(config)\n tensor_shapes = predictor.get_input_tensor_shape()\n names = predictor.get_input_names()\n for i, name in enumerate(names):\n shape = tensor_shapes[name]\n shape[0] = 1\n tensor = predictor.get_input_tensor(name)\n feed_data = list(self.feeds.values())[i]\n tensor.copy_from_cpu(np.array(feed_data))\n if type(feed_data) == fluid.LoDTensor:\n tensor.set_lod(feed_data.lod())\n\n predictor.zero_copy_run()\n\n output_names = predictor.get_output_names()\n outs = [\n predictor.get_output_tensor(out_name).copy_to_cpu()\n for out_name in output_names\n ]\n return outs\n\n def _get_analysis_config(self,\n use_gpu=False,\n use_trt=False,\n use_mkldnn=False):\n '''\n Return a new object of AnalysisConfig. \n '''\n config = AnalysisConfig(self.path)\n config.disable_gpu()\n config.switch_specify_input_names(True)\n config.switch_ir_optim(True)\n config.switch_use_feed_fetch_ops(False)\n if use_gpu:\n config.enable_use_gpu(100, 0)\n if use_trt:\n config.enable_tensorrt_engine(\n self.trt_parameters.workspace_size,\n self.trt_parameters.max_batch_size,\n self.trt_parameters.min_subgraph_size,\n self.trt_parameters.precision,\n self.trt_parameters.use_static,\n self.trt_parameters.use_calib_mode)\n\n if self.dynamic_shape_params:\n config.set_trt_dynamic_shape_info(\n self.dynamic_shape_params.min_input_shape,\n self.dynamic_shape_params.max_input_shape,\n self.dynamic_shape_params.optim_input_shape,\n self.dynamic_shape_params.disable_trt_plugin_fp16)\n if self.enable_tensorrt_oss:\n config.enable_tensorrt_oss()\n\n elif use_mkldnn:\n config.enable_mkldnn()\n if self.enable_mkldnn_bfloat16:\n config.enable_mkldnn_bfloat16()\n print('config summary:', config.summary())\n return config\n\n def check_output_with_option(self,\n use_gpu,\n atol=1e-5,\n flatten=False,\n quant=False,\n rtol=1e-5):\n '''\n Check whether calculating on CPU and GPU, enable TensorRT \n or disable TensorRT, enable MKLDNN or disable MKLDNN \n are all the same. 
\n '''\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n executor = fluid.Executor(place)\n scope = fluid.Scope()\n device = \"GPU\" if use_gpu else \"CPU\"\n\n with fluid.scope_guard(scope):\n executor.run(self.startup_program)\n executor.run(self.test_startup_program)\n main_graph = IrGraph(core.Graph(self.main_program.desc), for_test=False)\n test_graph = IrGraph(\n core.Graph(self.test_main_program.desc), for_test=True)\n\n transform_pass = QuantizationTransformPass(\n scope=scope,\n place=place,\n activation_quantize_type=self.activation_quantize_type,\n weight_quantize_type=self.weight_quantize_type)\n transform_pass.apply(main_graph)\n transform_pass.apply(test_graph)\n\n add_quant_dequant_pass = AddQuantDequantPass(scope=scope, place=place)\n add_quant_dequant_pass.apply(main_graph)\n add_quant_dequant_pass.apply(test_graph)\n\n scale_training_pass = OutScaleForTrainingPass(scope=scope, place=place)\n scale_training_pass.apply(main_graph)\n\n build_strategy = fluid.BuildStrategy()\n build_strategy.memory_optimize = False\n build_strategy.enable_inplace = False\n build_strategy.fuse_all_reduce_ops = False\n binary = fluid.CompiledProgram(main_graph.graph)\n\n iters = 10\n batch_size = 1\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.mnist.train(), buf_size=500),\n batch_size=batch_size)\n feeder = fluid.DataFeeder(\n feed_list=[self.data, self.label], place=place)\n with fluid.scope_guard(scope):\n for _ in range(iters):\n data = next(train_reader())\n loss_v = executor.run(binary,\n feed=feeder.feed(data),\n fetch_list=[self.loss])\n\n scale_inference_pass = OutScaleForInferencePass(scope=scope)\n scale_inference_pass.apply(test_graph)\n\n # Freeze graph for inference, but the weight of fc/conv is still float type.\n freeze_pass = QuantizationFreezePass(\n scope=scope,\n place=place,\n weight_quantize_type=self.weight_quantize_type)\n freeze_pass.apply(test_graph)\n\n self.main_program = test_graph.to_program()\n\n with fluid.scope_guard(scope):\n self.main_program = self._normalize_program(\n self.main_program, self.data, self.fetch_list)\n\n self._save_models(self.path,\n list(self.feeds.keys()), self.fetch_list, executor,\n self.main_program, scope)\n\n paddle_outs = self._get_paddle_outs(self.feeds, self.fetch_list,\n executor, self.main_program, scope)\n inference_outs = self._get_inference_outs(\n self._get_analysis_config(use_gpu=use_gpu))\n\n # Check whether the results calculated on CPU and on GPU are the same. \n self.assertTrue(\n len(paddle_outs) == len(inference_outs),\n \"The number of outputs is different between inference and training forward at {}\".\n format(device))\n\n for out, inference_out in zip(paddle_outs, inference_outs):\n paddle_out = np.array(out)\n\n if flatten:\n paddle_out = paddle_out.flatten()\n inference_out = inference_out.flatten()\n\n self.assertTrue(\n np.allclose(\n paddle_out, inference_out, atol=atol),\n \"Output has diff between inference and training forward at {} \".\n format(device))\n\n # Check whether the trt results and the GPU results are the same. 
\n if use_gpu and self.enable_trt:\n tensorrt_outputs = self._get_inference_outs(\n self._get_analysis_config(\n use_gpu=use_gpu, use_trt=self.enable_trt))\n\n if self.trt_parameters.use_static:\n #deserialize\n tensorrt_outputs = self._get_inference_outs(\n self._get_analysis_config(\n use_gpu=use_gpu, use_trt=self.enable_trt))\n\n self.assertTrue(\n len(tensorrt_outputs) == len(paddle_outs),\n \"The number of outputs is different between GPU and TensorRT. \")\n\n for paddle_out, tensorrt_output in zip(paddle_outs,\n tensorrt_outputs):\n paddle_out = np.array(paddle_out)\n\n if flatten:\n paddle_out = paddle_out.flatten()\n tensorrt_output = tensorrt_output.flatten()\n\n self.assertTrue(\n np.allclose(\n paddle_out, tensorrt_output, rtol=rtol, atol=atol),\n \"Output has diff between GPU and TensorRT. \")\n\n # Check whether the mkldnn results and the CPU results are the same. \n if (not use_gpu) and self.enable_mkldnn:\n mkldnn_outputs = self._get_inference_outs(\n self._get_analysis_config(\n use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn))\n\n self.assertTrue(\n len(paddle_outs) == len(mkldnn_outputs),\n \"The number of outputs is different between CPU and MKLDNN. \")\n\n if self.enable_mkldnn_bfloat16:\n atol = 0.01\n for paddle_out, mkldnn_output in zip(paddle_outs, mkldnn_outputs):\n self.assertTrue(\n np.allclose(\n np.array(paddle_out), mkldnn_output, atol=atol),\n \"Output has diff between CPU and MKLDNN. \")\n\n class TensorRTParam:\n '''\n Prepare TensorRT subgraph engine parameters. \n '''\n\n def __init__(self, workspace_size, max_batch_size, min_subgraph_size,\n precision, use_static, use_calib_mode):\n self.workspace_size = workspace_size\n self.max_batch_size = max_batch_size\n self.min_subgraph_size = min_subgraph_size\n self.precision = precision\n self.use_static = use_static\n self.use_calib_mode = use_calib_mode\n\n class DynamicShapeParam:\n '''\n Prepare TensorRT subgraph engine dynamic shape parameters. \n '''\n\n def __init__(self, min_input_shape, max_input_shape, optim_input_shape,\n disable_trt_plugin_fp16):\n self.min_input_shape = min_input_shape\n self.max_input_shape = max_input_shape\n self.optim_input_shape = optim_input_shape\n self.disable_trt_plugin_fp16 = disable_trt_plugin_fp16\n\n def quant_dequant(self):\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n scope = fluid.Scope()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport paddle.fluid as fluid\nimport transformer_model\nimport numpy as np\nfrom parallel_executor_test_base import TestParallelExecutorBase, DeviceType\nimport unittest\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.dataset.wmt16 as wmt16\nimport os\nfrom feed_data_reader import FeedDataReader\n\nos.environ['CPU_NUM'] = str(4)\n\n\nclass ModelHyperParams(object):\n # Dictionary size for source and target language. This model directly uses\n # paddle.dataset.wmt16 in which <bos>, <eos> and <unk> token has\n # alreay been added, but the <pad> token is not added. Transformer requires\n # sequences in a mini-batch are padded to have the same length. A <pad> token is\n # added into the original dictionary in paddle.dateset.wmt16.\n\n # size of source word dictionary.\n src_vocab_size = 10000\n # index for <pad> token in source language.\n src_pad_idx = src_vocab_size\n\n # size of target word dictionay\n trg_vocab_size = 10000\n # index for <pad> token in target language.\n trg_pad_idx = trg_vocab_size\n\n # position value corresponding to the <pad> token.\n pos_pad_idx = 0\n\n # max length of sequences. It should plus 1 to include position\n # padding token for position encoding.\n max_length = 50\n\n # the dimension for word embeddings, which is also the last dimension of\n # the input and output of multi-head attention, position-wise feed-forward\n # networks, encoder and decoder.\n\n d_model = 512\n # size of the hidden layer in position-wise feed-forward networks.\n d_inner_hid = 1024\n # the dimension that keys are projected to for dot-product attention.\n d_key = 64\n # the dimension that values are projected to for dot-product attention.\n d_value = 64\n # number of head used in multi-head attention.\n n_head = 8\n # number of sub-layers to be stacked in the encoder and decoder.\n # NOTE(zcd): the origin number of layer is 6, to make this unit test faster,\n # we should reduce the layer number to 4.\n n_layer = 4\n # dropout rate used by all dropout layers.\n dropout = 0.1\n\n\ndef prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias. 
Then, convert the numpy\n data to tensors and return a dict mapping names to tensors.\n \"\"\"\n\n def __pad_batch_data(insts,\n pad_idx,\n is_target=False,\n return_pos=True,\n return_attn_bias=True,\n return_max_len=True):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n return_list = []\n max_len = max(len(inst) for inst in insts)\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n if return_pos:\n inst_pos = np.array([[\n pos_i + 1 if w_i != pad_idx else 0\n for pos_i, w_i in enumerate(inst)\n ] for inst in inst_data])\n\n return_list += [inst_pos.astype(\"int64\").reshape([-1, 1])]\n if return_attn_bias:\n if is_target:\n # This is used to avoid attention on paddings and subsequent\n # words.\n slf_attn_bias_data = np.ones((inst_data.shape[0], max_len,\n max_len))\n slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape(\n [-1, 1, max_len, max_len])\n slf_attn_bias_data = np.tile(slf_attn_bias_data,\n [1, n_head, 1, 1]) * [-1e9]\n else:\n # This is used to avoid attention on paddings.\n slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *\n (max_len - len(inst))\n for inst in insts])\n slf_attn_bias_data = np.tile(\n slf_attn_bias_data.reshape([-1, 1, 1, max_len]),\n [1, n_head, max_len, 1])\n return_list += [slf_attn_bias_data.astype(\"float32\")]\n if return_max_len:\n return_list += [max_len]\n return return_list if len(return_list) > 1 else return_list[0]\n\n src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data(\n [inst[0] for inst in insts], src_pad_idx, is_target=False)\n trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data(\n [inst[1] for inst in insts], trg_pad_idx, is_target=True)\n trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],\n [1, 1, trg_max_len, 1]).astype(\"float32\")\n lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False,\n False, False, False)\n lbl_weight = (lbl_word != trg_pad_idx).astype(\"float32\").reshape([-1, 1])\n\n return [\n src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias,\n trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight\n ]\n\n\nfeed_data_reader = None\n\n\ndef transformer(use_feed):\n assert not use_feed, \"transfomer doesn't support feed yet\"\n return transformer_model.transformer(\n ModelHyperParams.src_vocab_size + 1,\n ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1,\n ModelHyperParams.n_layer, ModelHyperParams.n_head,\n ModelHyperParams.d_key, ModelHyperParams.d_value,\n ModelHyperParams.d_model, ModelHyperParams.d_inner_hid,\n ModelHyperParams.dropout, ModelHyperParams.src_pad_idx,\n ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx)\n\n\ndef get_feed_data_reader():\n global feed_data_reader\n if feed_data_reader is not None:\n return feed_data_reader\n\n reader = paddle.batch(\n wmt16.train(ModelHyperParams.src_vocab_size,\n ModelHyperParams.trg_vocab_size),\n batch_size=transformer_model.batch_size)\n all_batch_tensors = []\n for batch in reader():\n tensors = []\n for tensor in prepare_batch_input(batch, ModelHyperParams.src_pad_idx,\n ModelHyperParams.trg_pad_idx,\n ModelHyperParams.n_head):\n tensors.append(np.array(tensor))\n all_batch_tensors.append(tensors)\n\n def __reader__():\n for t in all_batch_tensors:\n yield t\n\n feed_data_reader = FeedDataReader(\n feed_list=transformer_model.build_inputs(\n 
ModelHyperParams.max_length + 1, ModelHyperParams.n_head),\n reader=__reader__)\n\n return feed_data_reader\n\n\nclass TestTransformer(TestParallelExecutorBase):\n def test_main(self):\n if core.is_compiled_with_cuda():\n self.check_network_convergence(\n transformer,\n use_device=DeviceType.CUDA,\n feed_data_reader=get_feed_data_reader())\n self.check_network_convergence(\n transformer,\n use_device=DeviceType.CUDA,\n enable_sequential_execution=True,\n feed_data_reader=get_feed_data_reader())\n self.check_network_convergence(\n transformer,\n use_device=DeviceType.CPU,\n iter=2,\n feed_data_reader=get_feed_data_reader())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport sys\nsys.path.append(\"../\")\nfrom op_test import OpTest\n\n\nclass TestSequenceScatterOp(OpTest):\n def init_lod(self):\n return [[30, 50, 40]]\n\n def setUp(self):\n self.op_type = \"sequence_scatter\"\n\n X_data = np.random.uniform(0.1, 1.0, [3, 6]).astype('float64')\n Ids_data = np.random.randint(0, 6, (120, 1)).astype('int64')\n Ids_lod = self.init_lod()\n\n Updates_data = np.random.uniform(0.1, 1.0, [120, 1]).astype('float64')\n Updates_lod = Ids_lod\n\n Out_data = np.copy(X_data)\n offset = 0\n for i in range(3):\n for j in range(Ids_lod[0][i]):\n Out_data[i][Ids_data[offset + j]] += Updates_data[offset + j]\n offset += Ids_lod[0][i]\n\n self.inputs = {\n 'X': X_data,\n 'Ids': (Ids_data, Ids_lod),\n 'Updates': (Updates_data, Updates_lod)\n }\n self.outputs = {'Out': Out_data}\n\n def test_check_output(self):\n self.check_output(check_dygraph=False)\n\n def test_check_grad(self):\n self.check_grad(['Updates'], 'Out', in_place=True, check_dygraph=False)\n\n\nclass TestSequenceScatterOpSeqLen0(TestSequenceScatterOp):\n def init_lod(self):\n return [[60, 60, 00]]\n\n\nclass TestSequenceScatterOpSeqLen0Case1(TestSequenceScatterOp):\n def init_lod(self):\n return [[0, 60, 60]]\n\n\nclass TestSequenceScatterOpSeqLen0Case2(TestSequenceScatterOp):\n def init_lod(self):\n return [[60, 0, 60]]\n\n\nclass TestSequenceScatterOpSeqLen0Case3(TestSequenceScatterOp):\n def init_lod(self):\n return [[120, 0, 0]]\n\n\nclass TestSequenceScatterOpSeqLen0Case4(TestSequenceScatterOp):\n def init_lod(self):\n return [[0, 120, 0]]\n\n\nclass TestSequenceScatterOpSeqLen0Case5(TestSequenceScatterOp):\n def init_lod(self):\n return [[0, 0, 120]]\n\n\n# run the uni tests\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.compiler as compiler\nimport paddle.optimizer\nimport paddle.static\nfrom paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,\n np_dtype_to_fluid_str)\n\npaddle.enable_static()\n\n\[email protected](not paddle.is_compiled_with_ipu(),\n \"core is not compiled with IPU\")\nclass TestBase(IPUOpTest):\n def setUp(self):\n self.set_atol()\n self.set_training()\n self.set_feed()\n self.set_feed_attr()\n self.set_attrs()\n\n def set_feed(self):\n self.feed = {\n \"x\": np.random.uniform(size=[1, 3, 10, 10]).astype('float32')\n }\n\n def set_feed_attr(self):\n self.feed_shape = [x.shape for x in self.feed.values()]\n self.feed_list = list(self.feed.keys())\n self.feed_dtype = [\n np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()\n ]\n\n def set_attrs(self):\n self.attrs = {\n \"dropout_prob\": 0.5,\n \"is_test\": True,\n \"dropout_implementation\": \"downgrade_in_infer\"\n }\n\n def _test_base(self, run_ipu=True):\n scope = fluid.core.Scope()\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n SEED = self.SEED\n main_prog.random_seed = SEED\n startup_prog.random_seed = SEED\n\n with fluid.scope_guard(scope):\n with paddle.static.program_guard(main_prog, startup_prog):\n x = paddle.static.data(\n name=self.feed_list[0],\n shape=self.feed_shape[0],\n dtype=self.feed_dtype[0])\n dropout = paddle.fluid.layers.dropout(x, **self.attrs)\n out = paddle.fluid.layers.elementwise_add(dropout, dropout)\n\n fetch_list = [out.name]\n\n if run_ipu:\n place = paddle.IPUPlace()\n else:\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(startup_prog)\n\n if run_ipu:\n feed_list = self.feed_list\n ipu_strategy = compiler.get_ipu_strategy()\n ipu_strategy.is_training = self.is_training\n program = compiler.IPUCompiledProgram(\n main_prog,\n ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)\n else:\n program = main_prog\n\n result = exe.run(program, feed=self.feed, fetch_list=fetch_list)\n return result[0]\n\n def test_base(self):\n res0 = self._test_base(True)\n res1 = self._test_base(False)\n\n self.assertTrue(\n np.allclose(\n res0.flatten(), res1.flatten(), atol=self.atol))\n\n self.assertTrue(res0.shape == res1.shape)\n\n\nclass TestCase1(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"dropout_prob\": 0.5,\n \"is_test\": True,\n \"dropout_implementation\": \"upscale_in_train\"\n }\n\n\nclass TestCase2(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"dropout_prob\": 0.0,\n \"is_test\": False,\n \"dropout_implementation\": \"upscale_in_train\"\n }\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport paddle.fluid as fluid\nimport paddle\nimport unittest\nimport numpy\n\nfrom paddle.fluid.framework import Program, program_guard\nfrom paddle.fluid.layers.control_flow import lod_rank_table\nfrom paddle.fluid.layers.control_flow import max_sequence_len\nfrom paddle.fluid.layers.control_flow import lod_tensor_to_array\nfrom paddle.fluid.layers.control_flow import array_to_lod_tensor\nfrom paddle.fluid.layers.control_flow import shrink_memory\nfrom fake_reader import fake_imdb_reader\n\nnumpy.random.seed(2020)\n\n\nclass TestDynamicRNN(unittest.TestCase):\n def setUp(self):\n self.word_dict_len = 5147\n self.BATCH_SIZE = 2\n reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)\n self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)\n\n def _train(self,\n main_program,\n startup_program,\n feed_list,\n fetch_list,\n is_nested=False,\n max_iters=1):\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_program)\n feeder = fluid.DataFeeder(feed_list=feed_list, place=place)\n data = next(self.train_data())\n\n for iter_id in range(max_iters):\n fetch_outs = exe.run(main_program,\n feed=feeder.feed(data),\n fetch_list=fetch_list,\n return_numpy=False)\n if len(fetch_list) == 3:\n rnn_in_seq = fetch_outs[0]\n rnn_out_seq = fetch_outs[1]\n if not is_nested:\n # Check for lod set in runtime. 
When lod_level is 1,\n # the lod of DynamicRNN's output should be the same as input.\n self.assertEqual(rnn_in_seq.lod(), rnn_out_seq.lod())\n\n loss_i = numpy.array(fetch_outs[2])\n elif len(fetch_list) == 1:\n loss_i = numpy.array(fetch_outs[0])\n #print(loss_i)\n\n self.assertEqual((1, ), loss_i.shape)\n self.assertFalse(numpy.isnan(loss_i))\n if iter_id == 0:\n loss_0 = loss_i\n\n if max_iters > 10:\n # loss should be small after 10 mini-batch\n self.assertLess(loss_i[0], loss_0[0])\n\n def test_plain_while_op(self):\n main_program = fluid.Program()\n startup_program = fluid.Program()\n\n with fluid.program_guard(main_program, startup_program):\n sentence = fluid.layers.data(\n name='word', shape=[1], dtype='int64', lod_level=1)\n sent_emb = fluid.layers.embedding(\n input=sentence, size=[self.word_dict_len, 32], dtype='float32')\n\n rank_table = lod_rank_table(x=sent_emb)\n sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)\n\n seq_len = max_sequence_len(rank_table=rank_table)\n i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)\n i.stop_gradient = False\n\n boot_mem = fluid.layers.fill_constant_batch_size_like(\n input=fluid.layers.array_read(\n array=sent_emb_array, i=i),\n value=0,\n shape=[-1, 100],\n dtype='float32')\n boot_mem.stop_gradient = False\n mem_array = fluid.layers.array_write(x=boot_mem, i=i)\n\n cond = fluid.layers.less_than(x=i, y=seq_len)\n cond.stop_gradient = False\n while_op = fluid.layers.While(cond=cond)\n out = fluid.layers.create_array(dtype='float32')\n\n with while_op.block():\n mem = fluid.layers.array_read(array=mem_array, i=i)\n ipt = fluid.layers.array_read(array=sent_emb_array, i=i)\n\n mem = shrink_memory(x=mem, i=i, table=rank_table)\n\n hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh')\n\n fluid.layers.array_write(x=hidden, i=i, array=out)\n fluid.layers.increment(x=i, in_place=True)\n fluid.layers.array_write(x=hidden, i=i, array=mem_array)\n fluid.layers.less_than(x=i, y=seq_len, cond=cond)\n\n result_all_timesteps = array_to_lod_tensor(x=out, table=rank_table)\n last = fluid.layers.sequence_last_step(input=result_all_timesteps)\n\n logits = fluid.layers.fc(input=last, size=1, act=None)\n label = fluid.layers.data(name='label', shape=[1], dtype='float32')\n loss = fluid.layers.sigmoid_cross_entropy_with_logits(\n x=logits, label=label)\n loss = fluid.layers.mean(loss)\n sgd = fluid.optimizer.SGD(1e-4)\n sgd.minimize(loss=loss)\n\n # Check for lod_level set in compile-time.\n self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level)\n\n self._train(\n main_program=main_program,\n startup_program=startup_program,\n feed_list=[sentence, label],\n fetch_list=[sent_emb, result_all_timesteps, loss],\n is_nested=False,\n max_iters=1)\n\n def test_train_dynamic_rnn(self):\n main_program = fluid.Program()\n startup_program = fluid.Program()\n main_program.random_seed = 10\n startup_program.random_seed = 10\n with fluid.program_guard(main_program, startup_program):\n sentence = fluid.layers.data(\n name='word', shape=[1], dtype='int64', lod_level=1)\n sent_emb = fluid.layers.embedding(\n input=sentence, size=[self.word_dict_len, 32], dtype='float32')\n\n drnn = fluid.layers.DynamicRNN()\n with drnn.block():\n in_ = drnn.step_input(sent_emb)\n mem = drnn.memory(shape=[100], dtype='float32')\n out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh')\n drnn.update_memory(mem, out_)\n drnn.output(out_)\n\n drnn_result = drnn()\n last = fluid.layers.sequence_last_step(input=drnn_result)\n logits = 
fluid.layers.fc(input=last, size=1, act=None)\n\n label = fluid.layers.data(name='label', shape=[1], dtype='float32')\n loss = fluid.layers.sigmoid_cross_entropy_with_logits(\n x=logits, label=label)\n loss = fluid.layers.mean(loss)\n sgd = fluid.optimizer.Adam(1e-3)\n sgd.minimize(loss=loss)\n\n # Check for lod_level set in compile-time.\n self.assertEqual(sent_emb.lod_level, drnn_result.lod_level)\n\n self._train(\n main_program=main_program,\n startup_program=startup_program,\n feed_list=[sentence, label],\n fetch_list=[sent_emb, drnn_result, loss],\n is_nested=False,\n max_iters=100)\n\n def _fake_reader(self):\n seq_len, label = [[2, 2]], [0, 1]\n data = []\n for ele in seq_len:\n for j in ele:\n data.append([numpy.random.randint(30) for _ in range(j)])\n\n while True:\n yield data, label\n\n # this unit test is just used to the two layer nested dyn_rnn.\n def test_train_nested_dynamic_rnn(self):\n word_dict = [i for i in range(30)]\n\n main_program = fluid.Program()\n startup_program = fluid.Program()\n main_program.random_seed = 10\n startup_program.random_seed = 10\n with fluid.program_guard(main_program, startup_program):\n sentence = fluid.layers.data(\n name='word', shape=[1], dtype='int64', lod_level=2)\n label = fluid.layers.data(\n name='label', shape=[1], dtype='float32', lod_level=1)\n\n drnn0 = fluid.layers.DynamicRNN()\n with drnn0.block():\n in_0 = drnn0.step_input(sentence)\n assert in_0.lod_level == 1, \"the lod level of in_ should be 1\"\n sentence_emb = fluid.layers.embedding(\n input=in_0, size=[len(word_dict), 32], dtype='float32')\n out_0 = fluid.layers.fc(input=sentence_emb,\n size=100,\n act='tanh')\n\n drnn1 = fluid.layers.DynamicRNN()\n with drnn1.block():\n in_1 = drnn1.step_input(out_0)\n assert in_1.lod_level == 0, \"the lod level of in_1 should be 0\"\n out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')\n drnn1.output(out_1)\n\n drnn1_result = drnn1()\n last_1 = fluid.layers.sequence_last_step(input=drnn1_result)\n drnn0.output(last_1)\n\n last = drnn0()\n logits = fluid.layers.fc(input=last, size=1, act=None)\n loss = fluid.layers.sigmoid_cross_entropy_with_logits(\n x=logits, label=label)\n loss = fluid.layers.mean(loss)\n sgd = fluid.optimizer.SGD(1e-3)\n sgd.minimize(loss=loss)\n\n train_data_orig = self.train_data\n self.train_data = paddle.batch(self._fake_reader, batch_size=2)\n self._train(\n main_program=main_program,\n startup_program=startup_program,\n feed_list=[sentence, label],\n fetch_list=[loss],\n is_nested=True,\n max_iters=100)\n self.train_data = train_data_orig\n\n # this unit test is just used to the two layer nested dyn_rnn.\n def test_train_nested_dynamic_rnn2(self):\n word_dict = [i for i in range(30)]\n\n hidden_size = 32\n main_program = fluid.Program()\n startup_program = fluid.Program()\n main_program.random_seed = 10\n startup_program.random_seed = 10\n with fluid.program_guard(main_program, startup_program):\n sentence = fluid.layers.data(\n name='word', shape=[1], dtype='int64', lod_level=2)\n label = fluid.layers.data(\n name='label', shape=[1], dtype='float32', lod_level=1)\n\n drnn0 = fluid.layers.DynamicRNN()\n with drnn0.block():\n in_0 = drnn0.step_input(sentence)\n sentence_emb = fluid.layers.embedding(\n input=in_0,\n size=[len(word_dict), hidden_size],\n dtype='float32')\n input_forward_proj = fluid.layers.fc(input=sentence_emb,\n size=hidden_size * 4,\n act=None,\n bias_attr=False)\n forward, _ = fluid.layers.dynamic_lstm(\n input=input_forward_proj,\n size=hidden_size * 4,\n use_peepholes=False)\n\n drnn1 
= fluid.layers.DynamicRNN()\n with drnn1.block():\n in_1 = drnn1.step_input(forward)\n out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')\n drnn1.output(out_1)\n\n last = fluid.layers.sequence_last_step(input=drnn1())\n drnn0.output(last)\n\n last = drnn0()\n logits = fluid.layers.fc(input=last, size=1, act=None)\n loss = fluid.layers.sigmoid_cross_entropy_with_logits(\n x=logits, label=label)\n loss = fluid.layers.mean(loss)\n sgd = fluid.optimizer.SGD(1e-3)\n sgd.minimize(loss=loss)\n\n train_data_orig = self.train_data\n self.train_data = paddle.batch(self._fake_reader, batch_size=2)\n self._train(\n main_program=main_program,\n startup_program=startup_program,\n feed_list=[sentence, label],\n fetch_list=[loss],\n is_nested=True,\n max_iters=100)\n self.train_data = train_data_orig\n\n\nclass TestDynamicRNNErrors(unittest.TestCase):\n def test_errors(self):\n with program_guard(Program(), Program()):\n init = fluid.layers.zeros(shape=[1], dtype='float32')\n shape = 'shape'\n sentence = fluid.data(\n name='sentence', shape=[None, 32], dtype='float32', lod_level=1)\n\n # The type of Input(shape) in API(memory) must be list or tuple\n def input_shape_type_of_memory():\n drnn = fluid.layers.DynamicRNN()\n with drnn.block():\n res = drnn.memory(init, shape)\n\n self.assertRaises(TypeError, input_shape_type_of_memory)\n\n # The type of element of Input(*outputs) in API(output) must be Variable.\n def outputs_type_of_output():\n drnn = fluid.layers.DynamicRNN()\n with drnn.block():\n word = drnn.step_input(sentence)\n memory = drnn.memory(shape=[10], dtype='float32', value=0)\n hidden = fluid.layers.fc(input=[word, memory],\n size=10,\n act='tanh')\n out = numpy.ones(1).astype('float32')\n drnn.update_memory(ex_mem=memory, new_mem=hidden)\n drnn.output(hidden, out)\n\n self.assertRaises(TypeError, outputs_type_of_output)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.stack"
],
[
"numpy.array",
"numpy.ones"
],
[
"numpy.random.randint"
],
[
"numpy.ndenumerate",
"numpy.vectorize",
"numpy.random.uniform",
"numpy.random.choice"
],
[
"numpy.array"
],
[
"numpy.random.random",
"numpy.allclose"
],
[
"numpy.random.rand",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.seed"
],
[
"numpy.allclose",
"numpy.random.seed",
"numpy.std",
"numpy.mean",
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.prod",
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.square",
"numpy.maximum",
"numpy.minimum",
"numpy.sqrt",
"numpy.clip",
"numpy.isnan",
"numpy.allclose",
"numpy.power",
"numpy.array",
"numpy.isclose"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.ones",
"numpy.copy",
"numpy.random.normal",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.random.random",
"numpy.random.seed"
],
[
"numpy.set_printoptions",
"numpy.ndarray",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.random",
"numpy.array_equal",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.random.random_sample",
"numpy.random.seed"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.ceil",
"numpy.max",
"numpy.floor",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.array"
],
[
"numpy.log",
"numpy.exp",
"numpy.random.random",
"numpy.random.randint"
],
[
"numpy.ones_like",
"numpy.random.random",
"numpy.random.random_sample",
"numpy.stack",
"numpy.random.rand",
"numpy.array"
],
[
"numpy.ones",
"numpy.append",
"numpy.array",
"numpy.exp",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.random.random"
],
[
"numpy.dot",
"numpy.split",
"numpy.maximum",
"numpy.random.random",
"numpy.reshape",
"numpy.tile",
"numpy.copy",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.random.uniform",
"numpy.array",
"numpy.exp",
"numpy.flip",
"numpy.zeros"
],
[
"numpy.random.uniform"
],
[
"numpy.array",
"numpy.random.random",
"numpy.ones"
],
[
"numpy.random.random",
"numpy.trace",
"numpy.allclose"
],
[
"numpy.random.randn",
"numpy.abs",
"numpy.isclose"
],
[
"numpy.square",
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed",
"numpy.sqrt",
"numpy.arange",
"numpy.random.random_integers",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.asarray",
"numpy.random.uniform",
"numpy.log",
"numpy.power"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.full"
],
[
"numpy.random.randn",
"numpy.random.random"
],
[
"numpy.array",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.uniform",
"numpy.vectorize",
"numpy.random.random"
],
[
"numpy.array",
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.triu",
"numpy.array",
"numpy.tile",
"numpy.ones"
],
[
"numpy.random.uniform",
"numpy.copy",
"numpy.random.randint"
],
[
"numpy.random.uniform"
],
[
"numpy.random.seed",
"numpy.isnan",
"numpy.ones",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Polas/omim | [
"03558b418b338f506fbf3aa72ddf15187a2005ee"
] | [
"search/search_quality/scoring_model.py"
] | [
"#!/usr/bin/env python3\n\nfrom math import exp, log\nfrom scipy.stats import pearsonr, t\nfrom sklearn import svm\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.utils import resample\nimport argparse\nimport collections\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport random\nimport sys\n\n\nMAX_DISTANCE_METERS = 2e6\nMAX_RANK = 255.0\nMAX_POPULARITY = 255.0\nRELEVANCES = {'Harmful': -3, 'Irrelevant': 0, 'Relevant': 1, 'Vital': 3}\nNAME_SCORES = ['Zero', 'Substring', 'Prefix', 'Full Match']\nSEARCH_TYPES = ['POI', 'Building', 'Street', 'Unclassified', 'Village', 'City', 'State', 'Country']\nFEATURES = ['DistanceToPivot', 'Rank', 'Popularity', 'Rating', 'FalseCats', 'ErrorsMade', 'MatchedFraction',\n 'AllTokensUsed', 'ExactCountryOrCapital'] + NAME_SCORES + SEARCH_TYPES\n\nBOOTSTRAP_ITERATIONS = 10000\n\n\ndef transform_name_score(value, categories_match):\n if categories_match == 1:\n return 'Zero'\n else:\n return value\n\n\ndef normalize_data(data):\n transform_distance = lambda v: min(v, MAX_DISTANCE_METERS) / MAX_DISTANCE_METERS\n\n data['DistanceToPivot'] = data['DistanceToPivot'].apply(transform_distance)\n data['Rank'] = data['Rank'].apply(lambda v: v / MAX_RANK)\n data['Popularity'] = data['Popularity'].apply(lambda v: v / MAX_POPULARITY)\n data['Relevance'] = data['Relevance'].apply(lambda v: RELEVANCES[v])\n\n cats = data['PureCats'].combine(data['FalseCats'], max)\n\n # TODO (@y, @m): do forward/backward/subset selection of features\n # instead of this merging. It would be great to conduct PCA on\n # the features too.\n data['NameScore'] = data['NameScore'].combine(cats, transform_name_score)\n\n # Adds dummy variables to data for NAME_SCORES.\n for ns in NAME_SCORES:\n data[ns] = data['NameScore'].apply(lambda v: int(ns == v))\n\n # Adds dummy variables to data for SEARCH_TYPES.\n\n # We unify BUILDING with POI here, as we don't have enough\n # training data to distinguish between them. Remove following\n # line as soon as the model will be changed or we will have enough\n # training data.\n data['SearchType'] = data['SearchType'].apply(lambda v: v if v != 'Building' else 'POI')\n for st in SEARCH_TYPES:\n data[st] = data['SearchType'].apply(lambda v: int(st == v))\n\n\ndef compute_ndcg(relevances):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n array of scores.\n \"\"\"\n\n dcg = sum(r / log(2 + i, 2) for i, r in enumerate(relevances))\n dcg_norm = sum(r / log(2 + i, 2) for i, r in enumerate(sorted(relevances, reverse=True)))\n return dcg / dcg_norm if dcg_norm != 0 else 0\n\n\ndef compute_ndcgs_without_ws(data):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n data. Returns an array of ndcg scores in the shape [num groups of\n features].\n \"\"\"\n\n grouped = data.groupby(data['SampleId'], sort=False).groups\n\n ndcgs = []\n for id in grouped:\n indices = grouped[id]\n relevances = np.array(data.ix[indices]['Relevance'])\n ndcgs.append(compute_ndcg(relevances))\n\n return ndcgs\n\n\ndef compute_ndcgs_for_ws(data, ws):\n \"\"\"\n Computes NDCG (Normalized Discounted Cumulative Gain) for a given\n data and an array of coeffs in a linear model. 
Returns an array of\n ndcg scores in the shape [num groups of features].\n \"\"\"\n\n data_scores = np.array([np.dot(data.ix[i][FEATURES], ws) for i in data.index])\n grouped = data.groupby(data['SampleId'], sort=False).groups\n\n ndcgs = []\n for id in grouped:\n indices = grouped[id]\n\n relevances = np.array(data.ix[indices]['Relevance'])\n scores = data_scores[indices]\n\n # Reoders relevances in accordance with decreasing scores.\n relevances = relevances[scores.argsort()[::-1]]\n ndcgs.append(compute_ndcg(relevances))\n\n return ndcgs\n\n\ndef transform_data(data):\n \"\"\"\n By a given data computes x and y that can be used as an input to a\n linear SVM.\n \"\"\"\n\n grouped = data.groupby(data['SampleId'], sort=False)\n\n xs, ys = [], []\n\n # k is used to create a balanced samples set for better linear\n # separation.\n k = 1\n for _, group in grouped:\n features, relevances = group[FEATURES], group['Relevance']\n\n n, total = len(group), 0\n for _, (i, j) in enumerate(itertools.combinations(range(n), 2)):\n dr = relevances.iloc[j] - relevances.iloc[i]\n y = np.sign(dr)\n if y == 0:\n continue\n\n x = np.array(features.iloc[j]) - np.array(features.iloc[i])\n\n # Need to multiply x by average drop in NDCG when i-th and\n # j-th are exchanged.\n x *= abs(dr * (1 / log(j + 2, 2) - 1 / log(i + 2, 2)))\n\n # This is needed to prevent disbalance in classes sizes.\n if y != k:\n x = np.negative(x)\n y = -y\n\n xs.append(x)\n ys.append(y)\n total += 1\n k = -k\n\n # Scales this group of features to equalize different search\n # queries.\n for i in range(-1, -total, -1):\n xs[i] = xs[i] / total\n return xs, ys\n\n\ndef show_pearson_statistics(xs, ys, features):\n \"\"\"\n Shows info about Pearson coefficient between features and\n relevancy.\n \"\"\"\n\n print('***** Correlation table *****')\n print('H0 - feature not is correlated with relevancy')\n print('H1 - feature is correlated with relevancy')\n print()\n\n cs, ncs = [], []\n for i, f in enumerate(features):\n zs = [x[i] for x in xs]\n (c, p) = pearsonr(zs, ys)\n\n correlated = p < 0.05\n print('{}: pearson={:.3f}, P(H1)={}'.format(f, c, 1 - p))\n if correlated:\n cs.append(f)\n else:\n ncs.append(f)\n\n print()\n print('Correlated:', cs)\n print('Non-correlated:', ncs)\n\n\ndef raw_output(features, ws):\n \"\"\"\n Prints feature-coeff pairs to the standard output.\n \"\"\"\n\n print('{:<20}{}'.format('Feature', 'Value'))\n print()\n for f, w in zip(features, ws):\n print('{:<20}{:.5f}'.format(f, w))\n\n\ndef print_const(name, value):\n print('double constexpr k{} = {:.7f};'.format(name, value))\n\n\ndef print_array(name, size, values):\n print('double constexpr {}[{}] = {{'.format(name, size))\n print(',\\n'.join(' {:.7f} /* {} */'.format(w, f) for (f, w) in values))\n print('};')\n\ndef cpp_output(features, ws):\n \"\"\"\n Prints feature-coeff pairs in the C++-compatible format.\n \"\"\"\n\n ns, st = [], []\n\n for f, w in zip(features, ws):\n if f in NAME_SCORES:\n ns.append((f, w))\n elif f in SEARCH_TYPES:\n st.append((f, w))\n else:\n print_const(f, w)\n print_array('kNameScore', 'NameScore::NAME_SCORE_COUNT', ns)\n print_array('kType', 'Model::TYPE_COUNT', st)\n\n\ndef show_bootstrap_statistics(clf, X, y, features):\n num_features = len(features)\n\n coefs = []\n for i in range(num_features):\n coefs.append([])\n\n for _ in range(BOOTSTRAP_ITERATIONS):\n X_sample, y_sample = resample(X, y)\n clf.fit(X_sample, y_sample)\n for i, c in enumerate(get_normalized_coefs(clf)):\n coefs[i].append(c)\n\n poi_index = 
features.index('POI')\n building_index = features.index('Building')\n coefs[building_index] = coefs[poi_index]\n\n intervals = []\n\n print()\n print('***** Bootstrap statistics *****')\n print('{:<20}{:<20}{:<10}{:<10}'.format('Feature', '95% interval', 't-value', 'Pr(>|t|)'))\n print()\n for i, cs in enumerate(coefs):\n values = np.array(cs)\n lo = np.percentile(values, 2.5)\n hi = np.percentile(values, 97.5)\n interval = '({:.3f}, {:.3f})'.format(lo, hi)\n tv = np.mean(values) / np.std(values)\n pr = (1.0 - t.cdf(x=abs(tv), df=len(values))) * 0.5\n\n stv = '{:.3f}'.format(tv)\n spr = '{:.3f}'.format(pr)\n print('{:<20}{:<20}{:<10}{:<10}'.format(features[i], interval, stv, spr))\n\n\ndef get_normalized_coefs(clf):\n ws = clf.coef_[0]\n max_w = max(abs(w) for w in ws)\n return np.divide(ws, max_w)\n\n\ndef main(args):\n data = pd.read_csv(sys.stdin)\n\n # Drop categorial requests cause we use different ranking model for them.\n data.drop(data[data['IsCategorialRequest'] == 1].index, inplace=True)\n data.reset_index(inplace=True, drop=True)\n data.drop(columns=['IsCategorialRequest', 'HasName'], inplace=True)\n\n normalize_data(data)\n\n ndcgs = compute_ndcgs_without_ws(data);\n print('Current NDCG: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))\n print()\n\n xs, ys = transform_data(data)\n\n clf = svm.LinearSVC(random_state=args.seed)\n cv = KFold(n_splits=5, shuffle=True, random_state=args.seed)\n\n # \"C\" stands for the regularizer constant.\n grid = {'C': np.power(10.0, np.arange(-5, 6))}\n gs = GridSearchCV(clf, grid, scoring='roc_auc', cv=cv)\n gs.fit(xs, ys)\n\n print('Best params: {}'.format(gs.best_params_))\n\n ws = get_normalized_coefs(gs.best_estimator_)\n\n # Following code restores coeffs for merged features.\n ws[FEATURES.index('Building')] = ws[FEATURES.index('POI')]\n\n ndcgs = compute_ndcgs_for_ws(data, ws)\n\n print('NDCG mean: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))\n print('ROC AUC: {:.3f}'.format(gs.best_score_))\n\n if args.pearson:\n print()\n show_pearson_statistics(xs, ys, FEATURES)\n\n print()\n print('***** Linear model weights *****')\n if args.cpp:\n cpp_output(FEATURES, ws)\n else:\n raw_output(FEATURES, ws)\n\n if args.bootstrap:\n show_bootstrap_statistics(clf, xs, ys, FEATURES)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', help='random seed', type=int)\n parser.add_argument('--pearson', help='show pearson statistics', action='store_true')\n parser.add_argument('--cpp', help='generate output in the C++ format', action='store_true')\n parser.add_argument('--bootstrap', help='show bootstrap confidence intervals', action='store_true')\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.dot",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"numpy.arange",
"scipy.stats.pearsonr",
"sklearn.model_selection.KFold",
"numpy.percentile",
"numpy.sign",
"numpy.std",
"numpy.mean",
"sklearn.svm.LinearSVC",
"sklearn.utils.resample",
"numpy.negative",
"numpy.array",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
meramossepu1/groundmotion-processing | [
"5cc19023b94e5b5b718590ce8cd05a22a4088a67",
"5cc19023b94e5b5b718590ce8cd05a22a4088a67",
"5cc19023b94e5b5b718590ce8cd05a22a4088a67",
"5cc19023b94e5b5b718590ce8cd05a22a4088a67"
] | [
"tests/gmprocess/metrics/imt/fas_arithmetic_mean_test.py",
"gmprocess/core/stationtrace.py",
"gmprocess/utils/base_utils.py",
"gmprocess/io/nsmn/turkey_fetcher.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# stdlib imports\nimport os.path\nimport re\n\n# third party imports\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\n\n# Local imports\nfrom gmprocess.metrics.station_summary import StationSummary\nfrom gmprocess.core.stationstream import StationStream\nfrom gmprocess.core.stationtrace import StationTrace\n\n\ndef test_fas():\n \"\"\"\n Testing based upon the work provided in\n https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb\n \"\"\"\n ddir = os.path.join(\"data\", \"testdata\")\n datadir = pkg_resources.resource_filename(\"gmprocess\", ddir)\n\n \"\"\"\n Note: the testing data in the fas_*_.pkl files now uses the convention\n of using the next power of 2 for the number of FFT points. The original\n files based on thee Jupyter notebook above just used the length of the\n traces for the FFT.\n \"\"\"\n\n fas_file = os.path.join(datadir, \"fas_arithmetic_mean.pkl\")\n p1 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL067.AT2\")\n p2 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL337.AT2\")\n\n stream = StationStream([])\n for idx, fpath in enumerate([p1, p2]):\n with open(fpath, encoding=\"utf-8\") as file_obj:\n for _ in range(3):\n next(file_obj)\n meta = re.findall(r\"[.0-9]+\", next(file_obj))\n dt = float(meta[1])\n accels = np.array(\n [col for line in file_obj for col in line.split()], dtype=float\n )\n trace = StationTrace(\n data=accels,\n header={\n \"channel\": \"H\" + str(idx),\n \"delta\": dt,\n \"units\": \"acc\",\n \"standard\": {\n \"corner_frequency\": np.nan,\n \"station_name\": \"\",\n \"source\": \"json\",\n \"instrument\": \"\",\n \"instrument_period\": np.nan,\n \"source_format\": \"json\",\n \"comments\": \"\",\n \"structure_type\": \"\",\n \"sensor_serial_number\": \"\",\n \"source_file\": \"\",\n \"process_level\": \"raw counts\",\n \"process_time\": \"\",\n \"horizontal_orientation\": np.nan,\n \"vertical_orientation\": np.nan,\n \"units\": \"acc\",\n \"units_type\": \"acc\",\n \"instrument_sensitivity\": np.nan,\n \"instrument_damping\": np.nan,\n },\n },\n )\n stream.append(trace)\n\n for tr in stream:\n response = {\"input_units\": \"counts\", \"output_units\": \"cm/s^2\"}\n tr.setProvenance(\"remove_response\", response)\n\n target_df = pd.read_pickle(fas_file)\n ind_vals = target_df.index.values\n per = np.unique([float(i[0].split(\")\")[0].split(\"(\")[1]) for i in ind_vals])\n freqs = 1 / per\n imts = [\"fas\" + str(p) for p in per]\n summary = StationSummary.from_stream(\n stream, [\"arithmetic_mean\"], imts, bandwidth=30\n )\n\n pgms = summary.pgms\n # pgms.to_pickle(fas_file)\n for idx, f in enumerate(freqs):\n fstr = \"FAS(%.3f)\" % (1 / f)\n fval1 = pgms.loc[fstr, \"ARITHMETIC_MEAN\"].Result\n fval2 = target_df.loc[fstr, \"ARITHMETIC_MEAN\"].Result\n np.testing.assert_allclose(fval1, fval2, rtol=1e-5, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n test_fas()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# stdlib imports\nimport json\nimport copy\nimport logging\nfrom datetime import datetime\nimport getpass\nimport re\nimport inspect\n\n# third party imports\nimport numpy as np\nfrom obspy.core.trace import Trace\nimport prov\nimport prov.model\nfrom obspy.core.utcdatetime import UTCDateTime\nimport pandas as pd\n\n# local imports\nfrom gmprocess.utils.config import get_config\nfrom gmprocess.io.seedname import get_units_type\n\nUNITS = {\"acc\": \"cm/s^2\", \"vel\": \"cm/s\"}\nREVERSE_UNITS = {\"cm/s^2\": \"acc\", \"cm/s\": \"vel\"}\n\nPROCESS_LEVELS = {\n \"V0\": \"raw counts\",\n \"V1\": \"uncorrected physical units\",\n \"V2\": \"corrected physical units\",\n \"V3\": \"derived time series\",\n}\n\nREV_PROCESS_LEVELS = {\n \"raw counts\": \"V0\",\n \"uncorrected physical units\": \"V1\",\n \"corrected physical units\": \"V2\",\n \"derived time series\": \"V3\",\n}\n\nLENGTH_CONVERSIONS = {\"nm\": 1e9, \"um\": 1e6, \"mm\": 1e3, \"cm\": 1e2, \"m\": 1}\n\n# when checking to see if a channel is vertical,\n# 90 - abs(dip) must be less than or equal to this value\n# (i.e., dip must ne close to )\nMAX_DIP_OFFSET = 0.1\n\n# NOTE: if required is True then this means that the value must be\n# filled in with a value that does NOT match the default.\nSTANDARD_KEYS = {\n \"source_file\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"source\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"horizontal_orientation\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"vertical_orientation\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"station_name\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"instrument_period\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"instrument_damping\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"process_time\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"process_level\": {\n \"type\": str,\n \"required\": True,\n \"default\": list(PROCESS_LEVELS.values()),\n },\n \"sensor_serial_number\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"instrument\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"structure_type\": {\"type\": str, \"required\": False, \"default\": \"\"},\n \"corner_frequency\": {\"type\": float, \"required\": False, \"default\": np.nan},\n \"units\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"units_type\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"source_format\": {\"type\": str, \"required\": True, \"default\": \"\"},\n \"instrument_sensitivity\": {\n \"type\": float,\n \"required\": False,\n \"default\": np.nan,\n },\n \"comments\": {\"type\": str, \"required\": False, \"default\": \"\"},\n}\n\nINT_TYPES = [\n np.dtype(\"int8\"),\n np.dtype(\"int16\"),\n np.dtype(\"int32\"),\n np.dtype(\"int64\"),\n np.dtype(\"uint8\"),\n np.dtype(\"uint16\"),\n np.dtype(\"uint32\"),\n np.dtype(\"uint64\"),\n]\n\nFLOAT_TYPES = [np.dtype(\"float32\"), np.dtype(\"float64\")]\n\nTIMEFMT = \"%Y-%m-%dT%H:%M:%SZ\"\nTIMEFMT_MS = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nNS_PREFIX = \"seis_prov\"\nNS_SEIS = (NS_PREFIX, \"http://seisprov.org/seis_prov/0.1/#\")\n\nMAX_ID_LEN = 12\n\nPROV_TIME_FMT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nACTIVITIES = {\n \"waveform_simulation\": {\"code\": \"ws\", \"label\": \"Waveform Simulation\"},\n \"taper\": {\"code\": \"tp\", \"label\": \"Taper\"},\n \"stack_cross_correlations\": {\"code\": \"sc\", \"label\": \"Stack Cross 
Correlations\"},\n \"simulate_response\": {\"code\": \"sr\", \"label\": \"Simulate Response\"},\n \"rotate\": {\"code\": \"rt\", \"label\": \"Rotate\"},\n \"resample\": {\"code\": \"rs\", \"label\": \"Resample\"},\n \"remove_response\": {\"code\": \"rr\", \"label\": \"Remove Response\"},\n \"pad\": {\"code\": \"pd\", \"label\": \"Pad\"},\n \"normalize\": {\"code\": \"nm\", \"label\": \"Normalize\"},\n \"multiply\": {\"code\": \"nm\", \"label\": \"Multiply\"},\n \"merge\": {\"code\": \"mg\", \"label\": \"Merge\"},\n \"lowpass_filter\": {\"code\": \"lp\", \"label\": \"Lowpass Filter\"},\n \"interpolate\": {\"code\": \"ip\", \"label\": \"Interpolate\"},\n \"integrate\": {\"code\": \"ig\", \"label\": \"Integrate\"},\n \"highpass_filter\": {\"code\": \"hp\", \"label\": \"Highpass Filter\"},\n \"divide\": {\"code\": \"dv\", \"label\": \"Divide\"},\n \"differentiate\": {\"code\": \"df\", \"label\": \"Differentiate\"},\n \"detrend\": {\"code\": \"dt\", \"label\": \"Detrend\"},\n \"decimate\": {\"code\": \"dc\", \"label\": \"Decimate\"},\n \"cut\": {\"code\": \"ct\", \"label\": \"Cut\"},\n \"cross_correlate\": {\"code\": \"co\", \"label\": \"Cross Correlate\"},\n \"calculate_adjoint_source\": {\"code\": \"ca\", \"label\": \"Calculate Adjoint Source\"},\n \"bandstop_filter\": {\"code\": \"bs\", \"label\": \"Bandstop Filter\"},\n \"bandpass_filter\": {\"code\": \"bp\", \"label\": \"Bandpass Filter\"},\n}\n\n\nclass StationTrace(Trace):\n \"\"\"Subclass of Obspy Trace object which holds more metadata.\n\n ObsPy provides a Trace object that serves as a container for waveform data\n from a single channel, as well as some basic metadata about the waveform\n start/end times, number of points, sampling rate/interval, and\n network/station/channel/location information.\n\n gmprocess subclasses the Trace object with a StationTrace object, which\n provides the following additional features:\n\n - Validation that length of data matches the number of points in the\n metadata.\n - Validation that required values are set in metadata.\n - A `fail` method which can be used by processing routines to mark when\n processing of the StationTrace has failed some sort of check (signal\n to noise ratio, etc.)\n - A `free_field` property which can be used to query the object to\n ensure that its data comes from a free-field sensor. Note: this is\n not always known reliably, and different people have have different\n definitions of the term free_field. When possible, we define a\n mapping between location code and the free_field property. For\n example, see the LOCATION_CODES variable core.py in\n `gmprocess.io.fdsn`.\n - Methods (e.g., `getProvenance`, `setProvenance`) for tracking\n processing steps that have been performed. 
These are aligned with the\n SEIS-PROV standard for processing provenance, described here:\n http://seismicdata.github.io/SEIS-PROV/_generated_details.html#activities\n - Methods (e.g., `getParameter` and `setParameter`) for tracking of\n arbitrary metadata in the form of a dictionary as trace property\n (self.parameters).\n \"\"\"\n\n def __init__(self, data=np.array([]), header=None, inventory=None, config=None):\n \"\"\"Construct a StationTrace instance.\n\n Args:\n data (ndarray):\n numpy array of points.\n header (dict-like):\n Dictionary of metadata (see trace.stats docs).\n inventory (Inventory):\n Obspy Inventory object.\n config (dict):\n Dictionary containing configuration.\n If None, retrieve global config.\n \"\"\"\n prov_response = None\n if config is None:\n config = get_config()\n if inventory is None and header is None:\n raise ValueError(\n \"Cannot create StationTrace without header info or Inventory\"\n )\n elif inventory is not None and header is not None:\n # End up here if the format was read in with ObsPy and an\n # inventory was able to be constructed (e.g., miniseed+StationXML)\n try:\n seed_id = \"%s.%s.%s.%s\" % (\n header[\"network\"],\n header[\"station\"],\n header[\"location\"],\n header[\"channel\"],\n )\n start_time = header[\"starttime\"]\n (response, standard, coords, format_specific) = _stats_from_inventory(\n data, inventory, seed_id, start_time\n )\n header[\"response\"] = response\n header[\"coordinates\"] = coords\n header[\"standard\"] = standard\n header[\"format_specific\"] = format_specific\n except BaseException as e:\n raise ValueError(\n \"Failed to construct required metadata from inventory \"\n \"and input header data with exception: %s\" % e\n )\n elif inventory is None and header is not None and \"standard\" not in header:\n # End up here for ObsPy without an inventory (e.g., SAC).\n # This assumes that all of our readers include the \"standard\" key\n # in the header and that ObsPy one's do not.\n\n # NOTE: we are assuming that an ObsPy file that does NOT have an\n # inventory has been converted to cm/s^2 via the configurable\n # conversion factor in the config file.\n prov_response = {\"input_units\": \"counts\", \"output_units\": \"cm/s^2\"}\n try:\n (response, standard, coords, format_specific) = _stats_from_header(\n header, config\n )\n header[\"response\"] = response\n header[\"coordinates\"] = coords\n header[\"standard\"] = standard\n header[\"format_specific\"] = format_specific\n except BaseException:\n raise ValueError(\n \"Failed to construct required metadata from header data.\"\n )\n\n # Sometimes the channel names do not indicate which one is the\n # Z channel. 
If we have vertical_orientation information, then\n # let's get that and change the vertical channel to end in Z.\n # NOTE: `vertical_orientation` here is defined as the angle\n # from horizontal (aka, dip), not inclination.\n if not np.isnan(header[\"standard\"][\"vertical_orientation\"]):\n delta = np.abs(np.abs(header[\"standard\"][\"vertical_orientation\"]) - 90.0)\n is_z = header[\"channel\"].endswith(\"Z\")\n if delta < MAX_DIP_OFFSET and not is_z:\n header[\"channel\"] = header[\"channel\"][0:-1] + \"Z\"\n\n # Apply conversion factor if one was specified for this format\n if (\n \"format_specific\" in header\n and \"conversion_factor\" in header[\"format_specific\"]\n ):\n data *= header[\"format_specific\"][\"conversion_factor\"]\n\n super(StationTrace, self).__init__(data=data, header=header)\n self.provenance = []\n if prov_response is not None:\n self.setProvenance(\"remove_response\", prov_response)\n self.parameters = {}\n self.cached = {}\n self.validate()\n\n @property\n def free_field(self):\n \"\"\"Is this station a free-field station?\n\n Returns:\n bool: True if a free-field sensor, False if not.\n \"\"\"\n stype = self.stats.standard[\"structure_type\"]\n non_free = [\n \"building\",\n \"bridge\",\n \"dam\",\n \"borehole\",\n \"hole\",\n \"crest\",\n \"toe\",\n \"foundation\",\n \"body\",\n \"roof\",\n \"floor\",\n ]\n for ftype in non_free:\n if re.search(ftype, stype.lower()) is not None:\n return False\n\n return True\n\n def fail(self, reason):\n \"\"\"Note that a check on this StationTrace failed for a given reason.\n\n This method will set the parameter \"failure\", and store the reason\n provided, plus the name of the calling function.\n\n Args:\n reason (str):\n Reason given for failure.\n\n \"\"\"\n istack = inspect.stack()\n calling_module = istack[1][3]\n self.setParameter(\"failure\", {\"module\": calling_module, \"reason\": reason})\n trace_id = \"%s\" % self.id\n logging.info(\"%s - %s - %s\" % (calling_module, trace_id, reason))\n\n def validate(self):\n \"\"\"Ensure that all required metadata fields have been set.\n\n Raises:\n KeyError:\n - When standard dictionary is missing required fields\n - When standard values are of the wrong type\n - When required values are set to a default.\n ValueError:\n - When number of points in header does not match data length.\n \"\"\"\n # here's something we thought obspy would do...\n # verify that npts matches length of data\n if self.stats.npts != len(self.data):\n raise ValueError(\n \"Number of points in header does not match the number of \"\n \"points in the data.\"\n )\n\n if \"remove_response\" not in self.getProvenanceKeys():\n self.stats.standard.units = \"raw counts\"\n else:\n self.stats.standard.units = REVERSE_UNITS[\n self.getProvenance(\"remove_response\")[0][\"output_units\"]\n ]\n\n # are all of the defined standard keys in the standard dictionary?\n req_keys = set(STANDARD_KEYS.keys())\n std_keys = set(list(self.stats.standard.keys()))\n if not req_keys <= std_keys:\n missing = str(req_keys - std_keys)\n raise KeyError(\n 'Missing standard values in StationTrace header: \"%s\"' % missing\n )\n type_errors = []\n required_errors = []\n for key in req_keys:\n keydict = STANDARD_KEYS[key]\n value = self.stats.standard[key]\n required = keydict[\"required\"]\n vtype = keydict[\"type\"]\n default = keydict[\"default\"]\n if not isinstance(value, vtype):\n type_errors.append(key)\n if required:\n if isinstance(default, list):\n if value not in default:\n required_errors.append(key)\n if value == 
default:\n required_errors.append(key)\n\n type_error_msg = \"\"\n if len(type_errors):\n fmt = 'The following standard keys have the wrong type: \"%s\"'\n tpl = \",\".join(type_errors)\n type_error_msg = fmt % tpl\n\n required_error_msg = \"\"\n if len(required_errors):\n fmt = 'The following standard keys are required: \"%s\"'\n tpl = \",\".join(required_errors)\n required_error_msg = fmt % tpl\n\n error_msg = type_error_msg + \"\\n\" + required_error_msg\n if len(error_msg.strip()):\n raise KeyError(error_msg)\n\n def getProvenanceKeys(self):\n \"\"\"Get a list of all available provenance keys.\n\n Returns:\n list: List of available provenance keys.\n \"\"\"\n if not len(self.provenance):\n return []\n pkeys = []\n for provdict in self.provenance:\n pkeys.append(provdict[\"prov_id\"])\n return pkeys\n\n def getProvenance(self, prov_id):\n \"\"\"Get seis-prov compatible attributes whose id matches prov_id.\n\n See http://seismicdata.github.io/SEIS-PROV/_generated_details.html\n\n Args:\n prov_id (str):\n Provenance ID (see URL above).\n\n Returns:\n list: Sequence of prov_attribute dictionaries (see URL above).\n \"\"\"\n matching_prov = []\n if not len(self.provenance):\n return matching_prov\n for provdict in self.provenance:\n if provdict[\"prov_id\"] == prov_id:\n matching_prov.append(provdict[\"prov_attributes\"])\n return matching_prov\n\n def setProvenance(self, prov_id, prov_attributes):\n \"\"\"Update a trace's provenance information.\n\n Args:\n trace (obspy.core.trace.Trace):\n Trace of strong motion dataself.\n prov_id (str):\n Activity prov:id (see URL above).\n prov_attributes (dict or list):\n Activity attributes for the given key.\n \"\"\"\n provdict = {\"prov_id\": prov_id, \"prov_attributes\": prov_attributes}\n self.provenance.append(provdict)\n self.validate()\n\n def getAllProvenance(self):\n \"\"\"Get internal list of processing history.\n\n Returns:\n list:\n Sequence of dictionaries containing fields:\n - prov_id Activity prov:id (see URL above).\n - prov_attributes Activity attributes for the given key.\n \"\"\"\n return self.provenance\n\n def getProvenanceDocument(self, base_prov=None):\n \"\"\"Generate a provenance document.\n\n Args:\n base_prov:\n Base provenance document.\n\n Returns:\n Provenance document.\n \"\"\"\n if base_prov is None:\n pr = prov.model.ProvDocument()\n pr.add_namespace(*NS_SEIS)\n pr = _get_person_agent(pr)\n pr = _get_software_agent(pr)\n pr = _get_waveform_entity(self, pr)\n else:\n pr = _get_waveform_entity(self, copy.deepcopy(base_prov))\n sequence = 1\n for provdict in self.getAllProvenance():\n provid = provdict[\"prov_id\"]\n prov_attributes = provdict[\"prov_attributes\"]\n if provid not in ACTIVITIES:\n fmt = \"Unknown or invalid processing parameter %s\"\n logging.debug(fmt % provid)\n continue\n pr = _get_activity(pr, provid, prov_attributes, sequence)\n sequence += 1\n return pr\n\n def setProvenanceDocument(self, provdoc):\n software = {}\n person = {}\n for record in provdoc.get_records():\n ident = record.identifier.localpart\n parts = ident.split(\"_\")\n sptype = parts[1]\n # hashid = '_'.join(parts[2:])\n # sp, sptype, hashid = ident.split('_')\n if sptype == \"sa\":\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if isinstance(attr_val, prov.identifier.Identifier):\n attr_val = attr_val.uri\n software[key] = attr_val\n elif sptype == \"pp\":\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if isinstance(attr_val, prov.identifier.Identifier):\n attr_val = 
attr_val.uri\n person[key] = attr_val\n elif sptype == \"wf\": # waveform tag\n continue\n else: # these are processing steps\n params = {}\n sptype = \"\"\n for attr_key, attr_val in record.attributes:\n key = attr_key.localpart\n if key == \"label\":\n continue\n elif key == \"type\":\n _, sptype = attr_val.split(\":\")\n continue\n if isinstance(attr_val, datetime):\n attr_val = UTCDateTime(attr_val)\n params[key] = attr_val\n self.setProvenance(sptype, params)\n self.setParameter(\"software\", software)\n self.setParameter(\"user\", person)\n\n def hasParameter(self, param_id):\n \"\"\"Check to see if Trace contains a given parameter.\n\n Args:\n param_id (str): Name of parameter to check.\n\n Returns:\n bool: True if parameter is set, False if not.\n \"\"\"\n return param_id in self.parameters\n\n def setParameter(self, param_id, param_attributes):\n \"\"\"Add to the StationTrace's set of arbitrary metadata.\n\n Args:\n param_id (str):\n Key for parameters dictionary.\n param_attributes (dict or list):\n Parameters for the given key.\n \"\"\"\n self.parameters[param_id] = param_attributes\n\n def setCached(self, name, array_dict):\n \"\"\"Store a dictionary of arrays in StationTrace.\n\n Args:\n name (str):\n Name of data dictionary to be stored.\n array_dict (dict):\n Dictionary with:\n - key array name\n - value as numpy array\n \"\"\"\n self.cached[name] = array_dict\n\n def getCached(self, name):\n \"\"\"Retrieve a dictionary of arrays.\n\n Args:\n name (str):\n Name of dictionary to retrieve.\n Returns:\n dict: Dictionary of arrays (see setSpectrum).\n \"\"\"\n if name not in self.cached:\n raise KeyError(\"%s not in set of spectra arrays.\" % name)\n return self.cached[name]\n\n def hasCached(self, name):\n \"\"\"Check if StationTrace has cached attribute.\"\"\"\n if name not in self.cached:\n return False\n return True\n\n def getCachedNames(self):\n \"\"\"Return list of arrays that have been cached.\n\n Returns:\n list: List of cached arrays in this StationTrace.\n \"\"\"\n return list(self.cached.keys())\n\n def getParameterKeys(self):\n \"\"\"Get a list of all available parameter keys.\n\n Returns:\n list: List of available parameter keys.\n \"\"\"\n return list(self.parameters.keys())\n\n def getParameter(self, param_id):\n \"\"\"Retrieve some arbitrary metadata.\n\n Args:\n param_id (str):\n Key for parameters dictionary.\n\n Returns:\n dict or list:\n Parameters for the given key.\n \"\"\"\n if param_id not in self.parameters:\n raise KeyError(\"Parameter %s not found in StationTrace\" % param_id)\n return self.parameters[param_id]\n\n def getProvDataFrame(self):\n columns = [\"Process Step\", \"Process Attribute\", \"Process Value\"]\n df = pd.DataFrame(columns=columns)\n values = []\n attributes = []\n steps = []\n indices = []\n index = 0\n for activity in self.getAllProvenance():\n provid = activity[\"prov_id\"]\n provstep = ACTIVITIES[provid][\"label\"]\n prov_attrs = activity[\"prov_attributes\"]\n steps += [provstep] * len(prov_attrs)\n indices += [index] * len(prov_attrs)\n for key, value in prov_attrs.items():\n attributes.append(key)\n if isinstance(value, UTCDateTime):\n value = value.datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n values.append(str(value))\n index += 1\n\n mdict = {\n \"Index\": indices,\n \"Process Step\": steps,\n \"Process Attribute\": attributes,\n \"Process Value\": values,\n }\n df = pd.DataFrame(mdict)\n return df\n\n def getProvSeries(self):\n \"\"\"Return a pandas Series containing the processing history for the\n trace.\n\n BO.NGNH31.HN2 
Remove Response input_units counts\n - output_units cm/s^2\n - Taper side both\n - window_type Hann\n - taper_width 0.05\n\n Returns:\n Series:\n Pandas Series (see above).\n \"\"\"\n tpl = (self.stats.network, self.stats.station, self.stats.channel)\n recstr = \"%s.%s.%s\" % tpl\n values = []\n attributes = []\n steps = []\n for activity in self.getAllProvenance():\n provid = activity[\"prov_id\"]\n provstep = ACTIVITIES[provid][\"label\"]\n prov_attrs = activity[\"prov_attributes\"]\n steps += [provstep] * len(prov_attrs)\n for key, value in prov_attrs.items():\n attributes.append(key)\n values.append(str(value))\n records = [recstr] * len(attributes)\n index = [records, steps, attributes]\n row = pd.Series(values, index=index)\n return row\n\n def __str__(self, id_length=None, indent=0):\n \"\"\"\n Extends Trace __str__.\n \"\"\"\n # set fixed id width\n\n if id_length:\n out = \"%%-%ds\" % (id_length)\n trace_id = out % self.id\n else:\n trace_id = \"%s\" % self.id\n out = \"\"\n # output depending on delta or sampling rate bigger than one\n if self.stats.sampling_rate < 0.1:\n if hasattr(self.stats, \"preview\") and self.stats.preview:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(delta).1f s, %(npts)d samples [preview]\"\n )\n else:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \" + \"%(delta).1f s, %(npts)d samples\"\n )\n else:\n if hasattr(self.stats, \"preview\") and self.stats.preview:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(sampling_rate).1f Hz, %(npts)d samples [preview]\"\n )\n else:\n out = (\n out + \" | \"\n \"%(starttime)s - %(endtime)s | \"\n + \"%(sampling_rate).1f Hz, %(npts)d samples\"\n )\n # check for masked array\n if np.ma.count_masked(self.data):\n out += \" (masked)\"\n if self.hasParameter(\"failure\"):\n out += \" (failed)\"\n else:\n out += \" (passed)\"\n ind_str = \" \" * indent\n return ind_str + trace_id + out % (self.stats)\n\n\ndef _stats_from_inventory(data, inventory, seed_id, start_time):\n if len(inventory.source):\n if inventory.sender is not None and inventory.sender != inventory.source:\n source = \"%s,%s\" % (inventory.source, inventory.sender)\n else:\n source = inventory.source\n\n network_code, station_code, location_code, channel_code = seed_id.split(\".\")\n\n selected_inventory = inventory.select(\n network=network_code,\n station=station_code,\n location=location_code,\n channel=channel_code,\n time=start_time,\n )\n\n station = selected_inventory.networks[0].stations[0]\n channel = station.channels[0]\n\n coords = {\n \"latitude\": channel.latitude,\n \"longitude\": channel.longitude,\n \"elevation\": channel.elevation,\n }\n\n standard = {}\n\n # things we'll never get from an inventory object\n standard[\"corner_frequency\"] = np.nan\n standard[\"instrument_damping\"] = np.nan\n standard[\"instrument_period\"] = np.nan\n standard[\"structure_type\"] = \"\"\n standard[\"process_time\"] = \"\"\n\n if data.dtype in INT_TYPES:\n standard[\"process_level\"] = \"raw counts\"\n else:\n standard[\"process_level\"] = \"uncorrected physical units\"\n\n standard[\"source\"] = source\n standard[\"source_file\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n if channel.sensor is not None:\n standard[\"instrument\"] = \"%s %s %s %s\" % (\n channel.sensor.type,\n channel.sensor.manufacturer,\n channel.sensor.model,\n channel.sensor.description,\n )\n if channel.sensor.serial_number is not None:\n standard[\"sensor_serial_number\"] = 
channel.sensor.serial_number\n else:\n standard[\"sensor_serial_number\"] = \"\"\n\n if channel.azimuth is not None:\n standard[\"horizontal_orientation\"] = channel.azimuth\n else:\n standard[\"horizontal_orientation\"] = np.nan\n\n if channel.dip is not None:\n # Note: vertical orientatin is defined here as angle from horizontal\n standard[\"vertical_orientation\"] = channel.dip\n else:\n standard[\"vertical_orientation\"] = np.nan\n\n standard[\"units_type\"] = get_units_type(channel_code)\n\n if len(channel.comments):\n comments = \" \".join(\n channel.comments[i].value for i in range(len(channel.comments))\n )\n standard[\"comments\"] = comments\n else:\n standard[\"comments\"] = \"\"\n standard[\"station_name\"] = \"\"\n if station.site.name != \"None\":\n standard[\"station_name\"] = station.site.name\n # extract the remaining standard info and format_specific info\n # from a JSON string in the station description.\n\n format_specific = {}\n if station.description is not None and station.description != \"None\":\n jsonstr = station.description\n try:\n big_dict = json.loads(jsonstr)\n standard.update(big_dict[\"standard\"])\n format_specific = big_dict[\"format_specific\"]\n except json.decoder.JSONDecodeError:\n format_specific[\"description\"] = jsonstr\n\n if \"source_format\" not in standard or standard[\"source_format\"] is None:\n standard[\"source_format\"] = \"fdsn\"\n\n standard[\"instrument_sensitivity\"] = np.nan\n response = None\n if channel.response is not None:\n response = channel.response\n if hasattr(response, \"instrument_sensitivity\"):\n units = response.instrument_sensitivity.input_units\n if \"/\" in units:\n num, denom = units.split(\"/\")\n if num.lower() not in LENGTH_CONVERSIONS:\n raise KeyError(\n \"Sensitivity input units of %s are not supported.\" % units\n )\n conversion = LENGTH_CONVERSIONS[num.lower()]\n sensitivity = response.instrument_sensitivity.value * conversion\n response.instrument_sensitivity.value = sensitivity\n standard[\"instrument_sensitivity\"] = sensitivity\n else:\n standard[\n \"instrument_sensitivity\"\n ] = response.instrument_sensitivity.value\n\n return (response, standard, coords, format_specific)\n\n\ndef _stats_from_header(header, config):\n if \"_format\" in header and header._format.lower() == \"sac\":\n # The plan is to add separate if blocks to support the different\n # formats as we encounter them here. 
See the SAC header documentation\n # here:\n # http://ds.iris.edu/files/sac-manual/manual/file_format.html\n\n # Todo: add support for SAC with PZ file.\n\n coords = {\n \"latitude\": header[\"sac\"][\"stla\"],\n \"longitude\": header[\"sac\"][\"stlo\"],\n \"elevation\": header[\"sac\"][\"stel\"],\n }\n standard = {}\n standard[\"corner_frequency\"] = np.nan\n standard[\"instrument_damping\"] = np.nan\n standard[\"instrument_period\"] = np.nan\n standard[\"structure_type\"] = \"\"\n standard[\"process_time\"] = \"\"\n standard[\"process_level\"] = \"uncorrected physical units\"\n standard[\"source\"] = config[\"read\"][\"sac_source\"]\n standard[\"source_file\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n standard[\"instrument\"] = \"\"\n standard[\"sensor_serial_number\"] = \"\"\n standard[\"horizontal_orientation\"] = float(header[\"sac\"][\"cmpaz\"])\n # Note: vertical orientatin is defined here as angle from horizontal\n standard[\"vertical_orientation\"] = 90.0 - float(header[\"sac\"][\"cmpinc\"])\n utype = get_units_type(header[\"channel\"])\n standard[\"units_type\"] = utype\n standard[\"units\"] = UNITS[utype]\n standard[\"comments\"] = \"\"\n standard[\"station_name\"] = \"\"\n standard[\"station_name\"] = header[\"station\"]\n format_specific = {\n \"conversion_factor\": float(config[\"read\"][\"sac_conversion_factor\"])\n }\n standard[\"source_format\"] = header._format\n standard[\"instrument_sensitivity\"] = np.nan\n response = None\n else:\n raise Exception(\"Format unsuppored without StationXML file.\")\n\n return (response, standard, coords, format_specific)\n\n\ndef _get_software_agent(pr, gmprocess_version):\n \"\"\"Get the seis-prov entity for the gmprocess software.\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n gmprocess_version (str):\n gmprocess version.\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with gmprocess software name/version.\n \"\"\"\n software = \"gmprocess\"\n hashstr = \"0000001\"\n agent_id = \"seis_prov:sp001_sa_%s\" % hashstr\n giturl = \"https://github.com/usgs/groundmotion-processing\"\n pr.agent(\n agent_id,\n other_attributes=(\n (\n (\"prov:label\", software),\n (\n \"prov:type\",\n prov.identifier.QualifiedName(prov.constants.PROV, \"SoftwareAgent\"),\n ),\n (\"seis_prov:software_name\", software),\n (\"seis_prov:software_version\", gmprocess_version),\n (\n \"seis_prov:website\",\n prov.model.Literal(giturl, prov.constants.XSD_ANYURI),\n ),\n )\n ),\n )\n return pr\n\n\ndef _get_person_agent(pr, config=None):\n \"\"\"Get the seis-prov entity for the user software.\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n config (dict):\n Configuration options.\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with gmprocess software name/version.\n \"\"\"\n username = getpass.getuser()\n if config is None:\n config = get_config()\n fullname = \"\"\n email = \"\"\n if \"user\" in config:\n if \"name\" in config[\"user\"]:\n fullname = config[\"user\"][\"name\"]\n if \"email\" in config[\"user\"]:\n email = config[\"user\"][\"email\"]\n hashstr = \"0000001\"\n person_id = \"seis_prov:sp001_pp_%s\" % hashstr\n pr.agent(\n person_id,\n other_attributes=(\n (\n (\"prov:label\", username),\n (\n \"prov:type\",\n prov.identifier.QualifiedName(prov.constants.PROV, \"Person\"),\n ),\n (\"seis_prov:name\", fullname),\n (\"seis_prov:email\", email),\n )\n ),\n )\n return pr\n\n\ndef _get_waveform_entity(trace, pr):\n \"\"\"Get the 
seis-prov entity for an input Trace.\n\n Args:\n trace (Trace):\n Input Obspy Trace object.\n pr (Prov):\n prov.model.ProvDocument\n\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with waveform entity information.\n \"\"\"\n tpl = (\n trace.stats.network.lower(),\n trace.stats.station.lower(),\n trace.stats.channel.lower(),\n )\n waveform_hash = \"%s_%s_%s\" % tpl\n waveform_id = \"seis_prov:sp001_wf_%s\" % waveform_hash\n pr.entity(\n waveform_id,\n other_attributes=(\n (\n (\"prov:label\", \"Waveform Trace\"),\n (\"prov:type\", \"seis_prov:waveform_trace\"),\n )\n ),\n )\n return pr\n\n\ndef _get_activity(pr, activity, attributes, sequence):\n \"\"\"Get the seis-prov entity for an input processing \"activity\".\n\n See\n http://seismicdata.github.io/SEIS-PROV/_generated_details.html#activities\n\n for details on the types of activities that are possible to capture.\n\n\n Args:\n pr (prov.model.ProvDocument):\n Existing ProvDocument.\n activity (str):\n The prov:id for the input activity.\n attributes (dict):\n The attributes associated with the activity.\n sequence (int):\n Integer used to identify the order in which the activities were\n performed.\n Returns:\n prov.model.ProvDocument:\n Provenance document updated with input activity.\n \"\"\"\n activity_dict = ACTIVITIES[activity]\n hashid = \"%07i\" % sequence\n code = activity_dict[\"code\"]\n label = activity_dict[\"label\"]\n activity_id = \"sp%03i_%s_%s\" % (sequence, code, hashid)\n pr_attributes = [(\"prov:label\", label), (\"prov:type\", \"seis_prov:%s\" % activity)]\n for key, value in attributes.items():\n if isinstance(value, float):\n value = prov.model.Literal(value, prov.constants.XSD_DOUBLE)\n elif isinstance(value, int):\n value = prov.model.Literal(value, prov.constants.XSD_INT)\n elif isinstance(value, UTCDateTime):\n value = prov.model.Literal(\n value.strftime(TIMEFMT), prov.constants.XSD_DATETIME\n )\n\n att_tuple = (\"seis_prov:%s\" % key, value)\n pr_attributes.append(att_tuple)\n pr.activity(\"seis_prov:%s\" % activity_id, other_attributes=pr_attributes)\n return pr\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nimport json\nfrom datetime import datetime\n\nimport pandas as pd\nimport pytz\n\nfrom gmprocess.utils.event import get_event_object, ScalarEvent\n\n\ndef get_events(eventids, textfile, eventinfo, directory, outdir=None):\n \"\"\"Find the list of events.\n\n Args:\n eventids (list or None):\n List of ComCat event IDs.\n textfile (str or None):\n Path to text file containing event IDs or info.\n eventinfo (list or None):\n List containing:\n - id Any string, no spaces.\n - time Any ISO-compatible date/time string.\n - latitude Latitude in decimal degrees.\n - longitude Longitude in decimal degrees.\n - depth Depth in kilometers.\n - magnitude Earthquake magnitude.\n - magnitude_type Earthquake magnitude type.\n directory (str):\n Path to a directory containing event subdirectories, each\n containing an event.json file, where the ID in the json file\n matches the subdirectory containing it.\n outdir (str):\n Output directory.\n\n Returns:\n list: ScalarEvent objects.\n\n \"\"\"\n events = []\n if eventids is not None:\n # Get list of events from directory if it has been provided\n tevents = []\n if directory is not None:\n tevents = events_from_directory(directory)\n elif outdir is not None:\n tevents = events_from_directory(outdir)\n for eventid in eventids:\n if len(tevents) and eventid in tevents:\n event = [e for e in tevents if e.id == eventid][0]\n else:\n # This connects to comcat to get event, does not check for a\n # local json file\n event = get_event_object(eventid)\n events.append(event)\n elif textfile is not None:\n events = parse_event_file(textfile)\n elif eventinfo is not None:\n eid = eventinfo[0]\n time = eventinfo[1]\n lat = float(eventinfo[2])\n lon = float(eventinfo[3])\n dep = float(eventinfo[4])\n mag = float(eventinfo[5])\n mag_type = str(eventinfo[6])\n event = ScalarEvent()\n event.fromParams(eid, time, lat, lon, dep, mag, mag_type)\n events = [event]\n elif directory is not None:\n events = events_from_directory(directory)\n elif outdir is not None:\n events = events_from_directory(outdir)\n return events\n\n\ndef events_from_directory(dir):\n events = []\n eventfiles = get_event_files(dir)\n if len(eventfiles):\n events = read_event_json_files(eventfiles)\n else:\n eventids = [f for f in os.listdir(dir) if not f.startswith(\".\")]\n for eventid in eventids:\n try:\n event = get_event_object(eventid)\n events.append(event)\n\n # If the event ID has been updated, make sure to rename\n # the source folder and issue a warning to the user\n if event.id != eventid:\n old_dir = os.path.join(dir, eventid)\n new_dir = os.path.join(dir, event.id)\n os.rename(old_dir, new_dir)\n logging.warn(\n \"Directory %s has been renamed to %s.\" % (old_dir, new_dir)\n )\n except BaseException:\n logging.warning(\"Could not get info for event id: %s\" % eventid)\n\n return events\n\n\ndef get_event_files(directory):\n \"\"\"Get list of event.json files found underneath a data directory.\n\n Args:\n directory (str):\n Path to directory containing input raw data, where\n subdirectories must be event directories containing\n event.json files, where the id in that file matches\n the directory under which it is found.\n Returns:\n List of event.json files.\n \"\"\"\n eventfiles = []\n for root, dirs, files in os.walk(directory):\n for name in files:\n if name == \"event.json\":\n fullname = os.path.join(root, name)\n eventfiles.append(fullname)\n return eventfiles\n\n\ndef parse_event_file(eventfile):\n 
\"\"\"Parse text file containing basic event information.\n\n Files can contain:\n - one column, in which case that column\n contains ComCat event IDs.\n - Seven columns, in which case those columns should be:\n - id: any string (no spaces)\n - time: Any ISO standard for date/time.\n - lat: Earthquake latitude in decimal degrees.\n - lon: Earthquake longitude in decimal degrees.\n - depth: Earthquake longitude in kilometers.\n - magnitude: Earthquake magnitude.\n - magnitude_type: Earthquake magnitude type.\n\n NB: THERE SHOULD NOT BE ANY HEADERS ON THIS FILE!\n\n Args:\n eventfile (str):\n Path to event text file\n\n Returns:\n list: ScalarEvent objects constructed from list of event information.\n\n \"\"\"\n df = pd.read_csv(eventfile, sep=\",\", header=None)\n nrows, ncols = df.shape\n events = []\n if ncols == 1:\n df.columns = [\"eventid\"]\n for idx, row in df.iterrows():\n event = get_event_object(row[\"eventid\"])\n events.append(event)\n elif ncols == 7:\n df.columns = [\n \"id\",\n \"time\",\n \"lat\",\n \"lon\",\n \"depth\",\n \"magnitude\",\n \"magnitude_type\",\n ]\n df[\"time\"] = pd.to_datetime(df[\"time\"])\n for idx, row in df.iterrows():\n rowdict = row.to_dict()\n event = get_event_object(rowdict)\n events.append(event)\n else:\n return None\n return events\n\n\ndef read_event_json_files(eventfiles):\n \"\"\"Read event.json file and return ScalarEvent object.\n\n Args:\n eventfiles (list):\n Event.json files to be read.\n Returns:\n list: ScalarEvent objects.\n\n \"\"\"\n events = []\n for eventfile in eventfiles:\n with open(eventfile, \"rt\", encoding=\"utf-8\") as f:\n event = json.load(f)\n try:\n origintime = datetime.fromtimestamp(\n event[\"properties\"][\"time\"] / 1000.0, pytz.utc\n )\n evdict = {\n \"id\": event[\"id\"],\n \"time\": origintime.strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n \"lat\": event[\"geometry\"][\"coordinates\"][1],\n \"lon\": event[\"geometry\"][\"coordinates\"][0],\n \"depth\": event[\"geometry\"][\"coordinates\"][2],\n \"magnitude\": event[\"properties\"][\"mag\"],\n \"magnitude_type\": event[\"properties\"][\"magType\"],\n }\n event = get_event_object(evdict)\n\n except BaseException:\n event = get_event_object(event)\n\n events.append(event)\n return events\n",
"# stdlib imports\nfrom datetime import datetime, timedelta\nfrom urllib.parse import urlparse, urljoin\nimport shutil\nimport tempfile\nimport os.path\nimport logging\n\n# third party imports\nimport logging\nimport pytz\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nfrom openquake.hazardlib.geo.geodetic import geodetic_distance\nfrom obspy.core.utcdatetime import UTCDateTime\nimport pandas as pd\n\n# local imports\nfrom gmprocess.io.fetcher import DataFetcher, _get_first_value\nfrom gmprocess.io.nsmn.core import read_nsmn\nfrom gmprocess.core.streamcollection import StreamCollection\nfrom gmprocess.utils.config import get_config\n\n\nSEARCH_URL = \"http://kyhdata.deprem.gov.tr/2K/kyhdata_v4.php?dst=TU9EVUxFX05BTUU9ZWFydGhxdWFrZSZNT0RVTEVfVEFTSz1zZWFyY2g%3D\"\n\nEQ_FORM_DATA = {\n \"from_day\": \"\",\n \"from_month\": \"\",\n \"from_year\": \"\",\n \"from_md\": \"\",\n \"to_md\": \"\",\n \"to_day\": \"\",\n \"to_month\": \"\",\n \"to_year\": \"\",\n \"from_ml\": \"\",\n \"to_ml\": \"\",\n \"from_epi_lat\": \"34.00\",\n \"to_epi_lat\": \"43.00\",\n \"from_ms\": \"\",\n \"to_ms\": \"\",\n \"from_epi_lon\": \"24.0\",\n \"to_epi_lon\": \"45.82\",\n \"from_mw\": \"\",\n \"to_mw\": \"\",\n \"from_depth\": \"\",\n \"to_depth\": \"\",\n \"from_mb\": \"\",\n \"to_mb\": \"\",\n}\n\n# 2019/03/13-13:48:00.00\nTIMEFMT = \"%Y-%m-%dT%H:%M:%S\"\n\n# default values for this fetcher\n# if None specified in constructor, AND no parameters specified in\n# config, then use these.\nRADIUS = 100 # kilometers\nDT = 16 # seconds\nDDEPTH = 30 # km\nDMAG = 0.3\n\n\nclass TurkeyFetcher(DataFetcher):\n def __init__(\n self,\n time,\n lat,\n lon,\n depth,\n magnitude,\n user=None,\n password=None,\n radius=100,\n dt=16,\n ddepth=30,\n dmag=0.3,\n rawdir=None,\n config=None,\n drop_non_free=True,\n ):\n \"\"\"Create a TurkeyFetcher instance.\n\n Download Turkish strong motion data from the Turkish NSMN site:\n http://kyhdata.deprem.gov.tr/2K/kyhdata_v4.php\n\n Args:\n time (datetime):\n Origin time.\n lat (float):\n Origin latitude.\n lon (float):\n Origin longitude.\n depth (float):\n Origin depth.\n magnitude (float):\n Origin magnitude.\n radius (float):\n Search radius (km).\n dt (float):\n Search time window (sec).\n ddepth (float):\n Search depth window (km).\n dmag (float):\n Search magnitude window (magnitude units).\n rawdir (str):\n Path to location where raw data will be stored. 
If not\n specified, raw data will be deleted.\n config (dict):\n Dictionary containing configuration.\n If None, retrieve global config.\n drop_non_free (bool):\n Option to ignore non-free-field (borehole, sensors on\n structures, etc.)\n \"\"\"\n # what values do we use for search thresholds?\n # In order of priority:\n # 1) Not-None values passed in constructor\n # 2) Configured values\n # 3) DEFAULT values at top of the module\n if config is None:\n config = get_config()\n cfg_radius = None\n cfg_dt = None\n cfg_ddepth = None\n cfg_dmag = None\n\n if \"fetchers\" in config:\n if \"TurkeyFetcher\" in config[\"fetchers\"]:\n fetch_cfg = config[\"fetchers\"][\"TurkeyFetcher\"]\n if \"radius\" in fetch_cfg:\n cfg_radius = float(fetch_cfg[\"radius\"])\n if \"dt\" in fetch_cfg:\n cfg_dt = float(fetch_cfg[\"dt\"])\n if \"ddepth\" in fetch_cfg:\n cfg_ddepth = float(fetch_cfg[\"ddepth\"])\n if \"dmag\" in fetch_cfg:\n cfg_dmag = float(fetch_cfg[\"dmag\"])\n\n radius = _get_first_value(radius, cfg_radius, RADIUS)\n dt = _get_first_value(dt, cfg_dt, DT)\n ddepth = _get_first_value(ddepth, cfg_ddepth, DDEPTH)\n dmag = _get_first_value(dmag, cfg_dmag, DMAG)\n\n tz = pytz.UTC\n if isinstance(time, UTCDateTime):\n time = time.datetime\n self.time = tz.localize(time)\n self.lat = lat\n self.lon = lon\n self.radius = radius\n self.dt = dt\n self.rawdir = rawdir\n self.depth = depth\n self.magnitude = magnitude\n self.ddepth = ddepth\n self.dmag = dmag\n xmin = 25.664\n xmax = 46.67\n ymin = 34.132\n ymax = 43.555\n # this announces to the world the valid bounds for this fetcher.\n self.BOUNDS = [xmin, xmax, ymin, ymax]\n self.drop_non_free = drop_non_free\n\n def getMatchingEvents(self, solve=True):\n \"\"\"Return a list of dictionaries matching input parameters.\n\n Args:\n solve (bool):\n If set to True, then this method\n should return a list with a maximum of one event.\n\n Returns:\n list: List of event dictionaries, with fields:\n - time Event time (UTC)\n - lat Event latitude\n - lon Event longitude\n - depth Event depth\n - mag Event magnitude\n \"\"\"\n df = get_turkey_dataframe(self.time, 1)\n if df is None:\n return []\n lats = df[\"latitude\"].to_numpy()\n lons = df[\"longitude\"].to_numpy()\n etime = pd.Timestamp(self.time)\n dtimes = np.abs(df[\"origintime\"] - etime)\n distances = geodetic_distance(self.lon, self.lat, lons, lats)\n didx = distances <= self.radius\n tidx = (dtimes <= np.timedelta64(int(self.dt), \"s\")).to_numpy()\n newdf = df[didx & tidx]\n events = []\n for idx, row in newdf.iterrows():\n eventdict = {\n \"time\": UTCDateTime(row[\"origintime\"]),\n \"lat\": row[\"latitude\"],\n \"lon\": row[\"longitude\"],\n \"depth\": row[\"depth\"],\n \"url\": row[\"url\"],\n \"mag\": row[\"magnitude\"],\n }\n events.append(eventdict)\n\n if solve and len(events) > 1:\n event = self.solveEvents(events)\n events = [event]\n\n return events\n\n def retrieveData(self, event_dict):\n \"\"\"Retrieve data from NSMN, turn into StreamCollection.\n\n Args:\n event (dict):\n Best dictionary matching input event, fields as above\n in return of getMatchingEvents().\n\n Returns:\n StreamCollection: StreamCollection object.\n \"\"\"\n rawdir = self.rawdir\n if self.rawdir is None:\n rawdir = tempfile.mkdtemp()\n else:\n if not os.path.isdir(rawdir):\n os.makedirs(rawdir)\n\n urlparts = urlparse(SEARCH_URL)\n req = requests.get(event_dict[\"url\"])\n\n logging.debug(\"TurkeyFetcher event url: %s\", str(event_dict[\"url\"]))\n logging.debug(\"TurkeyFetcher event response code: %s\", 
req.status_code)\n\n data = req.text\n soup = BeautifulSoup(data, features=\"lxml\")\n table = soup.find_all(\"table\", \"tableType_01\")[1]\n datafiles = []\n for row in table.find_all(\"tr\"):\n if \"class\" in row.attrs:\n continue\n col = row.find_all(\"td\", \"coltype01\")[0]\n href = col.contents[0].attrs[\"href\"]\n station_id = col.contents[0].contents[0]\n station_url = urljoin(\"http://\" + urlparts.netloc, href)\n req2 = requests.get(station_url)\n logging.debug(\"TurkeyFetcher station url: %s\", str(station_url))\n logging.debug(\"TurkeyFetcher station response code: %s\", req2.status_code)\n data2 = req2.text\n soup2 = BeautifulSoup(data2, features=\"lxml\")\n center = soup2.find_all(\"center\")[0]\n anchor = center.find_all(\"a\")[0]\n href2 = anchor.attrs[\"href\"]\n data_url = urljoin(\"http://\" + urlparts.netloc, href2)\n req3 = requests.get(data_url)\n logging.debug(\"TurkeyFetcher data url: %s\", str(data_url))\n logging.debug(\"TurkeyFetcher data response code: %s\", req3.status_code)\n data = req3.text\n localfile = os.path.join(rawdir, \"%s.txt\" % station_id)\n logging.info(\"Downloading Turkish data file %s...\" % station_id)\n with open(localfile, \"wt\") as f:\n f.write(data)\n datafiles.append(localfile)\n\n streams = []\n for dfile in datafiles:\n logging.info(\"Reading datafile %s...\" % dfile)\n streams += read_nsmn(dfile)\n\n if self.rawdir is None:\n shutil.rmtree(rawdir)\n\n stream_collection = StreamCollection(\n streams=streams, drop_non_free=self.drop_non_free\n )\n return stream_collection\n\n\ndef get_turkey_dataframe(time, dt):\n \"\"\"Retrieve a dataframe of events from the NSMN site.\n\n Args:\n time (datetime): Earthquake origin time.\n dt (int): Number of days around origin time to search.\n\n Returns:\n DataFrame: Catalog of events with columns:\n - id Turkish Earthquake ID.\n - url URL where station data for this event can be downloaded.\n - origintime Earthquake origin time.\n - latitude Earthquake origin latitude.\n - longitude Earthquake origin longitude.\n - depth Earthquake origin depth.\n - magnitude Largest Turkish magnitude (from list of ML, MD, MS,\n MW, MB)\n or None if no events are found.\n\n \"\"\"\n urlparts = urlparse(SEARCH_URL)\n url = SEARCH_URL\n params = EQ_FORM_DATA.copy()\n start_time = time - timedelta(days=dt)\n end_time = time + timedelta(days=dt)\n params[\"from_year\"] = str(start_time.year)\n params[\"from_month\"] = \"%02i\" % start_time.month\n params[\"from_day\"] = \"%02i\" % start_time.day\n params[\"to_year\"] = str(end_time.year)\n params[\"to_month\"] = \"%02i\" % end_time.month\n params[\"to_day\"] = \"%02i\" % end_time.day\n req = requests.post(url, params)\n logging.debug(\"TurkeyFetcher dataframe url: %s\", str(url))\n logging.debug(\"TurkeyFetcher dataframe response code: %s\", req.status_code)\n if req.status_code != 200:\n return None\n data = req.text\n soup = BeautifulSoup(data, features=\"lxml\")\n all_table = soup.find_all(\"table\", \"tableType_01\")\n if len(all_table):\n table = all_table[0]\n cols = [\n \"id\",\n \"origintime\",\n \"latitude\",\n \"longitude\",\n \"depth\",\n \"magnitude\",\n \"url\",\n ]\n df = pd.DataFrame(columns=cols)\n for row in table.find_all(\"tr\"):\n if \"class\" in row.attrs and row.attrs[\"class\"] == [\"headerRowType_01\"]:\n continue\n cols = row.find_all(\"td\", \"coltype01\")\n href = cols[0].contents[0].attrs[\"href\"]\n event_url = urljoin(\"http://\" + urlparts.netloc, href)\n eid = cols[0].contents[0].contents[0]\n datestr = str(cols[1].contents[0])\n 
timestr = str(cols[2].contents[0])\n timestr = timestr[0:8]\n lat = float(str(cols[3].contents[0]))\n lon = float(str(cols[4].contents[0]))\n depth = float(str(cols[5].contents[0]))\n mags = []\n for i in range(6, 11):\n if len(cols[i].contents):\n mag = float(str(cols[i].contents[0]))\n mags.append(mag)\n mag = max(mags)\n time = datetime.strptime(datestr + \"T\" + timestr, TIMEFMT)\n time = pd.Timestamp(time).tz_localize(\"UTC\")\n edict = {\n \"id\": eid,\n \"url\": event_url,\n \"origintime\": time,\n \"latitude\": lat,\n \"longitude\": lon,\n \"depth\": depth,\n \"magnitude\": mag,\n }\n df = df.append(edict, ignore_index=True)\n\n # make sure that origintime is actually a time\n df[\"origintime\"] = pd.to_datetime(df[\"origintime\"])\n return df\n else:\n return None\n"
] | [
[
"pandas.read_pickle",
"numpy.testing.assert_allclose"
],
[
"pandas.Series",
"numpy.abs",
"numpy.isnan",
"numpy.ma.count_masked",
"numpy.dtype",
"pandas.DataFrame",
"numpy.array"
],
[
"pandas.read_csv",
"pandas.to_datetime"
],
[
"pandas.Timestamp",
"pandas.to_datetime",
"numpy.abs",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
romybu22/chameleon-smart-sampling | [
"d0f0588ed9d38e9c133482a68e84379c21892080"
] | [
"acr_module/acr/preprocessing/doc2vec_adressa.py"
] | [
"import argparse\nimport pandas as pd\nimport numpy as np\nimport re\nimport nltk\nfrom sklearn.preprocessing import LabelEncoder\n\n\nfrom ..utils import serialize\nfrom .tokenization import tokenize_articles, nan_to_str, convert_tokens_to_int, get_words_freq\n\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom nltk.tokenize import word_tokenize\n\n\ndef create_args_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--input_articles_csv_path', default='',\n help='Input path of the news CSV file.')\n\n parser.add_argument(\n '--output_article_content_embeddings', default='',\n help='')\n return parser\n\ndef load_input_csv(path):\n news_df = pd.read_csv(path, encoding = 'utf-8' \n #,nrows=1000\n )\n #Making sure articles are sorted by there encoded id\n news_df.sort_values('id_encoded', inplace=True)\n return news_df\n\n'''\ndef process_cat_features(dataframe):\n article_id_encoder = LabelEncoder()\n dataframe['id_encoded'] = article_id_encoder.fit_transform(dataframe['id'])\n\n #category_id_encoder = LabelEncoder()\n #dataframe['categoryid_encoded'] = category_id_encoder.fit_transform(dataframe['categoryid'])\n\n #domainid_encoder = LabelEncoder()\n #dataframe['domainid_encoded'] = domainid_encoder.fit_transform(dataframe['domainid'])\n\n\n return article_id_encoder#, category_id_encoder, domainid_encoder\n\n\ndef save_article_cat_encoders(output_path, article_id_encoder, category_id_encoder, domainid_encoder):\n to_serialize = {'article_id': article_id_encoder, \n 'category_id': category_id_encoder, \n 'publisher_id': domainid_encoder}\n serialize(output_path, to_serialize)\n'''\n\ndef tokenize_norwegian_article(text, first_sentences=12, max_words_length=1000):\n #Removing pipes for correct sentence tokenization\n text = text.replace('|', '.')\n words_tokenized = []\n sent_count = 0\n for sentence in nltk.tokenize.sent_tokenize(text, language='norwegian'): \n sent_tokenized = nltk.tokenize.word_tokenize(sentence, language='norwegian')\n if len(sent_tokenized) >= 3 and sent_tokenized[-1] in ['.', '!', '?', ';'] and \\\n sent_tokenized != ['Saken', 'oppdateres', '.']: \n sent_count += 1\n words_tokenized.extend(sent_tokenized) \n if sent_count == first_sentences:\n break\n return words_tokenized[:max_words_length]\n\n\ndef export_article_content_embeddings(content_article_embeddings, output_article_content_embeddings):\n output_path = output_article_content_embeddings\n print('Exporting ACR Label Encoders, Article metadata and embeddings to {}'.format(output_path))\n #to_serialize = (acr_label_encoders, articles_metadata_df, content_article_embeddings)\n to_serialize = content_article_embeddings\n serialize(output_path, to_serialize)\n\n\ndef main():\n parser = create_args_parser()\n args = parser.parse_args()\n\n print('Loading news article CSV: {}'.format(args.input_articles_csv_path))\n news_df = load_input_csv(args.input_articles_csv_path)\n print('N. 
docs: {}'.format(len(news_df)))\n\n '''\n print('Encoding categorical features')\n article_id_encoder, category_id_encoder, domainid_encoder = process_cat_features(news_df)\n print('Exporting LabelEncoders of categorical features: {}'.format(args.output_label_encoders))\n save_article_cat_encoders(args.output_label_encoders, \n article_id_encoder, \n category_id_encoder, \n domainid_encoder)\n '''\n\n print('Tokenizing articles...')\n tokenized_articles = tokenize_articles(news_df['text_highlights'].values, tokenization_fn=tokenize_norwegian_article)\n\n #print('Computing word frequencies...')\n #words_freq = get_words_freq(tokenized_articles)\n #print('Corpus vocabulary size: {}'.format(len(words_freq)))\n\n print('Processing documents...')\n tagged_data = [TaggedDocument(words=w, tags=[i]) for i, w in enumerate(tokenized_articles)] \n\n\n print('Training doc2vec')\n max_epochs = 30\n vec_size = 250\n alpha = 0.025\n model = Doc2Vec(vector_size=vec_size,\n alpha=alpha, \n min_alpha=alpha, \n window=5,\n negative=5,\n min_count=2, \n max_vocab_size=100000,\n dm = 1,\n dm_mean=1,\n workers=6)\n \n model.build_vocab(tagged_data)\n\n for epoch in range(max_epochs):\n print('iteration {0}'.format(epoch))\n model.train(tagged_data,\n total_examples=model.corpus_count,\n epochs=1) #model.iter)\n # decrease the learning rate\n model.alpha -= 0.0002\n # fix the learning rate, no decay\n model.min_alpha = model.alpha\n\n del tokenized_articles\n\n\n #print('Encoding categorical features')\n #article_id_encoder = process_cat_features(news_df)\n\n print('Concatenating article content embeddings, making sure that they are sorted by the encoded article id')\n article_content_embeddings = np.vstack([model.docvecs[i-1] for i in news_df['id_encoded'].values]) \n embedding_for_padding_article = np.mean(article_content_embeddings, axis=0)\n content_article_embeddings_with_padding = np.vstack([embedding_for_padding_article, article_content_embeddings])\n del article_content_embeddings\n\n #Checking if content articles embedding size correspond to the last article_id\n assert content_article_embeddings_with_padding.shape[0] == news_df['id_encoded'].tail(1).values[0]+1\n\n print('Exporting article content embeddings')\n del news_df\n export_article_content_embeddings(content_article_embeddings_with_padding, args.output_article_content_embeddings)\n\n #Ps: To experiment with these doc2vec embeddings, it is necessary to deserialize \"acr_articles_metadata_embeddings.pickle\", substitute the content_article_embedding and serialize for further usage by NAR module\n #This is made by acr_module/notebooks/ACR_Results_Visualization_Gcom_doc2vec.ipynb\n\nif __name__ == '__main__':\n main()\n\n\n'''\nDATA_DIR=/media/data/projects/personal/doutorado/adressa_news/data_transformed && \\\npython3 -m acr.preprocessing.doc2vec_adressa \\\n --input_articles_csv_path ${DATA_DIR}/articles_tfrecords_v4_first_12_sent.csv \\\n --output_article_content_embeddings ${DATA_DIR}/pickles_v4/article_content_embeddings_v4_doc2vec.pickle\n\n#--input_articles_csv_path ${DATA_DIR}/adressa_articles.csv \\\n#--output_article_content_embeddings ${DATA_DIR}/pickles/article_content_embeddings_doc2vec.pickle \n'''"
] | [
[
"pandas.read_csv",
"numpy.mean",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mabrahamdevops/python_notebooks | [
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330",
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330",
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330",
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330",
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330",
"6d5e7383b60cc7fd476f6e85ab93e239c9c32330"
] | [
"notebooks/__code/radial_profile/event_handler.py",
"notebooks/__code/bragg_edge/get.py",
"notebooks/__code/panoramic_stitching/profile.py",
"notebooks/__code/metadata_overlapping_images/get.py",
"notebooks/__code/hfir_reactor_element_analysis/hfir_reactor_element_analysis.py",
"notebooks/__code/_utilities/folder.py"
] | [
"import numpy as np\nimport pyqtgraph as pg\nfrom qtpy import QtGui\n\nfrom __code._utilities.parent import Parent\nfrom __code.radial_profile.display import Display\n\n\nclass EventHandler(Parent):\n\n def file_index_changed(self):\n file_index = self.parent.ui.slider.value()\n live_image = self.parent.get_selected_image(file_index)\n\n _view = self.parent.ui.image_view.getView()\n _view_box = _view.getViewBox()\n _state = _view_box.getState()\n\n first_update = False\n if self.parent.histogram_level == []:\n first_update = True\n _histo_widget = self.parent.ui.image_view.getHistogramWidget()\n self.parent.histogram_level = _histo_widget.getLevels()\n\n _image = np.transpose(live_image)\n self.parent.ui.image_view.setImage(_image)\n self.parent.live_image = _image\n _view_box.setState(_state)\n\n if not first_update:\n _histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])\n\n def guide_color_changed(self):\n red = self.parent.ui.guide_red_slider.value()\n green = self.parent.ui.guide_green_slider.value()\n blue = self.parent.ui.guide_blue_slider.value()\n alpha = self.parent.ui.guide_alpha_slider.value()\n self.parent.guide_color_slider['red'] = red\n self.parent.guide_color_slider['green'] = green\n self.parent.guide_color_slider['blue'] = blue\n self.parent.guide_color_slider['alpha'] = alpha\n self.circle_center_changed()\n\n self.parent.ui.image_view.removeItem(self.parent.line_view_binning)\n\n o_display = Display(parent=self.parent)\n o_display.grid()\n\n def circle_center_changed(self):\n if self.parent.ui.sector_full_circle.isChecked():\n if self.parent.sector_g:\n self.parent.ui.image_view.removeItem(self.parent.sector_g)\n return\n\n x0 = float(self.parent.ui.circle_x.text())\n y0 = float(self.parent.ui.circle_y.text())\n from_angle = np.float(str(self.parent.ui.sector_from_value.text()))\n to_angle = np.float(str(self.parent.ui.sector_to_value.text()))\n\n self.calculate_corners_angles()\n self.update_angle_label_position()\n\n [y1, x1] = self.calculate_sector_xy_position(angle=from_angle, x0=x0, y0=y0)\n [y2, x2] = self.calculate_sector_xy_position(angle=to_angle, x0=x0, y0=y0)\n\n pos = np.array([[x0, y0], [x1, y1], [x2, y2]])\n adj = np.array([[0, 1], [1, 2], [2, 0]])\n\n symbols = ['+', 'o', 'o']\n\n lines = np.array([(255, 0, 0, 255, 2), (255, 0, 0, 0, 1), (255, 0, 0, 255, 2)],\n dtype=[('red', np.ubyte), ('green', np.ubyte), ('blue', np.ubyte), ('alpha', np.ubyte),\n ('width', float)])\n\n if self.parent.sector_g:\n self.parent.ui.image_view.removeItem(self.parent.sector_g)\n self.parent.sector_g = pg.GraphItem()\n self.parent.ui.image_view.addItem(self.parent.sector_g)\n self.parent.sector_g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False)\n \n def update_angle_label_position(self):\n x0 = np.int(str(self.parent.ui.circle_x.text()))\n y0 = np.int(str(self.parent.ui.circle_y.text()))\n\n # add angle 0, 90, 180 and 270 labels\n if self.parent.angle_0 is None:\n self.parent.angle_0 = pg.TextItem(text=u'0\\u00b0', anchor=(0, 1))\n self.parent.angle_90 = pg.TextItem(text=u'90\\u00b0', anchor=(0, 1))\n self.parent.angle_180 = pg.TextItem(text=u'180\\u00b0', anchor=(0, 0))\n self.parent.angle_270 = pg.TextItem(text=u'270\\u00b0', anchor=(1, 1))\n\n self.parent.ui.image_view.addItem(self.parent.angle_0)\n self.parent.ui.image_view.addItem(self.parent.angle_90)\n self.parent.ui.image_view.addItem(self.parent.angle_180)\n self.parent.ui.image_view.addItem(self.parent.angle_270)\n\n self.parent.angle_0.setPos(np.int(x0), 
0)\n self.parent.angle_90.setPos(self.parent.height, y0)\n self.parent.angle_180.setPos(x0, self.parent.width)\n self.parent.angle_270.setPos(0, y0)\n \n def calculate_sector_xy_position(self, angle=0, x0=0, y0=0):\n x = np.NaN\n y = np.NaN\n\n angle_top_right = self.parent.corners['top_right']\n angle_bottom_right = self.parent.corners['bottom_right']\n angle_bottom_left = self.parent.corners['bottom_left']\n angle_top_left = self.parent.corners['top_left']\n\n # print(\"angle_top_right: {}\".format(angle_top_right))\n # print(\"angle_bottom_right: {}\".format(angle_bottom_right))\n # print(\"angle_bottom_left: {}\".format(angle_bottom_left))\n # print(\"angle_top_left: {}\".format(angle_top_left))\n\n if (angle_top_right <= angle) and \\\n (angle <= angle_bottom_right):\n # right\n\n # get x\n x = self.parent.height\n\n # get y\n _angle = np.abs(90 - angle)\n\n if angle == 90:\n y = 0\n else:\n angle_rad = np.deg2rad(_angle)\n y = np.tan(angle_rad) * (self.parent.height - x0)\n\n if angle <= 90:\n y = y0 - y\n else:\n y = y0 + y\n\n elif angle_bottom_right < angle < angle_bottom_left:\n # bottom\n\n # get y\n y = self.parent.width\n\n # get x\n _angle = np.abs(180 - angle)\n\n if angle == 180:\n x = 0\n else:\n angle_rad = np.deg2rad(_angle)\n x = (y - y0) * np.tan(angle_rad)\n\n if angle <= 180:\n x = x0 + x\n else:\n x = x0 - x\n\n elif angle_bottom_left <= angle <= angle_top_left:\n # left\n\n # get x\n x = 0\n\n # get y\n _angle = np.abs(270 - angle)\n\n if angle == 270:\n y = 0\n else:\n angle_rad = np.deg2rad(_angle)\n y = np.tan(angle_rad) * x0\n\n if angle <= 270:\n y = y0 + y\n else:\n y = y0 - y\n\n else:\n # top\n\n # get y\n y = 0\n\n # get x\n b_right_part = True\n if angle > angle_top_left:\n angle = np.abs(360 - angle)\n b_right_part = False\n\n if angle == 0:\n x = 0\n else:\n angle_rad = np.deg2rad(angle)\n x = y0 * np.tan(angle_rad)\n\n if b_right_part:\n x = x0 + x\n else:\n x = x0 - x\n\n return [y, x]\n \n def calculate_corners_angles(self):\n '''top vertical being angle 0'''\n\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n\n width = self.parent.width\n height = self.parent.height\n # width = self.parent.height\n # height = self.parent.width\n\n theta_tr = np.NaN # angle top right\n theta_br = np.NaN # bottom right\n theta_bl = np.NaN # bottom left\n theta_tl = np.NaN # top left\n\n theta_tr = np.arctan((width - x0) / y0)\n theta_tr_deg = np.rad2deg(theta_tr)\n\n theta_br = np.pi - np.arctan((width - x0) / (height - y0))\n theta_br_deg = np.rad2deg(theta_br)\n\n theta_bl = np.pi + np.arctan(x0 / (height - y0))\n theta_bl_deg = np.rad2deg(theta_bl)\n\n theta_tl = 2 * np.pi - np.arctan(x0 / y0)\n theta_tl_deg = np.rad2deg(theta_tl)\n\n self.parent.corners['top_right'] = theta_tr_deg\n self.parent.corners['bottom_right'] = theta_br_deg\n self.parent.corners['bottom_left'] = theta_bl_deg\n self.parent.corners['top_left'] = theta_tl_deg\n \n def sector_radio_button_changed(self):\n is_full_circle = self.parent.ui.sector_full_circle.isChecked()\n if is_full_circle:\n _status_sector = False\n self.remove_angle_label()\n else:\n _status_sector = True\n self.update_angle_label_position()\n\n self.parent.ui.sector_from_label.setEnabled(_status_sector)\n self.parent.ui.sector_from_value.setEnabled(_status_sector)\n self.parent.ui.sector_from_units.setEnabled(_status_sector)\n self.parent.ui.sector_to_label.setEnabled(_status_sector)\n self.parent.ui.sector_to_value.setEnabled(_status_sector)\n 
self.parent.ui.sector_to_units.setEnabled(_status_sector)\n self.parent.ui.from_angle_slider.setEnabled(_status_sector)\n self.parent.ui.to_angle_slider.setEnabled(_status_sector)\n self.parent.sector_changed()\n\n def remove_angle_label(self):\n if self.parent.angle_0:\n self.parent.ui.image_view.removeItem(self.parent.angle_0)\n\n if self.parent.angle_90:\n self.parent.ui.image_view.removeItem(self.parent.angle_90)\n\n if self.parent.angle_180:\n self.parent.ui.image_view.removeItem(self.parent.angle_180)\n\n if self.parent.angle_270:\n self.parent.ui.image_view.removeItem(self.parent.angle_270)\n\n self.parent.angle_0 = None\n self.parent.angle_90 = None\n self.parent.angle_180 = None\n self.parent.angle_270 = None\n\n def update_max_radius_item(self):\n is_max_radius_selected = self.parent.ui.max_radius_radioButton.isChecked()\n self.max_radius_handler(is_max_radius_selected=is_max_radius_selected)\n\n def max_radius_handler(self, is_max_radius_selected=None):\n if self.parent.max_radius_item:\n self.parent.ui.image_view.removeItem(self.parent.max_radius_item)\n\n if is_max_radius_selected:\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n max_radius = self.parent.ui.max_radius_slider.value()\n\n _pen = QtGui.QPen()\n _pen.setColor(QtGui.QColor(0, 0, 255))\n _pen.setWidth(0.4)\n\n self.parent.max_radius_item = pg.CircleROI([x0 - max_radius, y0 - max_radius],\n [2*max_radius, 2*max_radius],\n movable=False,\n resizable=False,\n pen=_pen)\n handles = self.parent.max_radius_item.getHandles()\n self.parent.ui.image_view.addItem(self.parent.max_radius_item)\n for _handle in handles:\n self.parent.max_radius_item.removeHandle(_handle)\n\n def retrieve_max_radius_possible(self):\n x0 = float(str(self.parent.ui.circle_x.text()))\n y0 = float(str(self.parent.ui.circle_y.text()))\n width = self.parent.width\n height = self.parent.height\n\n def lenght_is(x=0, y=0):\n return np.sqrt(x**2 + y**2)\n\n # to top left distance\n x = x0\n y = y0\n top_left = lenght_is(x=x, y=y)\n\n # to top right distance\n x = width - x0\n y = y0\n top_right = lenght_is(x=x, y=y)\n\n # to bottom left corner\n x = x0\n y = height - y0\n bottom_left = lenght_is(x=x, y=y)\n\n # to bottom right corner\n x = width - x0\n y = height - y0\n bottom_right = lenght_is(x=x, y=y)\n\n max_distance = np.max([top_left, top_right, bottom_left, bottom_right])\n return max_distance\n\n def update_max_radius_value(self):\n max_radius = self.retrieve_max_radius_possible()\n current_radius_value = self.parent.ui.max_radius_slider.value()\n if current_radius_value > max_radius:\n self.parent.ui.max_radius_slider.setValue(max_radius)\n self.parent.ui.max_radius_slider.setMaximum(max_radius)\n",
"import numpy as np\n\nfrom __code.bragg_edge.bragg_edge_peak_fitting_gui_utility import GuiUtility\nfrom __code.selection_region_utilities import SelectionRegionUtilities\nfrom __code._utilities.get import Get as TopGet\n\n\nclass Get(TopGet):\n\n def __init__(self, parent=None):\n self.parent = parent\n\n self.x_axis_choice_ui = {'selection': {'index' : self.parent.ui.selection_index_radiobutton,\n 'tof' : self.parent.ui.selection_tof_radiobutton,\n 'lambda': self.parent.ui.selection_lambda_radiobutton},\n 'fitting' : {'index' : self.parent.ui.fitting_index_radiobutton,\n 'tof' : self.parent.ui.fitting_tof_radiobutton,\n 'lambda': self.parent.ui.fitting_lambda_radiobutton},\n }\n\n def specified_x_axis(self, xaxis='index'):\n # if self.parent.is_file_imported:\n # \treturn self.parent.fitting_input_dictionary['xaxis'][xaxis]\n # else:\n label = self.parent.xaxis_label[xaxis]\n if xaxis == 'index':\n return self.parent.index_array, label\n elif xaxis == 'tof':\n return self.parent.tof_array_s * 1e6, label\n elif xaxis == 'lambda':\n return self.parent.lambda_array, label\n else:\n raise NotImplementedError\n\n def x_axis_label(self, x_axis_selected='index'):\n x_axis_dict = self.parent.fitting_input_dictionary['xaxis']\n return x_axis_dict[x_axis_selected][1]\n\n def x_axis_checked(self):\n o_gui = GuiUtility(parent=self)\n tab_selected = o_gui.get_tab_selected(tab_ui=self.parent.ui.tabWidget).lower()\n\n list_ui = self.x_axis_choice_ui[tab_selected]\n\n if list_ui['index'].isChecked():\n return 'index'\n elif list_ui['tof'].isChecked():\n return 'tof'\n else:\n return 'lambda'\n\n def x_axis(self):\n o_gui = GuiUtility(parent=self.parent)\n tab_selected = o_gui.get_tab_selected(self.parent.ui.tabWidget).lower()\n\n list_ui = self.x_axis_choice_ui[tab_selected]\n if list_ui['index'].isChecked():\n return self.specified_x_axis(xaxis='index')\n elif list_ui['tof'].isChecked():\n return self.specified_x_axis(xaxis='tof')\n else:\n return self.specified_x_axis(xaxis='lambda')\n\n def all_x_axis(self):\n all_x_axis = {'index' : self.specified_x_axis(xaxis='index'),\n 'tof' : self.specified_x_axis(xaxis='tof'),\n 'lambda': self.specified_x_axis(xaxis='lambda')}\n return all_x_axis\n\n def all_russian_doll_region_full_infos(self):\n if self.parent.is_file_imported:\n dict_regions = self.parent.fitting_input_dictionary['rois']\n else:\n # collect initial selection size (x0, y0, width, height)\n [x0, y0, x1, y1, width, height] = self.selection_roi_dimension()\n # create profile for all the fitting region inside that first box\n o_regions = SelectionRegionUtilities(x0=x0, y0=y0, width=width, height=height)\n dict_regions = o_regions.get_all_russian_doll_regions()\n self.parent.add_profile_to_dict_of_all_regions(dict_regions=dict_regions)\n return dict_regions\n\n def selection_roi_dimension(self):\n roi_id = self.parent.roi_id\n\n x0, y0, x1, y1, width, height = None, None, None, None, None, None\n\n if roi_id:\n region = roi_id.getArraySlice(self.parent.final_image,\n self.parent.ui.image_view.imageItem)\n x0 = region[0][0].start\n x1 = region[0][0].stop\n y0 = region[0][1].start\n y1 = region[0][1].stop\n width = np.int(x1 - x0)\n height = np.int(y1 - y0)\n\n else:\n x0, y0, x1, y1, width, height = self.parent.roi_dimension_from_config_file\n\n return [x0, y0, x1, y1, width, height]\n\n def profile_of_roi(self, x0=None, y0=None, x1=None, y1=None, width=None, height=None):\n profile_value = []\n\n if width:\n x1 = x0 + width\n if height:\n y1 = y0 + height\n\n for _image in 
self.parent.o_norm.data['sample']['data']:\n _value = np.mean(_image[y0:y1, x0:x1])\n profile_value.append(_value)\n\n return profile_value\n\n def requested_xaxis(self, xaxis_label='index'):\n if xaxis_label == 'index':\n return self.parent.dict_profile_to_fit['xaxis']['index'], self.parent.xaxis_label['index']\n elif xaxis_label == 'tof':\n return self.parent.dict_profile_to_fit['xaxis']['tof'], self.parent.xaxis_label['tof']\n elif xaxis_label == 'lambda':\n return self.parent.dict_profile_to_fit['xaxis']['lambda'], self.parent.xaxis_label['lambda']\n\n def fitting_profile_xaxis(self):\n if self.parent.ui.fitting_tof_radiobutton.isChecked():\n return self.requested_xaxis(xaxis_label='tof')\n elif self.ui.fitting_index_radiobutton.isChecked():\n return self.requested_xaxis(xaxis_label='index')\n else:\n return self.requested_xaxis(xaxis_label='lambda')\n\n def part_of_fitting_selected(self):\n \"\"\"high, low or bragg_peak\"\"\"\n list_pages = [\"Bragg peak selection\", \"high\", \"low\", \"bragg_peak\"]\n list_table_ui = [None,\n self.parent.ui.high_lda_tableWidget,\n self.parent.ui.low_lda_tableWidget,\n self.parent.ui.bragg_edge_tableWidget]\n\n page_index = self.parent.ui.kropff_toolBox.currentIndex()\n\n return {'name_of_page': list_pages[page_index],\n 'table_ui' : list_table_ui[page_index]}\n\n def y_axis_data_of_selected_row(self, row_selected):\n selected_roi = self.parent.fitting_input_dictionary['rois'][row_selected]\n yaxis = selected_roi['profile']\n [left_xaxis_index, right_xaxis_index] = self.parent.bragg_edge_range\n yaxis = yaxis[left_xaxis_index: right_xaxis_index]\n return yaxis\n\n def x_axis_data(self, x_axis_selected='index'):\n xaxis_dict = self.parent.fitting_input_dictionary['xaxis']\n xaxis_index, xaxis_label = xaxis_dict[x_axis_selected]\n [left_xaxis_index, right_xaxis_index] = self.parent.bragg_edge_range\n xaxis = xaxis_index[left_xaxis_index: right_xaxis_index]\n return xaxis\n\n @staticmethod\n def units(name='index'):\n if name == 'index':\n return 'file index'\n elif name == 'tof':\n return u\"\\u03BCs\"\n elif name == 'lambda':\n return u\"\\u212B\"\n else:\n return \"\"\n",
"import numpy as np\n\nfrom __code._utilities.table_handler import TableHandler\nfrom __code.panoramic_stitching.get import Get\nfrom __code.panoramic_stitching.image_handler import HORIZONTAL_MARGIN, VERTICAL_MARGIN\n\nCOLOR_WORKING_ROW = 'red'\nCOLOR_NONE_WORKING_ROW = 'black'\n\n\nclass Profile:\n\n def __init__(self, parent=None):\n self.parent = parent\n\n def horizontal_profile_changed(self):\n if self.parent.ui.enable_horizontal_profile_checkbox.isChecked():\n roi_id = self.parent.horizontal_profile['id']\n horizontal_roi_dimensions = Profile.get_x_y_width_height_of_roi(roi_id=roi_id)\n self.plot_profile(x=horizontal_roi_dimensions['x'],\n y=horizontal_roi_dimensions['y'],\n width=horizontal_roi_dimensions['width'],\n height=horizontal_roi_dimensions['height'],\n profile_type='horizontal')\n\n def vertical_profile_changed(self):\n if self.parent.ui.enable_vertical_profile_checkbox.isChecked():\n roi_id = self.parent.vertical_profile['id']\n vertical_roi_dimensions = Profile.get_x_y_width_height_of_roi(roi_id=roi_id)\n self.plot_profile(x=vertical_roi_dimensions['x'],\n y=vertical_roi_dimensions['y'],\n width=vertical_roi_dimensions['width'],\n height=vertical_roi_dimensions['height'],\n profile_type='vertical')\n\n def plot_profile(self, x=None, y=None, width=None, height=None, profile_type='horizontal'):\n if profile_type == 'horizontal':\n plot_ui = self.parent.horizontal_profile_plot\n dim_to_keep = 0\n else:\n plot_ui = self.parent.vertical_profile_plot\n dim_to_keep = 1\n plot_ui.axes.cla()\n\n o_get = Get(parent=self.parent)\n folder_selected = o_get.get_combobox_folder_selected()\n\n o_table = TableHandler(table_ui=self.parent.ui.tableWidget)\n row_selected = o_table.get_row_selected()\n\n data_dictionary = self.parent.data_dictionary[folder_selected]\n offset_dictionary = self.parent.offset_dictionary[folder_selected]\n\n image_height = self.parent.image_height\n image_width = self.parent.image_width\n\n for _file_index, _file in enumerate(data_dictionary.keys()):\n\n # no need to display profile if image is not visible\n if not offset_dictionary[_file]['visible']:\n continue\n\n if row_selected == _file_index:\n color = COLOR_WORKING_ROW\n else:\n color = COLOR_NONE_WORKING_ROW\n\n left_of_image = offset_dictionary[_file]['xoffset']\n top_of_image = offset_dictionary[_file]['yoffset']\n\n # image is on the right of profile\n if left_of_image > (x + width):\n continue\n\n # image is on the left of profile\n if (left_of_image + image_width) < x:\n continue\n\n # image is above profile\n if (top_of_image + image_height) < y:\n continue\n\n # image is below profile\n if top_of_image > (y + height):\n continue\n\n # find part of profile that is inside image\n x_left_for_profile = np.max([x, left_of_image]) - left_of_image\n x_right_for_profile = np.min([x + width, left_of_image + image_width]) - left_of_image\n\n y_top_for_profile = np.max([y, top_of_image]) - top_of_image\n y_bottom_for_profile = np.min([y + height, top_of_image + image_height]) - top_of_image\n\n if profile_type == 'horizontal':\n x_axis_of_profile = np.arange(x_left_for_profile, x_right_for_profile) + left_of_image\n else:\n x_axis_of_profile = np.arange(y_top_for_profile, y_bottom_for_profile) + top_of_image\n\n y_axis_of_profile = data_dictionary[_file].data[\n y_top_for_profile:y_bottom_for_profile,\n x_left_for_profile:x_right_for_profile,\n ]\n\n y_axis = np.mean(y_axis_of_profile, axis=dim_to_keep)\n plot_ui.axes.plot(x_axis_of_profile, y_axis, color=color)\n # plot_ui.axes.set_xlabel(\"Pixel\")\n # 
plot_ui.axes.set_ylabel(\"Average counts\")\n\n try:\n plot_ui.draw()\n except ValueError:\n pass\n\n @staticmethod\n def get_x_y_width_height_of_roi(roi_id=None):\n x, y = roi_id.pos()\n width, height = roi_id.size()\n return {'x' : np.int(x) - HORIZONTAL_MARGIN,\n 'y' : np.int(y) - VERTICAL_MARGIN,\n 'width' : np.int(width),\n 'height': np.int(height)}\n",
"import numpy as np\nimport os\nimport collections\nfrom PIL import Image\n\n\nclass Get:\n\n def __init__(self, parent=None):\n self.parent = parent\n\n def metadata_column_selected(self):\n selection = self.parent.ui.tableWidget.selectedRanges()[0]\n return selection.leftColumn()\n\n def y_axis_data(self):\n y_axis_column_index = self.parent.y_axis_column_index\n y_axis = self.axis_data(axis_index=y_axis_column_index)\n return y_axis\n\n def x_axis_data(self):\n x_axis_column_index = self.parent.x_axis_column_index\n x_axis = self.axis_data(axis_index=x_axis_column_index)\n return x_axis\n\n def axis_data(self, axis_index=0):\n nbr_row = self.parent.ui.tableWidget.rowCount()\n axis_data = []\n for _row in np.arange(nbr_row):\n _row_str = str(self.parent.ui.tableWidget.item(_row, axis_index).text())\n axis_data.append(_row_str)\n return axis_data\n\n def convert_to_float(self, axis=None):\n return [float(value) for value in axis]\n\n def metadata_column(self):\n data = []\n nbr_row = self.parent.ui.tableWidget.rowCount()\n for _row in np.arange(nbr_row):\n _row_str = str(self.parent.ui.tableWidget.item(_row, 1).text())\n split_row_str = _row_str.split(\":\")\n if len(split_row_str) == 1:\n _row_str = split_row_str[0]\n else:\n _row_str = split_row_str[1]\n try:\n _row_value = np.float(_row_str)\n except:\n self.parent.ui.statusbar.showMessage(\"Error Displaying Metadata Graph!\", 10000)\n self.parent.ui.statusbar.setStyleSheet(\"color: red\")\n return []\n\n data.append(_row_value)\n\n return data\n\n def prefix_of_y_axis(self):\n y_axis_column_index = self.parent.y_axis_column_index\n return self.prefix_of_axis(column_index=y_axis_column_index)\n\n def suffix_of_y_axis(self):\n y_axis_column_index = self.parent.y_axis_column_index\n return self.suffix_of_axis(column_index=y_axis_column_index)\n\n def prefix_of_x_axis(self):\n x_axis_column_index = self.parent.x_axis_column_index\n return self.prefix_of_axis(column_index=x_axis_column_index)\n\n def suffix_of_x_axis(self):\n x_axis_column_index = self.parent.x_axis_column_index\n return self.suffix_of_axis(column_index=x_axis_column_index)\n\n def prefix_of_axis(self, column_index=0):\n if column_index == 0:\n return \"\"\n elif column_index == 2:\n return str(self.parent.ui.prefix_lineEdit_1.text())\n else:\n return str(self.parent.ui.prefix_lineEdit_2.text())\n\n def suffix_of_axis(self, column_index=0):\n column_index = self.parent.y_axis_column_index\n if column_index == 0:\n return \"\"\n elif column_index == 2:\n return str(self.parent.ui.suffix_lineEdit_1.text())\n else:\n return str(self.parent.ui.suffix_lineEdit_2.text())\n\n def metadata_text(self, metadata_index=1):\n \"\"\"return the text and value of the metadata to display\"\"\"\n\n if metadata_index == 1:\n metadata_name = str(self.parent.ui.prefix_lineEdit_1.text())\n metadata_units = str(self.parent.ui.suffix_lineEdit_1.text())\n else:\n metadata_name = str(self.parent.ui.prefix_lineEdit_2.text())\n metadata_units = str(self.parent.ui.suffix_lineEdit_2.text())\n\n slider_index = self.parent.ui.file_slider.value()\n\n index_of_y_axis = self.parent.y_axis_column_index\n metadata_value = str(self.parent.ui.tableWidget.item(slider_index, index_of_y_axis).text())\n if metadata_name.strip() == '':\n return \"{} {}\".format(metadata_value, metadata_units)\n else:\n return \"{}: {} {}\".format(metadata_name, metadata_value, metadata_units)\n\n def scale_legend(self):\n real_scale_value = str(self.parent.ui.scale_real_size.text())\n units_index_selected = 
self.parent.ui.scale_units_combobox.currentIndex()\n html_units = self.parent.list_scale_units['html'][units_index_selected]\n return \"{} {}\".format(real_scale_value, html_units)\n\n def raw_metadata_column(self):\n data = []\n nbr_row = self.parent.ui.tableWidget.rowCount()\n for _row in np.arange(nbr_row):\n _row_str = str(self.parent.ui.tableWidget.item(_row, 1).text())\n data.append(_row_str)\n return data\n\n def color(self, color_type='html', source='metadata'):\n if source == 'metadata':\n color_selected = self.parent.ui.metadata_color_combobox.currentText().lower()\n elif source == 'graph':\n color_selected = self.parent.ui.graph_color_combobox.currentText().lower()\n else:\n color_selected = self.parent.ui.scale_color_combobox.currentText().lower()\n\n if color_type == 'html':\n return self.parent.html_color[color_selected]\n elif color_type == 'rgba':\n return self.parent.rgba_color[color_selected]\n else:\n return self.parent.rgb_color[color_selected]\n\n def list_metadata(self):\n first_file = self.parent.data_dict['file_name'][0]\n dict_list_metadata = collections.OrderedDict()\n [_, ext] = os.path.splitext(os.path.basename(first_file))\n if ext in [\".tif\", \".tiff\"]:\n o_image0 = Image.open(first_file)\n info = collections.OrderedDict(sorted(o_image0.tag_v2.items()))\n list_metadata = []\n list_key = []\n for tag, value in info.items():\n dict_list_metadata[tag] = value\n list_metadata.append(\"{} -> {}\".format(tag, value))\n list_key.append(tag)\n self.parent.list_metadata = list_key\n self.parent.dict_list_metadata = dict_list_metadata\n return list_metadata\n else:\n return []\n",
"import pandas as pd\nimport numpy as np\nfrom IPython.core.display import HTML\nfrom IPython.core.display import display, clear_output\n\nfrom __code.ipywe import fileselector\nfrom __code.file_handler import read_ascii\n\n\nclass HfirReactorElementAnalysis:\n\n pandas_obj = None\n metadata = None\n column_labels = None\n\n def __init__(self, working_dir=\"\"):\n self.working_dir = working_dir\n\n def select_ascii_file(self):\n ascii_file_ui = fileselector.FileSelectorPanel(instruction=\"Select ASCII file ...\",\n start_dir=self.working_dir,\n next=self.load_ascii,\n filters={\"CSV\": \"*.csv\"},\n default_filter=\"CSV\")\n ascii_file_ui.show()\n\n def load_ascii(self, ascii_file_name):\n\n display(HTML('<span style=\"font-size: 20px; color:Blue\">Loading data set ... PROCESSING!</span>'))\n\n # retrieving metadata and column names\n ascii_contain = read_ascii(filename=ascii_file_name)\n formatted_ascii_contain = ascii_contain.split(\"\\n\")\n for _line_number, _line_contain in enumerate(formatted_ascii_contain):\n if _line_contain == \"#\":\n break\n metadata = formatted_ascii_contain[:_line_number]\n self.metadata = metadata\n\n column_labels = [\"Angle (degrees)\"]\n for _text in metadata:\n if _text.startswith(\"# column \"):\n part1, part2 = _text.split(\":\")\n column_labels.append(part2.strip())\n self.column_labels = column_labels\n\n # retrieving data with pandas\n self.pandas_obj = pd.read_csv(ascii_file_name,\n skiprows=_line_number + 2,\n delimiter=\", \",\n names=column_labels,\n dtype=np.float,\n index_col=0)\n\n clear_output(wait=False)\n display(HTML('<span style=\"font-size: 20px; color:green\">Loading data set ... DONE!</span>'))\n",
"import glob\nimport os\nimport numpy as np\nimport shutil\n\n\ndef get_list_of_folders_with_specified_file_type(list_of_folders_to_check=None,\n file_extension=['tiff', 'tif']):\n \"\"\"\n check in the list of folder given (list_of_folders_to_check) if files of the type specified are there.\n If no file can be found in that folder with that type, the folder name is removed from the list\n\n :param:\n list_of_folders_to_check: example ['folder1', 'folder2', 'folder3'] list of full path to each folder\n file_extension: example ['tiff', 'tif'], ['fits'], ['.txt'] list of file extension to check for.\n :return:\n list of folders that do have at least one file with the correct file extension\n \"\"\"\n if not (type(list_of_folders_to_check) is list):\n raise ValueError(\"list_of_folders_to_check must be a list!\")\n\n if not (type(file_extension) is list):\n raise ValueError(\"file_extension must be a list!\")\n\n for _folder in list_of_folders_to_check:\n if not (os.path.exists(_folder)):\n break\n\n # checking size\n list_of_folders_checked = []\n for _folder in list_of_folders_to_check:\n if not (os.path.exists(_folder)):\n break\n\n for _extension in file_extension:\n list_of_files = glob.glob(os.path.join(_folder, '*.{}'.format(_extension)))\n if len(list_of_files) > 0:\n list_of_folders_checked.append(_folder)\n\n return list_of_folders_checked\n\n\ndef get_list_of_folders_with_specified_file_type_and_same_number_of_files(list_of_folders_to_check=None,\n file_extension=['tiff', 'tif']):\n \"\"\"\n check in the list of folder given (list_of_folders_to_check) if files of the type specified are there.\n If no file can be found in that folder with that type, the folder name is removed from the list\n\n :param:\n list_of_folders_to_check: example ['folder1', 'folder2', 'folder3'] list of full path to each folder\n file_extension: example ['tiff', 'tif'], ['fits'], ['.txt'] list of file extension to check for.\n :return:\n list of folders that do have at least one file with the correct file extension\n \"\"\"\n if not (type(list_of_folders_to_check) is list):\n raise ValueError(\"list_of_folders_to_check must be a list!\")\n\n if not (type(file_extension) is list):\n raise ValueError(\"file_extension must be a list!\")\n\n list_of_files = {}\n for _folder in list_of_folders_to_check:\n if not (os.path.exists(_folder)):\n break\n\n list_of_files[_folder] = []\n for _ext in file_extension:\n list_files_of_that_extension = glob.glob(os.path.join(_folder, '*.{}'.format(_ext)))\n for _file in list_files_of_that_extension:\n list_of_files[_folder].append(_file)\n\n list_len = []\n for _folder in list_of_files.keys():\n list_len.append(len(list_of_files[_folder]))\n max_len = np.max(list_len)\n\n # checking size\n list_of_folders_checked = []\n list_of_folders_rejected = []\n for _folder in list_of_files.keys():\n\n _local_list_of_files = list_of_files[_folder]\n if len(_local_list_of_files) == max_len:\n list_of_folders_checked.append(_folder)\n else:\n list_of_folders_rejected.append(_folder)\n\n return list_of_folders_checked, list_of_folders_rejected\n\n\ndef make_folder(folder_name):\n if not (os.path.exists(folder_name)):\n os.makedirs(folder_name)\n\n\ndef make_or_reset_folder(folder_name):\n if os.path.exists(folder_name):\n shutil.rmtree(folder_name)\n os.makedirs(folder_name)\n"
] | [
[
"numpy.abs",
"numpy.arctan",
"numpy.sqrt",
"numpy.rad2deg",
"numpy.tan",
"numpy.max",
"numpy.int",
"numpy.deg2rad",
"numpy.transpose",
"numpy.array"
],
[
"numpy.int",
"numpy.mean"
],
[
"numpy.min",
"numpy.arange",
"numpy.int",
"numpy.max",
"numpy.mean"
],
[
"numpy.arange",
"numpy.float"
],
[
"pandas.read_csv"
],
[
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pything/draugr | [
"2fda662f2fa97236e4495a6af2b8237516fa428b"
] | [
"draugr/visualisation/matplotlib_utilities/styles/cyclers.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Christian Heider Nielsen\"\n__doc__ = r\"\"\"\n\n Created on 18-02-2021\n \"\"\"\n\n__all__ = [\n \"monochrome_hatch_cycler\",\n \"simple_hatch_cycler\",\n \"monochrome_line_no_marker_cycler\",\n \"monochrome_line_cycler\",\n]\n\nfrom matplotlib import cycler\n\nfrom draugr.visualisation.matplotlib_utilities.styles.hatching import (\n four_times_denser_hatch,\n)\nfrom draugr.visualisation.matplotlib_utilities.styles.lines import (\n line_styles,\n marker_styles,\n)\n\nsimple_hatch_cycler = cycler(\"hatch\", four_times_denser_hatch)\nmonochrome_hatch_cycler = (\n cycler(\"color\", \"w\")\n * cycler(\"facecolor\", \"w\")\n * cycler(\"edgecolor\", \"k\")\n * simple_hatch_cycler\n)\n\nmonochrome_line_no_marker_cycler = cycler(\"color\", [\"k\"]) * cycler(\n \"linestyle\", line_styles\n)\n\nmonochrome_line_cycler = (\n cycler(\"color\", [\"k\"])\n * cycler(\"linestyle\", line_styles)\n * cycler(\"marker\", marker_styles)\n)\n\nif __name__ == \"__main__\":\n print([a for _, a in zip(range(10), monochrome_line_cycler)])\n"
] | [
[
"matplotlib.cycler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
emarkou/scikit-learn | [
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025",
"d73822f84f2832dcc25f0ff58769f60871a78025"
] | [
"examples/compose/plot_compare_reduction.py",
"sklearn/utils/random.py",
"benchmarks/bench_plot_randomized_svd.py",
"sklearn/inspection/tests/test_partial_dependence.py",
"examples/neural_networks/plot_rbm_logistic_classification.py",
"sklearn/datasets/_kddcup99.py",
"examples/ensemble/plot_feature_transformation.py",
"examples/cluster/plot_digits_linkage.py",
"examples/model_selection/grid_search_text_feature_extraction.py",
"sklearn/tests/test_random_projection.py",
"sklearn/feature_selection/_mutual_info.py",
"sklearn/ensemble/_hist_gradient_boosting/loss.py",
"sklearn/datasets/_species_distributions.py",
"sklearn/utils/multiclass.py",
"examples/model_selection/plot_randomized_search.py",
"sklearn/ensemble/_hist_gradient_boosting/binning.py",
"sklearn/datasets/tests/test_kddcup99.py",
"examples/feature_selection/plot_rfe_digits.py",
"sklearn/_loss/glm_distribution.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n=================================================================\nSelecting dimensionality reduction with Pipeline and GridSearchCV\n=================================================================\n\nThis example constructs a pipeline that does dimensionality\nreduction followed by prediction with a support vector\nclassifier. It demonstrates the use of ``GridSearchCV`` and\n``Pipeline`` to optimize over different classes of estimators in a\nsingle CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality\nreductions are compared to univariate feature selection during\nthe grid search.\n\nAdditionally, ``Pipeline`` can be instantiated with the ``memory``\nargument to memoize the transformers within the pipeline, avoiding to fit\nagain the same transformers over and over.\n\nNote that the use of ``memory`` to enable caching becomes interesting when the\nfitting of a transformer is costly.\n\n# %%\nIllustration of ``Pipeline`` and ``GridSearchCV``\n###############################################################################\n\nThis section illustrates the use of a ``Pipeline`` with ``GridSearchCV``\n\"\"\"\n\n# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.decomposition import PCA, NMF\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nprint(__doc__)\n\npipe = Pipeline([\n # the reduce_dim stage is populated by the param_grid\n ('reduce_dim', 'passthrough'),\n ('classify', LinearSVC(dual=False, max_iter=10000))\n])\n\nN_FEATURES_OPTIONS = [2, 4, 8]\nC_OPTIONS = [1, 10, 100, 1000]\nparam_grid = [\n {\n 'reduce_dim': [PCA(iterated_power=7), NMF()],\n 'reduce_dim__n_components': N_FEATURES_OPTIONS,\n 'classify__C': C_OPTIONS\n },\n {\n 'reduce_dim': [SelectKBest(chi2)],\n 'reduce_dim__k': N_FEATURES_OPTIONS,\n 'classify__C': C_OPTIONS\n },\n]\nreducer_labels = ['PCA', 'NMF', 'KBest(chi2)']\n\ngrid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)\nX, y = load_digits(return_X_y=True)\ngrid.fit(X, y)\n\nmean_scores = np.array(grid.cv_results_['mean_test_score'])\n# scores are in the order of param_grid iteration, which is alphabetical\nmean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))\n# select score for best C\nmean_scores = mean_scores.max(axis=0)\nbar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *\n (len(reducer_labels) + 1) + .5)\n\nplt.figure()\nCOLORS = 'bgrcmyk'\nfor i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):\n plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])\n\nplt.title(\"Comparing feature reduction techniques\")\nplt.xlabel('Reduced number of features')\nplt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)\nplt.ylabel('Digit classification accuracy')\nplt.ylim((0, 1))\nplt.legend(loc='upper left')\n\nplt.show()\n\n# %%\n# Caching transformers within a ``Pipeline``\n###############################################################################\n# It is sometimes worthwhile storing the state of a specific transformer\n# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers\n# such situations. Therefore, we use the argument ``memory`` to enable caching.\n#\n# .. 
warning::\n# Note that this example is, however, only an illustration since for this\n# specific case fitting PCA is not necessarily slower than loading the\n# cache. Hence, use the ``memory`` constructor parameter when the fitting\n# of a transformer is costly.\n\nfrom joblib import Memory\nfrom shutil import rmtree\n\n# Create a temporary folder to store the transformers of the pipeline\nlocation = 'cachedir'\nmemory = Memory(location=location, verbose=10)\ncached_pipe = Pipeline([('reduce_dim', PCA()),\n ('classify', LinearSVC(dual=False, max_iter=10000))],\n memory=memory)\n\n# This time, a cached pipeline will be used within the grid search\n\n\n# Delete the temporary cache before exiting\nmemory.clear(warn=False)\nrmtree(location)\n\n# %%\n# The ``PCA`` fitting is only computed at the evaluation of the first\n# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The\n# other configurations of ``C`` will trigger the loading of the cached ``PCA``\n# estimator data, leading to save processing time. Therefore, the use of\n# caching the pipeline using ``memory`` is highly beneficial when fitting\n# a transformer is costly.\n",
"# Author: Hamzeh Alsalhi <[email protected]>\n#\n# License: BSD 3 clause\nimport numpy as np\nimport scipy.sparse as sp\nimport array\n\nfrom . import check_random_state\nfrom ._random import sample_without_replacement\n\n__all__ = ['sample_without_replacement']\n\n\ndef _random_choice_csc(n_samples, classes, class_probability=None,\n random_state=None):\n \"\"\"Generate a sparse random matrix given column class distributions\n\n Parameters\n ----------\n n_samples : int,\n Number of samples to draw in each column.\n\n classes : list of size n_outputs of arrays of size (n_classes,)\n List of classes for each column.\n\n class_probability : list of size n_outputs of arrays of \\\n shape (n_classes,), default=None\n Class distribution of each column. If None, uniform distribution is\n assumed.\n\n random_state : int, RandomState instance or None, default=None\n Controls the randomness of the sampled classes.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n random_matrix : sparse csc matrix of size (n_samples, n_outputs)\n\n \"\"\"\n data = array.array('i')\n indices = array.array('i')\n indptr = array.array('i', [0])\n\n for j in range(len(classes)):\n classes[j] = np.asarray(classes[j])\n if classes[j].dtype.kind != 'i':\n raise ValueError(\"class dtype %s is not supported\" %\n classes[j].dtype)\n classes[j] = classes[j].astype(np.int64, copy=False)\n\n # use uniform distribution if no class_probability is given\n if class_probability is None:\n class_prob_j = np.empty(shape=classes[j].shape[0])\n class_prob_j.fill(1 / classes[j].shape[0])\n else:\n class_prob_j = np.asarray(class_probability[j])\n\n if not np.isclose(np.sum(class_prob_j), 1.0):\n raise ValueError(\"Probability array at index {0} does not sum to \"\n \"one\".format(j))\n\n if class_prob_j.shape[0] != classes[j].shape[0]:\n raise ValueError(\"classes[{0}] (length {1}) and \"\n \"class_probability[{0}] (length {2}) have \"\n \"different length.\".format(j,\n classes[j].shape[0],\n class_prob_j.shape[0]))\n\n # If 0 is not present in the classes insert it with a probability 0.0\n if 0 not in classes[j]:\n classes[j] = np.insert(classes[j], 0, 0)\n class_prob_j = np.insert(class_prob_j, 0, 0.0)\n\n # If there are nonzero classes choose randomly using class_probability\n rng = check_random_state(random_state)\n if classes[j].shape[0] > 1:\n p_nonzero = 1 - class_prob_j[classes[j] == 0]\n nnz = int(n_samples * p_nonzero)\n ind_sample = sample_without_replacement(n_population=n_samples,\n n_samples=nnz,\n random_state=random_state)\n indices.extend(ind_sample)\n\n # Normalize probabilities for the nonzero elements\n classes_j_nonzero = classes[j] != 0\n class_probability_nz = class_prob_j[classes_j_nonzero]\n class_probability_nz_norm = (class_probability_nz /\n np.sum(class_probability_nz))\n classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),\n rng.rand(nnz))\n data.extend(classes[j][classes_j_nonzero][classes_ind])\n indptr.append(len(indices))\n\n return sp.csc_matrix((data, indices, indptr),\n (n_samples, len(classes)),\n dtype=int)\n",
"\"\"\"\nBenchmarks on the power iterations phase in randomized SVD.\n\nWe test on various synthetic and real datasets the effect of increasing\nthe number of power iterations in terms of quality of approximation\nand running time. A number greater than 0 should help with noisy matrices,\nwhich are characterized by a slow spectral decay.\n\nWe test several policy for normalizing the power iterations. Normalization\nis crucial to avoid numerical issues.\n\nThe quality of the approximation is measured by the spectral norm discrepancy\nbetween the original input matrix and the reconstructed one (by multiplying\nthe randomized_svd's outputs). The spectral norm is always equivalent to the\nlargest singular value of a matrix. (3) justifies this choice. However, one can\nnotice in these experiments that Frobenius and spectral norms behave\nvery similarly in a qualitative sense. Therefore, we suggest to run these\nbenchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to\ncompute.\n\nThe benchmarks follow.\n\n(a) plot: time vs norm, varying number of power iterations\n data: many datasets\n goal: compare normalization policies and study how the number of power\n iterations affect time and norm\n\n(b) plot: n_iter vs norm, varying rank of data and number of components for\n randomized_SVD\n data: low-rank matrices on which we control the rank\n goal: study whether the rank of the matrix and the number of components\n extracted by randomized SVD affect \"the optimal\" number of power iterations\n\n(c) plot: time vs norm, varying datasets\n data: many datasets\n goal: compare default configurations\n\nWe compare the following algorithms:\n- randomized_svd(..., power_iteration_normalizer='none')\n- randomized_svd(..., power_iteration_normalizer='LU')\n- randomized_svd(..., power_iteration_normalizer='QR')\n- randomized_svd(..., power_iteration_normalizer='auto')\n- fbpca.pca() from https://github.com/facebook/fbpca (if installed)\n\nConclusion\n----------\n- n_iter=2 appears to be a good default value\n- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU\n gives similar errors to QR but is cheaper. That's what 'auto' implements.\n\nReferences\n----------\n(1) Finding structure with randomness: Stochastic algorithms for constructing\n approximate matrix decompositions\n Halko, et al., 2009 https://arxiv.org/abs/0909.4061\n\n(2) A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert\n\n(3) An implementation of a randomized algorithm for principal component\n analysis\n A. Szlam et al. 
2014\n\"\"\"\n\n# Author: Giorgio Patrini\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nimport gc\nimport pickle\nfrom time import time\nfrom collections import defaultdict\nimport os.path\n\nfrom sklearn.utils._arpack import _init_arpack_v0\nfrom sklearn.utils import gen_batches\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.datasets import make_low_rank_matrix, make_sparse_uncorrelated\nfrom sklearn.datasets import (fetch_lfw_people,\n fetch_openml,\n fetch_20newsgroups_vectorized,\n fetch_olivetti_faces,\n fetch_rcv1)\n\ntry:\n import fbpca\n fbpca_available = True\nexcept ImportError:\n fbpca_available = False\n\n# If this is enabled, tests are much slower and will crash with the large data\nenable_spectral_norm = False\n\n# TODO: compute approximate spectral norms with the power method as in\n# Estimating the largest eigenvalues by the power and Lanczos methods with\n# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on\n# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.\n# This approximation is a very fast estimate of the spectral norm, but depends\n# on starting random vectors.\n\n# Determine when to switch to batch computation for matrix norms,\n# in case the reconstructed (dense) matrix is too large\nMAX_MEMORY = int(2e9)\n\n# The following datasets can be downloaded manually from:\n# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat\nCIFAR_FOLDER = \"./cifar-10-batches-py/\"\nSVHN_FOLDER = \"./SVHN/\"\n\ndatasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',\n 'mnist_784', 'CIFAR', 'a3a', 'SVHN', 'uncorrelated matrix']\n\nbig_sparse_datasets = ['big sparse matrix', 'rcv1']\n\n\ndef unpickle(file_name):\n with open(file_name, 'rb') as fo:\n return pickle.load(fo, encoding='latin1')[\"data\"]\n\n\ndef handle_missing_dataset(file_folder):\n if not os.path.isdir(file_folder):\n print(\"%s file folder not found. 
Test skipped.\" % file_folder)\n return 0\n\n\ndef get_data(dataset_name):\n print(\"Getting dataset: %s\" % dataset_name)\n\n if dataset_name == 'lfw_people':\n X = fetch_lfw_people().data\n elif dataset_name == '20newsgroups':\n X = fetch_20newsgroups_vectorized().data[:, :100000]\n elif dataset_name == 'olivetti_faces':\n X = fetch_olivetti_faces().data\n elif dataset_name == 'rcv1':\n X = fetch_rcv1().data\n elif dataset_name == 'CIFAR':\n if handle_missing_dataset(CIFAR_FOLDER) == \"skip\":\n return\n X1 = [unpickle(\"%sdata_batch_%d\" % (CIFAR_FOLDER, i + 1))\n for i in range(5)]\n X = np.vstack(X1)\n del X1\n elif dataset_name == 'SVHN':\n if handle_missing_dataset(SVHN_FOLDER) == 0:\n return\n X1 = sp.io.loadmat(\"%strain_32x32.mat\" % SVHN_FOLDER)['X']\n X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]\n X = np.vstack(X2)\n del X1\n del X2\n elif dataset_name == 'low rank matrix':\n X = make_low_rank_matrix(n_samples=500, n_features=int(1e4),\n effective_rank=100, tail_strength=.5,\n random_state=random_state)\n elif dataset_name == 'uncorrelated matrix':\n X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,\n random_state=random_state)\n elif dataset_name == 'big sparse matrix':\n sparsity = int(1e6)\n size = int(1e6)\n small_size = int(1e4)\n data = np.random.normal(0, 1, int(sparsity/10))\n data = np.repeat(data, 10)\n row = np.random.uniform(0, small_size, sparsity)\n col = np.random.uniform(0, small_size, sparsity)\n X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))\n del data\n del row\n del col\n else:\n X = fetch_openml(dataset_name).data\n return X\n\n\ndef plot_time_vs_s(time, norm, point_labels, title):\n plt.figure()\n colors = ['g', 'b', 'y']\n for i, l in enumerate(sorted(norm.keys())):\n if l != \"fbpca\":\n plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())\n else:\n plt.plot(time[l], norm[l], label=l, marker='^', c='red')\n\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, -20),\n textcoords='offset points', ha='right', va='bottom')\n plt.legend(loc=\"upper right\")\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"running time [s]\")\n\n\ndef scatter_time_vs_s(time, norm, point_labels, title):\n plt.figure()\n size = 100\n for i, l in enumerate(sorted(norm.keys())):\n if l != \"fbpca\":\n plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, -80),\n textcoords='offset points', ha='right',\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n va='bottom', size=11, rotation=90)\n else:\n plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)\n for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):\n plt.annotate(label, xy=(x, y), xytext=(0, 30),\n textcoords='offset points', ha='right',\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n va='bottom', size=11, rotation=90)\n\n plt.legend(loc=\"best\")\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"running time [s]\")\n\n\ndef plot_power_iter_vs_s(power_iter, s, title):\n plt.figure()\n for l in sorted(s.keys()):\n plt.plot(power_iter, s[l], label=l, marker='o')\n plt.legend(loc=\"lower right\", prop={'size': 10})\n plt.suptitle(title)\n plt.ylabel(\"norm discrepancy\")\n plt.xlabel(\"n_iter\")\n\n\ndef svd_timing(X, n_comps, n_iter, n_oversamples,\n 
power_iteration_normalizer='auto', method=None):\n \"\"\"\n Measure time for decomposition\n \"\"\"\n print(\"... running SVD ...\")\n if method is not 'fbpca':\n gc.collect()\n t0 = time()\n U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,\n power_iteration_normalizer,\n random_state=random_state, transpose=False)\n call_time = time() - t0\n else:\n gc.collect()\n t0 = time()\n # There is a different convention for l here\n U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,\n l=n_oversamples+n_comps)\n call_time = time() - t0\n\n return U, mu, V, call_time\n\n\ndef norm_diff(A, norm=2, msg=True, random_state=None):\n \"\"\"\n Compute the norm diff with the original matrix, when randomized\n SVD is called with *params.\n\n norm: 2 => spectral; 'fro' => Frobenius\n \"\"\"\n\n if msg:\n print(\"... computing %s norm ...\" % norm)\n if norm == 2:\n # s = sp.linalg.norm(A, ord=2) # slow\n v0 = _init_arpack_v0(min(A.shape), random_state)\n value = sp.sparse.linalg.svds(A,\n k=1,\n return_singular_vectors=False,\n v0=v0)\n else:\n if sp.sparse.issparse(A):\n value = sp.sparse.linalg.norm(A, ord=norm)\n else:\n value = sp.linalg.norm(A, ord=norm)\n return value\n\n\ndef scalable_frobenius_norm_discrepancy(X, U, s, V):\n # if the input is not too big, just call scipy\n if X.shape[0] * X.shape[1] < MAX_MEMORY:\n A = X - U.dot(np.diag(s).dot(V))\n return norm_diff(A, norm='fro')\n\n print(\"... computing fro norm by batches...\")\n batch_size = 1000\n Vhat = np.diag(s).dot(V)\n cum_norm = .0\n for batch in gen_batches(X.shape[0], batch_size):\n M = X[batch, :] - U[batch, :].dot(Vhat)\n cum_norm += norm_diff(M, norm='fro', msg=False)\n return np.sqrt(cum_norm)\n\n\ndef bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):\n\n all_time = defaultdict(list)\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n all_frobenius = defaultdict(list)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n\n for pi in power_iter:\n for pm in ['none', 'LU', 'QR']:\n print(\"n_iter = %d on sklearn - %s\" % (pi, pm))\n U, s, V, time = svd_timing(X, n_comps, n_iter=pi,\n power_iteration_normalizer=pm,\n n_oversamples=n_oversamples)\n label = \"sklearn - %s\" % pm\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if fbpca_available:\n print(\"n_iter = %d on fbca\" % (pi))\n U, s, V, time = svd_timing(X, n_comps, n_iter=pi,\n power_iteration_normalizer=pm,\n n_oversamples=n_oversamples,\n method='fbpca')\n label = \"fbpca\"\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if enable_spectral_norm:\n title = \"%s: spectral norm diff vs running time\" % (dataset_name)\n plot_time_vs_s(all_time, all_spectral, power_iter, title)\n title = \"%s: Frobenius norm diff vs running time\" % (dataset_name)\n plot_time_vs_s(all_time, all_frobenius, power_iter, title)\n\n\ndef bench_b(power_list):\n\n n_samples, n_features = 1000, 10000\n data_params = {'n_samples': n_samples, 'n_features': n_features,\n 'tail_strength': .7, 'random_state': random_state}\n 
dataset_name = \"low rank matrix %d x %d\" % (n_samples, n_features)\n ranks = [10, 50, 100]\n\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n all_frobenius = defaultdict(list)\n for rank in ranks:\n X = make_low_rank_matrix(effective_rank=rank, **data_params)\n if enable_spectral_norm:\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n\n for n_comp in [int(rank/2), rank, rank*2]:\n label = \"rank=%d, n_comp=%d\" % (rank, n_comp)\n print(label)\n for pi in power_list:\n U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,\n power_iteration_normalizer='LU')\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) /\n X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if enable_spectral_norm:\n title = \"%s: spectral norm diff vs n power iteration\" % (dataset_name)\n plot_power_iter_vs_s(power_iter, all_spectral, title)\n title = \"%s: Frobenius norm diff vs n power iteration\" % (dataset_name)\n plot_power_iter_vs_s(power_iter, all_frobenius, title)\n\n\ndef bench_c(datasets, n_comps):\n all_time = defaultdict(list)\n if enable_spectral_norm:\n all_spectral = defaultdict(list)\n all_frobenius = defaultdict(list)\n\n for dataset_name in datasets:\n X = get_data(dataset_name)\n if X is None:\n continue\n\n if enable_spectral_norm:\n X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)\n X_fro_norm = norm_diff(X, norm='fro', msg=False)\n n_comps = np.minimum(n_comps, np.min(X.shape))\n\n label = \"sklearn\"\n print(\"%s %d x %d - %s\" %\n (dataset_name, X.shape[0], X.shape[1], label))\n U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,\n method=label)\n\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if fbpca_available:\n label = \"fbpca\"\n print(\"%s %d x %d - %s\" %\n (dataset_name, X.shape[0], X.shape[1], label))\n U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,\n method=label)\n all_time[label].append(time)\n if enable_spectral_norm:\n A = U.dot(np.diag(s).dot(V))\n all_spectral[label].append(\n norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm\n )\n f = scalable_frobenius_norm_discrepancy(X, U, s, V)\n all_frobenius[label].append(f / X_fro_norm)\n\n if len(all_time) == 0:\n raise ValueError(\"No tests ran. 
Aborting.\")\n\n if enable_spectral_norm:\n title = \"normalized spectral norm diff vs running time\"\n scatter_time_vs_s(all_time, all_spectral, datasets, title)\n title = \"normalized Frobenius norm diff vs running time\"\n scatter_time_vs_s(all_time, all_frobenius, datasets, title)\n\n\nif __name__ == '__main__':\n random_state = check_random_state(1234)\n\n power_iter = np.linspace(0, 6, 7, dtype=int)\n n_comps = 50\n\n for dataset_name in datasets:\n X = get_data(dataset_name)\n if X is None:\n continue\n print(\" >>>>>> Benching sklearn and fbpca on %s %d x %d\" %\n (dataset_name, X.shape[0], X.shape[1]))\n bench_a(X, dataset_name, power_iter, n_oversamples=2,\n n_comps=np.minimum(n_comps, np.min(X.shape)))\n\n print(\" >>>>>> Benching on simulated low rank matrix with variable rank\")\n bench_b(power_iter)\n\n print(\" >>>>>> Benching sklearn and fbpca default configurations\")\n bench_c(datasets + big_sparse_datasets, n_comps)\n\n plt.show()\n",
"\"\"\"\nTesting for the partial dependence module.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nimport sklearn\nfrom sklearn.inspection import partial_dependence\nfrom sklearn.inspection._partial_dependence import (\n _grid_from_X,\n _partial_dependence_brute,\n _partial_dependence_recursion\n)\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import MultiTaskLasso\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.cluster import KMeans\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import scale\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.base import BaseEstimator, ClassifierMixin, clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils import _IS_32BIT\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.tree.tests.test_tree import assert_is_subtree\n\n\n# toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny = [-1, -1, -1, 1, 1, 1]\n\n\n# (X, y), n_targets <-- as expected in the output of partial_dep()\nbinary_classification_data = (make_classification(n_samples=50,\n random_state=0), 1)\nmulticlass_classification_data = (make_classification(n_samples=50,\n n_classes=3,\n n_clusters_per_class=1,\n random_state=0), 3)\nregression_data = (make_regression(n_samples=50, random_state=0), 1)\nmultioutput_regression_data = (make_regression(n_samples=50, n_targets=2,\n random_state=0), 2)\n\n# iris\niris = load_iris()\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected]('Estimator, method, data', [\n (GradientBoostingClassifier, 'auto', binary_classification_data),\n (GradientBoostingClassifier, 'auto', multiclass_classification_data),\n (GradientBoostingClassifier, 'brute', binary_classification_data),\n (GradientBoostingClassifier, 'brute', multiclass_classification_data),\n (GradientBoostingRegressor, 'auto', regression_data),\n (GradientBoostingRegressor, 'brute', regression_data),\n (DecisionTreeRegressor, 'brute', regression_data),\n (LinearRegression, 'brute', regression_data),\n (LinearRegression, 'brute', multioutput_regression_data),\n (LogisticRegression, 'brute', binary_classification_data),\n (LogisticRegression, 'brute', multiclass_classification_data),\n (MultiTaskLasso, 'brute', multioutput_regression_data),\n ])\[email protected]('grid_resolution', (5, 10))\[email protected]('features', ([1], [1, 2]))\[email protected]('kind', ('legacy', 'average', 'individual', 'both'))\ndef test_output_shape(Estimator, method, data, grid_resolution,\n features, kind):\n # Check that partial_dependence has consistent output shape for different\n # kinds of estimators:\n # - classifiers with binary and multiclass settings\n # - regressors\n # - multi-task 
regressors\n\n est = Estimator()\n\n # n_target corresponds to the number of classes (1 for binary classif) or\n # the number of tasks / outputs in multi task settings. It's equal to 1 for\n # classical regression_data.\n (X, y), n_targets = data\n n_instances = X.shape[0]\n\n est.fit(X, y)\n result = partial_dependence(\n est, X=X, features=features, method=method, kind=kind,\n grid_resolution=grid_resolution\n )\n # FIXME: Remove 'legacy' support in 1.1\n pdp, axes = result if kind == 'legacy' else (result, result[\"values\"])\n\n expected_pdp_shape = (n_targets,\n *[grid_resolution for _ in range(len(features))])\n expected_ice_shape = (n_targets, n_instances,\n *[grid_resolution for _ in range(len(features))])\n if kind == 'legacy':\n assert pdp.shape == expected_pdp_shape\n elif kind == 'average':\n assert pdp.average.shape == expected_pdp_shape\n elif kind == 'individual':\n assert pdp.individual.shape == expected_ice_shape\n else: # 'both'\n assert pdp.average.shape == expected_pdp_shape\n assert pdp.individual.shape == expected_ice_shape\n\n expected_axes_shape = (len(features), grid_resolution)\n assert axes is not None\n assert np.asarray(axes).shape == expected_axes_shape\n\n\ndef test_grid_from_X():\n # tests for _grid_from_X: sanity check for output, and for shapes.\n\n # Make sure that the grid is a cartesian product of the input (it will use\n # the unique values instead of the percentiles)\n percentiles = (.05, .95)\n grid_resolution = 100\n X = np.asarray([[1, 2],\n [3, 4]])\n grid, axes = _grid_from_X(X, percentiles, grid_resolution)\n assert_array_equal(grid, [[1, 2],\n [1, 4],\n [3, 2],\n [3, 4]])\n assert_array_equal(axes, X.T)\n\n # test shapes of returned objects depending on the number of unique values\n # for a feature.\n rng = np.random.RandomState(0)\n grid_resolution = 15\n\n # n_unique_values > grid_resolution\n X = rng.normal(size=(20, 2))\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])\n assert np.asarray(axes).shape == (2, grid_resolution)\n\n # n_unique_values < grid_resolution, will use actual values\n n_unique_values = 12\n X[n_unique_values - 1:, 0] = 12345\n rng.shuffle(X) # just to make sure the order is irrelevant\n grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)\n assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])\n # axes is a list of arrays of different shapes\n assert axes[0].shape == (n_unique_values,)\n assert axes[1].shape == (grid_resolution,)\n\n\[email protected](\n \"grid_resolution, percentiles, err_msg\",\n [(2, (0, 0.0001), \"percentiles are too close\"),\n (100, (1, 2, 3, 4), \"'percentiles' must be a sequence of 2 elements\"),\n (100, 12345, \"'percentiles' must be a sequence of 2 elements\"),\n (100, (-1, .95), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.05, 2), r\"'percentiles' values must be in \\[0, 1\\]\"),\n (100, (.9, .1), r\"percentiles\\[0\\] must be strictly less than\"),\n (1, (0.05, 0.95), \"'grid_resolution' must be strictly greater than 1\")]\n)\ndef test_grid_from_X_error(grid_resolution, percentiles, err_msg):\n X = np.asarray([[1, 2], [3, 4]])\n with pytest.raises(ValueError, match=err_msg):\n _grid_from_X(\n X, grid_resolution=grid_resolution, percentiles=percentiles\n )\n\n\[email protected]('target_feature', range(5))\[email protected]('est, method', [\n (LinearRegression(), 'brute'),\n (GradientBoostingRegressor(random_state=0), 'brute'),\n 
(GradientBoostingRegressor(random_state=0), 'recursion'),\n (HistGradientBoostingRegressor(random_state=0), 'brute'),\n (HistGradientBoostingRegressor(random_state=0), 'recursion')]\n)\ndef test_partial_dependence_helpers(est, method, target_feature):\n # Check that what is returned by _partial_dependence_brute or\n # _partial_dependence_recursion is equivalent to manually setting a target\n # feature to a given value, and computing the average prediction over all\n # samples.\n # This also checks that the brute and recursion methods give the same\n # output.\n # Note that even on the trainset, the brute and the recursion methods\n # aren't always strictly equivalent, in particular when the slow method\n # generates unrealistic samples that have low mass in the joint\n # distribution of the input features, and when some of the features are\n # dependent. Hence the high tolerance on the checks.\n\n X, y = make_regression(random_state=0, n_features=5, n_informative=5)\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n est.fit(X, y)\n\n # target feature will be set to .5 and then to 123\n features = np.array([target_feature], dtype=np.int32)\n grid = np.array([[.5],\n [123]])\n\n if method == 'brute':\n pdp, predictions = _partial_dependence_brute(est, grid, features, X,\n response_method='auto')\n else:\n pdp = _partial_dependence_recursion(est, grid, features)\n\n mean_predictions = []\n for val in (.5, 123):\n X_ = X.copy()\n X_[:, target_feature] = val\n mean_predictions.append(est.predict(X_).mean())\n\n pdp = pdp[0] # (shape is (1, 2) so make it (2,))\n\n # allow for greater margin for error with recursion method\n rtol = 1e-1 if method == 'recursion' else 1e-3\n assert np.allclose(pdp, mean_predictions, rtol=rtol)\n\n\[email protected]('seed', range(1))\ndef test_recursion_decision_tree_vs_forest_and_gbdt(seed):\n # Make sure that the recursion method gives the same results on a\n # DecisionTreeRegressor and a GradientBoostingRegressor or a\n # RandomForestRegressor with 1 tree and equivalent parameters.\n\n rng = np.random.RandomState(seed)\n\n # Purely random dataset to avoid correlated features\n n_samples = 1000\n n_features = 5\n X = rng.randn(n_samples, n_features)\n y = rng.randn(n_samples) * 10\n\n # The 'init' estimator for GBDT (here the average prediction) isn't taken\n # into account with the recursion method, for technical reasons. We set\n # the mean to 0 to that this 'bug' doesn't have any effect.\n y = y - y.mean()\n\n # set max_depth not too high to avoid splits with same gain but different\n # features\n max_depth = 5\n\n tree_seed = 0\n forest = RandomForestRegressor(n_estimators=1, max_features=None,\n bootstrap=False, max_depth=max_depth,\n random_state=tree_seed)\n # The forest will use ensemble.base._set_random_states to set the\n # random_state of the tree sub-estimator. 
We simulate this here to have\n # equivalent estimators.\n equiv_random_state = check_random_state(tree_seed).randint(\n np.iinfo(np.int32).max)\n gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,\n criterion='squared_error',\n max_depth=max_depth,\n random_state=equiv_random_state)\n tree = DecisionTreeRegressor(max_depth=max_depth,\n random_state=equiv_random_state)\n\n forest.fit(X, y)\n gbdt.fit(X, y)\n tree.fit(X, y)\n\n # sanity check: if the trees aren't the same, the PD values won't be equal\n try:\n assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)\n assert_is_subtree(tree.tree_, forest[0].tree_)\n except AssertionError:\n # For some reason the trees aren't exactly equal on 32bits, so the PDs\n # cannot be equal either. See\n # https://github.com/scikit-learn/scikit-learn/issues/8853\n assert _IS_32BIT, \"this should only fail on 32 bit platforms\"\n return\n\n grid = rng.randn(50).reshape(-1, 1)\n for f in range(n_features):\n features = np.array([f], dtype=np.int32)\n\n pdp_forest = _partial_dependence_recursion(forest, grid, features)\n pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)\n pdp_tree = _partial_dependence_recursion(tree, grid, features)\n\n np.testing.assert_allclose(pdp_gbdt, pdp_tree)\n np.testing.assert_allclose(pdp_forest, pdp_tree)\n\n\[email protected]('est', (\n GradientBoostingClassifier(random_state=0),\n HistGradientBoostingClassifier(random_state=0),\n))\[email protected]('target_feature', (0, 1, 2, 3, 4, 5))\ndef test_recursion_decision_function(est, target_feature):\n # Make sure the recursion method (implicitly uses decision_function) has\n # the same result as using brute method with\n # response_method=decision_function\n\n X, y = make_classification(n_classes=2, n_clusters_per_class=1,\n random_state=1)\n assert np.mean(y) == .5 # make sure the init estimator predicts 0 anyway\n\n est.fit(X, y)\n\n preds_1 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='recursion', kind='average'\n )\n preds_2 = partial_dependence(\n est, X, [target_feature], response_method='decision_function',\n method='brute', kind='average'\n )\n\n assert_allclose(preds_1['average'], preds_2['average'], atol=1e-7)\n\n\[email protected]('est', (\n LinearRegression(),\n GradientBoostingRegressor(random_state=0),\n HistGradientBoostingRegressor(random_state=0, min_samples_leaf=1,\n max_leaf_nodes=None, max_iter=1),\n DecisionTreeRegressor(random_state=0),\n))\[email protected]('power', (1, 2))\ndef test_partial_dependence_easy_target(est, power):\n # If the target y only depends on one feature in an obvious way (linear or\n # quadratic) then the partial dependence for that feature should reflect\n # it.\n # We here fit a linear regression_data model (with polynomial features if\n # needed) and compute r_squared to check that the partial dependence\n # correctly reflects the target.\n\n rng = np.random.RandomState(0)\n n_samples = 200\n target_variable = 2\n X = rng.normal(size=(n_samples, 5))\n y = X[:, target_variable]**power\n\n est.fit(X, y)\n\n pdp = partial_dependence(\n est, features=[target_variable], X=X, grid_resolution=1000,\n kind='average'\n )\n\n new_X = pdp[\"values\"][0].reshape(-1, 1)\n new_y = pdp['average'][0]\n # add polynomial features if needed\n new_X = PolynomialFeatures(degree=power).fit_transform(new_X)\n\n lr = LinearRegression().fit(new_X, new_y)\n r2 = r2_score(new_y, lr.predict(new_X))\n\n assert r2 > .99\n\n\[email protected]('Estimator',\n 
(sklearn.tree.DecisionTreeClassifier,\n sklearn.tree.ExtraTreeClassifier,\n sklearn.ensemble.ExtraTreesClassifier,\n sklearn.neighbors.KNeighborsClassifier,\n sklearn.neighbors.RadiusNeighborsClassifier,\n sklearn.ensemble.RandomForestClassifier))\ndef test_multiclass_multioutput(Estimator):\n # Make sure error is raised for multiclass-multioutput classifiers\n\n # make multiclass-multioutput dataset\n X, y = make_classification(n_classes=3, n_clusters_per_class=1,\n random_state=0)\n y = np.array([y, y]).T\n\n est = Estimator()\n est.fit(X, y)\n\n with pytest.raises(\n ValueError,\n match=\"Multiclass-multioutput estimators are not supported\"):\n partial_dependence(est, X, [0])\n\n\nclass NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):\n def fit(self, X, y):\n # simulate that we have some classes\n self.classes_ = [0, 1]\n return self\n\n\[email protected](\"ignore:A Bunch will be returned\")\[email protected](\n \"estimator, params, err_msg\",\n [(KMeans(),\n {'features': [0]},\n \"'estimator' must be a fitted regressor or classifier\"),\n (LinearRegression(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The response_method parameter is ignored for regressors'),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba',\n 'method': 'recursion'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'predict_proba', 'method': 'auto'},\n \"'recursion' method, the response_method must be 'decision_function'\"),\n (GradientBoostingClassifier(random_state=0),\n {'features': [0], 'response_method': 'blahblah'},\n 'response_method blahblah is invalid. Accepted response_method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'auto'},\n 'The estimator has no predict_proba and no decision_function method'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'predict_proba'},\n 'The estimator has no predict_proba method.'),\n (NoPredictProbaNoDecisionFunction(),\n {'features': [0], 'response_method': 'decision_function'},\n 'The estimator has no decision_function method.'),\n (LinearRegression(),\n {'features': [0], 'method': 'blahblah'},\n 'blahblah is invalid. 
Accepted method names are brute, recursion, auto'),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'individual'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion', 'kind': 'both'},\n \"The 'recursion' method only applies when 'kind' is set to 'average'\"),\n (LinearRegression(),\n {'features': [0], 'method': 'recursion'},\n \"Only the following estimators support the 'recursion' method:\")]\n)\ndef test_partial_dependence_error(estimator, params, err_msg):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, **params)\n\n\[email protected](\n \"with_dataframe, err_msg\",\n [(True, \"Only array-like or scalar are supported\"),\n (False, \"Only array-like or scalar are supported\")]\n)\ndef test_partial_dependence_slice_error(with_dataframe, err_msg):\n X, y = make_classification(random_state=0)\n if with_dataframe:\n pd = pytest.importorskip('pandas')\n X = pd.DataFrame(X)\n estimator = LogisticRegression().fit(X, y)\n\n with pytest.raises(TypeError, match=err_msg):\n partial_dependence(estimator, X, features=slice(0, 2, 1))\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\[email protected]('features', [-1, 10000])\ndef test_partial_dependence_unknown_feature_indices(estimator, features):\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n\n err_msg = 'all features must be in'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, X, [features])\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_unknown_feature_string(estimator):\n pd = pytest.importorskip(\"pandas\")\n X, y = make_classification(random_state=0)\n df = pd.DataFrame(X)\n estimator.fit(df, y)\n\n features = ['random']\n err_msg = 'A given column is not a column of the dataframe'\n with pytest.raises(ValueError, match=err_msg):\n partial_dependence(estimator, df, features)\n\n\[email protected](\n 'estimator',\n [LinearRegression(), GradientBoostingClassifier(random_state=0)]\n)\ndef test_partial_dependence_X_list(estimator):\n # check that array-like objects are accepted\n X, y = make_classification(random_state=0)\n estimator.fit(X, y)\n partial_dependence(estimator, list(X), [0], kind='average')\n\n\ndef test_warning_recursion_non_constant_init():\n # make sure that passing a non-constant init parameter to a GBDT and using\n # recursion method yields a warning.\n\n gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)\n gbc.fit(X, y)\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n with pytest.warns(\n UserWarning,\n match='Using recursion method with a non-constant init predictor'):\n partial_dependence(gbc, X, [0], method='recursion', kind='average')\n\n\ndef test_partial_dependence_sample_weight():\n # Test near perfect correlation between partial dependence and diagonal\n # when sample weights emphasize y = x predictions\n # non-regression test for #13193\n # TODO: extend to HistGradientBoosting once sample_weight is supported\n N = 1000\n rng = np.random.RandomState(123456)\n mask = rng.randint(2, size=N, dtype=bool)\n\n x = rng.rand(N)\n # set y = x on mask and y = -x 
outside\n y = x.copy()\n y[~mask] = -y[~mask]\n X = np.c_[mask, x]\n # sample weights to emphasize data points where y = x\n sample_weight = np.ones(N)\n sample_weight[mask] = 1000.\n\n clf = GradientBoostingRegressor(n_estimators=10, random_state=1)\n clf.fit(X, y, sample_weight=sample_weight)\n\n pdp = partial_dependence(clf, X, features=[1], kind='average')\n\n assert np.corrcoef(pdp['average'], pdp[\"values\"])[0, 1] > 0.99\n\n\ndef test_hist_gbdt_sw_not_supported():\n # TODO: remove/fix when PDP supports HGBT with sample weights\n clf = HistGradientBoostingRegressor(random_state=1)\n clf.fit(X, y, sample_weight=np.ones(len(X)))\n\n with pytest.raises(NotImplementedError,\n match=\"does not support partial dependence\"):\n partial_dependence(clf, X, features=[1])\n\n\ndef test_partial_dependence_pipeline():\n # check that the partial dependence support pipeline\n iris = load_iris()\n\n scaler = StandardScaler()\n clf = DummyClassifier(random_state=42)\n pipe = make_pipeline(scaler, clf)\n\n clf.fit(scaler.fit_transform(iris.data), iris.target)\n pipe.fit(iris.data, iris.target)\n\n features = 0\n pdp_pipe = partial_dependence(\n pipe, iris.data, features=[features], grid_resolution=10,\n kind='average'\n )\n pdp_clf = partial_dependence(\n clf, scaler.transform(iris.data), features=[features],\n grid_resolution=10, kind='average'\n )\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n assert_allclose(\n pdp_pipe[\"values\"][0],\n pdp_clf[\"values\"][0] * scaler.scale_[features] + scaler.mean_[features]\n )\n\n\[email protected](\n \"estimator\",\n [LogisticRegression(max_iter=1000, random_state=0),\n GradientBoostingClassifier(random_state=0, n_estimators=5)],\n ids=['estimator-brute', 'estimator-recursion']\n)\[email protected](\n \"preprocessor\",\n [None,\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])),\n make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n remainder='passthrough')],\n ids=['None', 'column-transformer', 'column-transformer-passthrough']\n)\[email protected](\n \"features\",\n [[0, 2], [iris.feature_names[i] for i in (0, 2)]],\n ids=['features-integer', 'features-string']\n)\ndef test_partial_dependence_dataframe(estimator, preprocessor, features):\n # check that the partial dependence support dataframe and pipeline\n # including a column transformer\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(scale(iris.data), columns=iris.feature_names)\n\n pipe = make_pipeline(preprocessor, estimator)\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n\n # the column transformer will reorder the column when transforming\n # we mixed the index to be sure that we are computing the partial\n # dependence of the right columns\n if preprocessor is not None:\n X_proc = clone(preprocessor).fit_transform(df)\n features_clf = [0, 1]\n else:\n X_proc = df\n features_clf = [0, 2]\n\n clf = clone(estimator).fit(X_proc, iris.target)\n pdp_clf = partial_dependence(\n clf, X_proc, features=features_clf, method='brute', grid_resolution=10,\n kind='average'\n )\n\n assert_allclose(pdp_pipe['average'], pdp_clf['average'])\n if preprocessor is not None:\n scaler = preprocessor.named_transformers_['standardscaler']\n assert_allclose(\n pdp_pipe[\"values\"][1],\n pdp_clf[\"values\"][1] * scaler.scale_[1] + scaler.mean_[1]\n )\n else:\n 
assert_allclose(pdp_pipe[\"values\"][1], pdp_clf[\"values\"][1])\n\n\[email protected](\n \"features, expected_pd_shape\",\n [(0, (3, 10)),\n (iris.feature_names[0], (3, 10)),\n ([0, 2], (3, 10, 10)),\n ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)),\n ([True, False, True, False], (3, 10, 10))],\n ids=['scalar-int', 'scalar-str', 'list-int', 'list-str', 'mask']\n)\ndef test_partial_dependence_feature_type(features, expected_pd_shape):\n # check all possible features type supported in PDP\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n\n preprocessor = make_column_transformer(\n (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),\n (RobustScaler(), [iris.feature_names[i] for i in (1, 3)])\n )\n pipe = make_pipeline(\n preprocessor, LogisticRegression(max_iter=1000, random_state=0)\n )\n pipe.fit(df, iris.target)\n pdp_pipe = partial_dependence(\n pipe, df, features=features, grid_resolution=10, kind='average'\n )\n assert pdp_pipe['average'].shape == expected_pd_shape\n assert len(pdp_pipe[\"values\"]) == len(pdp_pipe['average'].shape) - 1\n\n\[email protected](\n \"estimator\", [LinearRegression(), LogisticRegression(),\n GradientBoostingRegressor(), GradientBoostingClassifier()]\n)\ndef test_partial_dependence_unfitted(estimator):\n X = iris.data\n preprocessor = make_column_transformer(\n (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])\n )\n pipe = make_pipeline(preprocessor, estimator)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(pipe, X, features=[0, 2], grid_resolution=10)\n with pytest.raises(NotFittedError, match=\"is not fitted yet\"):\n partial_dependence(estimator, X, features=[0, 2], grid_resolution=10)\n\n\[email protected]('Estimator, data', [\n (LinearRegression, multioutput_regression_data),\n (LogisticRegression, binary_classification_data)])\ndef test_kind_average_and_average_of_individual(Estimator, data):\n est = Estimator()\n (X, y), n_targets = data\n est.fit(X, y)\n\n pdp_avg = partial_dependence(\n est, X=X, features=[1, 2], kind='average'\n )\n pdp_ind = partial_dependence(\n est, X=X, features=[1, 2], kind='individual'\n )\n avg_ind = np.mean(pdp_ind['individual'], axis=1)\n assert_allclose(avg_ind, pdp_avg['average'])\n\n\ndef test_warning_for_kind_legacy():\n est = LogisticRegression()\n (X, y), n_targets = binary_classification_data\n est.fit(X, y)\n\n err_msg = (\"A Bunch will be returned in place of 'predictions' from \"\n \"version 1.1\")\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2])\n\n with pytest.warns(FutureWarning, match=err_msg):\n partial_dependence(est, X=X, features=[1, 2], kind='legacy')\n",
"\"\"\"\n==============================================================\nRestricted Boltzmann Machine features for digit classification\n==============================================================\n\nFor greyscale image data where pixel values can be interpreted as degrees of\nblackness on a white background, like handwritten digit recognition, the\nBernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM\n<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear\nfeature extraction.\n\nIn order to learn good latent representations from a small dataset, we\nartificially generate more labeled data by perturbing the training data with\nlinear shifts of 1 pixel in each direction.\n\nThis example shows how to build a classification pipeline with a BernoulliRBM\nfeature extractor and a :class:`LogisticRegression\n<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters\nof the entire model (learning rate, hidden layer size, regularization)\nwere optimized by grid search, but the search is not reproduced here because\nof runtime constraints.\n\nLogistic regression on raw pixel values is presented for comparison. The\nexample shows that the features extracted by the BernoulliRBM help improve the\nclassification accuracy.\n\"\"\"\nprint(__doc__)\n\n# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve\n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.ndimage import convolve\nfrom sklearn import linear_model, datasets, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn.base import clone\n\n\n# #############################################################################\n# Setting up\n\ndef nudge_dataset(X, Y):\n \"\"\"\n This produces a dataset 5 times bigger than the original one,\n by moving the 8x8 images in X around by 1px to left, right, down, up\n \"\"\"\n direction_vectors = [\n [[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0]]]\n\n def shift(x, w):\n return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()\n\n X = np.concatenate([X] +\n [np.apply_along_axis(shift, 1, X, vector)\n for vector in direction_vectors])\n Y = np.concatenate([Y for _ in range(5)], axis=0)\n return X, Y\n\n\n# Load Data\nX, y = datasets.load_digits(return_X_y=True)\nX = np.asarray(X, 'float32')\nX, Y = nudge_dataset(X, y)\nX = minmax_scale(X, feature_range=(0, 1)) # 0-1 scaling\n\nX_train, X_test, Y_train, Y_test = train_test_split(\n X, Y, test_size=0.2, random_state=0)\n\n# Models we will use\nlogistic = linear_model.LogisticRegression(solver='newton-cg', tol=1)\nrbm = BernoulliRBM(random_state=0, verbose=True)\n\nrbm_features_classifier = Pipeline(\n steps=[('rbm', rbm), ('logistic', logistic)])\n\n# #############################################################################\n# Training\n\n# Hyper-parameters. These were set by cross-validation,\n# using a GridSearchCV. 
Here we are not performing cross-validation to\n# save time.\nrbm.learning_rate = 0.06\nrbm.n_iter = 10\n# More components tend to give better prediction performance, but larger\n# fitting time\nrbm.n_components = 100\nlogistic.C = 6000\n\n# Training RBM-Logistic Pipeline\nrbm_features_classifier.fit(X_train, Y_train)\n\n# Training the Logistic regression classifier directly on the pixel\nraw_pixel_classifier = clone(logistic)\nraw_pixel_classifier.C = 100.\nraw_pixel_classifier.fit(X_train, Y_train)\n\n# #############################################################################\n# Evaluation\n\nY_pred = rbm_features_classifier.predict(X_test)\nprint(\"Logistic regression using RBM features:\\n%s\\n\" % (\n metrics.classification_report(Y_test, Y_pred)))\n\nY_pred = raw_pixel_classifier.predict(X_test)\nprint(\"Logistic regression using raw pixel features:\\n%s\\n\" % (\n metrics.classification_report(Y_test, Y_pred)))\n\n# #############################################################################\n# Plotting\n\nplt.figure(figsize=(4.2, 4))\nfor i, comp in enumerate(rbm.components_):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,\n interpolation='nearest')\n plt.xticks(())\n plt.yticks(())\nplt.suptitle('100 components extracted by RBM', fontsize=16)\nplt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)\n\nplt.show()\n",
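A small sketch, separate from the example above, showing BernoulliRBM used on its own as a feature extractor; the component count and iteration count are illustrative. Pixels are scaled to [0, 1] because the Bernoulli model expects inputs interpretable as probabilities.

from sklearn.datasets import load_digits
from sklearn.neural_network import BernoulliRBM
from sklearn.preprocessing import minmax_scale

X, _ = load_digits(return_X_y=True)
X = minmax_scale(X, feature_range=(0, 1))   # 0-1 scaling, as in the example

rbm = BernoulliRBM(n_components=64, learning_rate=0.06, n_iter=5, random_state=0)
hidden = rbm.fit_transform(X)    # P(h=1 | v) for each sample
print(hidden.shape)              # (n_samples, 64): the learned latent features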
"\"\"\"KDDCUP 99 dataset.\n\nA classic dataset for anomaly detection.\n\nThe dataset page is available from UCI Machine Learning Repository\n\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz\n\n\"\"\"\n\nimport errno\nfrom gzip import GzipFile\nimport logging\nimport os\nfrom os.path import dirname, exists, join\n\nimport numpy as np\nimport joblib\n\nfrom ._base import _fetch_remote\nfrom ._base import _convert_data_dataframe\nfrom . import get_data_home\nfrom ._base import RemoteFileMetadata\nfrom ..utils import Bunch\nfrom ..utils import check_random_state\nfrom ..utils import shuffle as shuffle_method\n\n\n# The original data can be found at:\n# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz\nARCHIVE = RemoteFileMetadata(\n filename='kddcup99_data',\n url='https://ndownloader.figshare.com/files/5976045',\n checksum=('3b6c942aa0356c0ca35b7b595a26c89d'\n '343652c9db428893e7494f837b274292'))\n\n# The original data can be found at:\n# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz\nARCHIVE_10_PERCENT = RemoteFileMetadata(\n filename='kddcup99_10_data',\n url='https://ndownloader.figshare.com/files/5976042',\n checksum=('8045aca0d84e70e622d1148d7df78249'\n '6f6333bf6eb979a1b0837c42a9fd9561'))\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_kddcup99(*, subset=None, data_home=None, shuffle=False,\n random_state=None,\n percent10=True, download_if_missing=True, return_X_y=False,\n as_frame=False):\n \"\"\"Load the kddcup99 dataset (classification).\n\n Download it if necessary.\n\n ================= ====================================\n Classes 23\n Samples total 4898431\n Dimensionality 41\n Features discrete (int) or continuous (float)\n ================= ====================================\n\n Read more in the :ref:`User Guide <kddcup99_dataset>`.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n subset : {'SA', 'SF', 'http', 'smtp'}, default=None\n To return the corresponding classical subsets of kddcup 99.\n If None, return the entire kddcup 99 dataset.\n\n data_home : str, default=None\n Specify another download and cache folder for the datasets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n .. versionadded:: 0.19\n\n shuffle : bool, default=False\n Whether to shuffle dataset.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for dataset shuffling and for\n selection of abnormal samples if `subset='SA'`. Pass an int for\n reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n percent10 : bool, default=True\n Whether to load only 10 percent of the data.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object. See\n below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.20\n\n as_frame : bool, default=False\n If `True`, returns a pandas Dataframe for the ``data`` and ``target``\n objects in the `Bunch` returned object; `Bunch` return object will also\n have a ``frame`` member.\n\n .. 
versionadded:: 0.24\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (494021, 41)\n The data matrix to learn. If `as_frame=True`, `data` will be a\n pandas DataFrame.\n target : {ndarray, series} of shape (494021,)\n The regression target for each sample. If `as_frame=True`, `target`\n will be a pandas Series.\n frame : dataframe of shape (494021, 42)\n Only present when `as_frame=True`. Contains `data` and `target`.\n DESCR : str\n The full description of the dataset.\n feature_names : list\n The names of the dataset columns\n target_names: list\n The names of the target columns\n\n (data, target) : tuple if ``return_X_y`` is True\n\n .. versionadded:: 0.20\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n kddcup99 = _fetch_brute_kddcup99(\n data_home=data_home,\n percent10=percent10,\n download_if_missing=download_if_missing\n )\n\n data = kddcup99.data\n target = kddcup99.target\n feature_names = kddcup99.feature_names\n target_names = kddcup99.target_names\n\n if subset == 'SA':\n s = target == b'normal.'\n t = np.logical_not(s)\n normal_samples = data[s, :]\n normal_targets = target[s]\n abnormal_samples = data[t, :]\n abnormal_targets = target[t]\n\n n_samples_abnormal = abnormal_samples.shape[0]\n # selected abnormal samples:\n random_state = check_random_state(random_state)\n r = random_state.randint(0, n_samples_abnormal, 3377)\n abnormal_samples = abnormal_samples[r]\n abnormal_targets = abnormal_targets[r]\n\n data = np.r_[normal_samples, abnormal_samples]\n target = np.r_[normal_targets, abnormal_targets]\n\n if subset == 'SF' or subset == 'http' or subset == 'smtp':\n # select all samples with positive logged_in attribute:\n s = data[:, 11] == 1\n data = np.c_[data[s, :11], data[s, 12:]]\n feature_names = feature_names[:11] + feature_names[12:]\n target = target[s]\n\n data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))\n data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))\n data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))\n\n if subset == 'http':\n s = data[:, 2] == b'http'\n data = data[s]\n target = target[s]\n data = np.c_[data[:, 0], data[:, 4], data[:, 5]]\n feature_names = [feature_names[0], feature_names[4],\n feature_names[5]]\n\n if subset == 'smtp':\n s = data[:, 2] == b'smtp'\n data = data[s]\n target = target[s]\n data = np.c_[data[:, 0], data[:, 4], data[:, 5]]\n feature_names = [feature_names[0], feature_names[4],\n feature_names[5]]\n\n if subset == 'SF':\n data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]\n feature_names = [feature_names[0], feature_names[2],\n feature_names[4], feature_names[5]]\n\n if shuffle:\n data, target = shuffle_method(data, target, random_state=random_state)\n\n module_path = dirname(__file__)\n with open(join(module_path, 'descr', 'kddcup99.rst')) as rst_file:\n fdescr = rst_file.read()\n\n frame = None\n if as_frame:\n frame, data, target = _convert_data_dataframe(\n \"fetch_kddcup99\", data, target, feature_names, target_names\n )\n\n if return_X_y:\n return data, target\n\n return Bunch(\n data=data,\n target=target,\n frame=frame,\n target_names=target_names,\n feature_names=feature_names,\n DESCR=fdescr,\n )\n\n\ndef _fetch_brute_kddcup99(data_home=None,\n download_if_missing=True, percent10=True):\n\n \"\"\"Load the kddcup99 dataset, downloading it if necessary.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and 
cache folder for the datasets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n percent10 : bool, default=True\n Whether to load only 10 percent of the data.\n\n Returns\n -------\n dataset : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : ndarray of shape (494021, 41)\n Each row corresponds to the 41 features in the dataset.\n target : ndarray of shape (494021,)\n Each value corresponds to one of the 21 attack types or to the\n label 'normal.'.\n feature_names : list\n The names of the dataset columns\n target_names: list\n The names of the target columns\n DESCR : str\n Description of the kddcup99 dataset.\n\n \"\"\"\n\n data_home = get_data_home(data_home=data_home)\n dir_suffix = \"-py3\"\n\n if percent10:\n kddcup_dir = join(data_home, \"kddcup99_10\" + dir_suffix)\n archive = ARCHIVE_10_PERCENT\n else:\n kddcup_dir = join(data_home, \"kddcup99\" + dir_suffix)\n archive = ARCHIVE\n\n samples_path = join(kddcup_dir, \"samples\")\n targets_path = join(kddcup_dir, \"targets\")\n available = exists(samples_path)\n\n dt = [('duration', int),\n ('protocol_type', 'S4'),\n ('service', 'S11'),\n ('flag', 'S6'),\n ('src_bytes', int),\n ('dst_bytes', int),\n ('land', int),\n ('wrong_fragment', int),\n ('urgent', int),\n ('hot', int),\n ('num_failed_logins', int),\n ('logged_in', int),\n ('num_compromised', int),\n ('root_shell', int),\n ('su_attempted', int),\n ('num_root', int),\n ('num_file_creations', int),\n ('num_shells', int),\n ('num_access_files', int),\n ('num_outbound_cmds', int),\n ('is_host_login', int),\n ('is_guest_login', int),\n ('count', int),\n ('srv_count', int),\n ('serror_rate', float),\n ('srv_serror_rate', float),\n ('rerror_rate', float),\n ('srv_rerror_rate', float),\n ('same_srv_rate', float),\n ('diff_srv_rate', float),\n ('srv_diff_host_rate', float),\n ('dst_host_count', int),\n ('dst_host_srv_count', int),\n ('dst_host_same_srv_rate', float),\n ('dst_host_diff_srv_rate', float),\n ('dst_host_same_src_port_rate', float),\n ('dst_host_srv_diff_host_rate', float),\n ('dst_host_serror_rate', float),\n ('dst_host_srv_serror_rate', float),\n ('dst_host_rerror_rate', float),\n ('dst_host_srv_rerror_rate', float),\n ('labels', 'S16')]\n\n column_names = [c[0] for c in dt]\n target_names = column_names[-1]\n feature_names = column_names[:-1]\n\n if available:\n try:\n X = joblib.load(samples_path)\n y = joblib.load(targets_path)\n except Exception as e:\n raise IOError(\n \"The cache for fetch_kddcup99 is invalid, please delete \"\n f\"{str(kddcup_dir)} and run the fetch_kddcup99 again\") from e\n\n elif download_if_missing:\n _mkdirp(kddcup_dir)\n logger.info(\"Downloading %s\" % archive.url)\n _fetch_remote(archive, dirname=kddcup_dir)\n DT = np.dtype(dt)\n logger.debug(\"extracting archive\")\n archive_path = join(kddcup_dir, archive.filename)\n file_ = GzipFile(filename=archive_path, mode='r')\n Xy = []\n for line in file_.readlines():\n line = line.decode()\n Xy.append(line.replace('\\n', '').split(','))\n file_.close()\n logger.debug('extraction done')\n os.remove(archive_path)\n\n Xy = np.asarray(Xy, dtype=object)\n for j in range(42):\n Xy[:, j] = Xy[:, j].astype(DT[j])\n\n X = Xy[:, :-1]\n y = Xy[:, -1]\n # XXX bug when compress!=0:\n # (error: 'Incorrect data length while decompressing[...] 
the file\n # could be corrupted.')\n\n joblib.dump(X, samples_path, compress=0)\n joblib.dump(y, targets_path, compress=0)\n else:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n\n return Bunch(\n data=X,\n target=y,\n feature_names=feature_names,\n target_names=[target_names],\n )\n\n\ndef _mkdirp(d):\n \"\"\"Ensure directory d exists (like mkdir -p on Unix)\n No guarantee that the directory is writable.\n \"\"\"\n try:\n os.makedirs(d)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n",
"\"\"\"\n===============================================\nFeature transformations with ensembles of trees\n===============================================\n\nTransform your features into a higher dimensional, sparse space. Then train a\nlinear model on these features.\n\nFirst fit an ensemble of trees (totally random trees, a random forest, or\ngradient boosted trees) on the training set. Then each leaf of each tree in the\nensemble is assigned a fixed arbitrary feature index in a new feature space.\nThese leaf indices are then encoded in a one-hot fashion.\n\nEach sample goes through the decisions of each tree of the ensemble and ends up\nin one leaf per tree. The sample is encoded by setting feature values for these\nleaves to 1 and the other feature values to 0.\n\nThe resulting transformer has then learned a supervised, sparse,\nhigh-dimensional categorical embedding of the data.\n\"\"\"\n\n# Author: Tim Head <[email protected]>\n#\n# License: BSD 3 clause\n\nprint(__doc__)\n\nfrom sklearn import set_config\nset_config(display='diagram')\n\n# %%\n# First, we will create a large dataset and split it into three sets:\n#\n# - a set to train the ensemble methods which are later used to as a feature\n# engineering transformer;\n# - a set to train the linear model;\n# - a set to test the linear model.\n#\n# It is important to split the data in such way to avoid overfitting by leaking\n# data.\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\nX, y = make_classification(n_samples=80000, random_state=10)\n\nX_full_train, X_test, y_full_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=10)\nX_train_ensemble, X_train_linear, y_train_ensemble, y_train_linear = \\\n train_test_split(X_full_train, y_full_train, test_size=0.5,\n random_state=10)\n\n# %%\n# For each of the ensemble methods, we will use 10 estimators and a maximum\n# depth of 3 levels.\n\nn_estimators = 10\nmax_depth = 3\n\n# %%\n# First, we will start by training the random forest and gradient boosting on\n# the separated training set\n\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n\nrandom_forest = RandomForestClassifier(\n n_estimators=n_estimators, max_depth=max_depth, random_state=10)\nrandom_forest.fit(X_train_ensemble, y_train_ensemble)\n\ngradient_boosting = GradientBoostingClassifier(\n n_estimators=n_estimators, max_depth=max_depth, random_state=10)\n_ = gradient_boosting.fit(X_train_ensemble, y_train_ensemble)\n\n# %%\n# The :class:`~sklearn.ensemble.RandomTreesEmbedding` is an unsupervised method\n# and thus does not required to be trained independently.\n\nfrom sklearn.ensemble import RandomTreesEmbedding\n\nrandom_tree_embedding = RandomTreesEmbedding(\n n_estimators=n_estimators, max_depth=max_depth, random_state=0)\n\n# %%\n# Now, we will create three pipelines that will use the above embedding as\n# a preprocessing stage.\n#\n# The random trees embedding can be directly pipelined with the logistic\n# regression because it is a standard scikit-learn transformer.\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\nrt_model = make_pipeline(\n random_tree_embedding, LogisticRegression(max_iter=1000))\nrt_model.fit(X_train_linear, y_train_linear)\n\n# %%\n# Then, we can pipeline random forest or gradient boosting with a logistic\n# regression. However, the feature transformation will happen by calling the\n# method `apply`. 
The pipeline in scikit-learn expects a call to `transform`.\n# Therefore, we wrapped the call to `apply` within a `FunctionTransformer`.\n\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef rf_apply(X, model):\n return model.apply(X)\n\n\nrf_leaves_yielder = FunctionTransformer(\n rf_apply, kw_args={\"model\": random_forest})\n\nrf_model = make_pipeline(\n rf_leaves_yielder, OneHotEncoder(handle_unknown=\"ignore\"),\n LogisticRegression(max_iter=1000))\nrf_model.fit(X_train_linear, y_train_linear)\n\n\n# %%\ndef gbdt_apply(X, model):\n return model.apply(X)[:, :, 0]\n\n\ngbdt_leaves_yielder = FunctionTransformer(\n gbdt_apply, kw_args={\"model\": gradient_boosting})\n\ngbdt_model = make_pipeline(\n gbdt_leaves_yielder, OneHotEncoder(handle_unknown=\"ignore\"),\n LogisticRegression(max_iter=1000))\ngbdt_model.fit(X_train_linear, y_train_linear)\n\n# %%\n# We can finally show the different ROC curves for all the models.\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import plot_roc_curve\n\nfig, ax = plt.subplots()\n\nmodels = [\n (\"RT embedding -> LR\", rt_model),\n (\"RF\", random_forest),\n (\"RF embedding -> LR\", rf_model),\n (\"GBDT\", gradient_boosting),\n (\"GBDT embedding -> LR\", gbdt_model),\n]\n\nmodel_displays = {}\nfor name, pipeline in models:\n model_displays[name] = plot_roc_curve(\n pipeline, X_test, y_test, ax=ax, name=name)\n_ = ax.set_title('ROC curve')\n\n# %%\nfig, ax = plt.subplots()\nfor name, pipeline in models:\n model_displays[name].plot(ax=ax)\n\nax.set_xlim(0, 0.2)\nax.set_ylim(0.8, 1)\n_ = ax.set_title('ROC curve (zoomed in at top left)')\n",
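A compact sketch of the leaf-index trick described in this example: apply() yields one leaf id per tree, and one-hot encoding those ids gives the sparse embedding fed to the linear model. The dataset and forest sizes below are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=1000, random_state=0)
forest = RandomForestClassifier(n_estimators=5, max_depth=3, random_state=0).fit(X, y)

leaves = forest.apply(X)                              # shape (n_samples, n_estimators)
embedding = OneHotEncoder(handle_unknown="ignore").fit_transform(leaves)
print(leaves.shape, embedding.shape)                  # one one-hot block per tree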
"\"\"\"\n=============================================================================\nVarious Agglomerative Clustering on a 2D embedding of digits\n=============================================================================\n\nAn illustration of various linkage option for agglomerative clustering on\na 2D embedding of the digits dataset.\n\nThe goal of this example is to show intuitively how the metrics behave, and\nnot to find good clusters for the digits. This is why the example works on a\n2D embedding.\n\nWhat this example shows us is the behavior \"rich getting richer\" of\nagglomerative clustering that tends to create uneven cluster sizes.\nThis behavior is pronounced for the average linkage strategy,\nthat ends up with a couple of singleton clusters, while in the case\nof single linkage we get a single central cluster with all other clusters\nbeing drawn from noise points around the fringes.\n\"\"\"\n\n# Authors: Gael Varoquaux\n# License: BSD 3 clause (C) INRIA 2014\n\nprint(__doc__)\nfrom time import time\n\nimport numpy as np\nfrom scipy import ndimage\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import manifold, datasets\n\nX, y = datasets.load_digits(return_X_y=True)\nn_samples, n_features = X.shape\n\nnp.random.seed(0)\n\ndef nudge_images(X, y):\n # Having a larger dataset shows more clearly the behavior of the\n # methods, but we multiply the size of the dataset only by 2, as the\n # cost of the hierarchical clustering methods are strongly\n # super-linear in n_samples\n shift = lambda x: ndimage.shift(x.reshape((8, 8)),\n .3 * np.random.normal(size=2),\n mode='constant',\n ).ravel()\n X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])\n Y = np.concatenate([y, y], axis=0)\n return X, Y\n\n\nX, y = nudge_images(X, y)\n\n\n#----------------------------------------------------------------------\n# Visualize the clustering\ndef plot_clustering(X_red, labels, title=None):\n x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)\n X_red = (X_red - x_min) / (x_max - x_min)\n\n plt.figure(figsize=(6, 4))\n for i in range(X_red.shape[0]):\n plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),\n color=plt.cm.nipy_spectral(labels[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([])\n plt.yticks([])\n if title is not None:\n plt.title(title, size=17)\n plt.axis('off')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n#----------------------------------------------------------------------\n# 2D embedding of the digits dataset\nprint(\"Computing embedding\")\nX_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)\nprint(\"Done.\")\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor linkage in ('ward', 'average', 'complete', 'single'):\n clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)\n t0 = time()\n clustering.fit(X_red)\n print(\"%s :\\t%.2fs\" % (linkage, time() - t0))\n\n plot_clustering(X_red, clustering.labels_, \"%s linkage\" % linkage)\n\n\nplt.show()\n",
"\n\"\"\"\n==========================================================\nSample pipeline for text feature extraction and evaluation\n==========================================================\n\nThe dataset used in this example is the 20 newsgroups dataset which will be\nautomatically downloaded and then cached and reused for the document\nclassification example.\n\nYou can adjust the number of categories by giving their names to the dataset\nloader or setting them to None to get the 20 of them.\n\nHere is a sample output of a run on a quad-core machine::\n\n Loading 20 newsgroups dataset for categories:\n ['alt.atheism', 'talk.religion.misc']\n 1427 documents\n 2 categories\n\n Performing grid search...\n pipeline: ['vect', 'tfidf', 'clf']\n parameters:\n {'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),\n 'clf__max_iter': (10, 50, 80),\n 'clf__penalty': ('l2', 'elasticnet'),\n 'tfidf__use_idf': (True, False),\n 'vect__max_n': (1, 2),\n 'vect__max_df': (0.5, 0.75, 1.0),\n 'vect__max_features': (None, 5000, 10000, 50000)}\n done in 1737.030s\n\n Best score: 0.940\n Best parameters set:\n clf__alpha: 9.9999999999999995e-07\n clf__max_iter: 50\n clf__penalty: 'elasticnet'\n tfidf__use_idf: True\n vect__max_n: 2\n vect__max_df: 0.75\n vect__max_features: 50000\n\n\"\"\"\n\n# Author: Olivier Grisel <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Mathieu Blondel <[email protected]>\n# License: BSD 3 clause\nfrom pprint import pprint\nfrom time import time\nimport logging\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\n\nprint(__doc__)\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\n\n\n# #############################################################################\n# Load some categories from the training set\ncategories = [\n 'alt.atheism',\n 'talk.religion.misc',\n]\n# Uncomment the following to do the analysis on all the categories\n#categories = None\n\nprint(\"Loading 20 newsgroups dataset for categories:\")\nprint(categories)\n\ndata = fetch_20newsgroups(subset='train', categories=categories)\nprint(\"%d documents\" % len(data.filenames))\nprint(\"%d categories\" % len(data.target_names))\nprint()\n\n# #############################################################################\n# Define a pipeline combining a text feature extractor with a simple\n# classifier\npipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', SGDClassifier()),\n])\n\n# uncommenting more parameters will give better exploring power but will\n# increase processing time in a combinatorial way\nparameters = {\n 'vect__max_df': (0.5, 0.75, 1.0),\n # 'vect__max_features': (None, 5000, 10000, 50000),\n 'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams\n # 'tfidf__use_idf': (True, False),\n # 'tfidf__norm': ('l1', 'l2'),\n 'clf__max_iter': (20,),\n 'clf__alpha': (0.00001, 0.000001),\n 'clf__penalty': ('l2', 'elasticnet'),\n # 'clf__max_iter': (10, 50, 80),\n}\n\nif __name__ == \"__main__\":\n # multiprocessing requires the fork to happen in a __main__ protected\n # block\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, 
n_jobs=-1, verbose=1)\n\n print(\"Performing grid search...\")\n print(\"pipeline:\", [name for name, _ in pipeline.steps])\n print(\"parameters:\")\n pprint(parameters)\n t0 = time()\n grid_search.fit(data.data, data.target)\n print(\"done in %0.3fs\" % (time() - t0))\n print()\n\n print(\"Best score: %0.3f\" % grid_search.best_score_)\n print(\"Best parameters set:\")\n best_parameters = grid_search.best_estimator_.get_params()\n for param_name in sorted(parameters.keys()):\n print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n",
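A quick, self-contained variant of the grid search above on a tiny in-memory corpus, useful for checking the pipeline wiring without downloading 20 newsgroups; the toy documents, labels and parameter grid are assumptions for illustration only.

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

docs = ["good movie", "bad movie", "great film", "awful film"] * 10
labels = [1, 0, 1, 0] * 10

pipeline = Pipeline([
    ("vect", CountVectorizer()),
    ("tfidf", TfidfTransformer()),
    ("clf", SGDClassifier(random_state=0)),
])
params = {"vect__ngram_range": [(1, 1), (1, 2)], "clf__alpha": [1e-4, 1e-5]}

search = GridSearchCV(pipeline, params, cv=2)
search.fit(docs, labels)
print(search.best_score_, search.best_params_)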
"\nimport functools\nfrom typing import List, Any\n\nimport numpy as np\nimport scipy.sparse as sp\nimport pytest\n\nfrom sklearn.metrics import euclidean_distances\n\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim\nfrom sklearn.random_projection import _gaussian_random_matrix\nfrom sklearn.random_projection import _sparse_random_matrix\nfrom sklearn.random_projection import SparseRandomProjection\nfrom sklearn.random_projection import GaussianRandomProjection\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.exceptions import DataDimensionalityWarning\n\nall_sparse_random_matrix: List[Any] = [_sparse_random_matrix]\nall_dense_random_matrix: List[Any] = [_gaussian_random_matrix]\nall_random_matrix = all_sparse_random_matrix + all_dense_random_matrix\n\nall_SparseRandomProjection: List[Any] = [SparseRandomProjection]\nall_DenseRandomProjection: List[Any] = [GaussianRandomProjection]\nall_RandomProjection = set(all_SparseRandomProjection +\n all_DenseRandomProjection)\n\n\n# Make some random data with uniformly located non zero entries with\n# Gaussian distributed values\ndef make_sparse_random_data(n_samples, n_features, n_nonzeros):\n rng = np.random.RandomState(0)\n data_coo = sp.coo_matrix(\n (rng.randn(n_nonzeros),\n (rng.randint(n_samples, size=n_nonzeros),\n rng.randint(n_features, size=n_nonzeros))),\n shape=(n_samples, n_features))\n return data_coo.toarray(), data_coo.tocsr()\n\n\ndef densify(matrix):\n if not sp.issparse(matrix):\n return matrix\n else:\n return matrix.toarray()\n\n\nn_samples, n_features = (10, 1000)\nn_nonzeros = int(n_samples * n_features / 100.)\ndata, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)\n\n\n###############################################################################\n# test on JL lemma\n###############################################################################\n\[email protected](\"n_samples, eps\", [\n (100, 1.1),\n (100, 0.0),\n (100, -0.1),\n (0, 0.5)\n])\ndef test_invalid_jl_domain(n_samples, eps):\n with pytest.raises(ValueError):\n johnson_lindenstrauss_min_dim(n_samples, eps=eps)\n\n\ndef test_input_size_jl_min_dim():\n with pytest.raises(ValueError):\n johnson_lindenstrauss_min_dim(3 * [100], eps=2 * [0.9])\n\n johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),\n eps=np.full((10, 10), 0.5))\n\n\n###############################################################################\n# tests random matrix generation\n###############################################################################\ndef check_input_size_random_matrix(random_matrix):\n inputs = [(0, 0), (-1, 1), (1, -1), (1, 0), (-1, 0)]\n for n_components, n_features in inputs:\n with pytest.raises(ValueError):\n random_matrix(n_components, n_features)\n\n\ndef check_size_generated(random_matrix):\n inputs = [(1, 5), (5, 1), (5, 5), (1, 1)]\n for n_components, n_features in inputs:\n assert random_matrix(n_components, n_features).shape == (\n n_components, n_features)\n\n\ndef check_zero_mean_and_unit_norm(random_matrix):\n # All random matrix should produce a transformation matrix\n # with zero mean and unit norm for each columns\n\n A = densify(random_matrix(10000, 1, random_state=0))\n\n assert_array_almost_equal(0, np.mean(A), 3)\n assert_array_almost_equal(1.0, np.linalg.norm(A), 1)\n\n\ndef check_input_with_sparse_random_matrix(random_matrix):\n n_components, n_features = 
5, 10\n\n for density in [-1., 0.0, 1.1]:\n with pytest.raises(ValueError):\n random_matrix(n_components, n_features, density=density)\n\n\[email protected](\"random_matrix\", all_random_matrix)\ndef test_basic_property_of_random_matrix(random_matrix):\n # Check basic properties of random matrix generation\n check_input_size_random_matrix(random_matrix)\n check_size_generated(random_matrix)\n check_zero_mean_and_unit_norm(random_matrix)\n\n\[email protected](\"random_matrix\", all_sparse_random_matrix)\ndef test_basic_property_of_sparse_random_matrix(random_matrix):\n check_input_with_sparse_random_matrix(random_matrix)\n\n random_matrix_dense = functools.partial(random_matrix, density=1.0)\n\n check_zero_mean_and_unit_norm(random_matrix_dense)\n\n\ndef test_gaussian_random_matrix():\n # Check some statical properties of Gaussian random matrix\n # Check that the random matrix follow the proper distribution.\n # Let's say that each element of a_{ij} of A is taken from\n # a_ij ~ N(0.0, 1 / n_components).\n #\n n_components = 100\n n_features = 1000\n A = _gaussian_random_matrix(n_components, n_features, random_state=0)\n\n assert_array_almost_equal(0.0, np.mean(A), 2)\n assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)\n\n\ndef test_sparse_random_matrix():\n # Check some statical properties of sparse random matrix\n n_components = 100\n n_features = 500\n\n for density in [0.3, 1.]:\n s = 1 / density\n\n A = _sparse_random_matrix(n_components,\n n_features,\n density=density,\n random_state=0)\n A = densify(A)\n\n # Check possible values\n values = np.unique(A)\n assert np.sqrt(s) / np.sqrt(n_components) in values\n assert - np.sqrt(s) / np.sqrt(n_components) in values\n\n if density == 1.0:\n assert np.size(values) == 2\n else:\n assert 0. 
in values\n assert np.size(values) == 3\n\n # Check that the random matrix follow the proper distribution.\n # Let's say that each element of a_{ij} of A is taken from\n #\n # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s\n # - 0 with probability 1 - 1 / s\n # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s\n #\n assert_almost_equal(np.mean(A == 0.0),\n 1 - 1 / s, decimal=2)\n assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),\n 1 / (2 * s), decimal=2)\n assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),\n 1 / (2 * s), decimal=2)\n\n assert_almost_equal(np.var(A == 0.0, ddof=1),\n (1 - 1 / s) * 1 / s, decimal=2)\n assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),\n ddof=1),\n (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)\n assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),\n ddof=1),\n (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)\n\n\n###############################################################################\n# tests on random projection transformer\n###############################################################################\n\[email protected](\"density\", [1.1, 0, -0.1])\ndef test_sparse_random_projection_transformer_invalid_density(density):\n for RandomProjection in all_SparseRandomProjection:\n with pytest.raises(ValueError):\n RandomProjection(density=density).fit(data)\n\n\[email protected](\"n_components, fit_data\", [\n ('auto', [[0, 1, 2]]), (-10, data)]\n)\ndef test_random_projection_transformer_invalid_input(n_components, fit_data):\n for RandomProjection in all_RandomProjection:\n with pytest.raises(ValueError):\n RandomProjection(n_components=n_components).fit(fit_data)\n\n\ndef test_try_to_transform_before_fit():\n for RandomProjection in all_RandomProjection:\n with pytest.raises(ValueError):\n RandomProjection(n_components='auto').transform(data)\n\n\ndef test_too_many_samples_to_find_a_safe_embedding():\n data, _ = make_sparse_random_data(1000, 100, 1000)\n\n for RandomProjection in all_RandomProjection:\n rp = RandomProjection(n_components='auto', eps=0.1)\n expected_msg = (\n 'eps=0.100000 and n_samples=1000 lead to a target dimension'\n ' of 5920 which is larger than the original space with'\n ' n_features=100')\n with pytest.raises(ValueError, match=expected_msg):\n rp.fit(data)\n\n\ndef test_random_projection_embedding_quality():\n data, _ = make_sparse_random_data(8, 5000, 15000)\n eps = 0.2\n\n original_distances = euclidean_distances(data, squared=True)\n original_distances = original_distances.ravel()\n non_identical = original_distances != 0.0\n\n # remove 0 distances to avoid division by 0\n original_distances = original_distances[non_identical]\n\n for RandomProjection in all_RandomProjection:\n rp = RandomProjection(n_components='auto', eps=eps, random_state=0)\n projected = rp.fit_transform(data)\n\n projected_distances = euclidean_distances(projected, squared=True)\n projected_distances = projected_distances.ravel()\n\n # remove 0 distances to avoid division by 0\n projected_distances = projected_distances[non_identical]\n\n distances_ratio = projected_distances / original_distances\n\n # check that the automatically tuned values for the density respect the\n # contract for eps: pairwise distances are preserved according to the\n # Johnson-Lindenstrauss lemma\n assert distances_ratio.max() < 1 + eps\n assert 1 - eps < distances_ratio.min()\n\n\ndef test_SparseRandomProjection_output_representation():\n for SparseRandomProjection in 
all_SparseRandomProjection:\n # when using sparse input, the projected data can be forced to be a\n # dense numpy array\n rp = SparseRandomProjection(n_components=10, dense_output=True,\n random_state=0)\n rp.fit(data)\n assert isinstance(rp.transform(data), np.ndarray)\n\n sparse_data = sp.csr_matrix(data)\n assert isinstance(rp.transform(sparse_data), np.ndarray)\n\n # the output can be left to a sparse matrix instead\n rp = SparseRandomProjection(n_components=10, dense_output=False,\n random_state=0)\n rp = rp.fit(data)\n # output for dense input will stay dense:\n assert isinstance(rp.transform(data), np.ndarray)\n\n # output for sparse output will be sparse:\n assert sp.issparse(rp.transform(sparse_data))\n\n\ndef test_correct_RandomProjection_dimensions_embedding():\n for RandomProjection in all_RandomProjection:\n rp = RandomProjection(n_components='auto',\n random_state=0,\n eps=0.5).fit(data)\n\n # the number of components is adjusted from the shape of the training\n # set\n assert rp.n_components == 'auto'\n assert rp.n_components_ == 110\n\n if RandomProjection in all_SparseRandomProjection:\n assert rp.density == 'auto'\n assert_almost_equal(rp.density_, 0.03, 2)\n\n assert rp.components_.shape == (110, n_features)\n\n projected_1 = rp.transform(data)\n assert projected_1.shape == (n_samples, 110)\n\n # once the RP is 'fitted' the projection is always the same\n projected_2 = rp.transform(data)\n assert_array_equal(projected_1, projected_2)\n\n # fit transform with same random seed will lead to the same results\n rp2 = RandomProjection(random_state=0, eps=0.5)\n projected_3 = rp2.fit_transform(data)\n assert_array_equal(projected_1, projected_3)\n\n # Try to transform with an input X of size different from fitted.\n with pytest.raises(ValueError):\n rp.transform(data[:, 1:5])\n\n # it is also possible to fix the number of components and the density\n # level\n if RandomProjection in all_SparseRandomProjection:\n rp = RandomProjection(n_components=100, density=0.001,\n random_state=0)\n projected = rp.fit_transform(data)\n assert projected.shape == (n_samples, 100)\n assert rp.components_.shape == (100, n_features)\n assert rp.components_.nnz < 115 # close to 1% density\n assert 85 < rp.components_.nnz # close to 1% density\n\n\ndef test_warning_n_components_greater_than_n_features():\n n_features = 20\n data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))\n\n for RandomProjection in all_RandomProjection:\n with pytest.warns(DataDimensionalityWarning):\n RandomProjection(n_components=n_features + 1).fit(data)\n\n\ndef test_works_with_sparse_data():\n n_features = 20\n data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))\n\n for RandomProjection in all_RandomProjection:\n rp_dense = RandomProjection(n_components=3,\n random_state=1).fit(data)\n rp_sparse = RandomProjection(n_components=3,\n random_state=1).fit(sp.csr_matrix(data))\n assert_array_almost_equal(densify(rp_dense.components_),\n densify(rp_sparse.components_))\n\n\ndef test_johnson_lindenstrauss_min_dim():\n \"\"\"Test Johnson-Lindenstrauss for small eps.\n\n Regression test for #17111: before #19374, 32-bit systems would fail.\n \"\"\"\n assert johnson_lindenstrauss_min_dim(100, eps=1e-5) == 368416070986\n",
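A hedged sketch tying the Johnson-Lindenstrauss bound to the transformers tested above: with n_components='auto', the fitted n_components_ is the minimum dimension returned by johnson_lindenstrauss_min_dim for the given eps. The data shape and eps value are illustrative.

import numpy as np
from sklearn.random_projection import (GaussianRandomProjection,
                                       johnson_lindenstrauss_min_dim)

rng = np.random.RandomState(0)
X = rng.randn(100, 10000)

# lower bound on the target dimension for 100 samples at distortion eps=0.5
print(johnson_lindenstrauss_min_dim(n_samples=100, eps=0.5))

rp = GaussianRandomProjection(n_components="auto", eps=0.5, random_state=0)
X_new = rp.fit_transform(X)
print(X_new.shape, rp.n_components_)   # projected shape matches the bound above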
"# Author: Nikolay Mayorov <[email protected]>\n# License: 3-clause BSD\n\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom scipy.special import digamma\n\nfrom ..metrics.cluster import mutual_info_score\nfrom ..neighbors import NearestNeighbors, KDTree\nfrom ..preprocessing import scale\nfrom ..utils import check_random_state\nfrom ..utils.fixes import _astype_copy_false\nfrom ..utils.validation import check_array, check_X_y\nfrom ..utils.multiclass import check_classification_targets\n\n\ndef _compute_mi_cc(x, y, n_neighbors):\n \"\"\"Compute mutual information between two continuous variables.\n\n Parameters\n ----------\n x, y : ndarray, shape (n_samples,)\n Samples of two continuous random variables, must have an identical\n shape.\n\n n_neighbors : int\n Number of nearest neighbors to search for each point, see [1]_.\n\n Returns\n -------\n mi : float\n Estimated mutual information. If it turned out to be negative it is\n replace by 0.\n\n Notes\n -----\n True mutual information can't be negative. If its estimate by a numerical\n method is negative, it means (providing the method is adequate) that the\n mutual information is close to 0 and replacing it by 0 is a reasonable\n strategy.\n\n References\n ----------\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. E 69, 2004.\n \"\"\"\n n_samples = x.size\n\n x = x.reshape((-1, 1))\n y = y.reshape((-1, 1))\n xy = np.hstack((x, y))\n\n # Here we rely on NearestNeighbors to select the fastest algorithm.\n nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)\n\n nn.fit(xy)\n radius = nn.kneighbors()[0]\n radius = np.nextafter(radius[:, -1], 0)\n\n # KDTree is explicitly fit to allow for the querying of number of\n # neighbors within a specified radius\n kd = KDTree(x, metric='chebyshev')\n nx = kd.query_radius(x, radius, count_only=True, return_distance=False)\n nx = np.array(nx) - 1.0\n\n kd = KDTree(y, metric='chebyshev')\n ny = kd.query_radius(y, radius, count_only=True, return_distance=False)\n ny = np.array(ny) - 1.0\n\n mi = (digamma(n_samples) + digamma(n_neighbors) -\n np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))\n\n return max(0, mi)\n\n\ndef _compute_mi_cd(c, d, n_neighbors):\n \"\"\"Compute mutual information between continuous and discrete variables.\n\n Parameters\n ----------\n c : ndarray, shape (n_samples,)\n Samples of a continuous random variable.\n\n d : ndarray, shape (n_samples,)\n Samples of a discrete random variable.\n\n n_neighbors : int\n Number of nearest neighbors to search for each point, see [1]_.\n\n Returns\n -------\n mi : float\n Estimated mutual information. If it turned out to be negative it is\n replace by 0.\n\n Notes\n -----\n True mutual information can't be negative. If its estimate by a numerical\n method is negative, it means (providing the method is adequate) that the\n mutual information is close to 0 and replacing it by 0 is a reasonable\n strategy.\n\n References\n ----------\n .. [1] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". 
PLoS ONE 9(2), 2014.\n \"\"\"\n n_samples = c.shape[0]\n c = c.reshape((-1, 1))\n\n radius = np.empty(n_samples)\n label_counts = np.empty(n_samples)\n k_all = np.empty(n_samples)\n nn = NearestNeighbors()\n for label in np.unique(d):\n mask = d == label\n count = np.sum(mask)\n if count > 1:\n k = min(n_neighbors, count - 1)\n nn.set_params(n_neighbors=k)\n nn.fit(c[mask])\n r = nn.kneighbors()[0]\n radius[mask] = np.nextafter(r[:, -1], 0)\n k_all[mask] = k\n label_counts[mask] = count\n\n # Ignore points with unique labels.\n mask = label_counts > 1\n n_samples = np.sum(mask)\n label_counts = label_counts[mask]\n k_all = k_all[mask]\n c = c[mask]\n radius = radius[mask]\n\n kd = KDTree(c)\n m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)\n m_all = np.array(m_all) - 1.0\n\n mi = (digamma(n_samples) + np.mean(digamma(k_all)) -\n np.mean(digamma(label_counts)) -\n np.mean(digamma(m_all + 1)))\n\n return max(0, mi)\n\n\ndef _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):\n \"\"\"Compute mutual information between two variables.\n\n This is a simple wrapper which selects a proper function to call based on\n whether `x` and `y` are discrete or not.\n \"\"\"\n if x_discrete and y_discrete:\n return mutual_info_score(x, y)\n elif x_discrete and not y_discrete:\n return _compute_mi_cd(y, x, n_neighbors)\n elif not x_discrete and y_discrete:\n return _compute_mi_cd(x, y, n_neighbors)\n else:\n return _compute_mi_cc(x, y, n_neighbors)\n\n\ndef _iterate_columns(X, columns=None):\n \"\"\"Iterate over columns of a matrix.\n\n Parameters\n ----------\n X : ndarray or csc_matrix, shape (n_samples, n_features)\n Matrix over which to iterate.\n\n columns : iterable or None, default=None\n Indices of columns to iterate over. If None, iterate over all columns.\n\n Yields\n ------\n x : ndarray, shape (n_samples,)\n Columns of `X` in dense format.\n \"\"\"\n if columns is None:\n columns = range(X.shape[1])\n\n if issparse(X):\n for i in columns:\n x = np.zeros(X.shape[0])\n start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]\n x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]\n yield x\n else:\n for i in columns:\n yield X[:, i]\n\n\ndef _estimate_mi(X, y, discrete_features='auto', discrete_target=False,\n n_neighbors=3, copy=True, random_state=None):\n \"\"\"Estimate mutual information between the features and the target.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Feature matrix.\n\n y : array-like of shape (n_samples,)\n Target vector.\n\n discrete_features : {'auto', bool, array-like}, default='auto'\n If bool, then determines whether to consider all features discrete\n or continuous. If array, then it should be either a boolean mask\n with shape (n_features,) or array with indices of discrete features.\n If 'auto', it is assigned to False for dense `X` and to True for\n sparse `X`.\n\n discrete_target : bool, default=False\n Whether to consider `y` as a discrete variable.\n\n n_neighbors : int, default=3\n Number of neighbors to use for MI estimation for continuous variables,\n see [1]_ and [2]_. Higher values reduce variance of the estimation, but\n could introduce a bias.\n\n copy : bool, default=True\n Whether to make a copy of the given data. 
If set to False, the initial\n data will be overwritten.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for adding small noise to\n continuous variables in order to remove repeated values.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n mi : ndarray, shape (n_features,)\n Estimated mutual information between each feature and the target.\n A negative value will be replaced by 0.\n\n References\n ----------\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. E 69, 2004.\n .. [2] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". PLoS ONE 9(2), 2014.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target)\n n_samples, n_features = X.shape\n\n if isinstance(discrete_features, (str, bool)):\n if isinstance(discrete_features, str):\n if discrete_features == 'auto':\n discrete_features = issparse(X)\n else:\n raise ValueError(\"Invalid string value for discrete_features.\")\n discrete_mask = np.empty(n_features, dtype=bool)\n discrete_mask.fill(discrete_features)\n else:\n discrete_features = check_array(discrete_features, ensure_2d=False)\n if discrete_features.dtype != 'bool':\n discrete_mask = np.zeros(n_features, dtype=bool)\n discrete_mask[discrete_features] = True\n else:\n discrete_mask = discrete_features\n\n continuous_mask = ~discrete_mask\n if np.any(continuous_mask) and issparse(X):\n raise ValueError(\"Sparse matrix `X` can't have continuous features.\")\n\n rng = check_random_state(random_state)\n if np.any(continuous_mask):\n if copy:\n X = X.copy()\n\n if not discrete_target:\n X[:, continuous_mask] = scale(X[:, continuous_mask],\n with_mean=False, copy=False)\n\n # Add small noise to continuous features as advised in Kraskov et. al.\n X = X.astype(float, **_astype_copy_false(X))\n means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))\n X[:, continuous_mask] += 1e-10 * means * rng.randn(\n n_samples, np.sum(continuous_mask))\n\n if not discrete_target:\n y = scale(y, with_mean=False)\n y += 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.randn(n_samples)\n\n mi = [_compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) for\n x, discrete_feature in zip(_iterate_columns(X), discrete_mask)]\n\n return np.array(mi)\n\n\ndef mutual_info_regression(X, y, *, discrete_features='auto', n_neighbors=3,\n copy=True, random_state=None):\n \"\"\"Estimate mutual information for a continuous target variable.\n\n Mutual information (MI) [1]_ between two random variables is a non-negative\n value, which measures the dependency between the variables. It is equal\n to zero if and only if two random variables are independent, and higher\n values mean higher dependency.\n\n The function relies on nonparametric methods based on entropy estimation\n from k-nearest neighbors distances as described in [2]_ and [3]_. Both\n methods are based on the idea originally proposed in [4]_.\n\n It can be used for univariate features selection, read more in the\n :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Feature matrix.\n\n y : array-like of shape (n_samples,)\n Target vector.\n\n discrete_features : {'auto', bool, array-like}, default='auto'\n If bool, then determines whether to consider all features discrete\n or continuous. 
If array, then it should be either a boolean mask\n with shape (n_features,) or array with indices of discrete features.\n If 'auto', it is assigned to False for dense `X` and to True for\n sparse `X`.\n\n n_neighbors : int, default=3\n Number of neighbors to use for MI estimation for continuous variables,\n see [2]_ and [3]_. Higher values reduce variance of the estimation, but\n could introduce a bias.\n\n copy : bool, default=True\n Whether to make a copy of the given data. If set to False, the initial\n data will be overwritten.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for adding small noise to\n continuous variables in order to remove repeated values.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n mi : ndarray, shape (n_features,)\n Estimated mutual information between each feature and the target.\n\n Notes\n -----\n 1. The term \"discrete features\" is used instead of naming them\n \"categorical\", because it describes the essence more accurately.\n For example, pixel intensities of an image are discrete features\n (but hardly categorical) and you will get better results if mark them\n as such. Also note, that treating a continuous variable as discrete and\n vice versa will usually give incorrect results, so be attentive about\n that.\n 2. True mutual information can't be negative. If its estimate turns out\n to be negative, it is replaced by zero.\n\n References\n ----------\n .. [1] `Mutual Information\n <https://en.wikipedia.org/wiki/Mutual_information>`_\n on Wikipedia.\n .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. E 69, 2004.\n .. [3] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". PLoS ONE 9(2), 2014.\n .. [4] L. F. Kozachenko, N. N. Leonenko, \"Sample Estimate of the Entropy\n of a Random Vector\", Probl. Peredachi Inf., 23:2 (1987), 9-16\n \"\"\"\n return _estimate_mi(X, y, discrete_features, False, n_neighbors,\n copy, random_state)\n\n\ndef mutual_info_classif(X, y, *, discrete_features='auto', n_neighbors=3,\n copy=True, random_state=None):\n \"\"\"Estimate mutual information for a discrete target variable.\n\n Mutual information (MI) [1]_ between two random variables is a non-negative\n value, which measures the dependency between the variables. It is equal\n to zero if and only if two random variables are independent, and higher\n values mean higher dependency.\n\n The function relies on nonparametric methods based on entropy estimation\n from k-nearest neighbors distances as described in [2]_ and [3]_. Both\n methods are based on the idea originally proposed in [4]_.\n\n It can be used for univariate features selection, read more in the\n :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Feature matrix.\n\n y : array-like of shape (n_samples,)\n Target vector.\n\n discrete_features : {'auto', bool, array-like}, default='auto'\n If bool, then determines whether to consider all features discrete\n or continuous. If array, then it should be either a boolean mask\n with shape (n_features,) or array with indices of discrete features.\n If 'auto', it is assigned to False for dense `X` and to True for\n sparse `X`.\n\n n_neighbors : int, default=3\n Number of neighbors to use for MI estimation for continuous variables,\n see [2]_ and [3]_. 
Higher values reduce variance of the estimation, but\n could introduce a bias.\n\n copy : bool, default=True\n Whether to make a copy of the given data. If set to False, the initial\n data will be overwritten.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for adding small noise to\n continuous variables in order to remove repeated values.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n mi : ndarray, shape (n_features,)\n Estimated mutual information between each feature and the target.\n\n Notes\n -----\n 1. The term \"discrete features\" is used instead of naming them\n \"categorical\", because it describes the essence more accurately.\n For example, pixel intensities of an image are discrete features\n (but hardly categorical) and you will get better results if mark them\n as such. Also note, that treating a continuous variable as discrete and\n vice versa will usually give incorrect results, so be attentive about\n that.\n 2. True mutual information can't be negative. If its estimate turns out\n to be negative, it is replaced by zero.\n\n References\n ----------\n .. [1] `Mutual Information\n <https://en.wikipedia.org/wiki/Mutual_information>`_\n on Wikipedia.\n .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\n information\". Phys. Rev. E 69, 2004.\n .. [3] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". PLoS ONE 9(2), 2014.\n .. [4] L. F. Kozachenko, N. N. Leonenko, \"Sample Estimate of the Entropy\n of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16\n \"\"\"\n check_classification_targets(y)\n return _estimate_mi(X, y, discrete_features, True, n_neighbors,\n copy, random_state)\n",
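A minimal sketch of the public entry points of this module, mutual_info_regression and mutual_info_classif; the synthetic data below is an assumption chosen so that the informative feature stands out with a clearly higher estimated MI.

import numpy as np
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression

rng = np.random.RandomState(0)
X = rng.randn(500, 3)
y_reg = X[:, 0] + 0.1 * rng.randn(500)   # continuous target driven by feature 0
y_clf = (X[:, 1] > 0).astype(int)        # discrete target driven by feature 1

print(mutual_info_regression(X, y_reg, random_state=0))  # largest value at index 0
print(mutual_info_classif(X, y_clf, random_state=0))     # largest value at index 1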
"\"\"\"\nThis module contains the loss classes.\n\nSpecific losses are used for regression, binary classification or multiclass\nclassification.\n\"\"\"\n# Author: Nicolas Hug\n\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nfrom scipy.special import expit, logsumexp, xlogy\n\nfrom .common import Y_DTYPE\nfrom .common import G_H_DTYPE\nfrom ._loss import _update_gradients_least_squares\nfrom ._loss import _update_gradients_hessians_least_squares\nfrom ._loss import _update_gradients_least_absolute_deviation\nfrom ._loss import _update_gradients_hessians_least_absolute_deviation\nfrom ._loss import _update_gradients_hessians_binary_crossentropy\nfrom ._loss import _update_gradients_hessians_categorical_crossentropy\nfrom ._loss import _update_gradients_hessians_poisson\nfrom ...utils.stats import _weighted_percentile\n\n\nclass BaseLoss(ABC):\n \"\"\"Base class for a loss.\"\"\"\n\n def __init__(self, hessians_are_constant):\n self.hessians_are_constant = hessians_are_constant\n\n def __call__(self, y_true, raw_predictions, sample_weight):\n \"\"\"Return the weighted average loss\"\"\"\n return np.average(self.pointwise_loss(y_true, raw_predictions),\n weights=sample_weight)\n\n @abstractmethod\n def pointwise_loss(self, y_true, raw_predictions):\n \"\"\"Return loss value for each input\"\"\"\n\n # This variable indicates whether the loss requires the leaves values to\n # be updated once the tree has been trained. The trees are trained to\n # predict a Newton-Raphson step (see grower._finalize_leaf()). But for\n # some losses (e.g. least absolute deviation) we need to adjust the tree\n # values to account for the \"line search\" of the gradient descent\n # procedure. See the original paper Greedy Function Approximation: A\n # Gradient Boosting Machine by Friedman\n # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.\n need_update_leaves_values = False\n\n def init_gradients_and_hessians(self, n_samples, prediction_dim,\n sample_weight):\n \"\"\"Return initial gradients and hessians.\n\n Unless hessians are constant, arrays are initialized with undefined\n values.\n\n Parameters\n ----------\n n_samples : int\n The number of samples passed to `fit()`.\n\n prediction_dim : int\n The dimension of a raw prediction, i.e. the number of trees\n built at each iteration. Equals 1 for regression and binary\n classification, or K where K is the number of classes for\n multiclass classification.\n\n sample_weight : array-like of shape(n_samples,) default=None\n Weights of training data.\n\n Returns\n -------\n gradients : ndarray, shape (prediction_dim, n_samples)\n The initial gradients. The array is not initialized.\n hessians : ndarray, shape (prediction_dim, n_samples)\n If hessians are constant (e.g. for `LeastSquares` loss, the\n array is initialized to ``1``. 
Otherwise, the array is allocated\n without being initialized.\n \"\"\"\n shape = (prediction_dim, n_samples)\n gradients = np.empty(shape=shape, dtype=G_H_DTYPE)\n\n if self.hessians_are_constant:\n # If the hessians are constant, we consider they are equal to 1.\n # - This is correct for the half LS loss\n # - For LAD loss, hessians are actually 0, but they are always\n # ignored anyway.\n hessians = np.ones(shape=(1, 1), dtype=G_H_DTYPE)\n else:\n hessians = np.empty(shape=shape, dtype=G_H_DTYPE)\n\n return gradients, hessians\n\n @abstractmethod\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n \"\"\"Return initial predictions (before the first iteration).\n\n Parameters\n ----------\n y_train : ndarray, shape (n_samples,)\n The target training values.\n\n sample_weight : array-like of shape(n_samples,) default=None\n Weights of training data.\n\n prediction_dim : int\n The dimension of one prediction: 1 for binary classification and\n regression, n_classes for multiclass classification.\n\n Returns\n -------\n baseline_prediction : float or ndarray, shape (1, prediction_dim)\n The baseline prediction.\n \"\"\"\n\n @abstractmethod\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n \"\"\"Update gradients and hessians arrays, inplace.\n\n The gradients (resp. hessians) are the first (resp. second) order\n derivatives of the loss for each sample with respect to the\n predictions of model, evaluated at iteration ``i - 1``.\n\n Parameters\n ----------\n gradients : ndarray, shape (prediction_dim, n_samples)\n The gradients (treated as OUT array).\n\n hessians : ndarray, shape (prediction_dim, n_samples) or \\\n (1,)\n The hessians (treated as OUT array).\n\n y_true : ndarray, shape (n_samples,)\n The true target values or each training sample.\n\n raw_predictions : ndarray, shape (prediction_dim, n_samples)\n The raw_predictions (i.e. values from the trees) of the tree\n ensemble at iteration ``i - 1``.\n\n sample_weight : array-like of shape(n_samples,) default=None\n Weights of training data.\n \"\"\"\n\n\nclass LeastSquares(BaseLoss):\n \"\"\"Least squares loss, for regression.\n\n For a given sample x_i, least squares loss is defined as::\n\n loss(x_i) = 0.5 * (y_true_i - raw_pred_i)**2\n\n This actually computes the half least squares loss to simplify\n the computation of the gradients and get a unit hessian (and be consistent\n with what is done in LightGBM).\n \"\"\"\n\n def __init__(self, sample_weight):\n # If sample weights are provided, the hessians and gradients\n # are multiplied by sample_weight, which means the hessians are\n # equal to sample weights.\n super().__init__(hessians_are_constant=sample_weight is None)\n\n def pointwise_loss(self, y_true, raw_predictions):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n loss = 0.5 * np.power(y_true - raw_predictions, 2)\n return loss\n\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n return np.average(y_train, weights=sample_weight)\n\n @staticmethod\n def inverse_link_function(raw_predictions):\n return raw_predictions\n\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n # shape (1, n_samples) --> (n_samples,). 
reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n gradients = gradients.reshape(-1)\n if sample_weight is None:\n _update_gradients_least_squares(gradients, y_true, raw_predictions)\n else:\n hessians = hessians.reshape(-1)\n _update_gradients_hessians_least_squares(gradients, hessians,\n y_true, raw_predictions,\n sample_weight)\n\n\nclass LeastAbsoluteDeviation(BaseLoss):\n \"\"\"Least absolute deviation, for regression.\n\n For a given sample x_i, the loss is defined as::\n\n loss(x_i) = |y_true_i - raw_pred_i|\n \"\"\"\n\n def __init__(self, sample_weight):\n # If sample weights are provided, the hessians and gradients\n # are multiplied by sample_weight, which means the hessians are\n # equal to sample weights.\n super().__init__(hessians_are_constant=sample_weight is None)\n\n # This variable indicates whether the loss requires the leaves values to\n # be updated once the tree has been trained. The trees are trained to\n # predict a Newton-Raphson step (see grower._finalize_leaf()). But for\n # some losses (e.g. least absolute deviation) we need to adjust the tree\n # values to account for the \"line search\" of the gradient descent\n # procedure. See the original paper Greedy Function Approximation: A\n # Gradient Boosting Machine by Friedman\n # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.\n need_update_leaves_values = True\n\n def pointwise_loss(self, y_true, raw_predictions):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n loss = np.abs(y_true - raw_predictions)\n return loss\n\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n if sample_weight is None:\n return np.median(y_train)\n else:\n return _weighted_percentile(y_train, sample_weight, 50)\n\n @staticmethod\n def inverse_link_function(raw_predictions):\n return raw_predictions\n\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n # shape (1, n_samples) --> (n_samples,). 
reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n gradients = gradients.reshape(-1)\n if sample_weight is None:\n _update_gradients_least_absolute_deviation(gradients, y_true,\n raw_predictions)\n else:\n hessians = hessians.reshape(-1)\n _update_gradients_hessians_least_absolute_deviation(\n gradients, hessians, y_true, raw_predictions, sample_weight)\n\n def update_leaves_values(self, grower, y_true, raw_predictions,\n sample_weight):\n # Update the values predicted by the tree with\n # median(y_true - raw_predictions).\n # See note about need_update_leaves_values in BaseLoss.\n\n # TODO: ideally this should be computed in parallel over the leaves\n # using something similar to _update_raw_predictions(), but this\n # requires a cython version of median()\n for leaf in grower.finalized_leaves:\n indices = leaf.sample_indices\n if sample_weight is None:\n median_res = np.median(y_true[indices]\n - raw_predictions[indices])\n else:\n median_res = _weighted_percentile(\n y_true[indices] - raw_predictions[indices],\n sample_weight=sample_weight[indices],\n percentile=50\n )\n leaf.value = grower.shrinkage * median_res\n # Note that the regularization is ignored here\n\n\nclass Poisson(BaseLoss):\n \"\"\"Poisson deviance loss with log-link, for regression.\n\n For a given sample x_i, Poisson deviance loss is defined as::\n\n loss(x_i) = y_true_i * log(y_true_i/exp(raw_pred_i))\n - y_true_i + exp(raw_pred_i))\n\n This actually computes half the Poisson deviance to simplify\n the computation of the gradients.\n \"\"\"\n\n def __init__(self, sample_weight):\n super().__init__(hessians_are_constant=False)\n\n inverse_link_function = staticmethod(np.exp)\n\n def pointwise_loss(self, y_true, raw_predictions):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n # TODO: For speed, we could remove the constant xlogy(y_true, y_true)\n # Advantage of this form: minimum of zero at raw_predictions = y_true.\n loss = (xlogy(y_true, y_true) - y_true * (raw_predictions + 1)\n + np.exp(raw_predictions))\n return loss\n\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n y_pred = np.average(y_train, weights=sample_weight)\n eps = np.finfo(y_train.dtype).eps\n y_pred = np.clip(y_pred, eps, None)\n return np.log(y_pred)\n\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n gradients = gradients.reshape(-1)\n hessians = hessians.reshape(-1)\n _update_gradients_hessians_poisson(gradients, hessians,\n y_true, raw_predictions,\n sample_weight)\n\n\nclass BinaryCrossEntropy(BaseLoss):\n \"\"\"Binary cross-entropy loss, for binary classification.\n\n For a given sample x_i, the binary cross-entropy loss is defined as the\n negative log-likelihood of the model which can be expressed as::\n\n loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i\n\n See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,\n section 4.4.1 (about logistic regression).\n \"\"\"\n\n def __init__(self, sample_weight):\n super().__init__(hessians_are_constant=False)\n\n inverse_link_function = staticmethod(expit)\n\n def pointwise_loss(self, y_true, raw_predictions):\n # shape (1, n_samples) --> (n_samples,). 
reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n # logaddexp(0, x) = log(1 + exp(x))\n loss = np.logaddexp(0, raw_predictions) - y_true * raw_predictions\n return loss\n\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n if prediction_dim > 2:\n raise ValueError(\n \"loss='binary_crossentropy' is not defined for multiclass\"\n \" classification with n_classes=%d, use\"\n \" loss='categorical_crossentropy' instead\" % prediction_dim)\n proba_positive_class = np.average(y_train, weights=sample_weight)\n eps = np.finfo(y_train.dtype).eps\n proba_positive_class = np.clip(proba_positive_class, eps, 1 - eps)\n # log(x / 1 - x) is the anti function of sigmoid, or the link function\n # of the Binomial model.\n return np.log(proba_positive_class / (1 - proba_positive_class))\n\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n gradients = gradients.reshape(-1)\n hessians = hessians.reshape(-1)\n _update_gradients_hessians_binary_crossentropy(\n gradients, hessians, y_true, raw_predictions, sample_weight)\n\n def predict_proba(self, raw_predictions):\n # shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to\n # return a view.\n raw_predictions = raw_predictions.reshape(-1)\n proba = np.empty((raw_predictions.shape[0], 2), dtype=Y_DTYPE)\n proba[:, 1] = expit(raw_predictions)\n proba[:, 0] = 1 - proba[:, 1]\n return proba\n\n\nclass CategoricalCrossEntropy(BaseLoss):\n \"\"\"Categorical cross-entropy loss, for multiclass classification.\n\n For a given sample x_i, the categorical cross-entropy loss is defined as\n the negative log-likelihood of the model and generalizes the binary\n cross-entropy to more than 2 classes.\n \"\"\"\n\n def __init__(self, sample_weight):\n super().__init__(hessians_are_constant=False)\n\n def pointwise_loss(self, y_true, raw_predictions):\n one_hot_true = np.zeros_like(raw_predictions)\n prediction_dim = raw_predictions.shape[0]\n for k in range(prediction_dim):\n one_hot_true[k, :] = (y_true == k)\n\n loss = (logsumexp(raw_predictions, axis=0) -\n (one_hot_true * raw_predictions).sum(axis=0))\n return loss\n\n def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):\n init_value = np.zeros(shape=(prediction_dim, 1), dtype=Y_DTYPE)\n eps = np.finfo(y_train.dtype).eps\n for k in range(prediction_dim):\n proba_kth_class = np.average(y_train == k,\n weights=sample_weight)\n proba_kth_class = np.clip(proba_kth_class, eps, 1 - eps)\n init_value[k, :] += np.log(proba_kth_class)\n\n return init_value\n\n def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):\n _update_gradients_hessians_categorical_crossentropy(\n gradients, hessians, y_true, raw_predictions, sample_weight)\n\n def predict_proba(self, raw_predictions):\n # TODO: This could be done in parallel\n # compute softmax (using exp(log(softmax)))\n proba = np.exp(raw_predictions -\n logsumexp(raw_predictions, axis=0)[np.newaxis, :])\n return proba.T\n\n\n_LOSSES = {\n 'squared_error': LeastSquares,\n 'absolute_error': LeastAbsoluteDeviation,\n 'binary_crossentropy': BinaryCrossEntropy,\n 'categorical_crossentropy': CategoricalCrossEntropy,\n 'poisson': Poisson,\n}\n",
"\"\"\"\n=============================\nSpecies distribution dataset\n=============================\n\nThis dataset represents the geographic distribution of species.\nThe dataset is provided by Phillips et. al. (2006).\n\nThe two species are:\n\n - `\"Bradypus variegatus\"\n <http://www.iucnredlist.org/details/3038/0>`_ ,\n the Brown-throated Sloth.\n\n - `\"Microryzomys minutus\"\n <http://www.iucnredlist.org/details/13408/0>`_ ,\n also known as the Forest Small Rice Rat, a rodent that lives in Peru,\n Colombia, Ecuador, Peru, and Venezuela.\n\nReferences\n----------\n\n`\"Maximum entropy modeling of species geographic distributions\"\n<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,\nR. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.\n\nNotes\n-----\n\nFor an example of using this dataset, see\n:ref:`examples/applications/plot_species_distribution_modeling.py\n<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.\n\"\"\"\n\n# Authors: Peter Prettenhofer <[email protected]>\n# Jake Vanderplas <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom io import BytesIO\nfrom os import makedirs, remove\nfrom os.path import exists\n\nimport logging\nimport numpy as np\n\nimport joblib\n\nfrom . import get_data_home\nfrom ._base import _fetch_remote\nfrom ._base import RemoteFileMetadata\nfrom ..utils import Bunch\nfrom ._base import _pkl_filepath\n\n# The original data can be found at:\n# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip\nSAMPLES = RemoteFileMetadata(\n filename='samples.zip',\n url='https://ndownloader.figshare.com/files/5976075',\n checksum=('abb07ad284ac50d9e6d20f1c4211e0fd'\n '3c098f7f85955e89d321ee8efe37ac28'))\n\n# The original data can be found at:\n# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip\nCOVERAGES = RemoteFileMetadata(\n filename='coverages.zip',\n url='https://ndownloader.figshare.com/files/5976078',\n checksum=('4d862674d72e79d6cee77e63b98651ec'\n '7926043ba7d39dcb31329cf3f6073807'))\n\nDATA_ARCHIVE_NAME = \"species_coverage.pkz\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _load_coverage(F, header_length=6, dtype=np.int16):\n \"\"\"Load a coverage file from an open file object.\n\n This will return a numpy array of the given dtype\n \"\"\"\n header = [F.readline() for _ in range(header_length)]\n make_tuple = lambda t: (t.split()[0], float(t.split()[1]))\n header = dict([make_tuple(line) for line in header])\n\n M = np.loadtxt(F, dtype=dtype)\n nodata = int(header[b'NODATA_value'])\n if nodata != -9999:\n M[nodata] = -9999\n return M\n\n\ndef _load_csv(F):\n \"\"\"Load csv file.\n\n Parameters\n ----------\n F : file object\n CSV file open in byte mode.\n\n Returns\n -------\n rec : np.ndarray\n record array representing the data\n \"\"\"\n names = F.readline().decode('ascii').strip().split(',')\n\n rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')\n rec.dtype.names = names\n return rec\n\n\ndef construct_grids(batch):\n \"\"\"Construct the map grid from the batch object\n\n Parameters\n ----------\n batch : Batch object\n The object returned by :func:`fetch_species_distributions`\n\n Returns\n -------\n (xgrid, ygrid) : 1-D arrays\n The grid corresponding to the values in batch.coverages\n \"\"\"\n # x,y coordinates for corner cells\n xmin = batch.x_left_lower_corner + batch.grid_size\n xmax = xmin + (batch.Nx * batch.grid_size)\n ymin = batch.y_left_lower_corner + batch.grid_size\n ymax = ymin + (batch.Ny * 
batch.grid_size)\n\n # x coordinates of the grid cells\n xgrid = np.arange(xmin, xmax, batch.grid_size)\n # y coordinates of the grid cells\n ygrid = np.arange(ymin, ymax, batch.grid_size)\n\n return (xgrid, ygrid)\n\n\ndef fetch_species_distributions(*, data_home=None,\n download_if_missing=True):\n \"\"\"Loader for species distribution dataset from Phillips et. al. (2006)\n\n Read more in the :ref:`User Guide <datasets>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and cache folder for the datasets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n coverages : array, shape = [14, 1592, 1212]\n These represent the 14 features measured\n at each point of the map grid.\n The latitude/longitude values for the grid are discussed below.\n Missing data is represented by the value -9999.\n train : record array, shape = (1624,)\n The training points for the data. Each point has three fields:\n\n - train['species'] is the species name\n - train['dd long'] is the longitude, in degrees\n - train['dd lat'] is the latitude, in degrees\n test : record array, shape = (620,)\n The test points for the data. Same format as the training data.\n Nx, Ny : integers\n The number of longitudes (x) and latitudes (y) in the grid\n x_left_lower_corner, y_left_lower_corner : floats\n The (x,y) position of the lower-left corner, in degrees\n grid_size : float\n The spacing between points of the grid, in degrees\n\n References\n ----------\n\n * `\"Maximum entropy modeling of species geographic distributions\"\n <http://rob.schapire.net/papers/ecolmod.pdf>`_\n S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,\n 190:231-259, 2006.\n\n Notes\n -----\n\n This dataset represents the geographic distribution of species.\n The dataset is provided by Phillips et. al. (2006).\n\n The two species are:\n\n - `\"Bradypus variegatus\"\n <http://www.iucnredlist.org/details/3038/0>`_ ,\n the Brown-throated Sloth.\n\n - `\"Microryzomys minutus\"\n <http://www.iucnredlist.org/details/13408/0>`_ ,\n also known as the Forest Small Rice Rat, a rodent that lives in Peru,\n Colombia, Ecuador, Peru, and Venezuela.\n\n - For an example of using this dataset with scikit-learn, see\n :ref:`examples/applications/plot_species_distribution_modeling.py\n <sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.\n \"\"\"\n data_home = get_data_home(data_home)\n if not exists(data_home):\n makedirs(data_home)\n\n # Define parameters for the data files. These should not be changed\n # unless the data model changes. 
They will be saved in the npz file\n # with the downloaded data.\n extra_params = dict(x_left_lower_corner=-94.8,\n Nx=1212,\n y_left_lower_corner=-56.05,\n Ny=1592,\n grid_size=0.05)\n dtype = np.int16\n\n archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)\n\n if not exists(archive_path):\n if not download_if_missing:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n logger.info('Downloading species data from %s to %s' % (\n SAMPLES.url, data_home))\n samples_path = _fetch_remote(SAMPLES, dirname=data_home)\n with np.load(samples_path) as X: # samples.zip is a valid npz\n for f in X.files:\n fhandle = BytesIO(X[f])\n if 'train' in f:\n train = _load_csv(fhandle)\n if 'test' in f:\n test = _load_csv(fhandle)\n remove(samples_path)\n\n logger.info('Downloading coverage data from %s to %s' % (\n COVERAGES.url, data_home))\n coverages_path = _fetch_remote(COVERAGES, dirname=data_home)\n with np.load(coverages_path) as X: # coverages.zip is a valid npz\n coverages = []\n for f in X.files:\n fhandle = BytesIO(X[f])\n logger.debug(' - converting {}'.format(f))\n coverages.append(_load_coverage(fhandle))\n coverages = np.asarray(coverages, dtype=dtype)\n remove(coverages_path)\n\n bunch = Bunch(coverages=coverages,\n test=test,\n train=train,\n **extra_params)\n joblib.dump(bunch, archive_path, compress=9)\n else:\n bunch = joblib.load(archive_path)\n\n return bunch\n",
"# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi\n#\n# License: BSD 3 clause\n\"\"\"\nMulti-class / multi-label utility function\n==========================================\n\n\"\"\"\nfrom collections.abc import Sequence\nfrom itertools import chain\nimport warnings\n\nfrom scipy.sparse import issparse\nfrom scipy.sparse.base import spmatrix\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse import lil_matrix\n\nimport numpy as np\n\nfrom .validation import check_array, _assert_all_finite\n\n\ndef _unique_multiclass(y):\n if hasattr(y, '__array__'):\n return np.unique(np.asarray(y))\n else:\n return set(y)\n\n\ndef _unique_indicator(y):\n return np.arange(\n check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1]\n )\n\n\n_FN_UNIQUE_LABELS = {\n 'binary': _unique_multiclass,\n 'multiclass': _unique_multiclass,\n 'multilabel-indicator': _unique_indicator,\n}\n\n\ndef unique_labels(*ys):\n \"\"\"Extract an ordered array of unique labels.\n\n We don't allow:\n - mix of multilabel and multiclass (single label) targets\n - mix of label indicator matrix and anything else,\n because there are no explicit labels)\n - mix of label indicator matrices of different sizes\n - mix of string and integer labels\n\n At the moment, we also don't allow \"multiclass-multioutput\" input type.\n\n Parameters\n ----------\n *ys : array-likes\n\n Returns\n -------\n out : ndarray of shape (n_unique_labels,)\n An ordered array of unique labels.\n\n Examples\n --------\n >>> from sklearn.utils.multiclass import unique_labels\n >>> unique_labels([3, 5, 5, 5, 7, 7])\n array([3, 5, 7])\n >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])\n array([1, 2, 3, 4])\n >>> unique_labels([1, 2, 10], [5, 11])\n array([ 1, 2, 5, 10, 11])\n \"\"\"\n if not ys:\n raise ValueError('No argument has been passed.')\n # Check that we don't mix label format\n\n ys_types = set(type_of_target(x) for x in ys)\n if ys_types == {\"binary\", \"multiclass\"}:\n ys_types = {\"multiclass\"}\n\n if len(ys_types) > 1:\n raise ValueError(\"Mix type of y not allowed, got types %s\" % ys_types)\n\n label_type = ys_types.pop()\n\n # Check consistency for the indicator format\n if (label_type == \"multilabel-indicator\" and\n len(set(check_array(y,\n accept_sparse=['csr', 'csc', 'coo']).shape[1]\n for y in ys)) > 1):\n raise ValueError(\"Multi-label binary indicator input with \"\n \"different numbers of labels\")\n\n # Get the unique set of labels\n _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)\n if not _unique_labels:\n raise ValueError(\"Unknown label type: %s\" % repr(ys))\n\n ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))\n\n # Check that we don't mix string type with number type\n if (len(set(isinstance(label, str) for label in ys_labels)) > 1):\n raise ValueError(\"Mix of label input types (string and number)\")\n\n return np.array(sorted(ys_labels))\n\n\ndef _is_integral_float(y):\n return y.dtype.kind == 'f' and np.all(y.astype(int) == y)\n\n\ndef is_multilabel(y):\n \"\"\" Check if ``y`` is in a multilabel format.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,)\n Target values.\n\n Returns\n -------\n out : bool\n Return ``True``, if ``y`` is in a multilabel format, else ```False``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.utils.multiclass import is_multilabel\n >>> is_multilabel([0, 1, 0, 1])\n False\n >>> is_multilabel([[1], [0, 2], []])\n False\n >>> is_multilabel(np.array([[1, 0], [0, 0]]))\n True\n >>> is_multilabel(np.array([[1], [0], [0]]))\n False\n 
>>> is_multilabel(np.array([[1, 0, 0]]))\n True\n \"\"\"\n if hasattr(y, '__array__') or isinstance(y, Sequence):\n # DeprecationWarning will be replaced by ValueError, see NEP 34\n # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html\n with warnings.catch_warnings():\n warnings.simplefilter('error', np.VisibleDeprecationWarning)\n try:\n y = np.asarray(y)\n except np.VisibleDeprecationWarning:\n # dtype=object should be provided explicitly for ragged arrays,\n # see NEP 34\n y = np.array(y, dtype=object)\n\n if not (hasattr(y, \"shape\") and y.ndim == 2 and y.shape[1] > 1):\n return False\n\n if issparse(y):\n if isinstance(y, (dok_matrix, lil_matrix)):\n y = y.tocsr()\n return (len(y.data) == 0 or np.unique(y.data).size == 1 and\n (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(np.unique(y.data))))\n else:\n labels = np.unique(y)\n\n return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(labels))\n\n\ndef check_classification_targets(y):\n \"\"\"Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n \"\"\"\n y_type = type_of_target(y)\n if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences']:\n raise ValueError(\"Unknown label type: %r\" % y_type)\n\n\ndef type_of_target(y):\n \"\"\"Determine the type of data indicated by the target.\n\n Note that this type is the most specific type that can be inferred.\n For example:\n\n * ``binary`` is more specific but compatible with ``multiclass``.\n * ``multiclass`` of integers is more specific but compatible with\n ``continuous``.\n * ``multilabel-indicator`` is more specific but compatible with\n ``multiclass-multioutput``.\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n target_type : str\n One of:\n\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d array of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d array that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a label indicator matrix, an array\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n array, sequence of sequences, or an array of non-sequence objects.\n\n Examples\n --------\n >>> import numpy as np\n >>> type_of_target([0.1, 0.6])\n 'continuous'\n >>> type_of_target([1, -1, -1, 1])\n 'binary'\n >>> type_of_target(['a', 'b', 'a'])\n 'binary'\n >>> type_of_target([1.0, 2.0])\n 'binary'\n >>> type_of_target([1, 0, 2])\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0])\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c'])\n 'multiclass'\n >>> type_of_target(np.array([[1, 2], [3, 1]]))\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]])\n 'multilabel-indicator'\n >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))\n 'continuous-multioutput'\n >>> 
type_of_target(np.array([[0, 1], [1, 1]]))\n 'multilabel-indicator'\n \"\"\"\n valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))\n and not isinstance(y, str))\n\n if not valid:\n raise ValueError('Expected array-like (array or non-string sequence), '\n 'got %r' % y)\n\n sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])\n if sparse_pandas:\n raise ValueError(\"y cannot be class 'SparseSeries' or 'SparseArray'\")\n\n if is_multilabel(y):\n return 'multilabel-indicator'\n\n # DeprecationWarning will be replaced by ValueError, see NEP 34\n # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html\n with warnings.catch_warnings():\n warnings.simplefilter('error', np.VisibleDeprecationWarning)\n try:\n y = np.asarray(y)\n except np.VisibleDeprecationWarning:\n # dtype=object should be provided explicitly for ragged arrays,\n # see NEP 34\n y = np.asarray(y, dtype=object)\n\n # The old sequence of sequences format\n try:\n if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)\n and not isinstance(y[0], str)):\n raise ValueError('You appear to be using a legacy multi-label data'\n ' representation. Sequence of sequences are no'\n ' longer supported; use a binary array or sparse'\n ' matrix instead - the MultiLabelBinarizer'\n ' transformer can convert to this format.')\n except IndexError:\n pass\n\n # Invalid inputs\n if y.ndim > 2 or (y.dtype == object and len(y) and\n not isinstance(y.flat[0], str)):\n return 'unknown' # [[[1, 2]]] or [obj_1] and not [\"label_1\"]\n\n if y.ndim == 2 and y.shape[1] == 0:\n return 'unknown' # [[]]\n\n if y.ndim == 2 and y.shape[1] > 1:\n suffix = \"-multioutput\" # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # check float and contains non-integer float values\n if y.dtype.kind == 'f' and np.any(y != y.astype(int)):\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n _assert_all_finite(y)\n return 'continuous' + suffix\n\n if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):\n return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n else:\n return 'binary' # [1, 2] or [[\"a\"], [\"b\"]]\n\n\ndef _check_partial_fit_first_call(clf, classes=None):\n \"\"\"Private helper function for factorizing common classes param logic.\n\n Estimators that implement the ``partial_fit`` API need to be provided with\n the list of possible classes at the first call to partial_fit.\n\n Subsequent calls to partial_fit should check that ``classes`` is still\n consistent with a previous value of ``clf.classes_`` when provided.\n\n This function returns True if it detects that this was the first call to\n ``partial_fit`` on ``clf``. 
In that case the ``classes_`` attribute is also\n set on ``clf``.\n\n \"\"\"\n if getattr(clf, 'classes_', None) is None and classes is None:\n raise ValueError(\"classes must be passed on the first call \"\n \"to partial_fit.\")\n\n elif classes is not None:\n if getattr(clf, 'classes_', None) is not None:\n if not np.array_equal(clf.classes_, unique_labels(classes)):\n raise ValueError(\n \"`classes=%r` is not the same as on last call \"\n \"to partial_fit, was: %r\" % (classes, clf.classes_))\n\n else:\n # This is the first call to partial_fit\n clf.classes_ = unique_labels(classes)\n return True\n\n # classes is None and clf.classes_ has already previously been set:\n # nothing to do\n return False\n\n\ndef class_distribution(y, sample_weight=None):\n \"\"\"Compute class priors from multioutput-multiclass target data.\n\n Parameters\n ----------\n y : {array-like, sparse matrix} of size (n_samples, n_outputs)\n The labels for each example.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n classes : list of size n_outputs of ndarray of size (n_classes,)\n List of classes for each column.\n\n n_classes : list of int of size n_outputs\n Number of classes in each column.\n\n class_prior : list of size n_outputs of ndarray of size (n_classes,)\n Class distribution of each column.\n\n \"\"\"\n classes = []\n n_classes = []\n class_prior = []\n\n n_samples, n_outputs = y.shape\n if sample_weight is not None:\n sample_weight = np.asarray(sample_weight)\n\n if issparse(y):\n y = y.tocsc()\n y_nnz = np.diff(y.indptr)\n\n for k in range(n_outputs):\n col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]\n # separate sample weights for zero and non-zero elements\n if sample_weight is not None:\n nz_samp_weight = sample_weight[col_nonzero]\n zeros_samp_weight_sum = (np.sum(sample_weight) -\n np.sum(nz_samp_weight))\n else:\n nz_samp_weight = None\n zeros_samp_weight_sum = y.shape[0] - y_nnz[k]\n\n classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],\n return_inverse=True)\n class_prior_k = np.bincount(y_k, weights=nz_samp_weight)\n\n # An explicit zero was found, combine its weight with the weight\n # of the implicit zeros\n if 0 in classes_k:\n class_prior_k[classes_k == 0] += zeros_samp_weight_sum\n\n # If an there is an implicit zero and it is not in classes and\n # class_prior, make an entry for it\n if 0 not in classes_k and y_nnz[k] < y.shape[0]:\n classes_k = np.insert(classes_k, 0, 0)\n class_prior_k = np.insert(class_prior_k, 0,\n zeros_samp_weight_sum)\n\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior.append(class_prior_k / class_prior_k.sum())\n else:\n for k in range(n_outputs):\n classes_k, y_k = np.unique(y[:, k], return_inverse=True)\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior_k = np.bincount(y_k, weights=sample_weight)\n class_prior.append(class_prior_k / class_prior_k.sum())\n\n return (classes, n_classes, class_prior)\n\n\ndef _ovr_decision_function(predictions, confidences, n_classes):\n \"\"\"Compute a continuous, tie-breaking OvR decision function from OvO.\n\n It is important to include a continuous value, not only votes,\n to make computing AUC or calibration meaningful.\n\n Parameters\n ----------\n predictions : array-like of shape (n_samples, n_classifiers)\n Predicted classes for each binary classifier.\n\n confidences : array-like of shape (n_samples, n_classifiers)\n Decision functions or predicted probabilities for positive class\n 
for each binary classifier.\n\n n_classes : int\n Number of classes. n_classifiers must be\n ``n_classes * (n_classes - 1 ) / 2``.\n \"\"\"\n n_samples = predictions.shape[0]\n votes = np.zeros((n_samples, n_classes))\n sum_of_confidences = np.zeros((n_samples, n_classes))\n\n k = 0\n for i in range(n_classes):\n for j in range(i + 1, n_classes):\n sum_of_confidences[:, i] -= confidences[:, k]\n sum_of_confidences[:, j] += confidences[:, k]\n votes[predictions[:, k] == 0, i] += 1\n votes[predictions[:, k] == 1, j] += 1\n k += 1\n\n # Monotonically transform the sum_of_confidences to (-1/3, 1/3)\n # and add it with votes. The monotonic transformation is\n # f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2\n # to ensure that we won't reach the limits and change vote order.\n # The motivation is to use confidence levels as a way to break ties in\n # the votes without switching any decision made based on a difference\n # of 1 vote.\n transformed_confidences = (sum_of_confidences /\n (3 * (np.abs(sum_of_confidences) + 1)))\n return votes + transformed_confidences\n",
"\"\"\"\n=========================================================================\nComparing randomized search and grid search for hyperparameter estimation\n=========================================================================\n\nCompare randomized search and grid search for optimizing hyperparameters of a\nlinear SVM with SGD training.\nAll parameters that influence the learning are searched simultaneously\n(except for the number of estimators, which poses a time / quality tradeoff).\n\nThe randomized search and the grid search explore exactly the same space of\nparameters. The result in parameter settings is quite similar, while the run\ntime for randomized search is drastically lower.\n\nThe performance is may slightly worse for the randomized search, and is likely\ndue to a noise effect and would not carry over to a held-out test set.\n\nNote that in practice, one would not search over this many different parameters\nsimultaneously using grid search, but pick only the ones deemed most important.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\n\nfrom time import time\nimport scipy.stats as stats\nfrom sklearn.utils.fixes import loguniform\n\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.datasets import load_digits\nfrom sklearn.linear_model import SGDClassifier\n\n# get some data\nX, y = load_digits(return_X_y=True)\n\n# build a classifier\nclf = SGDClassifier(loss='hinge', penalty='elasticnet',\n fit_intercept=True)\n\n\n# Utility function to report best scores\ndef report(results, n_top=3):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(\"Model with rank: {0}\".format(i))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\"\n .format(results['mean_test_score'][candidate],\n results['std_test_score'][candidate]))\n print(\"Parameters: {0}\".format(results['params'][candidate]))\n print(\"\")\n\n\n# specify parameters and distributions to sample from\nparam_dist = {'average': [True, False],\n 'l1_ratio': stats.uniform(0, 1),\n 'alpha': loguniform(1e-4, 1e0)}\n\n# run randomized search\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist,\n n_iter=n_iter_search)\n\nstart = time()\nrandom_search.fit(X, y)\nprint(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n \" parameter settings.\" % ((time() - start), n_iter_search))\nreport(random_search.cv_results_)\n\n# use a full grid over all parameters\nparam_grid = {'average': [True, False],\n 'l1_ratio': np.linspace(0, 1, num=10),\n 'alpha': np.power(10, np.arange(-4, 1, dtype=float))}\n\n# run grid search\ngrid_search = GridSearchCV(clf, param_grid=param_grid)\nstart = time()\ngrid_search.fit(X, y)\n\nprint(\"GridSearchCV took %.2f seconds for %d candidate parameter settings.\"\n % (time() - start, len(grid_search.cv_results_['params'])))\nreport(grid_search.cv_results_)\n",
"\"\"\"\nThis module contains the BinMapper class.\n\nBinMapper is used for mapping a real-valued dataset into integer-valued bins.\nBin thresholds are computed with the quantiles so that each bin contains\napproximately the same number of samples.\n\"\"\"\n# Author: Nicolas Hug\n\nimport numpy as np\n\nfrom ...utils import check_random_state, check_array\nfrom ...base import BaseEstimator, TransformerMixin\nfrom ...utils.validation import check_is_fitted\nfrom ._binning import _map_to_bins\nfrom .common import X_DTYPE, X_BINNED_DTYPE, ALMOST_INF, X_BITSET_INNER_DTYPE\nfrom ._bitset import set_bitset_memoryview\n\n\ndef _find_binning_thresholds(col_data, max_bins):\n \"\"\"Extract quantiles from a continuous feature.\n\n Missing values are ignored for finding the thresholds.\n\n Parameters\n ----------\n col_data : array-like, shape (n_samples,)\n The continuous feature to bin.\n max_bins: int\n The maximum number of bins to use for non-missing values. If for a\n given feature the number of unique values is less than ``max_bins``,\n then those unique values will be used to compute the bin thresholds,\n instead of the quantiles\n\n Return\n ------\n binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,)\n The increasing numeric values that can be used to separate the bins.\n A given value x will be mapped into bin value i iff\n bining_thresholds[i - 1] < x <= binning_thresholds[i]\n \"\"\"\n # ignore missing values when computing bin thresholds\n missing_mask = np.isnan(col_data)\n if missing_mask.any():\n col_data = col_data[~missing_mask]\n col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE)\n distinct_values = np.unique(col_data)\n if len(distinct_values) <= max_bins:\n midpoints = distinct_values[:-1] + distinct_values[1:]\n midpoints *= .5\n else:\n # We sort again the data in this case. We could compute\n # approximate midpoint percentiles using the output of\n # np.unique(col_data, return_counts) instead but this is more\n # work and the performance benefit will be limited because we\n # work on a fixed-size subsample of the full data.\n percentiles = np.linspace(0, 100, num=max_bins + 1)\n percentiles = percentiles[1:-1]\n midpoints = np.percentile(col_data, percentiles,\n interpolation='midpoint').astype(X_DTYPE)\n assert midpoints.shape[0] == max_bins - 1\n\n # We avoid having +inf thresholds: +inf thresholds are only allowed in\n # a \"split on nan\" situation.\n np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)\n return midpoints\n\n\nclass _BinMapper(TransformerMixin, BaseEstimator):\n \"\"\"Transformer that maps a dataset into integer-valued bins.\n\n For continuous features, the bins are created in a feature-wise fashion,\n using quantiles so that each bins contains approximately the same number\n of samples. For large datasets, quantiles are computed on a subset of the\n data to speed-up the binning, but the quantiles should remain stable.\n\n For categorical features, the raw categorical values are expected to be\n in [0, 254] (this is not validated here though) and each category\n corresponds to a bin. All categorical values must be known at\n initialization: transform() doesn't know how to bin unknown categorical\n values. Note that transform() is only used on non-training data in the\n case of early stopping.\n\n Features with a small number of values may be binned into less than\n ``n_bins`` bins. 
The last bin (at index ``n_bins - 1``) is always reserved\n for missing values.\n\n Parameters\n ----------\n n_bins : int, default=256\n The maximum number of bins to use (including the bin for missing\n values). Should be in [3, 256]. Non-missing values are binned on\n ``max_bins = n_bins - 1`` bins. The last bin is always reserved for\n missing values. If for a given feature the number of unique values is\n less than ``max_bins``, then those unique values will be used to\n compute the bin thresholds, instead of the quantiles. For categorical\n features indicated by ``is_categorical``, see the docstring of\n ``is_categorical`` for details on this procedure.\n subsample : int or None, default=2e5\n If ``n_samples > subsample``, then ``subsample`` samples will be\n randomly chosen to compute the quantiles. If ``None``, the whole data\n is used.\n is_categorical : ndarray of bool of shape (n_features,), default=None\n Indicates categorical features. By default, all features are\n considered continuous.\n known_categories : list of {ndarray, None} of shape (n_features,), \\\n default=None\n For each categorical feature, the array indicates the set of unique\n categorical values. These should be the possible values over all the\n data, not just the training data. For continuous features, the\n corresponding entry should be None.\n random_state : int, RandomState instance or None, default=None\n Pseudo-random number generator to control the random sub-sampling.\n Pass an int for reproducible output across multiple\n function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n bin_thresholds_ : list of ndarray\n For each feature, each array indicates how to map a feature into a\n binned feature. The semantics and size depend on the nature of the\n feature:\n - for real-valued features, the array corresponds to the real-valued\n bin thresholds (the upper bound of each bin). There are ``max_bins\n - 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of\n bins used for non-missing values.\n - for categorical features, the array is a map from a binned category\n value to the raw category value. The size of the array is equal to\n ``min(max_bins, category_cardinality)`` where we ignore missing\n values in the cardinality.\n n_bins_non_missing_ : ndarray, dtype=np.uint32\n For each feature, gives the number of bins actually used for\n non-missing values. For features with a lot of unique values, this is\n equal to ``n_bins - 1``.\n is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8\n Indicator for categorical features.\n missing_values_bin_idx_ : np.uint8\n The index of the bin where missing values are mapped. This is a\n constant across all features. This corresponds to the last bin, and\n it is always equal to ``n_bins - 1``. 
Note that if ``n_bins_missing_``\n is less than ``n_bins - 1`` for a given feature, then there are\n empty (and unused) bins.\n \"\"\"\n def __init__(self, n_bins=256, subsample=int(2e5), is_categorical=None,\n known_categories=None, random_state=None):\n self.n_bins = n_bins\n self.subsample = subsample\n self.is_categorical = is_categorical\n self.known_categories = known_categories\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit data X by computing the binning thresholds.\n\n The last bin is reserved for missing values, whether missing values\n are present in the data or not.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to bin.\n y: None\n Ignored.\n\n Returns\n -------\n self : object\n \"\"\"\n if not (3 <= self.n_bins <= 256):\n # min is 3: at least 2 distinct bins and a missing values bin\n raise ValueError('n_bins={} should be no smaller than 3 '\n 'and no larger than 256.'.format(self.n_bins))\n\n X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)\n max_bins = self.n_bins - 1\n\n rng = check_random_state(self.random_state)\n if self.subsample is not None and X.shape[0] > self.subsample:\n subset = rng.choice(X.shape[0], self.subsample, replace=False)\n X = X.take(subset, axis=0)\n\n if self.is_categorical is None:\n self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)\n else:\n self.is_categorical_ = np.asarray(self.is_categorical,\n dtype=np.uint8)\n\n n_features = X.shape[1]\n known_categories = self.known_categories\n if known_categories is None:\n known_categories = [None] * n_features\n\n # validate is_categorical and known_categories parameters\n for f_idx in range(n_features):\n is_categorical = self.is_categorical_[f_idx]\n known_cats = known_categories[f_idx]\n if is_categorical and known_cats is None:\n raise ValueError(\n f\"Known categories for feature {f_idx} must be provided.\"\n )\n if not is_categorical and known_cats is not None:\n raise ValueError(\n f\"Feature {f_idx} isn't marked as a categorical feature, \"\n f\"but categories were passed.\"\n )\n\n self.missing_values_bin_idx_ = self.n_bins - 1\n\n self.bin_thresholds_ = []\n n_bins_non_missing = []\n\n for f_idx in range(n_features):\n if not self.is_categorical_[f_idx]:\n thresholds = _find_binning_thresholds(X[:, f_idx], max_bins)\n n_bins_non_missing.append(thresholds.shape[0] + 1)\n else:\n # Since categories are assumed to be encoded in\n # [0, n_cats] and since n_cats <= max_bins,\n # the thresholds *are* the unique categorical values. This will\n # lead to the correct mapping in transform()\n thresholds = known_categories[f_idx]\n n_bins_non_missing.append(thresholds.shape[0])\n\n self.bin_thresholds_.append(thresholds)\n\n self.n_bins_non_missing_ = np.array(n_bins_non_missing,\n dtype=np.uint32)\n return self\n\n def transform(self, X):\n \"\"\"Bin data X.\n\n Missing values will be mapped to the last bin.\n\n For categorical features, the mapping will be incorrect for unknown\n categories. Since the BinMapper is given known_categories of the\n entire training data (i.e. 
before the call to train_test_split() in\n case of early-stopping), this never happens.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to bin.\n\n Returns\n -------\n X_binned : array-like of shape (n_samples, n_features)\n The binned data (fortran-aligned).\n \"\"\"\n X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)\n check_is_fitted(self)\n if X.shape[1] != self.n_bins_non_missing_.shape[0]:\n raise ValueError(\n 'This estimator was fitted with {} features but {} got passed '\n 'to transform()'.format(self.n_bins_non_missing_.shape[0],\n X.shape[1])\n )\n binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order='F')\n _map_to_bins(X, self.bin_thresholds_, self.missing_values_bin_idx_,\n binned)\n return binned\n\n def make_known_categories_bitsets(self):\n \"\"\"Create bitsets of known categories.\n\n Returns\n -------\n - known_cat_bitsets : ndarray of shape (n_categorical_features, 8)\n Array of bitsets of known categories, for each categorical feature.\n - f_idx_map : ndarray of shape (n_features,)\n Map from original feature index to the corresponding index in the\n known_cat_bitsets array.\n \"\"\"\n\n categorical_features_indices = np.flatnonzero(self.is_categorical_)\n\n n_features = self.is_categorical_.size\n n_categorical_features = categorical_features_indices.size\n\n f_idx_map = np.zeros(n_features, dtype=np.uint32)\n f_idx_map[categorical_features_indices] = np.arange(\n n_categorical_features, dtype=np.uint32)\n\n known_categories = self.bin_thresholds_\n\n known_cat_bitsets = np.zeros((n_categorical_features, 8),\n dtype=X_BITSET_INNER_DTYPE)\n\n # TODO: complexity is O(n_categorical_features * 255). Maybe this is\n # worth cythonizing\n for mapped_f_idx, f_idx in enumerate(categorical_features_indices):\n for raw_cat_val in known_categories[f_idx]:\n set_bitset_memoryview(known_cat_bitsets[mapped_f_idx],\n raw_cat_val)\n\n return known_cat_bitsets, f_idx_map\n",
"\"\"\"Test kddcup99 loader, if the data is available,\nor if specifically requested via environment variable\n(e.g. for travis cron job).\n\nOnly 'percent10' mode is tested, as the full data\nis too big to use in unit-testing.\n\"\"\"\n\nfrom functools import partial\nimport pytest\n\nfrom sklearn.datasets.tests.test_common import check_as_frame\nfrom sklearn.datasets.tests.test_common import check_pandas_dependency_message\nfrom sklearn.datasets.tests.test_common import check_return_X_y\n\n\[email protected](\"as_frame\", [True, False])\[email protected](\n \"subset, n_samples, n_features\",\n [(None, 494021, 41),\n (\"SA\", 100655, 41),\n (\"SF\", 73237, 4),\n (\"http\", 58725, 3),\n (\"smtp\", 9571, 3)]\n)\ndef test_fetch_kddcup99_percent10(\n fetch_kddcup99_fxt, as_frame, subset, n_samples, n_features\n):\n data = fetch_kddcup99_fxt(subset=subset, as_frame=as_frame)\n assert data.data.shape == (n_samples, n_features)\n assert data.target.shape == (n_samples,)\n if as_frame:\n assert data.frame.shape == (n_samples, n_features + 1)\n\n\ndef test_fetch_kddcup99_return_X_y(fetch_kddcup99_fxt):\n fetch_func = partial(fetch_kddcup99_fxt, subset='smtp')\n data = fetch_func()\n check_return_X_y(data, fetch_func)\n\n\ndef test_fetch_kddcup99_as_frame(fetch_kddcup99_fxt):\n bunch = fetch_kddcup99_fxt()\n check_as_frame(bunch, fetch_kddcup99_fxt)\n\n\ndef test_fetch_kddcup99_shuffle(fetch_kddcup99_fxt):\n dataset = fetch_kddcup99_fxt(\n random_state=0, subset='SA', percent10=True,\n )\n dataset_shuffled = fetch_kddcup99_fxt(\n random_state=0, subset='SA', shuffle=True, percent10=True,\n )\n assert set(dataset['target']) == set(dataset_shuffled['target'])\n assert dataset_shuffled.data.shape == dataset.data.shape\n assert dataset_shuffled.target.shape == dataset.target.shape\n\n\ndef test_pandas_dependency_message(fetch_kddcup99_fxt, hide_available_pandas):\n check_pandas_dependency_message(fetch_kddcup99_fxt)\n\n\ndef test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):\n \"\"\"Check that a nice error message is raised when cache is corrupted.\"\"\"\n kddcup99_dir = tmp_path / \"kddcup99_10-py3\"\n kddcup99_dir.mkdir()\n samples_path = kddcup99_dir / \"samples\"\n\n with samples_path.open(\"wb\") as f:\n f.write(b\"THIS IS CORRUPTED\")\n\n msg = (f\"The cache for fetch_kddcup99 is invalid, please \"\n f\"delete {str(kddcup99_dir)} and run the fetch_kddcup99 again\")\n\n with pytest.raises(IOError, match=msg):\n fetch_kddcup99_fxt(data_home=str(tmp_path))\n",
"\"\"\"\n=============================\nRecursive feature elimination\n=============================\n\nA recursive feature elimination example showing the relevance of pixels in\na digit classification task.\n\n.. note::\n\n See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`\n\n\"\"\"\nprint(__doc__)\n\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_digits\nfrom sklearn.feature_selection import RFE\nimport matplotlib.pyplot as plt\n\n# Load the digits dataset\ndigits = load_digits()\nX = digits.images.reshape((len(digits.images), -1))\ny = digits.target\n\n# Create the RFE object and rank each pixel\nsvc = SVC(kernel=\"linear\", C=1)\nrfe = RFE(estimator=svc, n_features_to_select=1, step=1)\nrfe.fit(X, y)\nranking = rfe.ranking_.reshape(digits.images[0].shape)\n\n# Plot pixel ranking\nplt.matshow(ranking, cmap=plt.cm.Blues)\nplt.colorbar()\nplt.title(\"Ranking of pixels with RFE\")\nplt.show()\n",
"\"\"\"\nDistribution functions used in GLM\n\"\"\"\n\n# Author: Christian Lorentzen <[email protected]>\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nimport numbers\n\nimport numpy as np\nfrom scipy.special import xlogy\n\n\nDistributionBoundary = namedtuple(\"DistributionBoundary\",\n (\"value\", \"inclusive\"))\n\n\nclass ExponentialDispersionModel(metaclass=ABCMeta):\n r\"\"\"Base class for reproductive Exponential Dispersion Models (EDM).\n\n The pdf of :math:`Y\\sim \\mathrm{EDM}(y_\\textrm{pred}, \\phi)` is given by\n\n .. math:: p(y| \\theta, \\phi) = c(y, \\phi)\n \\exp\\left(\\frac{\\theta y-A(\\theta)}{\\phi}\\right)\n = \\tilde{c}(y, \\phi)\n \\exp\\left(-\\frac{d(y, y_\\textrm{pred})}{2\\phi}\\right)\n\n with mean :math:`\\mathrm{E}[Y] = A'(\\theta) = y_\\textrm{pred}`,\n variance :math:`\\mathrm{Var}[Y] = \\phi \\cdot v(y_\\textrm{pred})`,\n unit variance :math:`v(y_\\textrm{pred})` and\n unit deviance :math:`d(y,y_\\textrm{pred})`.\n\n Methods\n -------\n deviance\n deviance_derivative\n in_y_range\n unit_deviance\n unit_deviance_derivative\n unit_variance\n\n References\n ----------\n https://en.wikipedia.org/wiki/Exponential_dispersion_model.\n \"\"\"\n\n def in_y_range(self, y):\n \"\"\"Returns ``True`` if y is in the valid range of Y~EDM.\n\n Parameters\n ----------\n y : array of shape (n_samples,)\n Target values.\n \"\"\"\n # Note that currently supported distributions have +inf upper bound\n\n if not isinstance(self._lower_bound, DistributionBoundary):\n raise TypeError('_lower_bound attribute must be of type '\n 'DistributionBoundary')\n\n if self._lower_bound.inclusive:\n return np.greater_equal(y, self._lower_bound.value)\n else:\n return np.greater(y, self._lower_bound.value)\n\n @abstractmethod\n def unit_variance(self, y_pred):\n r\"\"\"Compute the unit variance function.\n\n The unit variance :math:`v(y_\\textrm{pred})` determines the variance as\n a function of the mean :math:`y_\\textrm{pred}` by\n :math:`\\mathrm{Var}[Y_i] = \\phi/s_i*v(y_\\textrm{pred}_i)`.\n It can also be derived from the unit deviance\n :math:`d(y,y_\\textrm{pred})` as\n\n .. math:: v(y_\\textrm{pred}) = \\frac{2}{\n \\frac{\\partial^2 d(y,y_\\textrm{pred})}{\n \\partialy_\\textrm{pred}^2}}\\big|_{y=y_\\textrm{pred}}\n\n See also :func:`variance`.\n\n Parameters\n ----------\n y_pred : array of shape (n_samples,)\n Predicted mean.\n \"\"\"\n\n @abstractmethod\n def unit_deviance(self, y, y_pred, check_input=False):\n r\"\"\"Compute the unit deviance.\n\n The unit_deviance :math:`d(y,y_\\textrm{pred})` can be defined by the\n log-likelihood as\n :math:`d(y,y_\\textrm{pred}) = -2\\phi\\cdot\n \\left(loglike(y,y_\\textrm{pred},\\phi) - loglike(y,y,\\phi)\\right).`\n\n Parameters\n ----------\n y : array of shape (n_samples,)\n Target values.\n\n y_pred : array of shape (n_samples,)\n Predicted mean.\n\n check_input : bool, default=False\n If True raise an exception on invalid y or y_pred values, otherwise\n they will be propagated as NaN.\n Returns\n -------\n deviance: array of shape (n_samples,)\n Computed deviance\n \"\"\"\n\n def unit_deviance_derivative(self, y, y_pred):\n r\"\"\"Compute the derivative of the unit deviance w.r.t. 
y_pred.\n\n The derivative of the unit deviance is given by\n :math:`\\frac{\\partial}{\\partial y_\\textrm{pred}}d(y,y_\\textrm{pred})\n = -2\\frac{y-y_\\textrm{pred}}{v(y_\\textrm{pred})}`\n with unit variance :math:`v(y_\\textrm{pred})`.\n\n Parameters\n ----------\n y : array of shape (n_samples,)\n Target values.\n\n y_pred : array of shape (n_samples,)\n Predicted mean.\n \"\"\"\n return -2 * (y - y_pred) / self.unit_variance(y_pred)\n\n def deviance(self, y, y_pred, weights=1):\n r\"\"\"Compute the deviance.\n\n The deviance is a weighted sum of the per sample unit deviances,\n :math:`D = \\sum_i s_i \\cdot d(y_i, y_\\textrm{pred}_i)`\n with weights :math:`s_i` and unit deviance\n :math:`d(y,y_\\textrm{pred})`.\n In terms of the log-likelihood it is :math:`D = -2\\phi\\cdot\n \\left(loglike(y,y_\\textrm{pred},\\frac{\\phi}{s})\n - loglike(y,y,\\frac{\\phi}{s})\\right)`.\n\n Parameters\n ----------\n y : array of shape (n_samples,)\n Target values.\n\n y_pred : array of shape (n_samples,)\n Predicted mean.\n\n weights : {int, array of shape (n_samples,)}, default=1\n Weights or exposure to which the variance is inversely proportional.\n \"\"\"\n return np.sum(weights * self.unit_deviance(y, y_pred))\n\n def deviance_derivative(self, y, y_pred, weights=1):\n r\"\"\"Compute the derivative of the deviance w.r.t. y_pred.\n\n It gives :math:`\\frac{\\partial}{\\partial y_\\textrm{pred}}\n D(y, y_\\textrm{pred}; weights)`.\n\n Parameters\n ----------\n y : array, shape (n_samples,)\n Target values.\n\n y_pred : array, shape (n_samples,)\n Predicted mean.\n\n weights : {int, array of shape (n_samples,)}, default=1\n Weights or exposure to which the variance is inversely proportional.\n \"\"\"\n return weights * self.unit_deviance_derivative(y, y_pred)\n\n\nclass TweedieDistribution(ExponentialDispersionModel):\n r\"\"\"A class for the Tweedie distribution.\n\n A Tweedie distribution with mean :math:`y_\\textrm{pred}=\\mathrm{E}[Y]`\n is uniquely defined by its mean-variance relationship\n :math:`\\mathrm{Var}[Y] \\propto y_\\textrm{pred}^{power}`.\n\n Special cases are:\n\n ===== ================\n Power Distribution\n ===== ================\n 0 Normal\n 1 Poisson\n (1,2) Compound Poisson\n 2 Gamma\n 3 Inverse Gaussian\n ===== ================\n\n Parameters\n ----------\n power : float, default=0\n The variance power of the `unit_variance`\n :math:`v(y_\\textrm{pred}) = y_\\textrm{pred}^{power}`.\n For ``0<power<1``, no distribution exists.\n \"\"\"\n def __init__(self, power=0):\n self.power = power\n\n @property\n def power(self):\n return self._power\n\n @power.setter\n def power(self, power):\n # We use a property with a setter, to update lower and\n # upper bound when the power parameter is updated e.g. 
in grid\n # search.\n if not isinstance(power, numbers.Real):\n raise TypeError('power must be a real number, input was {0}'\n .format(power))\n\n if power <= 0:\n # Extreme Stable or Normal distribution\n self._lower_bound = DistributionBoundary(-np.Inf, inclusive=False)\n elif 0 < power < 1:\n raise ValueError('Tweedie distribution is only defined for '\n 'power<=0 and power>=1.')\n elif 1 <= power < 2:\n # Poisson or Compound Poisson distribution\n self._lower_bound = DistributionBoundary(0, inclusive=True)\n elif power >= 2:\n # Gamma, Positive Stable, Inverse Gaussian distributions\n self._lower_bound = DistributionBoundary(0, inclusive=False)\n else: # pragma: no cover\n # this branch should be unreachable.\n raise ValueError\n\n self._power = power\n\n def unit_variance(self, y_pred):\n \"\"\"Compute the unit variance of a Tweedie distribution\n v(y_\\textrm{pred})=y_\\textrm{pred}**power.\n\n Parameters\n ----------\n y_pred : array of shape (n_samples,)\n Predicted mean.\n \"\"\"\n return np.power(y_pred, self.power)\n\n def unit_deviance(self, y, y_pred, check_input=False):\n r\"\"\"Compute the unit deviance.\n\n The unit_deviance :math:`d(y,y_\\textrm{pred})` can be defined by the\n log-likelihood as\n :math:`d(y,y_\\textrm{pred}) = -2\\phi\\cdot\n \\left(loglike(y,y_\\textrm{pred},\\phi) - loglike(y,y,\\phi)\\right).`\n\n Parameters\n ----------\n y : array of shape (n_samples,)\n Target values.\n\n y_pred : array of shape (n_samples,)\n Predicted mean.\n\n check_input : bool, default=False\n If True raise an exception on invalid y or y_pred values, otherwise\n they will be propagated as NaN.\n Returns\n -------\n deviance: array of shape (n_samples,)\n Computed deviance\n \"\"\"\n p = self.power\n\n if check_input:\n message = (\"Mean Tweedie deviance error with power={} can only be \"\n \"used on \".format(p))\n if p < 0:\n # 'Extreme stable', y any realy number, y_pred > 0\n if (y_pred <= 0).any():\n raise ValueError(message + \"strictly positive y_pred.\")\n elif p == 0:\n # Normal, y and y_pred can be any real number\n pass\n elif 0 < p < 1:\n raise ValueError(\"Tweedie deviance is only defined for \"\n \"power<=0 and power>=1.\")\n elif 1 <= p < 2:\n # Poisson and Compount poisson distribution, y >= 0, y_pred > 0\n if (y < 0).any() or (y_pred <= 0).any():\n raise ValueError(message + \"non-negative y and strictly \"\n \"positive y_pred.\")\n elif p >= 2:\n # Gamma and Extreme stable distribution, y and y_pred > 0\n if (y <= 0).any() or (y_pred <= 0).any():\n raise ValueError(message\n + \"strictly positive y and y_pred.\")\n else: # pragma: nocover\n # Unreachable statement\n raise ValueError\n\n if p < 0:\n # 'Extreme stable', y any realy number, y_pred > 0\n dev = 2 * (np.power(np.maximum(y, 0), 2-p) / ((1-p) * (2-p))\n - y * np.power(y_pred, 1-p) / (1-p)\n + np.power(y_pred, 2-p) / (2-p))\n\n elif p == 0:\n # Normal distribution, y and y_pred any real number\n dev = (y - y_pred)**2\n elif p < 1:\n raise ValueError(\"Tweedie deviance is only defined for power<=0 \"\n \"and power>=1.\")\n elif p == 1:\n # Poisson distribution\n dev = 2 * (xlogy(y, y/y_pred) - y + y_pred)\n elif p == 2:\n # Gamma distribution\n dev = 2 * (np.log(y_pred/y) + y/y_pred - 1)\n else:\n dev = 2 * (np.power(y, 2-p) / ((1-p) * (2-p))\n - y * np.power(y_pred, 1-p) / (1-p)\n + np.power(y_pred, 2-p) / (2-p))\n return dev\n\n\nclass NormalDistribution(TweedieDistribution):\n \"\"\"Class for the Normal (aka Gaussian) distribution.\"\"\"\n def __init__(self):\n super().__init__(power=0)\n\n\nclass 
PoissonDistribution(TweedieDistribution):\n \"\"\"Class for the scaled Poisson distribution.\"\"\"\n def __init__(self):\n super().__init__(power=1)\n\n\nclass GammaDistribution(TweedieDistribution):\n \"\"\"Class for the Gamma distribution.\"\"\"\n def __init__(self):\n super().__init__(power=2)\n\n\nclass InverseGaussianDistribution(TweedieDistribution):\n \"\"\"Class for the scaled InverseGaussianDistribution distribution.\"\"\"\n def __init__(self):\n super().__init__(power=3)\n\n\nEDM_DISTRIBUTIONS = {\n 'normal': NormalDistribution,\n 'poisson': PoissonDistribution,\n 'gamma': GammaDistribution,\n 'inverse-gaussian': InverseGaussianDistribution,\n}\n"
] | [
[
"matplotlib.pyplot.legend",
"sklearn.model_selection.GridSearchCV",
"sklearn.decomposition.NMF",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"sklearn.feature_selection.SelectKBest",
"matplotlib.pyplot.ylabel",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.bar",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.xlabel",
"numpy.array",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.asarray",
"numpy.sum",
"numpy.insert",
"numpy.empty"
],
[
"numpy.diag",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.plot",
"scipy.sparse.issparse",
"sklearn.datasets.fetch_olivetti_faces",
"scipy.io.loadmat",
"sklearn.datasets.fetch_lfw_people",
"scipy.linalg.norm",
"sklearn.datasets.fetch_20newsgroups_vectorized",
"numpy.repeat",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.annotate",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.svds",
"scipy.sparse.linalg.norm",
"sklearn.utils.validation.check_random_state",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"sklearn.datasets.fetch_openml",
"sklearn.utils.gen_batches",
"matplotlib.pyplot.ylabel",
"sklearn.utils.extmath.randomized_svd",
"sklearn.datasets.make_sparse_uncorrelated",
"sklearn.datasets.fetch_rcv1",
"matplotlib.pyplot.scatter",
"sklearn.datasets.make_low_rank_matrix",
"numpy.random.uniform",
"matplotlib.pyplot.xlabel",
"numpy.vstack"
],
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.datasets.make_classification",
"sklearn.inspection._partial_dependence._grid_from_X",
"sklearn.cluster.KMeans",
"numpy.asarray",
"sklearn.ensemble.HistGradientBoostingClassifier",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.base.clone",
"numpy.mean",
"sklearn.inspection._partial_dependence._partial_dependence_brute",
"numpy.iinfo",
"sklearn.utils._testing.assert_allclose",
"numpy.allclose",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.utils._testing.assert_array_equal",
"sklearn.ensemble.HistGradientBoostingRegressor",
"sklearn.dummy.DummyClassifier",
"sklearn.tree.tests.test_tree.assert_is_subtree",
"sklearn.datasets.load_iris",
"sklearn.inspection.partial_dependence",
"sklearn.inspection._partial_dependence._partial_dependence_recursion",
"sklearn.utils.validation.check_random_state",
"numpy.testing.assert_allclose",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.corrcoef",
"numpy.array",
"numpy.random.RandomState",
"sklearn.preprocessing.scale",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.LogisticRegression",
"sklearn.pipeline.make_pipeline",
"sklearn.preprocessing.RobustScaler",
"numpy.ones",
"sklearn.datasets.make_regression",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.StandardScaler"
],
[
"matplotlib.pyplot.yticks",
"sklearn.linear_model.LogisticRegression",
"numpy.asarray",
"sklearn.model_selection.train_test_split",
"sklearn.pipeline.Pipeline",
"sklearn.metrics.classification_report",
"sklearn.base.clone",
"sklearn.neural_network.BernoulliRBM",
"matplotlib.pyplot.subplot",
"numpy.apply_along_axis",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.suptitle",
"sklearn.preprocessing.minmax_scale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.asarray",
"numpy.logical_not",
"numpy.dtype"
],
[
"sklearn.datasets.make_classification",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.FunctionTransformer",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.subplots",
"sklearn.metrics.plot_roc_curve",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.ensemble.RandomTreesEmbedding",
"sklearn.set_config"
],
[
"sklearn.manifold.SpectralEmbedding",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.min",
"matplotlib.pyplot.title",
"numpy.concatenate",
"numpy.max",
"numpy.random.normal",
"sklearn.cluster.AgglomerativeClustering",
"numpy.apply_along_axis",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.cm.nipy_spectral",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.linear_model.SGDClassifier",
"sklearn.datasets.fetch_20newsgroups"
],
[
"scipy.sparse.issparse",
"sklearn.random_projection._gaussian_random_matrix",
"numpy.unique",
"numpy.sqrt",
"sklearn.metrics.euclidean_distances",
"numpy.linalg.norm",
"scipy.sparse.csr_matrix",
"numpy.full",
"sklearn.random_projection.SparseRandomProjection",
"numpy.size",
"sklearn.random_projection.johnson_lindenstrauss_min_dim",
"numpy.mean",
"sklearn.utils._testing.assert_almost_equal",
"numpy.var",
"numpy.random.RandomState",
"sklearn.utils._testing.assert_array_equal",
"sklearn.random_projection._sparse_random_matrix",
"numpy.random.randint"
],
[
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.abs",
"numpy.unique",
"scipy.special.digamma",
"numpy.any",
"numpy.array",
"numpy.nextafter",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"numpy.log",
"numpy.abs",
"scipy.special.expit",
"numpy.clip",
"numpy.power",
"numpy.median",
"numpy.ones",
"numpy.finfo",
"numpy.logaddexp",
"scipy.special.xlogy",
"numpy.zeros_like",
"numpy.exp",
"numpy.average",
"numpy.zeros",
"scipy.special.logsumexp",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.arange",
"numpy.load",
"numpy.loadtxt"
],
[
"scipy.sparse.issparse",
"numpy.abs",
"numpy.unique",
"numpy.asarray",
"numpy.diff",
"numpy.bincount",
"numpy.insert",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.RandomizedSearchCV",
"numpy.linspace",
"sklearn.utils.fixes.loguniform",
"numpy.arange",
"numpy.flatnonzero",
"scipy.stats.uniform",
"sklearn.datasets.load_digits",
"sklearn.linear_model.SGDClassifier"
],
[
"numpy.linspace",
"numpy.clip",
"numpy.isnan",
"numpy.unique",
"numpy.ascontiguousarray",
"numpy.arange",
"numpy.asarray",
"numpy.percentile",
"numpy.flatnonzero",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros"
],
[
"sklearn.datasets.tests.test_common.check_as_frame",
"sklearn.datasets.tests.test_common.check_pandas_dependency_message",
"sklearn.datasets.tests.test_common.check_return_X_y"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.colorbar",
"sklearn.datasets.load_digits",
"sklearn.feature_selection.RFE",
"sklearn.svm.SVC",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.show"
],
[
"numpy.log",
"numpy.maximum",
"numpy.greater",
"numpy.power",
"numpy.greater_equal",
"scipy.special.xlogy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
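The record closed above embeds a Tweedie-deviance implementation together with its defining formulas. As a minimal standalone sketch (plain Python, not part of the dataset record; the helper name tweedie_unit_deviance is hypothetical), the unit deviance d(y, y_pred) for the special powers handled in that code can be evaluated as follows:

# Sketch only: evaluates the Tweedie unit deviance for the special powers
# covered in the embedded code (0: Normal, 1: Poisson, 2: Gamma) plus the
# general branch; assumes y and y_pred are valid for the chosen power.
import numpy as np
from scipy.special import xlogy

def tweedie_unit_deviance(y, y_pred, power=0.0):
    y = np.asarray(y, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    if power == 0:
        return (y - y_pred) ** 2                         # Normal: squared error
    if power == 1:
        return 2 * (xlogy(y, y / y_pred) - y + y_pred)   # Poisson; xlogy(0, .) == 0
    if power == 2:
        return 2 * (np.log(y_pred / y) + y / y_pred - 1) # Gamma
    if 0 < power < 1:
        raise ValueError("no Tweedie distribution exists for 0 < power < 1")
    # general branch, same formula as in the embedded code
    return 2 * (np.power(np.maximum(y, 0), 2 - power) / ((1 - power) * (2 - power))
                - y * np.power(y_pred, 1 - power) / (1 - power)
                + np.power(y_pred, 2 - power) / (2 - power))

# Weighted deviance D = sum_i s_i * d(y_i, y_pred_i), illustrative numbers only
y = np.array([0.0, 2.0, 5.0])
mu = np.array([1.0, 2.0, 4.0])
w = np.array([1.0, 1.0, 2.0])
print(np.sum(w * tweedie_unit_deviance(y, mu, power=1.0)))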
junhyeokahn/RLBasicAlgorithm | [
"25e3e471336cb7855e28c0f9905e2214afdeb2e4"
] | [
"DP/PolicyEvaluation.py"
] | [
"import numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\")\n\nfrom lib.envs.gridworld import GridworldEnv\n\nenv = GridworldEnv()\n\ndef policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n \"\"\"\n Evaluate a policy given an environment and a full description of the\n environment's dynamics.\n\n Args:\n policy: [S, A] shaped matrix representing the policy.\n\n env: OpenAI env. env.P represents the transition probabilities of the\n environment. env.P[s][a] is a (prob, next_state, reward, done) tuple.\n\n theta: We stop evaluation once our value function change is less than\n theta for all states.\n\n discount_factor: gamma discount factor.\n\n Returns:\n Vector of length env.nS representing the value function.\n \"\"\"\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n while True:\n delta=0\n for s in range(env.nS):\n v=0\n for a, action_prob in enumerate(policy[s]):\n for transition_prob, next_state, reward, done in env.P[s][a]:\n v += action_prob * (reward + discount_factor*V[next_state])\n delta = max(delta, np.abs(v-V[s]))\n V[s] = v\n\n if delta<theta:\n break\n return np.array(V)\n\nrandom_policy = np.ones([env.nS, env.nA]) / env.nA\nv = policy_eval(random_policy, env)\n\n# Test: Make sure the evaluated policy is what we expected\nexpected_v = np.array([ 0, -14, -20, -22,\n -14, -18, -20, -20,\n -20, -20, -18, -14,\n -22, -20, -14, 0 ])\nnp.testing.assert_array_almost_equal(v, expected_v, decimal=2)\n\nprint(\"Reshaped Grid Value Function:\")\nprint(v.reshape(env.shape))\nprint(\"\")\n"
] | [
[
"numpy.abs",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
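The record above is an iterative policy evaluation script. The sketch below restates the Bellman expectation backup it performs, on a small hypothetical env.P-style transition dict (a toy three-state MDP, not the GridworldEnv used in the record); unlike the embedded script, it also weights each successor by the transition probability, which only makes a difference once transitions are stochastic.

# Sketch only: iterative policy evaluation over a toy MDP.
# P[s][a] = [(prob, next_state, reward, done), ...] mirrors the env.P layout
# assumed by the record; the numbers here are illustrative.
import numpy as np

P = {
    0: {0: [(1.0, 0, 0.0, True)], 1: [(1.0, 0, 0.0, True)]},          # terminal
    1: {0: [(0.8, 0, -1.0, False), (0.2, 2, -1.0, False)],
        1: [(1.0, 2, -1.0, False)]},
    2: {0: [(1.0, 1, -1.0, False)], 1: [(1.0, 0, -1.0, False)]},
}
nS, nA = 3, 2
policy = np.full((nS, nA), 1.0 / nA)   # uniform random policy
gamma, theta = 1.0, 1e-8

V = np.zeros(nS)
while True:
    delta = 0.0
    for s in range(nS):
        v = 0.0
        for a, pi_sa in enumerate(policy[s]):
            for prob, s_next, reward, done in P[s][a]:
                # full expectation: policy prob * transition prob * backup
                v += pi_sa * prob * (reward + gamma * V[s_next])
        delta = max(delta, abs(v - V[s]))
        V[s] = v
    if delta < theta:
        break
print(V)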
pmartincalvo/osmnx | [
"15eddc0672f0ec951ada1b89eb417df44d35636e",
"15eddc0672f0ec951ada1b89eb417df44d35636e"
] | [
"osmnx/utils.py",
"osmnx/footprints.py"
] | [
"import sys\nimport os\nimport datetime as dt\nimport unicodedata\nimport networkx as nx\nimport numpy as np\nimport logging as lg\nfrom . import settings\n\n\ndef citation():\n \"\"\"\n Print the OSMnx package's citation information.\n\n Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing,\n and Visualizing Complex Street Networks. Computers, Environment and Urban\n Systems, 65(126-139). https://doi.org/10.1016/j.compenvurbsys.2017.05.004\n \"\"\"\n\n cite = (\"To cite OSMnx, use:\\n\\n\"\n \"Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing, \"\n \"and Visualizing Complex Street Networks. Computers, Environment and Urban \"\n \"Systems, 65(126-139). https://doi.org/10.1016/j.compenvurbsys.2017.05.004\"\n \"\\n\\n\"\n \"BibTeX entry for LaTeX users:\\n\\n\"\n\n \"@article{boeing_osmnx_2017,\\n\"\n \" title = {{OSMnx}: {New} {Methods} for {Acquiring}, {Constructing}, {Analyzing}, and {Visualizing} {Complex} {Street} {Networks}},\\n\"\n \" volume = {65},\\n\"\n \" doi = {10.1016/j.compenvurbsys.2017.05.004},\\n\"\n \" number = {126-139},\\n\"\n \" journal = {Computers, Environment and Urban Systems},\\n\"\n \" author = {Boeing, Geoff},\\n\"\n \" year = {2017}\\n\"\n \"}\")\n\n print(cite)\n\n\ndef make_str(value):\n \"\"\"\n Convert a passed-in value to unicode if Python 2, or string if Python 3.\n\n Parameters\n ----------\n value : any\n the value to convert to unicode/string\n\n Returns\n -------\n unicode or string\n \"\"\"\n if (sys.version_info > (3, 0)):\n # python 3.x has no unicode type, so if error, use str type\n return str(value)\n else:\n # for python 2.x compatibility, use unicode\n return unicode(value)\n\n\ndef config(data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n use_cache=settings.use_cache,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_tags_node=settings.useful_tags_node,\n useful_tags_path=settings.useful_tags_path,\n osm_xml_node_attrs=settings.osm_xml_node_attrs,\n osm_xml_node_tags=settings.osm_xml_node_tags,\n osm_xml_way_attrs=settings.osm_xml_way_attrs,\n osm_xml_way_tags=settings.osm_xml_way_tags,\n default_access=settings.default_access,\n default_crs=settings.default_crs,\n default_user_agent=settings.default_user_agent,\n default_referer=settings.default_referer,\n default_accept_language=settings.default_accept_language,\n nominatim_endpoint=settings.nominatim_endpoint,\n nominatim_key=settings.nominatim_key,\n overpass_endpoint=settings.overpass_endpoint,\n all_oneway=settings.all_oneway):\n \"\"\"\n Configure osmnx by setting the default global vars to desired values.\n\n Parameters\n ---------\n data_folder : string\n where to save and load data files\n logs_folder : string\n where to write the log files\n imgs_folder : string\n where to save figures\n cache_folder : string\n where to save the http response cache\n use_cache : bool\n if True, use a local cache to save/retrieve http responses instead of\n calling API repetitively for the same request URL\n log_file : bool\n if true, save log output to a log file in logs_folder\n log_console : bool\n if true, print log output to the console\n log_level : int\n one of the logger.level constants\n log_name : string\n name of the logger\n useful_tags_node : list\n a list of useful OSM tags to attempt to save from node elements\n 
useful_tags_path : list\n a list of useful OSM tags to attempt to save from path elements\n default_access : string\n default filter for OSM \"access\" key\n default_crs : string\n default CRS to set when creating graphs\n default_user_agent : string\n HTTP header user-agent\n default_referer : string\n HTTP header referer\n default_accept_language : string\n HTTP header accept-language\n nominatim_endpoint : string\n which API endpoint to use for nominatim queries\n nominatim_key : string\n your API key, if you are using an endpoint that requires one\n overpass_endpoint : string\n which API endpoint to use for overpass queries\n all_oneway : boolean\n if True, forces all paths to be loaded as oneway ways, preserving\n the original order of nodes stored in the OSM way XML.\n\n Returns\n -------\n None\n \"\"\"\n\n # set each global variable to the passed-in parameter value\n settings.use_cache = use_cache\n settings.cache_folder = cache_folder\n settings.data_folder = data_folder\n settings.imgs_folder = imgs_folder\n settings.logs_folder = logs_folder\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_tags_node = useful_tags_node\n settings.useful_tags_path = useful_tags_path\n settings.useful_tags_node = list(set(useful_tags_node + osm_xml_node_attrs + osm_xml_node_tags))\n settings.useful_tags_path = list(set(useful_tags_path + osm_xml_way_attrs + osm_xml_way_tags))\n settings.osm_xml_node_attrs = osm_xml_node_attrs\n settings.osm_xml_node_tags = osm_xml_node_tags\n settings.osm_xml_way_attrs = osm_xml_way_attrs\n settings.osm_xml_way_tags = osm_xml_way_tags\n settings.default_access = default_access\n settings.default_crs = default_crs\n settings.default_user_agent = default_user_agent\n settings.default_referer = default_referer\n settings.default_accept_language = default_accept_language\n settings.nominatim_endpoint = nominatim_endpoint\n settings.nominatim_key = nominatim_key\n settings.overpass_endpoint = overpass_endpoint\n settings.all_oneway = all_oneway\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n log('Configured osmnx')\n\n\ndef great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):\n \"\"\"\n Vectorized function to calculate the great-circle distance between two\n points or between vectors of points, using haversine.\n\n Parameters\n ----------\n lat1 : float or array of float\n lng1 : float or array of float\n lat2 : float or array of float\n lng2 : float or array of float\n earth_radius : numeric\n radius of earth in units in which distance will be returned (default is\n meters)\n\n Returns\n -------\n distance : float or vector of floats\n distance or vector of distances from (lat1, lng1) to (lat2, lng2) in\n units of earth_radius\n \"\"\"\n\n phi1 = np.deg2rad(lat1)\n phi2 = np.deg2rad(lat2)\n d_phi = phi2 - phi1\n\n theta1 = np.deg2rad(lng1)\n theta2 = np.deg2rad(lng2)\n d_theta = theta2 - theta1\n\n h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2\n h = np.minimum(1.0, h) # protect against floating point errors\n\n arc = 2 * np.arcsin(np.sqrt(h))\n\n # return distance in units of earth_radius\n distance = arc * earth_radius\n return distance\n\n\ndef euclidean_dist_vec(y1, x1, y2, x2):\n \"\"\"\n Vectorized function to calculate the euclidean distance between two points\n or between vectors of points.\n\n Parameters\n 
----------\n y1 : float or array of float\n x1 : float or array of float\n y2 : float or array of float\n x2 : float or array of float\n\n Returns\n -------\n distance : float or array of float\n distance or vector of distances from (x1, y1) to (x2, y2) in graph units\n \"\"\"\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance\n\n\ndef log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n # if logging to file is turned on\n if settings.log_file:\n # get the current logger (or create a new one, if none), then log\n # message at requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print to\n # the console\n if settings.log_console:\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout. this prevents\n # logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for console display so it doesn't break\n # windows terminals\n message = unicodedata.normalize('NFKD', make_str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n\n\ndef get_logger(level=None, name=None, filename=None):\n \"\"\"\n Create a logger or return the current one if already instantiated.\n\n Parameters\n ----------\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n logger.logger\n \"\"\"\n\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if not getattr(logger, 'handler_set', None):\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime('%Y_%m_%d')\n log_filename = os.path.join(settings.logs_folder, '{}_{}.log'.format(filename, todays_date))\n\n # if the logs folder does not already exist, create it\n if not os.path.exists(settings.logs_folder):\n os.makedirs(settings.logs_folder)\n\n # create file handler and log formatter and set them up\n handler = lg.FileHandler(log_filename, encoding='utf-8')\n formatter = lg.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.handler_set = True\n\n return logger\n\n\ndef get_unique_nodes_ordered_from_way(way_edges_df):\n \"\"\"\n Function to recover the original order of nodes from a dataframe\n of edges associated with a single OSM way.\n\n Parameters\n ----------\n way_edges_df : pandas.DataFrame()\n Dataframe containing columns 'u' and 'v' 
corresponding to\n origin/desitination nodes.\n\n Returns\n -------\n unique_ordered_nodes : list\n An ordered list of unique node IDs\n\n NOTE: If the edges do not all connect (e.g. [(1, 2), (2,3),\n (10, 11), (11, 12), (12, 13)]), then this method will return\n only those nodes associated with the largest component of\n connected edges, even if subsequent connected chunks are contain\n more total nodes. This is done to ensure a proper topological\n representation of nodes in the XML way records because if there\n are unconnected components, the sorting algorithm cannot recover\n their original order. I don't believe that we would ever encounter\n this kind of disconnected structure of nodes within a given way,\n but as best I could tell it is not explicitly forbidden in the\n OSM XML design schema. I'm using a print statement right now to\n tell the user whether or not any nodes have been dropped and\n how many.\n \"\"\"\n\n G = nx.MultiDiGraph()\n all_nodes = list(way_edges_df['u'].values) + \\\n list(way_edges_df['v'].values)\n\n G.add_nodes_from(all_nodes)\n G.add_edges_from(way_edges_df[['u', 'v']].values)\n wccs = nx.weakly_connected_components(G)\n largest_wcc = max(wccs, key=len)\n node_subset = set(largest_wcc)\n\n # NOTE: this code (L387-403) is copied from geo_utils.py\n # which cannot be imported here without triggering a\n # circular import error. This should be fixed next time the\n # code base is refactored\n\n # copy nodes into new graph\n G2 = G.__class__()\n G2.add_nodes_from((n, G.nodes[n]) for n in node_subset)\n\n # copy edges to new graph, including parallel edges\n if G2.is_multigraph:\n G2.add_edges_from((n, nbr, key, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, keydict in nbrs.items() if nbr in node_subset\n for key, d in keydict.items())\n else:\n G2.add_edges_from((n, nbr, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, d in nbrs.items() if nbr in node_subset)\n\n # update graph attribute dict, and return graph\n G2.graph.update(G.graph)\n\n unique_ordered_nodes = list(nx.topological_sort(G2))\n num_unique_nodes = len(np.unique(all_nodes))\n\n if len(unique_ordered_nodes) < num_unique_nodes:\n print('Recovered order for {0} of {1} nodes'.format(\n len(unique_ordered_nodes), num_unique_nodes))\n\n return unique_ordered_nodes",
"################################################################################\n# Module: footprints.py\n# Description: Download and plot footprints from OpenStreetMap\n# License: MIT, see full license in LICENSE.txt\n# Web: https://github.com/gboeing/osmnx\n################################################################################\n\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport time\nfrom descartes import PolygonPatch\nfrom matplotlib.collections import PatchCollection\nfrom shapely.geometry import LineString\nfrom shapely.geometry import Polygon\nfrom shapely.geometry import MultiPolygon\nfrom shapely.ops import polygonize\n\nfrom . import settings\nfrom .core import consolidate_subdivide_geometry\nfrom .core import get_polygons_coordinates\nfrom .core import overpass_request\nfrom .core import bbox_from_point\nfrom .core import gdf_from_place\nfrom .plot import save_and_show\nfrom .projection import project_geometry\nfrom .utils import log\nfrom .geo_utils import geocode\n\n\ndef osm_footprints_download(polygon=None, north=None, south=None, east=None, west=None,\n footprint_type='building', timeout=180, memory=None,\n max_query_area_size=50*1000*50*1000):\n \"\"\"\n Download OpenStreetMap footprint data as a list of json responses.\n\n Parameters\n ----------\n polygon : shapely Polygon or MultiPolygon\n geographic shape to fetch the footprints within\n north : float\n northern latitude of bounding box\n south : float\n southern latitude of bounding box\n east : float\n eastern longitude of bounding box\n west : float\n western longitude of bounding box\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n timeout : int\n the timeout interval for requests and to pass to API\n memory : int\n server memory allocation size for the query, in bytes. 
If none, server\n will use its default allocation size\n max_query_area_size : float\n max area for any part of the geometry, in the units the geometry is in:\n any polygon bigger will get divided up for multiple queries to API\n (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are\n meters))\n\n Returns\n -------\n list\n list of response_json dicts\n \"\"\"\n\n # check if we're querying by polygon or by bounding box based on which\n # argument(s) where passed into this function\n by_poly = polygon is not None\n by_bbox = not (north is None or south is None or east is None or west is None)\n if not (by_poly or by_bbox):\n raise ValueError('You must pass a polygon or north, south, east, and west')\n\n response_jsons = []\n\n # pass server memory allocation in bytes for the query to the API\n # if None, pass nothing so the server will use its default allocation size\n # otherwise, define the query's maxsize parameter value as whatever the\n # caller passed in\n if memory is None:\n maxsize = ''\n else:\n maxsize = '[maxsize:{}]'.format(memory)\n\n # define the query to send the API\n if by_bbox:\n # turn bbox into a polygon and project to local UTM\n polygon = Polygon([(west, south), (east, south), (east, north), (west, north)])\n geometry_proj, crs_proj = project_geometry(polygon)\n\n # subdivide it if it exceeds the max area size (in meters), then project\n # back to lat-long\n geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(geometry_proj, max_query_area_size=max_query_area_size)\n geometry, _ = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True)\n log('Requesting footprints data within bounding box from API in {:,} request(s)'.format(len(geometry)))\n start_time = time.time()\n\n # loop through each polygon rectangle in the geometry (there will only\n # be one if original bbox didn't exceed max area size)\n for poly in geometry:\n # represent bbox as south,west,north,east and round lat-longs to 8\n # decimal places (ie, within 1 mm) so URL strings aren't different\n # due to float rounding issues (for consistent caching)\n west, south, east, north = poly.bounds\n query_template = ('[out:json][timeout:{timeout}]{maxsize};'\n '((way[\"{footprint_type}\"]({south:.8f},{west:.8f},{north:.8f},{east:.8f});'\n '(._;>;););'\n '(relation[\"{footprint_type}\"]({south:.8f},{west:.8f},{north:.8f},{east:.8f});'\n '(._;>;);););out;')\n query_str = query_template.format(north=north, south=south, east=east, west=west, timeout=timeout,\n maxsize=maxsize, footprint_type=footprint_type)\n response_json = overpass_request(data={'data':query_str}, timeout=timeout)\n response_jsons.append(response_json)\n msg = ('Got all footprint data within bounding box from '\n 'API in {:,} request(s) and {:,.2f} seconds')\n log(msg.format(len(geometry), time.time()-start_time))\n\n elif by_poly:\n # project to utm, divide polygon up into sub-polygons if area exceeds a\n # max size (in meters), project back to lat-long, then get a list of polygon(s) exterior coordinates\n geometry_proj, crs_proj = project_geometry(polygon)\n geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(geometry_proj, max_query_area_size=max_query_area_size)\n geometry, _ = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True)\n polygon_coord_strs = get_polygons_coordinates(geometry)\n log('Requesting footprint data within polygon from API in {:,} request(s)'.format(len(polygon_coord_strs)))\n start_time = time.time()\n\n # 
pass each polygon exterior coordinates in the list to the API, one at\n # a time\n for polygon_coord_str in polygon_coord_strs:\n query_template = ('[out:json][timeout:{timeout}]{maxsize};('\n 'way(poly:\"{polygon}\")[\"{footprint_type}\"];(._;>;);'\n 'relation(poly:\"{polygon}\")[\"{footprint_type}\"];(._;>;););out;')\n query_str = query_template.format(polygon=polygon_coord_str, timeout=timeout, maxsize=maxsize,\n footprint_type=footprint_type)\n response_json = overpass_request(data={'data':query_str}, timeout=timeout)\n response_jsons.append(response_json)\n msg = ('Got all footprint data within polygon from API in '\n '{:,} request(s) and {:,.2f} seconds')\n log(msg.format(len(polygon_coord_strs), time.time()-start_time))\n\n return response_jsons\n\n\ndef create_footprints_gdf(polygon=None, north=None, south=None, east=None, west=None,\n footprint_type='building', retain_invalid=False, responses=None):\n \"\"\"\n Get footprint (polygon) data from OSM and convert it into a GeoDataFrame.\n\n Parameters\n ----------\n polygon : shapely Polygon or MultiPolygon\n geographic shape to fetch the footprints within\n north : float\n northern latitude of bounding box\n south : float\n southern latitude of bounding box\n east : float\n eastern longitude of bounding box\n west : float\n western longitude of bounding box\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n retain_invalid : bool\n if False discard any footprints with an invalid geometry\n responses : list\n list of response jsons\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n # allow pickling between downloading footprints and converting them to a GeoDataFrame\n if responses is None:\n responses = osm_footprints_download(polygon, north, south, east, west, footprint_type)\n\n # parse the list of responses into separate dicts of vertices, footprints and relations\n # create a set of ways not directly tagged with footprint_type\n vertices, footprints, relations, untagged_ways = responses_to_dicts(responses, footprint_type)\n\n # create simple Shapely geometries (Polygon or LineString) for all of the ways in footprints\n for footprint_key, footprint_val in footprints.items():\n footprint_val['geometry'] = create_footprint_geometry(footprint_key, footprint_val, vertices)\n\n # create a complex Shapely Polygon or MultiPolygon for each relation\n for relation_key, relation_val in relations.items():\n relation_val['geometry'] = create_relation_geometry(relation_key, relation_val, footprints)\n \n # merge relations into the footprints dictionary\n footprints.update(relations)\n\n # delete supporting geometry not directly tagged with footprint_type from the footprints dictionary\n for untagged_way in untagged_ways:\n try:\n del footprints[untagged_way]\n except KeyError:\n log('untagged_way {} not found in footprints dict'.format(untagged_way))\n\n # Convert footprints dictionary to a GeoDataFrame\n gdf = gpd.GeoDataFrame.from_dict(footprints, orient='index')\n gdf.crs = settings.default_crs\n\n # filter the gdf to only include valid Polygons or MultiPolygons\n if not retain_invalid: \n filter1 = gdf['geometry'].is_valid\n filter2 = (gdf['geometry'].geom_type == 'Polygon') | (gdf['geometry'].geom_type == 'MultiPolygon')\n filter_combined = filter1 & filter2\n gdf = gdf[filter_combined]\n \n return gdf\n\n\ndef responses_to_dicts(responses, footprint_type):\n \"\"\"\n Parse a list of json responses into dictionaries of vertices, footprints, and relations.\n\n Note: OSM's data 
model and the Overpass API will return open ways (lines) as part of\n a 'polygon' query. These may be fragments of the inner and outer rings of relations or\n they may be open ways mistakenly tagged with 'polygon' type tags.\n\n Ways not directly tagged with the footprint type are added to the untagged_ways set for\n removal from the footprints dictionary at the end of the process.\n\n Some inner ways of relations may be tagged with the footprint type in their own right e.g.\n landuse=meadow as an inner way in a landuse=forest relation and need to be kept. These are\n created here.\n\n Parameters\n ----------\n responses : list\n list of json responses\n footprint_type : string\n type of footprint downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n\n Returns\n -------\n vertices\n dictionary of OSM nodes including their lat, lon coordinates\n footprints\n dictionary of OSM ways including their nodes and tags\n relations\n dictionary of OSM relations including member ids and tags\n untagged_footprints\n set of ids for ways or relations not directly tagged with footprint_type\n \"\"\"\n # create dictionaries to hold vertices, footprints and relations\n vertices = {}\n footprints = {}\n relations = {}\n # create a set to hold the ids of ways not directly tagged as footprint_type\n untagged_footprints = set()\n\n # loop through each response once adding each element to one of the dicts\n for response in responses:\n for element in response['elements']:\n # NODES - only keep coordinates\n if 'type' in element and element['type']=='node':\n vertices[element['id']] = {'lat' : element['lat'],\n 'lon' : element['lon']}\n # WAYS - both open and closed\n elif 'type' in element and element['type']=='way':\n footprint = {'nodes' : element['nodes']}\n if 'tags' in element:\n for tag in element['tags']:\n footprint[tag] = element['tags'][tag]\n footprints[element['id']] = footprint\n # add ways not individually tagged with footprint_type to the untagged_footprints set\n if ('tags' not in element) or (footprint_type not in element['tags']):\n untagged_footprints.add(element['id'])\n # RELATIONS\n elif 'type' in element and element['type']=='relation':\n relation = {'members' : {}}\n for member in element['members']:\n if 'type' in member and member['type']=='way':\n relation['members'].update({member['ref']:member.get('role')})\n if 'tags' in element:\n for tag in element['tags']:\n relation[tag] = element['tags'][tag]\n relations[element['id']] = relation\n # add relations not individually tagged with footprint_type to the untagged_footprints set\n if ('tags' not in element) or (footprint_type not in element['tags']):\n untagged_footprints.add(element['id'])\n # Log any other Elements found in the response\n else:\n log('Element {} is not a node, way or relation'.format(element['id']))\n\n return vertices, footprints, relations, untagged_footprints\n\n\ndef create_footprint_geometry(footprint_key, footprint_val, vertices):\n \"\"\"\n Create Shapely geometry for open or closed ways in the initial footprints dictionary.\n\n Closed ways are converted directly to Shapely Polygons, open ways (fragments that will\n form the outer and inner rings of relations) are converted to LineStrings.\n\n Parameters\n ----------\n footprint_key : int\n the id of the way/footprint to process\n footprint_val : dict\n the nodes and tags of the footprint\n vertices : dict\n the dictionary of OSM nodes with their coordinates\n\n Returns\n -------\n Shapely Polygon or LineString\n \"\"\"\n # CLOSED WAYS\n if 
footprint_val['nodes'][0] == footprint_val['nodes'][-1]:\n try:\n footprint_geometry = Polygon([(vertices[node]['lon'], vertices[node]['lat']) for node in footprint_val['nodes']])\n except Exception:\n log('Polygon has invalid geometry: {}'.format(footprint_key))\n # OPEN WAYS \n else:\n try:\n footprint_geometry = LineString([(vertices[node]['lon'], vertices[node]['lat']) for node in footprint_val['nodes']])\n except Exception:\n log('LineString has invalid geometry: {}'.format(footprint_key))\n\n return footprint_geometry\n\n\ndef create_relation_geometry(relation_key, relation_val, footprints):\n \"\"\"\n Create Shapely geometry for relations - Polygons with holes or MultiPolygons\n\n OSM relations are used to define complex polygons - polygons with holes or\n multi-polygons. The polygons' outer and inner rings may be made up of chains\n of LineStrings. https://wiki.openstreetmap.org/wiki/Relation:multipolygon \n requires that multipolygon rings have an outer or inner 'role'.\n \n OSM's data model allows a polygon type tag e.g. 'building' to be added to \n any OSM element. This can include non-polygon relations e.g. bus routes.\n Relations that do not have at least one closed ring with an outer role \n are filtered out.\n\n Inner rings that are tagged with the footprint type in their own right e.g.\n landuse=meadow as an inner ring of landuse=forest will have been included in\n the footprints dictionary as part of the original parsing and are not dealt\n with here.\n\n Parameters\n ----------\n relation_key : int\n the id of the relation to process\n relation_val : dict\n members and tags of the relation\n footprints : dictionary\n dictionary of all footprints (including open and closed ways)\n\n Returns\n -------\n Shapely Polygon or MultiPolygon\n \"\"\"\n\n # create empty lists to hold member geometries\n multipoly = []\n outer_polys = []\n outer_lines = []\n inner_polys = []\n inner_lines = []\n\n # add each members geometry to a list according to its role and geometry type\n for member_id, member_role in relation_val['members'].items():\n if member_role == 'outer':\n if footprints[member_id]['geometry'].geom_type == 'Polygon':\n outer_polys.append(footprints[member_id]['geometry'])\n elif footprints[member_id]['geometry'].geom_type == 'LineString':\n outer_lines.append(footprints[member_id]['geometry'])\n elif member_role == 'inner':\n if footprints[member_id]['geometry'].geom_type == 'Polygon':\n inner_polys.append(footprints[member_id]['geometry'])\n elif footprints[member_id]['geometry'].geom_type == 'LineString':\n inner_lines.append(footprints[member_id]['geometry'])\n\n # try to polygonize open outer ways and concatenate them to outer_polys\n if len(outer_lines) > 0:\n try:\n result = list(polygonize(outer_lines))\n except Exception:\n log(\"polygonize failed for 'outer' ways in relation: {}\".format(relation_key))\n else:\n outer_polys += result\n\n # try to polygonize open inner ways and concatenate them to inner_polys\n if len(inner_lines) > 0:\n try:\n result = list(polygonize(inner_lines))\n except Exception:\n log(\"polygonize failed for 'inner' ways in relation: {}\".format(relation_key))\n else:\n inner_polys += result\n\n # filter out relations missing both 'outer' and 'inner' polygons or just 'outer'\n if len(outer_polys + inner_polys) == 0:\n log(\"Relation {} missing 'outer' and 'inner' closed ways\".format(relation_key))\n elif len(outer_polys) == 0:\n log(\"Relation {} missing 'outer' closed ways\".format(relation_key))\n # process the others to multipolygons\n 
else:\n for outer_poly in outer_polys:\n temp_poly = outer_poly\n for inner_poly in inner_polys:\n if inner_poly.within(outer_poly):\n temp_poly=temp_poly.difference(inner_poly)\n multipoly.append(temp_poly)\n\n # return relations with one outer way as Polygons, multiple outer ways as MultiPolygons\n if len(multipoly) == 1:\n return multipoly[0]\n elif len(multipoly) > 1: \n return MultiPolygon(multipoly)\n else:\n log('relation {} could not be converted to a complex footprint'.format(relation_key))\n\n\ndef footprints_from_point(point, distance, footprint_type='building', retain_invalid=False):\n \"\"\"\n Get footprints within some distance north, south, east, and west of\n a lat-long point.\n\n Parameters\n ----------\n point : tuple\n a lat-long point\n distance : numeric\n distance in meters\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n retain_invalid : bool\n if False discard any footprints with an invalid geometry\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n\n bbox = bbox_from_point(point=point, distance=distance)\n north, south, east, west = bbox\n return create_footprints_gdf(north=north, south=south, east=east, west=west,\n footprint_type=footprint_type, retain_invalid=retain_invalid)\n\n\ndef footprints_from_address(address, distance, footprint_type='building', retain_invalid=False):\n \"\"\"\n Get footprints within some distance north, south, east, and west of\n an address.\n\n Parameters\n ----------\n address : string\n the address to geocode to a lat-long point\n distance : numeric\n distance in meters\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n retain_invalid : bool\n if False discard any footprints with an invalid geometry\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n\n # geocode the address string to a (lat, lon) point\n point = geocode(query=address)\n\n # get footprints within distance of this point\n return footprints_from_point(point, distance, footprint_type=footprint_type,\n retain_invalid=retain_invalid)\n\n\ndef footprints_from_polygon(polygon, footprint_type='building', retain_invalid=False):\n \"\"\"\n Get footprints within some polygon.\n\n Parameters\n ----------\n polygon : shapely Polygon or MultiPolygon\n the shape to get data within. coordinates should be in units of\n latitude-longitude degrees.\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.\n retain_invalid : bool\n if False discard any footprints with an invalid geometry\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n\n return create_footprints_gdf(polygon=polygon, footprint_type=footprint_type,\n retain_invalid=retain_invalid)\n\n\ndef footprints_from_place(place, footprint_type='building', retain_invalid=False, which_result=1):\n \"\"\"\n Get footprints within the boundaries of some place.\n\n The query must be geocodable and OSM must have polygon boundaries for the\n geocode result. If OSM does not have a polygon for this place, you can\n instead get its footprints using the footprints_from_address function, which\n geocodes the place name to a point and gets the footprints within some distance\n of that point.\n\n Parameters\n ----------\n place : string\n the query to geocode to get geojson boundary polygon\n footprint_type : string\n type of footprint to be downloaded. OSM tag key e.g. 
'building', 'landuse', 'place', etc.\n retain_invalid : bool\n if False discard any footprints with an invalid geometry\n which_result : int\n max number of results to return and which to process upon receipt\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n\n city = gdf_from_place(place, which_result=which_result)\n polygon = city['geometry'].iloc[0]\n return create_footprints_gdf(polygon, retain_invalid=retain_invalid,\n footprint_type=footprint_type)\n\n\ndef plot_footprints(gdf, fig=None, ax=None, figsize=None, color='#333333', bgcolor='w',\n set_bounds=True, bbox=None, save=False, show=True, close=False,\n filename='image', file_format='png', dpi=600):\n \"\"\"\n Plot a GeoDataFrame of footprints.\n\n Parameters\n ----------\n gdf : GeoDataFrame\n footprints\n fig : figure\n ax : axis\n figsize : tuple\n color : string\n the color of the footprints\n bgcolor : string\n the background color of the plot\n set_bounds : bool\n if True, set bounds from either passed-in bbox or the spatial extent of the gdf\n bbox : tuple\n if True and if set_bounds is True, set the display bounds to this bbox\n save : bool\n whether to save the figure to disk or not\n show : bool\n whether to display the figure or not\n close : bool\n close the figure (only if show equals False) to prevent display\n filename : string\n the name of the file to save\n file_format : string\n the format of the file to save (e.g., 'jpg', 'png', 'svg')\n dpi : int\n the resolution of the image file if saving\n\n Returns\n -------\n fig, ax : tuple\n\n \"\"\"\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(figsize=figsize, facecolor=bgcolor)\n ax.set_facecolor(bgcolor)\n\n # extract each polygon as a descartes patch, and add to a matplotlib patch\n # collection\n patches = []\n for geometry in gdf['geometry']:\n if isinstance(geometry, Polygon):\n patches.append(PolygonPatch(geometry))\n elif isinstance(geometry, MultiPolygon):\n for subpolygon in geometry: #if geometry is multipolygon, go through each constituent subpolygon\n patches.append(PolygonPatch(subpolygon))\n pc = PatchCollection(patches, facecolor=color, edgecolor=color, linewidth=0, alpha=1)\n ax.add_collection(pc)\n\n if set_bounds:\n if bbox is None:\n # set the figure bounds to the polygons' bounds\n left, bottom, right, top = gdf.total_bounds\n else:\n top, bottom, right, left = bbox\n ax.set_xlim((left, right))\n ax.set_ylim((bottom, top))\n\n # turn off the axis display set the margins to zero and point the ticks in\n # so there's no space around the plot\n ax.axis('off')\n ax.margins(0)\n ax.tick_params(which='both', direction='in')\n fig.canvas.draw()\n\n # make everything square\n ax.set_aspect('equal')\n fig.canvas.draw()\n\n fig, ax = save_and_show(fig=fig, ax=ax, save=save, show=show, close=close,\n filename=filename, file_format=file_format, dpi=dpi, axis_off=True)\n\n return fig, ax\n"
] | [
[
"numpy.minimum",
"numpy.sqrt",
"numpy.unique",
"numpy.cos",
"numpy.sin",
"numpy.deg2rad"
],
[
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
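The osmnx record above includes a vectorized haversine distance (great_circle_vec). The following standalone sketch reimplements the same steps for a quick numeric check; it is not an import from osmnx, and the example coordinates are illustrative only.

# Sketch only: haversine great-circle distance in metres, following the same
# steps as great_circle_vec in the record (deg2rad, haversine term, clamp,
# arcsin, scale by earth radius).
import numpy as np

def great_circle(lat1, lng1, lat2, lng2, earth_radius=6371009):
    phi1, phi2 = np.deg2rad(lat1), np.deg2rad(lat2)
    d_phi = phi2 - phi1
    d_theta = np.deg2rad(lng2) - np.deg2rad(lng1)
    h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2
    h = np.minimum(1.0, h)          # guard against floating-point overshoot
    return 2 * np.arcsin(np.sqrt(h)) * earth_radius

# two points on the same meridian, roughly 1 km apart
print(great_circle(52.5200, 13.4050, 52.5290, 13.4050))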
bert9bert/statsmodels | [
"898ddfc483c45bb0f8e5156dd8506abda84c9b63",
"898ddfc483c45bb0f8e5156dd8506abda84c9b63",
"898ddfc483c45bb0f8e5156dd8506abda84c9b63"
] | [
"statsmodels/discrete/discrete_model.py",
"statsmodels/genmod/generalized_linear_model.py",
"statsmodels/robust/robust_linear_model.py"
] | [
"\"\"\"\nLimited dependent variable and qualitative variables.\n\nIncludes binary outcomes, count data, (ordered) ordinal data and limited\ndependent variables.\n\nGeneral References\n--------------------\n\nA.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.\n Cambridge, 1998\n\nG.S. Madalla. `Limited-Dependent and Qualitative Variables in Econometrics`.\n Cambridge, 1983.\n\nW. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.\n\"\"\"\nfrom __future__ import division\n\n__all__ = [\"Poisson\", \"Logit\", \"Probit\", \"MNLogit\", \"NegativeBinomial\"]\n\nfrom statsmodels.compat.python import lmap, lzip, range\nimport numpy as np\nfrom scipy.special import gammaln\nfrom scipy import stats, special, optimize # opt just for nbin\nimport statsmodels.tools.tools as tools\nfrom statsmodels.tools import data as data_tools\nfrom statsmodels.tools.decorators import (resettable_cache,\n cache_readonly)\nfrom statsmodels.regression.linear_model import OLS\nfrom scipy import stats, special, optimize # opt just for nbin\nfrom scipy.stats import nbinom\nfrom statsmodels.tools.sm_exceptions import PerfectSeparationError\nfrom statsmodels.tools.numdiff import (approx_fprime, approx_hess,\n approx_hess_cs, approx_fprime_cs)\nimport statsmodels.base.model as base\nfrom statsmodels.base.data import handle_data # for mnlogit\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.compat.numpy import np_matrix_rank\nfrom pandas.core.api import get_dummies\n\nfrom statsmodels.base.l1_slsqp import fit_l1_slsqp\ntry:\n import cvxopt\n have_cvxopt = True\nexcept ImportError:\n have_cvxopt = False\n\n#TODO: When we eventually get user-settable precision, we need to change\n# this\nFLOAT_EPS = np.finfo(float).eps\n\n#TODO: add options for the parameter covariance/variance\n# ie., OIM, EIM, and BHHH see Green 21.4\n\n_discrete_models_docs = \"\"\"\n\"\"\"\n\n_discrete_results_docs = \"\"\"\n %(one_line_description)s\n\n Parameters\n ----------\n model : A DiscreteModel instance\n params : array-like\n The parameters of a fitted model.\n hessian : array-like\n The hessian of the fitted model.\n scale : float\n A scale parameter for the covariance matrix.\n\n Returns\n -------\n *Attributes*\n\n aic : float\n Akaike information criterion. `-2*(llf - p)` where `p` is the number\n of regressors including the intercept.\n bic : float\n Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the\n number of regressors including the intercept.\n bse : array\n The standard errors of the coefficients.\n df_resid : float\n See model definition.\n df_model : float\n See model definition.\n fitted_values : array\n Linear predictor XB.\n llf : float\n Value of the loglikelihood\n llnull : float\n Value of the constant-only loglikelihood\n llr : float\n Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`\n llr_pvalue : float\n The chi-squared probability of getting a log-likelihood ratio\n statistic greater than llr. llr has a chi-squared distribution\n with degrees of freedom `df_model`.\n prsquared : float\n McFadden's pseudo-R-squared. `1 - (llf / llnull)`\n%(extra_attr)s\"\"\"\n\n_l1_results_attr = \"\"\" nnz_params : Integer\n The number of nonzero parameters in the model. 
Train with\n trim_params == True or else numerical error will distort this.\n trimmed : Boolean array\n trimmed[i] == True if the ith parameter was trimmed from the model.\"\"\"\n\n\n# helper for MNLogit (will be generally useful later)\n\ndef _numpy_to_dummies(endog):\n if endog.dtype.kind in ['S', 'O']:\n endog_dummies, ynames = tools.categorical(endog, drop=True,\n dictnames=True)\n elif endog.ndim == 2:\n endog_dummies = endog\n ynames = range(endog.shape[1])\n else:\n endog_dummies, ynames = tools.categorical(endog, drop=True,\n dictnames=True)\n return endog_dummies, ynames\n\n\ndef _pandas_to_dummies(endog):\n if endog.ndim == 2:\n if endog.shape[1] == 1:\n yname = endog.columns[0]\n endog_dummies = get_dummies(endog.iloc[:, 0])\n else: # series\n yname = 'y'\n endog_dummies = endog\n else:\n yname = endog.name\n endog_dummies = get_dummies(endog)\n ynames = endog_dummies.columns.tolist()\n\n return endog_dummies, ynames, yname\n\n\n#### Private Model Classes ####\n\n\nclass DiscreteModel(base.LikelihoodModel):\n \"\"\"\n Abstract class for discrete choice models.\n\n This class does not do anything itself but lays out the methods and\n call signature expected of child classes in addition to those of\n statsmodels.model.LikelihoodModel.\n \"\"\"\n def __init__(self, endog, exog, **kwargs):\n super(DiscreteModel, self).__init__(endog, exog, **kwargs)\n self.raise_on_perfect_prediction = True\n\n def initialize(self):\n \"\"\"\n Initialize is called by\n statsmodels.model.LikelihoodModel.__init__\n and should contain any preprocessing that needs to be done for a model.\n \"\"\"\n # assumes constant\n self.df_model = float(np_matrix_rank(self.exog) - 1)\n self.df_resid = (float(self.exog.shape[0] -\n np_matrix_rank(self.exog)))\n\n def cdf(self, X):\n \"\"\"\n The cumulative distribution function of the model.\n \"\"\"\n raise NotImplementedError\n\n def pdf(self, X):\n \"\"\"\n The probability density (mass) function of the model.\n \"\"\"\n raise NotImplementedError\n\n def _check_perfect_pred(self, params, *args):\n endog = self.endog\n fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))\n if (self.raise_on_perfect_prediction and\n np.allclose(fittedvalues - endog, 0)):\n msg = \"Perfect separation detected, results not available\"\n raise PerfectSeparationError(msg)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n \"\"\"\n Fit the model using maximum likelihood.\n\n The rest of the docstring is from\n statsmodels.base.model.LikelihoodModel.fit\n \"\"\"\n if callback is None:\n callback = self._check_perfect_pred\n else:\n pass # make a function factory to have multiple call-backs\n\n mlefit = super(DiscreteModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n return mlefit # up to subclasses to wrap results\n\n fit.__doc__ += base.LikelihoodModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=True,\n callback=None, alpha=0, trim_mode='auto',\n auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,\n qc_verbose=False, **kwargs):\n \"\"\"\n Fit the model using a regularized maximum likelihood.\n The regularization method AND the solver used is determined by the\n argument method.\n\n Parameters\n ----------\n start_params : array-like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is 
an array of zeros.\n method : 'l1' or 'l1_cvxopt_cp'\n See notes for details.\n maxiter : Integer or 'defined_by_method'\n Maximum number of iterations to perform.\n If 'defined_by_method', then use method defaults (see notes).\n full_output : bool\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : bool\n Set to True to print convergence messages.\n fargs : tuple\n Extra arguments passed to the likelihood function, i.e.,\n loglike(x,*args)\n callback : callable callback(xk)\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n retall : bool\n Set to True to return list of solutions at each iteration.\n Available in Results object's mle_retvals attribute.\n alpha : non-negative scalar or numpy array (same size as parameters)\n The weight multiplying the l1 penalty term\n trim_mode : 'auto, 'size', or 'off'\n If not 'off', trim (set to zero) parameters that would have been\n zero if the solver reached the theoretical minimum.\n If 'auto', trim params using the Theory above.\n If 'size', trim params if they have very small absolute value\n size_trim_tol : float or 'auto' (default = 'auto')\n For use when trim_mode == 'size'\n auto_trim_tol : float\n For sue when trim_mode == 'auto'. Use\n qc_tol : float\n Print warning and don't allow auto trim when (ii) (above) is\n violated by this much.\n qc_verbose : Boolean\n If true, print out a full QC report upon failure\n\n Notes\n -----\n Extra parameters are not penalized if alpha is given as a scalar.\n An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.\n\n Optional arguments for the solvers (available in Results.mle_settings)::\n\n 'l1'\n acc : float (default 1e-6)\n Requested accuracy as used by slsqp\n 'l1_cvxopt_cp'\n abstol : float\n absolute accuracy (default: 1e-7).\n reltol : float\n relative accuracy (default: 1e-6).\n feastol : float\n tolerance for feasibility conditions (default: 1e-7).\n refinement : int\n number of iterative refinement steps when solving KKT\n equations (default: 1).\n\n\n Optimization methodology\n\n With :math:`L` the negative log likelihood, we solve the convex but\n non-smooth problem\n\n .. math:: \\\\min_\\\\beta L(\\\\beta) + \\\\sum_k\\\\alpha_k |\\\\beta_k|\n\n via the transformation to the smooth, convex, constrained problem\n in twice as many variables (adding the \"added variables\" :math:`u_k`)\n\n .. math:: \\\\min_{\\\\beta,u} L(\\\\beta) + \\\\sum_k\\\\alpha_k u_k,\n\n subject to\n\n .. math:: -u_k \\\\leq \\\\beta_k \\\\leq u_k.\n\n With :math:`\\\\partial_k L` the derivative of :math:`L` in the\n :math:`k^{th}` parameter direction, theory dictates that, at the\n minimum, exactly one of two conditions holds:\n\n (i) :math:`|\\\\partial_k L| = \\\\alpha_k` and :math:`\\\\beta_k \\\\neq 0`\n (ii) :math:`|\\\\partial_k L| \\\\leq \\\\alpha_k` and :math:`\\\\beta_k = 0`\n\n \"\"\"\n ### Set attributes based on method\n if method in ['l1', 'l1_cvxopt_cp']:\n cov_params_func = self.cov_params_func_l1\n else:\n raise Exception(\"argument method == %s, which is not handled\"\n % method)\n\n ### Bundle up extra kwargs for the dictionary kwargs. 
These are\n ### passed through super(...).fit() as kwargs and unpacked at\n ### appropriate times\n alpha = np.array(alpha)\n assert alpha.min() >= 0\n try:\n kwargs['alpha'] = alpha\n except TypeError:\n kwargs = dict(alpha=alpha)\n kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])\n kwargs['trim_mode'] = trim_mode\n kwargs['size_trim_tol'] = size_trim_tol\n kwargs['auto_trim_tol'] = auto_trim_tol\n kwargs['qc_tol'] = qc_tol\n kwargs['qc_verbose'] = qc_verbose\n\n ### Define default keyword arguments to be passed to super(...).fit()\n if maxiter == 'defined_by_method':\n if method == 'l1':\n maxiter = 1000\n elif method == 'l1_cvxopt_cp':\n maxiter = 70\n\n ## Parameters to pass to super(...).fit()\n # For the 'extra' parameters, pass all that are available,\n # even if we know (at this point) we will only use one.\n extra_fit_funcs = {'l1': fit_l1_slsqp}\n if have_cvxopt and method == 'l1_cvxopt_cp':\n from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp\n extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp\n elif method.lower() == 'l1_cvxopt_cp':\n message = (\"Attempt to use l1_cvxopt_cp failed since cvxopt \"\n \"could not be imported\")\n\n if callback is None:\n callback = self._check_perfect_pred\n else:\n pass # make a function factory to have multiple call-backs\n\n mlefit = super(DiscreteModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs,\n cov_params_func=cov_params_func, **kwargs)\n\n return mlefit # up to subclasses to wrap results\n\n def cov_params_func_l1(self, likelihood_model, xopt, retvals):\n \"\"\"\n Computes cov_params on a reduced parameter space\n corresponding to the nonzero parameters resulting from the\n l1 regularized fit.\n\n Returns a full cov_params matrix, with entries corresponding\n to zero'd values set to np.nan.\n \"\"\"\n H = likelihood_model.hessian(xopt)\n trimmed = retvals['trimmed']\n nz_idx = np.nonzero(trimmed == False)[0]\n nnz_params = (trimmed == False).sum()\n if nnz_params > 0:\n H_restricted = H[nz_idx[:, None], nz_idx]\n # Covariance estimate for the nonzero params\n H_restricted_inv = np.linalg.inv(-H_restricted)\n else:\n H_restricted_inv = np.zeros(0)\n\n cov_params = np.nan * np.ones(H.shape)\n cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv\n\n return cov_params\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n \"\"\"\n raise NotImplementedError\n\n def _derivative_exog(self, params, exog=None, dummy_idx=None,\n count_idx=None):\n \"\"\"\n This should implement the derivative of the non-linear function\n \"\"\"\n raise NotImplementedError\n\nclass BinaryModel(DiscreteModel):\n\n def __init__(self, endog, exog, **kwargs):\n super(BinaryModel, self).__init__(endog, exog, **kwargs)\n if (not issubclass(self.__class__, MultinomialModel) and\n not np.all((self.endog >= 0) & (self.endog <= 1))):\n raise ValueError(\"endog must be in the unit interval.\")\n\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n\n Parameters\n ----------\n params : array-like\n Fitted parameters of the model.\n exog : array-like\n 1d or 2d array of exogenous values. If not supplied, the\n whole exog attribute of the model is used.\n linear : bool, optional\n If True, returns the linear predictor dot(exog,params). 
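# --- Illustrative sketch (not part of the original source) -----------------
# Hedged example of the two prediction modes described here, using Logit
# (a BinaryModel subclass); the simulated data and variable names are
# invented purely for illustration:
#
#   >>> import numpy as np
#   >>> import statsmodels.api as sm
#   >>> rng = np.random.RandomState(0)
#   >>> X = sm.add_constant(rng.normal(size=(100, 2)))
#   >>> y = (np.dot(X, [0.5, 1.0, -1.0]) + rng.logistic(size=100) > 0).astype(float)
#   >>> res = sm.Logit(y, X).fit(disp=0)
#   >>> xb = res.model.predict(res.params, X, linear=True)   # dot(exog, params)
#   >>> p = res.model.predict(res.params, X)                 # cdf(dot(exog, params))
#   >>> np.allclose(p, 1 / (1 + np.exp(-xb)))
#   True
# ---------------------------------------------------------------------------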
Else,\n returns the value of the cdf at the linear predictor.\n\n Returns\n -------\n array\n Fitted values at exog.\n \"\"\"\n if exog is None:\n exog = self.exog\n if not linear:\n return self.cdf(np.dot(exog, params))\n else:\n return np.dot(exog, params)\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n bnryfit = super(BinaryModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1BinaryResults(self, bnryfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1BinaryResultsWrapper(discretefit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predict.\n\n Transform can be 'dydx' or 'eydx'. Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n dF = self.pdf(np.dot(exog, params))[:,None] * exog\n if 'ey' in transform:\n dF /= self.predict(params, exog)[:,None]\n return dF\n\n def _derivative_exog(self, params, exog=None, transform='dydx',\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects returns dF(XB) / dX where F(.) 
is\n the predicted probabilities\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n \"\"\"\n #note, this form should be appropriate for\n ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit\n if exog is None:\n exog = self.exog\n margeff = np.dot(self.pdf(np.dot(exog, params))[:,None],\n params[None,:])\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None]\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff\n\nclass MultinomialModel(BinaryModel):\n\n def _handle_data(self, endog, exog, missing, hasconst, **kwargs):\n if data_tools._is_using_ndarray_type(endog, None):\n endog_dummies, ynames = _numpy_to_dummies(endog)\n yname = 'y'\n elif data_tools._is_using_pandas(endog, None):\n endog_dummies, ynames, yname = _pandas_to_dummies(endog)\n else:\n endog = np.asarray(endog)\n endog_dummies, ynames = _numpy_to_dummies(endog)\n yname = 'y'\n\n if not isinstance(ynames, dict):\n ynames = dict(zip(range(endog_dummies.shape[1]), ynames))\n\n self._ynames_map = ynames\n data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)\n data.ynames = yname # overwrite this to single endog name\n data.orig_endog = endog\n self.wendog = data.endog\n\n # repeating from upstream...\n for key in kwargs:\n try:\n setattr(self, key, data.__dict__.pop(key))\n except KeyError:\n pass\n return data\n\n def initialize(self):\n \"\"\"\n Preprocesses the data for MNLogit.\n \"\"\"\n super(MultinomialModel, self).initialize()\n # This is also a \"whiten\" method in other models (eg regression)\n self.endog = self.endog.argmax(1) # turn it into an array of col idx\n self.J = self.wendog.shape[1]\n self.K = self.exog.shape[1]\n self.df_model *= (self.J-1) # for each J - 1 equation.\n self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)\n\n def predict(self, params, exog=None, linear=False):\n \"\"\"\n Predict response variable of a model given exogenous variables.\n\n Parameters\n ----------\n params : array-like\n 2d array of fitted parameters of the model. Should be in the\n order returned from the model.\n exog : array-like\n 1d or 2d array of exogenous values. If not supplied, the\n whole exog attribute of the model is used. If a 1d array is given\n it assumed to be 1 row of exogenous variables. If you only have\n one regressor and would like to do prediction, you must provide\n a 2d array with shape[1] == 1.\n linear : bool, optional\n If True, returns the linear predictor dot(exog,params). 
Else,\n returns the value of the cdf at the linear predictor.\n\n Notes\n -----\n Column 0 is the base case, the rest conform to the rows of params\n shifted up one for the base case.\n \"\"\"\n if exog is None: # do here to accomodate user-given exog\n exog = self.exog\n if exog.ndim == 1:\n exog = exog[None]\n pred = super(MultinomialModel, self).predict(params, exog, linear)\n if linear:\n pred = np.column_stack((np.zeros(len(exog)), pred))\n return pred\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n if start_params is None:\n start_params = np.zeros((self.K * (self.J-1)))\n else:\n start_params = np.asarray(start_params)\n callback = lambda x : None # placeholder until check_perfect_pred\n # skip calling super to handle results from LikelihoodModel\n mnfit = base.LikelihoodModel.fit(self, start_params = start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n mnfit.params = mnfit.params.reshape(self.K, -1, order='F')\n mnfit = MultinomialResults(self, mnfit)\n return MultinomialResultsWrapper(mnfit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n if start_params is None:\n start_params = np.zeros((self.K * (self.J-1)))\n else:\n start_params = np.asarray(start_params)\n mnfit = DiscreteModel.fit_regularized(\n self, start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n mnfit.params = mnfit.params.reshape(self.K, -1, order='F')\n mnfit = L1MultinomialResults(self, mnfit)\n return L1MultinomialResultsWrapper(mnfit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predicted probabilities for each\n choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.\n The zero derivatives for the base category are not included.\n\n Transform can be 'dydx' or 'eydx'. 
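# --- Illustrative sketch (not part of the original source) -----------------
# Hedged MNLogit usage showing the shapes discussed above: fitted params
# come back as (K, J-1) with choice 0 as the dropped base category, and
# predict() returns an (nobs, J) array of probabilities.  The anes96
# dataset ships with statsmodels; the regressor choice is illustrative:
#
#   >>> import statsmodels.api as sm
#   >>> anes = sm.datasets.anes96.load_pandas().data
#   >>> exog = sm.add_constant(anes[['logpopul', 'age', 'educ']], prepend=False)
#   >>> res = sm.MNLogit(anes['PID'], exog).fit(disp=0)
#   >>> res.params.shape          # (K, J - 1); base category dropped
#   >>> res.predict(exog).sum(1)  # each row of probabilities sums to 1
# ---------------------------------------------------------------------------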
Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n if params.ndim == 1: # will get flatted from approx_fprime\n params = params.reshape(self.K, self.J-1, order='F')\n\n eXB = np.exp(np.dot(exog, params))\n sum_eXB = (1 + eXB.sum(1))[:,None]\n J, K = lmap(int, [self.J, self.K])\n repeat_eXB = np.repeat(eXB, J, axis=1)\n X = np.tile(exog, J-1)\n # this is the derivative wrt the base level\n F0 = -repeat_eXB * X / sum_eXB ** 2\n # this is the derivative wrt the other levels when\n # dF_j / dParams_j (ie., own equation)\n #NOTE: this computes too much, any easy way to cut down?\n F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)\n F1 = F1.transpose((1,0,2)) # put the nobs index first\n\n # other equation index\n other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)\n F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \\\n (sum_eXB**2)).transpose((1,0,2))[:, other_idx]\n dFdX = np.concatenate((F0[:, None,:], F1), axis=1)\n\n if 'ey' in transform:\n dFdX /= self.predict(params, exog)[:, :, None]\n return dFdX\n\n def _derivative_exog(self, params, exog=None, transform='dydx',\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects returns dF(XB) / dX where F(.) is\n the predicted probabilities\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n\n For Multinomial models the marginal effects are\n\n P[j] * (params[j] - sum_k P[k]*params[k])\n\n It is returned unshaped, so that each row contains each of the J\n equations. This makes it easier to take derivatives of this for\n standard errors. If you want average marginal effects you can do\n margeff.reshape(nobs, K, J, order='F).mean(0) and the marginal effects\n for choice J are in column J\n \"\"\"\n J = int(self.J) # number of alternative choices\n K = int(self.K) # number of variables\n #note, this form should be appropriate for\n ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit\n if exog is None:\n exog = self.exog\n if params.ndim == 1: # will get flatted from approx_fprime\n params = params.reshape(K, J-1, order='F')\n zeroparams = np.c_[np.zeros(K), params] # add base in\n\n cdf = self.cdf(np.dot(exog, params))\n margeff = np.array([cdf[:,[j]]* (zeroparams[:,j]-np.array([cdf[:,[i]]*\n zeroparams[:,i] for i in range(int(J))]).sum(0))\n for j in range(J)])\n margeff = np.transpose(margeff, (1,2,0))\n # swap the axes to make sure margeff are in order nobs, K, J\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None,:]\n\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff.reshape(len(exog), -1, order='F')\n\nclass CountModel(DiscreteModel):\n def __init__(self, endog, exog, offset=None, exposure=None, missing='none',\n **kwargs):\n super(CountModel, self).__init__(endog, exog, missing=missing,\n offset=offset,\n exposure=exposure, **kwargs)\n if exposure is not None:\n self.exposure = np.log(self.exposure)\n self._check_inputs(self.offset, self.exposure, self.endog)\n if offset is None:\n delattr(self, 
'offset')\n if exposure is None:\n delattr(self, 'exposure')\n\n def _check_inputs(self, offset, exposure, endog):\n if offset is not None and offset.shape[0] != endog.shape[0]:\n raise ValueError(\"offset is not the same length as endog\")\n\n if exposure is not None and exposure.shape[0] != endog.shape[0]:\n raise ValueError(\"exposure is not the same length as endog\")\n\n def _get_init_kwds(self):\n # this is a temporary fixup because exposure has been transformed\n # see #1609\n kwds = super(CountModel, self)._get_init_kwds()\n if 'exposure' in kwds and kwds['exposure'] is not None:\n kwds['exposure'] = np.exp(kwds['exposure'])\n return kwds\n\n def predict(self, params, exog=None, exposure=None, offset=None,\n linear=False):\n \"\"\"\n Predict response variable of a count model given exogenous variables.\n\n Notes\n -----\n If exposure is specified, then it will be logged by the method.\n The user does not need to log it first.\n \"\"\"\n #TODO: add offset tp\n if exog is None:\n exog = self.exog\n offset = getattr(self, 'offset', 0)\n exposure = getattr(self, 'exposure', 0)\n\n else:\n if exposure is None:\n exposure = 0\n else:\n exposure = np.log(exposure)\n if offset is None:\n offset = 0\n\n if not linear:\n return np.exp(np.dot(exog, params[:exog.shape[1]]) + exposure + offset) # not cdf\n else:\n return np.dot(exog, params[:exog.shape[1]]) + exposure + offset\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"\n For computing marginal effects standard errors.\n\n This is used only in the case of discrete and count regressors to\n get the variance-covariance of the marginal effects. It returns\n [d F / d params] where F is the predict.\n\n Transform can be 'dydx' or 'eydx'. Checking is done in margeff\n computations for appropriate transform.\n \"\"\"\n if exog is None:\n exog = self.exog\n #NOTE: this handles offset and exposure\n dF = self.predict(params, exog)[:,None] * exog\n if 'ey' in transform:\n dF /= self.predict(params, exog)[:,None]\n return dF\n\n def _derivative_exog(self, params, exog=None, transform=\"dydx\",\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects. 
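# --- Illustrative sketch (not part of the original source) -----------------
# Hedged sketch of the exposure/offset handling documented in
# CountModel.predict above: the model stores log(exposure), and predict
# returns exp(X*beta + offset + log(exposure)) unless linear=True.
# The simulated data below is invented for illustration:
#
#   >>> import numpy as np
#   >>> import statsmodels.api as sm
#   >>> rng = np.random.RandomState(0)
#   >>> X = sm.add_constant(rng.normal(size=(200, 1)))
#   >>> exposure = rng.uniform(0.5, 2.0, size=200)       # e.g. person-years
#   >>> y = rng.poisson(exposure * np.exp(np.dot(X, [0.2, 0.5])))
#   >>> res = sm.Poisson(y, X, exposure=exposure).fit(disp=0)
#   >>> mu = res.model.predict(res.params, X, exposure=exposure)
#   >>> np.allclose(np.log(mu), np.dot(X, res.params) + np.log(exposure))
#   True
# ---------------------------------------------------------------------------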
These are the marginal effects\n d F(XB) / dX\n For the Poisson model F(XB) is the predicted counts rather than\n the probabilities.\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n \"\"\"\n # group 3 poisson, nbreg, zip, zinb\n if exog is None:\n exog = self.exog\n margeff = self.predict(params, exog)[:,None] * params[None,:]\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:,None]\n\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n cntfit = super(CountModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n discretefit = CountResults(self, cntfit)\n return CountResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1CountResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1CountResultsWrapper(discretefit)\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\nclass OrderedModel(DiscreteModel):\n pass\n\n#### Public Model Classes ####\n\nclass Poisson(CountModel):\n __doc__ = \"\"\"\n Poisson model for count data\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' :\n \"\"\"offset : array_like\n Offset is added to the linear prediction with coefficient equal to 1.\n exposure : array_like\n Log(exposure) is added to the linear prediction with coefficient\n equal to 1.\n\n \"\"\" + base._missing_param_doc}\n\n\n def cdf(self, X):\n \"\"\"\n Poisson model cumulative distribution function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the model. See notes.\n\n Returns\n -------\n The value of the Poisson CDF at each point.\n\n Notes\n -----\n The CDF is defined as\n\n .. math:: \\\\exp\\\\left(-\\\\lambda\\\\right)\\\\sum_{i=0}^{y}\\\\frac{\\\\lambda^{i}}{i!}\n\n where :math:`\\\\lambda` assumes the loglinear model. I.e.,\n\n .. 
math:: \\\\ln\\\\lambda_{i}=X\\\\beta\n\n The parameter `X` is :math:`X\\\\beta` in the above formula.\n \"\"\"\n y = self.endog\n return stats.poisson.cdf(y, np.exp(X))\n\n def pdf(self, X):\n \"\"\"\n Poisson model probability mass function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the model. See notes.\n\n Returns\n -------\n pdf : ndarray\n The value of the Poisson probability mass function, PMF, for each\n point of X.\n\n Notes\n --------\n The PMF is defined as\n\n .. math:: \\\\frac{e^{-\\\\lambda_{i}}\\\\lambda_{i}^{y_{i}}}{y_{i}!}\n\n where :math:`\\\\lambda` assumes the loglinear model. I.e.,\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n\n The parameter `X` is :math:`x_{i}\\\\beta` in the above formula.\n \"\"\"\n y = self.endog\n return np.exp(stats.poisson.logpmf(y, np.exp(X)))\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood of Poisson model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n --------\n .. math:: \\\\ln L=\\\\sum_{i=1}^{n}\\\\left[-\\\\lambda_{i}+y_{i}x_{i}^{\\\\prime}\\\\beta-\\\\ln y_{i}!\\\\right]\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n XB = np.dot(self.exog, params) + offset + exposure\n endog = self.endog\n return np.sum(-np.exp(XB) + endog*XB - gammaln(endog+1))\n\n def loglikeobs(self, params):\n \"\"\"\n Loglikelihood for observations of Poisson model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n --------\n .. 
math:: \\\\ln L_{i}=\\\\left[-\\\\lambda_{i}+y_{i}x_{i}^{\\\\prime}\\\\beta-\\\\ln y_{i}!\\\\right]\n\n for observations :math:`i=1,...,n`\n\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n XB = np.dot(self.exog, params) + offset + exposure\n endog = self.endog\n #np.sum(stats.poisson.logpmf(endog, np.exp(XB)))\n return -np.exp(XB) + endog*XB - gammaln(endog+1)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n cntfit = super(CountModel, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n if 'cov_type' in kwargs:\n cov_kwds = kwargs.get('cov_kwds', {})\n kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}\n else:\n kwds = {}\n discretefit = PoissonResults(self, cntfit, **kwds)\n return PoissonResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1PoissonResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n return L1PoissonResultsWrapper(discretefit)\n\n fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__\n\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"fit the model subject to linear equality constraints\n\n The constraints are of the form `R params = q`\n where R is the constraint_matrix and q is the vector of\n constraint_values.\n\n The estimation creates a new model with transformed design matrix,\n exog, and converts the results back to the original parameterization.\n\n Parameters\n ----------\n constraints : formula expression or tuple\n If it is a tuple, then the constraint needs to be given by two\n arrays (constraint_matrix, constraint_value), i.e. (R, q).\n Otherwise, the constraints can be given as strings or list of\n strings.\n see t_test for details\n start_params : None or array_like\n starting values for the optimization. 
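# --- Illustrative sketch (not part of the original source) -----------------
# Hedged sketch of fit_constrained as documented above: constraints can be
# given either as strings referring to exog_names or as an (R, q) pair.
# With a plain ndarray design the default names are const, x1, x2, ...;
# the simulated data is invented for illustration:
#
#   >>> import numpy as np
#   >>> import statsmodels.api as sm
#   >>> rng = np.random.RandomState(0)
#   >>> X = sm.add_constant(rng.normal(size=(200, 2)))
#   >>> y = rng.poisson(np.exp(np.dot(X, [0.1, 0.3, 0.3])))
#   >>> mod = sm.Poisson(y, X)
#   >>> res = mod.fit_constrained('x1 = x2')    # equivalent to R=[0, 1, -1], q=0
#   >>> res.params[1] - res.params[2]           # ~0 by construction
# ---------------------------------------------------------------------------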
`start_params` needs to be\n given in the original parameter space and are internally\n transformed.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the transformed model.\n\n Returns\n -------\n results : Results instance\n\n \"\"\"\n\n #constraints = (R, q)\n # TODO: temporary trailing underscore to not overwrite the monkey\n # patched version\n # TODO: decide whether to move the imports\n from patsy import DesignInfo\n from statsmodels.base._constraints import fit_constrained\n\n # same pattern as in base.LikelihoodModel.t_test\n lc = DesignInfo(self.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n # TODO: add start_params option, need access to tranformation\n # fit_constrained needs to do the transformation\n params, cov, res_constr = fit_constrained(self, R, q,\n start_params=start_params,\n fit_kwds=fit_kwds)\n #create dummy results Instance, TODO: wire up properly\n res = self.fit(maxiter=0, method='nm', disp=0,\n warn_convergence=False) # we get a wrapper back\n res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)\n res.mle_retvals['iterations'] = res_constr.mle_retvals.get(\n 'iterations', np.nan)\n res.mle_retvals['converged'] = res_constr.mle_retvals['converged']\n res._results.params = params\n res._results.normalized_cov_params = cov\n k_constr = len(q)\n res._results.df_resid += k_constr\n res._results.df_model -= k_constr\n res._results.constraints = lc\n res._results.k_constr = k_constr\n res._results.results_constrained = res_constr\n return res\n\n\n def score(self, params):\n \"\"\"\n Poisson model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left(y_{i}-\\\\lambda_{i}\\\\right)x_{i}\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + offset + exposure)\n return np.dot(self.endog - L, X)\n\n def score_obs(self, params):\n \"\"\"\n Poisson model Jacobian of the log-likelihood for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray (nobs, k_vars)\n The score vector of the model evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left(y_{i}-\\\\lambda_{i}\\\\right)x_{i}\n\n for observations :math:`i=1,...,n`\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + offset + exposure)\n return (self.endog - L)[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Poisson model Hessian matrix of the loglikelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\sum_{i=1}^{n}\\\\lambda_{i}x_{i}x_{i}^{\\\\prime}\n\n where the loglinear model is assumed\n\n .. math:: \\\\ln\\\\lambda_{i}=x_{i}\\\\beta\n\n \"\"\"\n offset = getattr(self, \"offset\", 0)\n exposure = getattr(self, \"exposure\", 0)\n X = self.exog\n L = np.exp(np.dot(X,params) + exposure + offset)\n return -np.dot(L*X.T, X)\n\nclass Logit(BinaryModel):\n __doc__ = \"\"\"\n Binary choice logit model\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc}\n\n def cdf(self, X):\n \"\"\"\n The logistic cumulative distribution function\n\n Parameters\n ----------\n X : array-like\n `X` is the linear predictor of the logit model. See notes.\n\n Returns\n -------\n 1/(1 + exp(-X))\n\n Notes\n ------\n In the logit model,\n\n .. math:: \\\\Lambda\\\\left(x^{\\\\prime}\\\\beta\\\\right)=\\\\text{Prob}\\\\left(Y=1|x\\\\right)=\\\\frac{e^{x^{\\\\prime}\\\\beta}}{1+e^{x^{\\\\prime}\\\\beta}}\n \"\"\"\n X = np.asarray(X)\n return 1/(1+np.exp(-X))\n\n def pdf(self, X):\n \"\"\"\n The logistic probability density function\n\n Parameters\n -----------\n X : array-like\n `X` is the linear predictor of the logit model. See notes.\n\n Returns\n -------\n pdf : ndarray\n The value of the Logit probability mass function, PMF, for each\n point of X. ``np.exp(-x)/(1+np.exp(-X))**2``\n\n Notes\n -----\n In the logit model,\n\n .. math:: \\\\lambda\\\\left(x^{\\\\prime}\\\\beta\\\\right)=\\\\frac{e^{-x^{\\\\prime}\\\\beta}}{\\\\left(1+e^{-x^{\\\\prime}\\\\beta}\\\\right)^{2}}\n \"\"\"\n X = np.asarray(X)\n return np.exp(-X)/(1+np.exp(-X))**2\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of logit model.\n\n Parameters\n -----------\n params : array-like\n The parameters of the logit model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n ------\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Lambda\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n logistic distribution is symmetric.\n \"\"\"\n q = 2*self.endog - 1\n X = self.exog\n return np.sum(np.log(self.cdf(q*np.dot(X,params))))\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of logit model for each observation.\n\n Parameters\n -----------\n params : array-like\n The parameters of the logit model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n ------\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Lambda\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`q=2y-1`. This simplification comes from the fact that the\n logistic distribution is symmetric.\n \"\"\"\n q = 2*self.endog - 1\n X = self.exog\n return np.log(self.cdf(q*np.dot(X,params)))\n\n def score(self, params):\n \"\"\"\n Logit model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params: array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left(y_{i}-\\\\Lambda_{i}\\\\right)x_{i}\n \"\"\"\n\n y = self.endog\n X = self.exog\n L = self.cdf(np.dot(X,params))\n return np.dot(y - L,X)\n\n def score_obs(self, params):\n \"\"\"\n Logit model Jacobian of the log-likelihood for each observation\n\n Parameters\n ----------\n params: array-like\n The parameters of the model\n\n Returns\n -------\n jac : ndarray, (nobs, k_vars)\n The derivative of the loglikelihood for each observation evaluated\n at `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left(y_{i}-\\\\Lambda_{i}\\\\right)x_{i}\n\n for observations :math:`i=1,...,n`\n\n \"\"\"\n\n y = self.endog\n X = self.exog\n L = self.cdf(np.dot(X, params))\n return (y - L)[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Logit model Hessian matrix of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\sum_{i}\\\\Lambda_{i}\\\\left(1-\\\\Lambda_{i}\\\\right)x_{i}x_{i}^{\\\\prime}\n \"\"\"\n X = self.exog\n L = self.cdf(np.dot(X,params))\n return -np.dot(L*(1-L)*X.T,X)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n bnryfit = super(Logit, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n\n discretefit = LogitResults(self, bnryfit)\n return BinaryResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\nclass Probit(BinaryModel):\n __doc__ = \"\"\"\n Binary choice Probit model\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc}\n\n def cdf(self, X):\n \"\"\"\n Probit (Normal) cumulative distribution function\n\n Parameters\n ----------\n X : array-like\n The linear predictor of the model (XB).\n\n Returns\n --------\n cdf : ndarray\n The cdf evaluated at `X`.\n\n Notes\n -----\n This function is just an alias for scipy.stats.norm.cdf\n \"\"\"\n return stats.norm._cdf(X)\n\n def pdf(self, X):\n \"\"\"\n Probit (Normal) probability density function\n\n Parameters\n ----------\n X : array-like\n The linear predictor of the model (XB).\n\n Returns\n --------\n pdf : ndarray\n The value of the normal density function for each point of X.\n\n Notes\n -----\n This function is just an alias for scipy.stats.norm.pdf\n\n \"\"\"\n X = np.asarray(X)\n return stats.norm._pdf(X)\n\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of probit model (i.e., the normal distribution).\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n -----\n .. math:: \\\\ln L=\\\\sum_{i}\\\\ln\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n Where :math:`q=2y-1`. 
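# --- Illustrative sketch (not part of the original source) -----------------
# Hedged note on the clipping used in Probit.loglike/loglikeobs below: the
# normal cdf underflows to exactly 0.0 for large negative arguments, so the
# probabilities are clipped to [FLOAT_EPS, 1] before taking logs to avoid
# returning -inf.  FLOAT_EPS here is machine epsilon for float64:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> stats.norm.cdf(-40.0)                   # underflows to 0.0
#   0.0
#   >>> eps = np.finfo(float).eps
#   >>> np.log(np.clip(stats.norm.cdf(-40.0), eps, 1))   # finite, not -inf
# ---------------------------------------------------------------------------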
This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n\n q = 2*self.endog - 1\n X = self.exog\n return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)),\n FLOAT_EPS, 1)))\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of probit model for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n -----\n .. math:: \\\\ln L_{i}=\\\\ln\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n\n q = 2*self.endog - 1\n X = self.exog\n return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))\n\n\n def score(self, params):\n \"\"\"\n Probit model score (gradient) vector\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta}=\\\\sum_{i=1}^{n}\\\\left[\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\\\\right]x_{i}\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n y = self.endog\n X = self.exog\n XB = np.dot(X,params)\n q = 2*y - 1\n # clip to get rid of invalid divide complaint\n L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)\n return np.dot(L,X)\n\n def score_obs(self, params):\n \"\"\"\n Probit model Jacobian for each observation\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n jac : ndarray, (nobs, k_vars)\n The derivative of the loglikelihood for each observation evaluated\n at `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta}=\\\\left[\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\\\\right]x_{i}\n\n for observations :math:`i=1,...,n`\n\n Where :math:`q=2y-1`. This simplification comes from the fact that the\n normal distribution is symmetric.\n \"\"\"\n y = self.endog\n X = self.exog\n XB = np.dot(X,params)\n q = 2*y - 1\n # clip to get rid of invalid divide complaint\n L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)\n return L[:,None] * X\n\n def hessian(self, params):\n \"\"\"\n Probit model Hessian matrix of the log-likelihood\n\n Parameters\n ----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta\\\\partial\\\\beta^{\\\\prime}}=-\\\\lambda_{i}\\\\left(\\\\lambda_{i}+x_{i}^{\\\\prime}\\\\beta\\\\right)x_{i}x_{i}^{\\\\prime}\n\n where\n\n .. 
math:: \\\\lambda_{i}=\\\\frac{q_{i}\\\\phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}{\\\\Phi\\\\left(q_{i}x_{i}^{\\\\prime}\\\\beta\\\\right)}\n\n and :math:`q=2y-1`\n \"\"\"\n X = self.exog\n XB = np.dot(X,params)\n q = 2*self.endog - 1\n L = q*self.pdf(q*XB)/self.cdf(q*XB)\n return np.dot(-L*(L+XB)*X.T,X)\n\n def fit(self, start_params=None, method='newton', maxiter=35,\n full_output=1, disp=1, callback=None, **kwargs):\n bnryfit = super(Probit, self).fit(start_params=start_params,\n method=method, maxiter=maxiter, full_output=full_output,\n disp=disp, callback=callback, **kwargs)\n discretefit = ProbitResults(self, bnryfit)\n return BinaryResultsWrapper(discretefit)\n fit.__doc__ = DiscreteModel.fit.__doc__\n\nclass MNLogit(MultinomialModel):\n __doc__ = \"\"\"\n Multinomial logit model\n\n Parameters\n ----------\n endog : array-like\n `endog` is an 1-d vector of the endogenous response. `endog` can\n contain strings, ints, or floats. Note that if it contains strings,\n every distinct string will be a category. No stripping of whitespace\n is done.\n exog : array-like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user. See `statsmodels.tools.add_constant`.\n %(extra_params)s\n\n Attributes\n ----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n J : float\n The number of choices for the endogenous variable. Note that this\n is zero-indexed.\n K : float\n The actual number of parameters for the exogenous design. Includes\n the constant if the design has one.\n names : dict\n A dictionary mapping the column number in `wendog` to the variables\n in `endog`.\n wendog : array\n An n x j array where j is the number of unique categories in `endog`.\n Each column of j is a dummy variable indicating the category of\n each observation. See `names` for a dictionary mapping each column to\n its category.\n\n Notes\n -----\n See developer notes for further information on `MNLogit` internals.\n \"\"\" % {'extra_params' : base._missing_param_doc}\n\n def pdf(self, eXB):\n \"\"\"\n NotImplemented\n \"\"\"\n raise NotImplementedError\n\n def cdf(self, X):\n \"\"\"\n Multinomial logit cumulative distribution function.\n\n Parameters\n ----------\n X : array\n The linear predictor of the model XB.\n\n Returns\n --------\n cdf : ndarray\n The cdf evaluated at `X`.\n\n Notes\n -----\n In the multinomial logit model.\n .. math:: \\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\n \"\"\"\n eXB = np.column_stack((np.ones(len(X)), np.exp(X)))\n return eXB/eXB.sum(1)[:,None]\n\n def loglike(self, params):\n \"\"\"\n Log-likelihood of the multinomial logit model.\n\n Parameters\n ----------\n params : array-like\n The parameters of the multinomial logit model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n ------\n .. 
math:: \\\\ln L=\\\\sum_{i=1}^{n}\\\\sum_{j=0}^{J}d_{ij}\\\\ln\\\\left(\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)\n\n where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0\n if not.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n d = self.wendog\n logprob = np.log(self.cdf(np.dot(self.exog,params)))\n return np.sum(d * logprob)\n\n def loglikeobs(self, params):\n \"\"\"\n Log-likelihood of the multinomial logit model for each observation.\n\n Parameters\n ----------\n params : array-like\n The parameters of the multinomial logit model.\n\n Returns\n -------\n loglike : ndarray (nobs,)\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes\n\n Notes\n ------\n .. math:: \\\\ln L_{i}=\\\\sum_{j=0}^{J}d_{ij}\\\\ln\\\\left(\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)\n\n for observations :math:`i=1,...,n`\n\n where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0\n if not.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n d = self.wendog\n logprob = np.log(self.cdf(np.dot(self.exog,params)))\n return d * logprob\n\n def score(self, params):\n \"\"\"\n Score matrix for multinomial logit model log-likelihood\n\n Parameters\n ----------\n params : array\n The parameters of the multinomial logit model.\n\n Returns\n --------\n score : ndarray, (K * (J-1),)\n The 2-d score vector, i.e. the first derivative of the\n loglikelihood function, of the multinomial logit model evaluated at\n `params`.\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial\\\\ln L}{\\\\partial\\\\beta_{j}}=\\\\sum_{i}\\\\left(d_{ij}-\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)x_{i}\n\n for :math:`j=1,...,J`\n\n In the multinomial model the score matrix is K x J-1 but is returned\n as a flattened array to work with the solvers.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,\n params))[:,1:]\n #NOTE: might need to switch terms if params is reshaped\n return np.dot(firstterm.T, self.exog).flatten()\n\n def loglike_and_score(self, params):\n \"\"\"\n Returns log likelihood and score, efficiently reusing calculations.\n\n Note that both of these returned quantities will need to be negated\n before being minimized by the maximum likelihood fitting machinery.\n\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))\n loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))\n firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]\n score_array = np.dot(firstterm.T, self.exog).flatten()\n return loglike_value, score_array\n\n def score_obs(self, params):\n \"\"\"\n Jacobian matrix for multinomial logit model log-likelihood\n\n Parameters\n ----------\n params : array\n The parameters of the multinomial logit model.\n\n Returns\n --------\n jac : ndarray, (nobs, k_vars*(J-1))\n The derivative of the loglikelihood for each observation evaluated\n at `params` .\n\n Notes\n -----\n .. 
math:: \\\\frac{\\\\partial\\\\ln L_{i}}{\\\\partial\\\\beta_{j}}=\\\\left(d_{ij}-\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right)x_{i}\n\n for :math:`j=1,...,J`, for observations :math:`i=1,...,n`\n\n In the multinomial model the score vector is K x (J-1) but is returned\n as a flattened array. The Jacobian has the observations in rows and\n the flatteded array of derivatives in columns.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,\n params))[:,1:]\n #NOTE: might need to switch terms if params is reshaped\n return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1)\n\n def hessian(self, params):\n \"\"\"\n Multinomial logit Hessian matrix of the log-likelihood\n\n Parameters\n -----------\n params : array-like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (J*K, J*K)\n The Hessian, second derivative of loglikelihood function with\n respect to the flattened parameters, evaluated at `params`\n\n Notes\n -----\n .. math:: \\\\frac{\\\\partial^{2}\\\\ln L}{\\\\partial\\\\beta_{j}\\\\partial\\\\beta_{l}}=-\\\\sum_{i=1}^{n}\\\\frac{\\\\exp\\\\left(\\\\beta_{j}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\left[\\\\boldsymbol{1}\\\\left(j=l\\\\right)-\\\\frac{\\\\exp\\\\left(\\\\beta_{l}^{\\\\prime}x_{i}\\\\right)}{\\\\sum_{k=0}^{J}\\\\exp\\\\left(\\\\beta_{k}^{\\\\prime}x_{i}\\\\right)}\\\\right]x_{i}x_{l}^{\\\\prime}\n\n where\n :math:`\\\\boldsymbol{1}\\\\left(j=l\\\\right)` equals 1 if `j` = `l` and 0\n otherwise.\n\n The actual Hessian matrix has J**2 * K x K elements. Our Hessian\n is reshaped to be square (J*K, J*K) so that the solvers can use it.\n\n This implementation does not take advantage of the symmetry of\n the Hessian and could probably be refactored for speed.\n \"\"\"\n params = params.reshape(self.K, -1, order='F')\n X = self.exog\n pr = self.cdf(np.dot(X,params))\n partials = []\n J = self.wendog.shape[1] - 1\n K = self.exog.shape[1]\n for i in range(J):\n for j in range(J): # this loop assumes we drop the first col.\n if i == j:\n partials.append(\\\n -np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))\n else:\n partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))\n H = np.array(partials)\n # the developer's notes on multinomial should clear this math up\n H = np.transpose(H.reshape(J,J,K,K), (0,2,1,3)).reshape(J*K,J*K)\n return H\n\n\n#TODO: Weibull can replaced by a survival analsysis function\n# like stat's streg (The cox model as well)\n#class Weibull(DiscreteModel):\n# \"\"\"\n# Binary choice Weibull model\n#\n# Notes\n# ------\n# This is unfinished and untested.\n# \"\"\"\n##TODO: add analytic hessian for Weibull\n# def initialize(self):\n# pass\n#\n# def cdf(self, X):\n# \"\"\"\n# Gumbell (Log Weibull) cumulative distribution function\n# \"\"\"\n## return np.exp(-np.exp(-X))\n# return stats.gumbel_r.cdf(X)\n# # these two are equivalent.\n# # Greene table and discussion is incorrect.\n#\n# def pdf(self, X):\n# \"\"\"\n# Gumbell (LogWeibull) probability distribution function\n# \"\"\"\n# return stats.gumbel_r.pdf(X)\n#\n# def loglike(self, params):\n# \"\"\"\n# Loglikelihood of Weibull distribution\n# \"\"\"\n# X = self.exog\n# cdf = self.cdf(np.dot(X,params))\n# y = self.endog\n# return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))\n#\n# def score(self, params):\n# y = self.endog\n# 
X = self.exog\n# F = self.cdf(np.dot(X,params))\n# f = self.pdf(np.dot(X,params))\n# term = (y*f/F + (1 - y)*-f/(1-F))\n# return np.dot(term,X)\n#\n# def hessian(self, params):\n# hess = nd.Jacobian(self.score)\n# return hess(params)\n#\n# def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):\n## The example had problems with all zero start values, Hessian = 0\n# if start_params is None:\n# start_params = OLS(self.endog, self.exog).fit().params\n# mlefit = super(Weibull, self).fit(start_params=start_params,\n# method=method, maxiter=maxiter, tol=tol)\n# return mlefit\n#\n\nclass NegativeBinomial(CountModel):\n __doc__ = \"\"\"\n Negative Binomial Model for count data\n\n%(params)s\n %(extra_params)s\n\n Attributes\n -----------\n endog : array\n A reference to the endogenous response variable\n exog : array\n A reference to the exogenous design.\n\n References\n ----------\n\n References:\n\n Greene, W. 2008. \"Functional forms for the negtive binomial model\n for count data\". Economics Letters. Volume 99, Number 3, pp.585-590.\n Hilbe, J.M. 2011. \"Negative binomial regression\". Cambridge University\n Press.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' :\n \"\"\"loglike_method : string\n Log-likelihood type. 'nb2','nb1', or 'geometric'.\n Fitted value :math:`\\\\mu`\n Heterogeneity parameter :math:`\\\\alpha`\n\n - nb2: Variance equal to :math:`\\\\mu + \\\\alpha\\\\mu^2` (most common)\n - nb1: Variance equal to :math:`\\\\mu + \\\\alpha\\\\mu`\n - geometric: Variance equal to :math:`\\\\mu + \\\\mu^2`\n offset : array_like\n Offset is added to the linear prediction with coefficient equal to 1.\n exposure : array_like\n Log(exposure) is added to the linear prediction with coefficient\n equal to 1.\n\n \"\"\" + base._missing_param_doc}\n def __init__(self, endog, exog, loglike_method='nb2', offset=None,\n exposure=None, missing='none', **kwargs):\n super(NegativeBinomial, self).__init__(endog, exog, offset=offset,\n exposure=exposure,\n missing=missing, **kwargs)\n self.loglike_method = loglike_method\n self._initialize()\n if loglike_method in ['nb2', 'nb1']:\n self.exog_names.append('alpha')\n self.k_extra = 1\n else:\n self.k_extra = 0\n # store keys for extras if we need to recreate model instance\n # we need to append keys that don't go to super\n self._init_keys.append('loglike_method')\n\n def _initialize(self):\n if self.loglike_method == 'nb2':\n self.hessian = self._hessian_nb2\n self.score = self._score_nbin\n self.loglikeobs = self._ll_nb2\n self._transparams = True # transform lnalpha -> alpha in fit\n elif self.loglike_method == 'nb1':\n self.hessian = self._hessian_nb1\n self.score = self._score_nb1\n self.loglikeobs = self._ll_nb1\n self._transparams = True # transform lnalpha -> alpha in fit\n elif self.loglike_method == 'geometric':\n self.hessian = self._hessian_geom\n self.score = self._score_geom\n self.loglikeobs = self._ll_geometric\n else:\n raise NotImplementedError(\"Likelihood type must nb1, nb2 or \"\n \"geometric\")\n\n # Workaround to pickle instance methods\n def __getstate__(self):\n odict = self.__dict__.copy() # copy the dict since we change it\n del odict['hessian']\n del odict['score']\n del odict['loglikeobs']\n return odict\n\n def __setstate__(self, indict):\n self.__dict__.update(indict)\n self._initialize()\n\n def _ll_nbin(self, params, alpha, Q=0):\n endog = self.endog\n mu = self.predict(params)\n size = 1/alpha * mu**Q\n prob = size/(size+mu)\n coeff = (gammaln(size+endog) - gammaln(endog+1) -\n 
gammaln(size))\n llf = coeff + size*np.log(prob) + endog*np.log(1-prob)\n return llf\n\n def _ll_nb2(self, params):\n if self._transparams: # got lnalpha during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n return self._ll_nbin(params[:-1], alpha, Q=0)\n\n def _ll_nb1(self, params):\n if self._transparams: # got lnalpha during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n return self._ll_nbin(params[:-1], alpha, Q=1)\n\n def _ll_geometric(self, params):\n # we give alpha of 1 because it's actually log(alpha) where alpha=0\n return self._ll_nbin(params, 1, 0)\n\n def loglike(self, params):\n r\"\"\"\n Loglikelihood for negative binomial model\n\n Parameters\n ----------\n params : array-like\n The parameters of the model. If `loglike_method` is nb1 or\n nb2, then the ancillary parameter is expected to be the\n last element.\n\n Returns\n -------\n llf : float\n The loglikelihood value at `params`\n\n Notes\n -----\n Following notation in Greene (2008), with negative binomial\n heterogeneity parameter :math:`\\alpha`:\n\n .. math::\n\n \\lambda_i &= exp(X\\beta) \\\\\n \\theta &= 1 / \\alpha \\\\\n g_i &= \\theta \\lambda_i^Q \\\\\n w_i &= g_i/(g_i + \\lambda_i) \\\\\n r_i &= \\theta / (\\theta+\\lambda_i) \\\\\n ln \\mathcal{L}_i &= ln \\Gamma(y_i+g_i) - ln \\Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)\n\n where :math`Q=0` for NB2 and geometric and :math:`Q=1` for NB1.\n For the geometric, :math:`\\alpha=0` as well.\n\n \"\"\"\n llf = np.sum(self.loglikeobs(params))\n return llf\n\n def _score_geom(self, params):\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n dparams = exog * (y-mu)/(mu+1)\n return dparams.sum(0)\n\n def _score_nbin(self, params, Q=0):\n \"\"\"\n Score vector for NB2 model\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n params = params[:-1]\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n a1 = 1/alpha * mu**Q\n if Q: # nb1\n dparams = exog*mu/alpha*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n dalpha = ((alpha*(y - mu*np.log(1/(alpha + 1)) -\n mu*(special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha) + 1)) -\n mu*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha)))/\n (alpha**2*(alpha + 1))).sum()\n\n else: # nb2\n dparams = exog*a1 * (y-mu)/(mu+a1)\n da1 = -alpha**-2\n dalpha = (special.digamma(a1+y) - special.digamma(a1) + np.log(a1)\n - np.log(a1+mu) - (a1+y)/(a1+mu) + 1).sum()*da1\n\n #multiply above by constant outside sum to reduce rounding error\n if self._transparams:\n return np.r_[dparams.sum(0), dalpha*alpha]\n else:\n return np.r_[dparams.sum(0), dalpha]\n\n def _score_nb1(self, params):\n return self._score_nbin(params, Q=1)\n\n def _hessian_geom(self, params):\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim, dim))\n const_arr = mu*(1+y)/(mu+1)**2\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *\n const_arr, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n return hess_arr\n\n\n def _hessian_nb1(self, params):\n \"\"\"\n Hessian of NB1 model.\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n\n params = 
params[:-1]\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n a1 = mu/alpha\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim+1,dim+1))\n #const_arr = a1*mu*(a1+y)/(mu+a1)**2\n # not all of dparams\n dparams = exog/alpha*(np.log(1/(alpha + 1)) +\n special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n\n dmudb = exog*mu\n xmu_alpha = exog*mu/alpha\n trigamma = (special.polygamma(1, mu/alpha + y) -\n special.polygamma(1, mu/alpha))\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] +\n xmu_alpha[:,i,None] * xmu_alpha[:,j,None] *\n trigamma, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n\n # for dl/dparams dalpha\n da1 = -alpha**-2\n dldpda = np.sum(-mu/alpha * dparams + exog*mu/alpha *\n (-trigamma*mu/alpha**2 - 1/(alpha+1)), axis=0)\n\n hess_arr[-1,:-1] = dldpda\n hess_arr[:-1,-1] = dldpda\n\n # for dl/dalpha dalpha\n digamma_part = (special.digamma(y + mu/alpha) -\n special.digamma(mu/alpha))\n\n log_alpha = np.log(1/(alpha+1))\n alpha3 = alpha**3\n alpha2 = alpha**2\n mu2 = mu**2\n dada = ((alpha3*mu*(2*log_alpha + 2*digamma_part + 3) -\n 2*alpha3*y + alpha2*mu2*trigamma +\n 4*alpha2*mu*(log_alpha + digamma_part) +\n alpha2 * (2*mu - y) +\n 2*alpha*mu2*trigamma +\n 2*alpha*mu*(log_alpha + digamma_part) +\n mu2*trigamma)/(alpha**4*(alpha2 + 2*alpha + 1)))\n hess_arr[-1,-1] = dada.sum()\n\n return hess_arr\n\n def _hessian_nb2(self, params):\n \"\"\"\n Hessian of NB2 model.\n \"\"\"\n if self._transparams: # lnalpha came in during fit\n alpha = np.exp(params[-1])\n else:\n alpha = params[-1]\n a1 = 1/alpha\n params = params[:-1]\n\n exog = self.exog\n y = self.endog[:,None]\n mu = self.predict(params)[:,None]\n\n # for dl/dparams dparams\n dim = exog.shape[1]\n hess_arr = np.empty((dim+1,dim+1))\n const_arr = a1*mu*(a1+y)/(mu+a1)**2\n for i in range(dim):\n for j in range(dim):\n if j > i:\n continue\n hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *\n const_arr, axis=0)\n tri_idx = np.triu_indices(dim, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n\n # for dl/dparams dalpha\n da1 = -alpha**-2\n dldpda = np.sum(mu*exog*(y-mu)*da1/(mu+a1)**2 , axis=0)\n hess_arr[-1,:-1] = dldpda\n hess_arr[:-1,-1] = dldpda\n\n # for dl/dalpha dalpha\n #NOTE: polygamma(1,x) is the trigamma function\n da2 = 2*alpha**-3\n dalpha = da1 * (special.digamma(a1+y) - special.digamma(a1) +\n np.log(a1) - np.log(a1+mu) - (a1+y)/(a1+mu) + 1)\n dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -\n special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +\n (y - mu)/(mu + a1)**2)).sum()\n hess_arr[-1,-1] = dada\n\n return hess_arr\n\n #TODO: replace this with analytic where is it used?\n def score_obs(self, params):\n sc = approx_fprime_cs(params, self.loglikeobs)\n return sc\n\n def fit(self, start_params=None, method='bfgs', maxiter=35,\n full_output=1, disp=1, callback=None,\n cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):\n\n # Note: don't let super handle robust covariance because it has\n # transformed params\n\n if self.loglike_method.startswith('nb') and method not in ['newton',\n 'ncg']:\n self._transparams = True # in case same Model instance is refit\n elif self.loglike_method.startswith('nb'): # method is newton/ncg\n self._transparams = False # because we need to step in alpha space\n\n if start_params is None:\n # Use poisson fit as first guess.\n #TODO, Warning: this assumes exposure is 
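# --- Illustrative sketch (not part of the original source) -----------------
# Hedged sketch of the NegativeBinomial workflow implemented around here: a
# Poisson fit supplies starting values, and the extra dispersion parameter
# alpha is appended last (estimated on the log scale for the gradient-based
# solvers and transformed back afterwards).  The simulated NB2 data below
# (true alpha = 0.5) is invented for illustration:
#
#   >>> import numpy as np
#   >>> import statsmodels.api as sm
#   >>> rng = np.random.RandomState(0)
#   >>> X = sm.add_constant(rng.normal(size=(500, 1)))
#   >>> mu = np.exp(np.dot(X, [0.5, 0.8]))
#   >>> y = rng.negative_binomial(2, 2.0 / (2.0 + mu))   # 1/alpha = 2
#   >>> res = sm.NegativeBinomial(y, X, loglike_method='nb2').fit(disp=0)
#   >>> res.params[-1]    # the last entry is the estimated alpha
# ---------------------------------------------------------------------------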
logged\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n mod_poi = Poisson(self.endog, self.exog, offset=offset)\n start_params = mod_poi.fit(disp=0).params\n if self.loglike_method.startswith('nb'):\n start_params = np.append(start_params, 0.1)\n mlefit = super(NegativeBinomial, self).fit(start_params=start_params,\n maxiter=maxiter, method=method, disp=disp,\n full_output=full_output, callback=lambda x:x,\n **kwargs)\n # TODO: Fix NBin _check_perfect_pred\n if self.loglike_method.startswith('nb'):\n # mlefit is a wrapped counts results\n self._transparams = False # don't need to transform anymore now\n # change from lnalpha to alpha\n if method not in [\"newton\", \"ncg\"]:\n mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])\n\n nbinfit = NegativeBinomialResults(self, mlefit._results)\n result = NegativeBinomialResultsWrapper(nbinfit)\n else:\n result = mlefit\n\n if cov_kwds is None:\n cov_kwds = {} #TODO: make this unnecessary ?\n result._get_robustcov_results(cov_type=cov_type,\n use_self=True, use_t=use_t, **cov_kwds)\n return result\n\n\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n\n if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and\n alpha != 0):\n # don't penalize alpha if alpha is scalar\n k_params = self.exog.shape[1] + self.k_extra\n alpha = alpha * np.ones(k_params)\n alpha[-1] = 0\n\n # alpha for regularized poisson to get starting values\n alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha\n\n self._transparams = False\n if start_params is None:\n # Use poisson fit as first guess.\n #TODO, Warning: this assumes exposure is logged\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n mod_poi = Poisson(self.endog, self.exog, offset=offset)\n start_params = mod_poi.fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=0, callback=callback,\n alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params\n if self.loglike_method.startswith('nb'):\n start_params = np.append(start_params, 0.1)\n\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n if method in ['l1', 'l1_cvxopt_cp']:\n discretefit = L1NegativeBinomialResults(self, cntfit)\n else:\n raise Exception(\n \"argument method == %s, which is not handled\" % method)\n\n return L1NegativeBinomialResultsWrapper(discretefit)\n\n\n### Results Class ###\n\nclass DiscreteResults(base.LikelihoodModelResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for the discrete dependent variable models.\",\n \"extra_attr\" : \"\"}\n\n def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,\n use_t=None):\n #super(DiscreteResults, self).__init__(model, params,\n # np.linalg.inv(-hessian), scale=1.)\n self.model = model\n self.df_model = model.df_model\n self.df_resid = model.df_resid\n self._cache = resettable_cache()\n self.nobs = 
model.exog.shape[0]\n self.__dict__.update(mlefit.__dict__)\n\n if not hasattr(self, 'cov_type'):\n # do this only if super, i.e. mlefit didn't already add cov_type\n # robust covariance\n if use_t is not None:\n self.use_t = use_t\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description' : 'Standard Errors assume that the ' +\n 'covariance matrix of the errors is correctly ' +\n 'specified.'}\n else:\n if cov_kwds is None:\n cov_kwds = {}\n from statsmodels.base.covtype import get_robustcov_results\n get_robustcov_results(self, cov_type=cov_type, use_self=True,\n **cov_kwds)\n\n\n\n def __getstate__(self):\n try:\n #remove unpicklable callback\n self.mle_settings['callback'] = None\n except (AttributeError, KeyError):\n pass\n return self.__dict__\n\n @cache_readonly\n def prsquared(self):\n return 1 - self.llf/self.llnull\n\n @cache_readonly\n def llr(self):\n return -2*(self.llnull - self.llf)\n\n @cache_readonly\n def llr_pvalue(self):\n return stats.chisqprob(self.llr, self.df_model)\n\n @cache_readonly\n def llnull(self):\n\n model = self.model\n kwds = model._get_init_kwds()\n # TODO: what parameters to pass to fit?\n mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)\n # TODO: consider catching and warning on convergence failure?\n # in the meantime, try hard to converge. see\n # TestPoissonConstrained1a.test_smoke\n res_null = mod_null.fit(disp=0, warn_convergence=False,\n maxiter=10000)\n return res_null.llf\n\n @cache_readonly\n def fittedvalues(self):\n return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]])\n\n @cache_readonly\n def aic(self):\n return -2*(self.llf - (self.df_model+1))\n\n @cache_readonly\n def bic(self):\n return -2*self.llf + np.log(self.nobs)*(self.df_model+1)\n\n def _get_endog_name(self, yname, yname_list):\n if yname is None:\n yname = self.model.endog_names\n if yname_list is None:\n yname_list = self.model.endog_names\n return yname, yname_list\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Parameters\n ----------\n at : str, optional\n Options are:\n\n - 'overall', The average of the marginal effects at each\n observation.\n - 'mean', The marginal effects at the mean of each regressor.\n - 'median', The marginal effects at the median of each regressor.\n - 'zero', The marginal effects at zero for each regressor.\n - 'all', The marginal effects at each observation. If `at` is all\n only margeff will be available from the returned object.\n\n Note that if `exog` is specified, then marginal effects for all\n variables not specified by `exog` are calculated using the `at`\n option.\n method : str, optional\n Options are:\n\n - 'dydx' - dy/dx - No transformation is made and marginal effects\n are returned. This is the default.\n - 'eyex' - estimate elasticities of variables in `exog` --\n d(lny)/d(lnx)\n - 'dyex' - estimate semielasticity -- dy/d(lnx)\n - 'eydx' - estimate semeilasticity -- d(lny)/dx\n\n Note that tranformations are done after each observation is\n calculated. Semi-elasticities for binary variables are computed\n using the midpoint method. 'dyex' and 'eyex' do not make sense\n for discrete variables.\n atexog : array-like, optional\n Optionally, you can provide the exogenous variables over which to\n get the marginal effects. 
This should be a dictionary with the key\n as the zero-indexed column number and the value of the dictionary.\n Default is None for all independent variables less the constant.\n dummy : bool, optional\n If False, treats binary variables (if present) as continuous. This\n is the default. Else if True, treats binary variables as\n changing from 0 to 1. Note that any variable that is either 0 or 1\n is treated as binary. Each binary variable is treated separately\n for now.\n count : bool, optional\n If False, treats count variables (if present) as continuous. This\n is the default. Else if True, the marginal effect is the\n change in probabilities when each observation is increased by one.\n\n Returns\n -------\n DiscreteMargins : marginal effects instance\n Returns an object that holds the marginal effects, standard\n errors, confidence intervals, etc. See\n `statsmodels.discrete.discrete_margins.DiscreteMargins` for more\n information.\n\n Notes\n -----\n When using after Poisson, returns the expected number of events\n per period, assuming that the model is loglinear.\n \"\"\"\n from statsmodels.discrete.discrete_margins import DiscreteMargins\n return DiscreteMargins(self, (at, method, atexog, dummy, count))\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05,\n yname_list=None):\n \"\"\"Summarize the Regression Results\n\n Parameters\n -----------\n yname : string, optional\n Default is `y`\n xname : list of strings, optional\n Default is `var_##` for ## in p the number of regressors\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', [self.model.__class__.__name__]),\n ('Method:', ['MLE']),\n ('Date:', None),\n ('Time:', None),\n #('No. iterations:', [\"%d\" % self.mle_retvals['iterations']]),\n ('converged:', [\"%s\" % self.mle_retvals['converged']])\n ]\n\n top_right = [('No. 
Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None),\n ('Pseudo R-squ.:', [\"%#6.4g\" % self.prsquared]),\n ('Log-Likelihood:', None),\n ('LL-Null:', [\"%#8.5g\" % self.llnull]),\n ('LLR p-value:', [\"%#6.4g\" % self.llr_pvalue])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Regression Results\"\n\n #boiler plate\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n yname, yname_list = self._get_endog_name(yname, yname_list)\n # for top of table\n smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],\n yname=yname, xname=xname, title=title)\n # for parameters, etc\n smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n if hasattr(self, 'constraints'):\n smry.add_extra_txt(['Model has been estimated subject to linear '\n 'equality constraints.'])\n\n #diagnostic table not used yet\n #smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n # yname=yname, xname=xname,\n # title=\"\")\n return smry\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental function to summarize regression results\n\n Parameters\n -----------\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n yname : string\n Name of the dependent variable (optional)\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n\n if hasattr(self, 'constraints'):\n smry.add_text('Model has been estimated subject to linear '\n 'equality constraints.')\n\n return smry\n\n\n\nclass CountResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for count data\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid(self):\n \"\"\"\n Residuals\n\n Notes\n -----\n The residuals for Count models are defined as\n\n .. math:: y - p\n\n where :math:`p = \\\\exp(X\\\\beta)`. 
Any exposure and offset variables\n are also handled.\n \"\"\"\n return self.model.endog - self.predict()\n\nclass NegativeBinomialResults(CountResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for NegativeBinomial 1 and 2\",\n \"extra_attr\" : \"\"}\n\n @cache_readonly\n def lnalpha(self):\n return np.log(self.params[-1])\n\n @cache_readonly\n def lnalpha_std_err(self):\n return self.bse[-1] / self.params[-1]\n\n @cache_readonly\n def aic(self):\n # + 1 because we estimate alpha\n k_extra = getattr(self.model, 'k_extra', 0)\n return -2*(self.llf - (self.df_model + self.k_constant + k_extra))\n\n @cache_readonly\n def bic(self):\n # + 1 because we estimate alpha\n k_extra = getattr(self.model, 'k_extra', 0)\n return -2*self.llf + np.log(self.nobs)*(self.df_model +\n self.k_constant + k_extra)\n\nclass L1CountResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for count data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n #discretefit = CountResults(self, cntfit)\n\n def __init__(self, model, cntfit):\n super(L1CountResults, self).__init__(model, cntfit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = cntfit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n # update degrees of freedom\n self.model.df_model = self.nnz_params - 1\n self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)\n # adjust for extra parameter in NegativeBinomial nb1 and nb2\n # extra parameter is not included in df_model\n k_extra = getattr(self.model, 'k_extra', 0)\n self.model.df_model -= k_extra\n self.model.df_resid += k_extra\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\nclass PoissonResults(CountResults):\n def predict_prob(self, n=None, exog=None, exposure=None, offset=None,\n transform=True):\n \"\"\"\n Return predicted probability of each count level for each observation\n\n Parameters\n ----------\n n : array-like or int\n The counts for which you want the probabilities. If n is None\n then the probabilities for each count from 0 to max(y) are\n given.\n\n Returns\n -------\n ndarray\n A nobs x n array where len(`n`) columns are indexed by the count\n n. If n is None, then column 0 is the probability that each\n observation is 0, column 1 is the probability that each\n observation is 1, etc.\n \"\"\"\n if n is not None:\n counts = np.atleast_2d(n)\n else:\n counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))\n mu = self.predict(exog=exog, exposure=exposure, offset=offset,\n transform=transform, linear=False)[:,None]\n # uses broadcasting\n return stats.poisson.pmf(counts, mu)\n\nclass L1PoissonResults(L1CountResults, PoissonResults):\n pass\n\nclass L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults):\n pass\n\nclass OrderedResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" : \"A results class for ordered discrete data.\" , \"extra_attr\" : \"\"}\n pass\n\nclass BinaryResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" : \"A results class for binary data\", \"extra_attr\" : \"\"}\n\n def pred_table(self, threshold=.5):\n \"\"\"\n Prediction table\n\n Parameters\n ----------\n threshold : scalar\n Number between 0 and 1. 
Threshold above which a prediction is\n considered 1 and below which a prediction is considered 0.\n\n Notes\n ------\n pred_table[i,j] refers to the number of times \"i\" was observed and\n the model predicted \"j\". Correct predictions are along the diagonal.\n \"\"\"\n model = self.model\n actual = model.endog\n pred = np.array(self.predict() > threshold, dtype=float)\n bins = np.array([0, 0.5, 1])\n return np.histogram2d(actual, pred, bins=bins)[0]\n\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05,\n yname_list=None):\n smry = super(BinaryResults, self).summary(yname, xname, title, alpha,\n yname_list)\n fittedvalues = self.model.cdf(self.fittedvalues)\n absprederror = np.abs(self.model.endog - fittedvalues)\n predclose_sum = (absprederror < 1e-4).sum()\n predclose_frac = predclose_sum / len(fittedvalues)\n\n #add warnings/notes\n etext = []\n if predclose_sum == len(fittedvalues): #nobs?\n wstr = \"Complete Separation: The results show that there is\"\n wstr += \"complete separation.\\n\"\n wstr += \"In this case the Maximum Likelihood Estimator does \"\n wstr += \"not exist and the parameters\\n\"\n wstr += \"are not identified.\"\n etext.append(wstr)\n elif predclose_frac > 0.1: # TODO: get better diagnosis\n wstr = \"Possibly complete quasi-separation: A fraction \"\n wstr += \"%4.2f of observations can be\\n\" % predclose_frac\n wstr += \"perfectly predicted. This might indicate that there \"\n wstr += \"is complete\\nquasi-separation. In this case some \"\n wstr += \"parameters will not be identified.\"\n etext.append(wstr)\n if etext:\n smry.add_extra_txt(etext)\n return smry\n summary.__doc__ = DiscreteResults.summary.__doc__\n\n @cache_readonly\n def resid_dev(self):\n \"\"\"\n Deviance residuals\n\n Notes\n -----\n Deviance residuals are defined\n\n .. math:: d_j = \\\\pm\\\\left(2\\\\left[Y_j\\\\ln\\\\left(\\\\frac{Y_j}{M_jp_j}\\\\right) + (M_j - Y_j\\\\ln\\\\left(\\\\frac{M_j-Y_j}{M_j(1-p_j)} \\\\right) \\\\right] \\\\right)^{1/2}\n\n where\n\n :math:`p_j = cdf(X\\\\beta)` and :math:`M_j` is the total number of\n observations sharing the covariate pattern :math:`j`.\n\n For now :math:`M_j` is always set to 1.\n \"\"\"\n #These are the deviance residuals\n #model = self.model\n endog = self.model.endog\n #exog = model.exog\n # M = # of individuals that share a covariate pattern\n # so M[i] = 2 for i = two share a covariate pattern\n M = 1\n p = self.predict()\n #Y_0 = np.where(exog == 0)\n #Y_M = np.where(exog == M)\n #NOTE: Common covariate patterns are not yet handled\n res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \\\n endog*np.sqrt(2*M*np.abs(np.log(p)))\n return res\n\n @cache_readonly\n def resid_pearson(self):\n \"\"\"\n Pearson residuals\n\n Notes\n -----\n Pearson residuals are defined to be\n\n .. math:: r_j = \\\\frac{(y - M_jp_j)}{\\\\sqrt{M_jp_j(1-p_j)}}\n\n where :math:`p_j=cdf(X\\\\beta)` and :math:`M_j` is the total number of\n observations sharing the covariate pattern :math:`j`.\n\n For now :math:`M_j` is always set to 1.\n \"\"\"\n # Pearson residuals\n #model = self.model\n endog = self.model.endog\n #exog = model.exog\n # M = # of individuals that share a covariate pattern\n # so M[i] = 2 for i = two share a covariate pattern\n # use unique row pattern?\n M = 1\n p = self.predict()\n return (endog - M*p)/np.sqrt(M*p*(1-p))\n\n @cache_readonly\n def resid_response(self):\n \"\"\"\n The response residuals\n\n Notes\n -----\n Response residuals are defined to be\n\n .. 
math:: y - p\n\n where :math:`p=cdf(X\\\\beta)`.\n \"\"\"\n return self.model.endog - self.predict()\n\nclass LogitResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for Logit Model\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid_generalized(self):\n \"\"\"\n Generalized residuals\n\n Notes\n -----\n The generalized residuals for the Logit model are defined\n\n .. math:: y - p\n\n where :math:`p=cdf(X\\\\beta)`. This is the same as the `resid_response`\n for the Logit model.\n \"\"\"\n # Generalized residuals\n return self.model.endog - self.predict()\n\nclass ProbitResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\" : \"A results class for Probit Model\",\n \"extra_attr\" : \"\"}\n @cache_readonly\n def resid_generalized(self):\n \"\"\"\n Generalized residuals\n\n Notes\n -----\n The generalized residuals for the Probit model are defined\n\n .. math:: y\\\\frac{\\\\phi(X\\\\beta)}{\\\\Phi(X\\\\beta)}-(1-y)\\\\frac{\\\\phi(X\\\\beta)}{1-\\\\Phi(X\\\\beta)}\n \"\"\"\n # generalized residuals\n model = self.model\n endog = model.endog\n XB = self.predict(linear=True)\n pdf = model.pdf(XB)\n cdf = model.cdf(XB)\n return endog * pdf/cdf - (1-endog)*pdf/(1-cdf)\n\nclass L1BinaryResults(BinaryResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"Results instance for binary data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n def __init__(self, model, bnryfit):\n super(L1BinaryResults, self).__init__(model, bnryfit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = bnryfit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n self.model.df_model = self.nnz_params - 1\n self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\n\nclass MultinomialResults(DiscreteResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for multinomial data\", \"extra_attr\" : \"\"}\n def _maybe_convert_ynames_int(self, ynames):\n # see if they're integers\n try:\n for i in ynames:\n if ynames[i] % 1 == 0:\n ynames[i] = str(int(ynames[i]))\n except TypeError:\n pass\n return ynames\n\n def _get_endog_name(self, yname, yname_list, all=False):\n \"\"\"\n If all is False, the first variable name is dropped\n \"\"\"\n model = self.model\n if yname is None:\n yname = model.endog_names\n if yname_list is None:\n ynames = model._ynames_map\n ynames = self._maybe_convert_ynames_int(ynames)\n # use range below to ensure sortedness\n ynames = [ynames[key] for key in range(int(model.J))]\n ynames = ['='.join([yname, name]) for name in ynames]\n if not all:\n yname_list = ynames[1:] # assumes first variable is dropped\n else:\n yname_list = ynames\n return yname, yname_list\n\n def pred_table(self):\n \"\"\"\n Returns the J x J prediction table.\n\n Notes\n -----\n pred_table[i,j] refers to the number of times \"i\" was observed and\n the model predicted \"j\". 
Correct predictions are along the diagonal.\n \"\"\"\n ju = self.model.J - 1 # highest index\n # these are the actual, predicted indices\n #idx = lzip(self.model.endog, self.predict().argmax(1))\n bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju]))\n return np.histogram2d(self.model.endog, self.predict().argmax(1),\n bins=bins)[0]\n\n @cache_readonly\n def bse(self):\n bse = np.sqrt(np.diag(self.cov_params()))\n return bse.reshape(self.params.shape, order='F')\n\n @cache_readonly\n def aic(self):\n return -2*(self.llf - (self.df_model+self.model.J-1))\n\n @cache_readonly\n def bic(self):\n return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1)\n\n def conf_int(self, alpha=.05, cols=None):\n confint = super(DiscreteResults, self).conf_int(alpha=alpha,\n cols=cols)\n return confint.transpose(2,0,1)\n\n def margeff(self):\n raise NotImplementedError(\"Use get_margeff instead\")\n\n @cache_readonly\n def resid_misclassified(self):\n \"\"\"\n Residuals indicating which observations are misclassified.\n\n Notes\n -----\n The residuals for the multinomial model are defined as\n\n .. math:: argmax(y_i) \\\\neq argmax(p_i)\n\n where :math:`argmax(y_i)` is the index of the category for the\n endogenous variable and :math:`argmax(p_i)` is the index of the\n predicted probabilities for each category. That is, the residual\n is a binary indicator that is 0 if the category with the highest\n predicted probability is the same as that of the observed variable\n and 1 otherwise.\n \"\"\"\n # it's 0 or 1 - 0 for correct prediction and 1 for a missed one\n return (self.model.wendog.argmax(1) !=\n self.predict().argmax(1)).astype(float)\n\n def summary2(self, alpha=0.05, float_format=\"%.4f\"):\n \"\"\"Experimental function to summarize regression results\n\n Parameters\n -----------\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary\n results\n\n \"\"\"\n\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_dict(summary2.summary_model(self))\n # One data frame per value of endog\n eqn = self.params.shape[1]\n confint = self.conf_int(alpha)\n for i in range(eqn):\n coefs = summary2.summary_params(self, alpha, self.params[:,i],\n self.bse[:,i], self.tvalues[:,i], self.pvalues[:,i],\n confint[i])\n # Header must show value of endog\n level_str = self.model.endog_names + ' = ' + str(i)\n coefs[level_str] = coefs.index\n coefs = coefs.ix[:,[-1,0,1,2,3,4,5]]\n smry.add_df(coefs, index=False, header=True, float_format=float_format)\n smry.add_title(results=self)\n return smry\n\n\nclass L1MultinomialResults(MultinomialResults):\n __doc__ = _discrete_results_docs % {\"one_line_description\" :\n \"A results class for multinomial data fit by l1 regularization\",\n \"extra_attr\" : _l1_results_attr}\n def __init__(self, model, mlefit):\n super(L1MultinomialResults, self).__init__(model, mlefit)\n # self.trimmed is a boolean array with T/F telling whether or not that\n # entry in params has been set zero'd out.\n self.trimmed = mlefit.mle_retvals['trimmed']\n self.nnz_params = (self.trimmed == False).sum()\n\n #Note: J-1 constants\n self.model.df_model = self.nnz_params - (self.model.J - 1)\n self.model.df_resid = float(self.model.endog.shape[0] 
- self.nnz_params)\n self.df_model = self.model.df_model\n self.df_resid = self.model.df_resid\n\n\n#### Results Wrappers ####\n\nclass OrderedResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(OrderedResultsWrapper, OrderedResults)\n\nclass CountResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(CountResultsWrapper, CountResults)\n\nclass NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(NegativeBinomialResultsWrapper,\n NegativeBinomialResults)\n\nclass PoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\n #_methods = {\n # \"predict_prob\" : \"rows\",\n # }\n #_wrap_methods = lm.wrap.union_dicts(\n # lm.RegressionResultsWrapper._wrap_methods,\n # _methods)\nwrap.populate_wrapper(PoissonResultsWrapper, PoissonResults)\n\nclass L1CountResultsWrapper(lm.RegressionResultsWrapper):\n pass\n\nclass L1PoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\n #_methods = {\n # \"predict_prob\" : \"rows\",\n # }\n #_wrap_methods = lm.wrap.union_dicts(\n # lm.RegressionResultsWrapper._wrap_methods,\n # _methods)\nwrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults)\n\nclass L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1NegativeBinomialResultsWrapper,\n L1NegativeBinomialResults)\n\nclass BinaryResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\"resid_dev\" : \"rows\",\n \"resid_generalized\" : \"rows\",\n \"resid_pearson\" : \"rows\",\n \"resid_response\" : \"rows\"\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(BinaryResultsWrapper, BinaryResults)\n\nclass L1BinaryResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults)\n\nclass MultinomialResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\"resid_misclassified\" : \"rows\"}\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults)\n\nclass L1MultinomialResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults)\n\n\nif __name__==\"__main__\":\n import numpy as np\n import statsmodels.api as sm\n# Scratch work for negative binomial models\n# dvisits was written using an R package, I can provide the dataset\n# on request until the copyright is cleared up\n#TODO: request permission to use dvisits\n data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True)\n# note that this has missing values for Accident\n endog = data2['doctorco']\n exog = data2[['sex','age','agesq','income','levyplus','freepoor',\n 'freerepa','illness','actdays','hscore','chcond1',\n 'chcond2']].view(float, np.ndarray).reshape(len(data2),-1)\n exog = sm.add_constant(exog, prepend=True)\n poisson_mod = Poisson(endog, exog)\n poisson_res = poisson_mod.fit()\n# nb2_mod = NegBinTwo(endog, exog)\n# nb2_res = nb2_mod.fit()\n# solvers hang (with no error and no maxiter warn...)\n# haven't derived hessian (though it will be block diagonal) to check\n# newton, note that Lawless (1987) has the derivations\n# appear to be something wrong with the score?\n# according to Lawless, traditionally the likelihood is maximized wrt to B\n# and a gridsearch on a to determin ahat?\n# or the Breslow approach, which is 2 step iterative.\n nb2_params = [-2.190,.217,-.216,.609,-.142,.118,-.497,.145,.214,.144,\n .038,.099,.190,1.077] # alpha is 
last\n # taken from Cameron and Trivedi\n# the below is from Cameron and Trivedi as well\n# endog2 = np.array(endog>=1, dtype=float)\n# skipped for now, binary poisson results look off?\n data = sm.datasets.randhie.load()\n nbreg = NegativeBinomial\n mod = nbreg(data.endog, data.exog.view((float,9)))\n#FROM STATA:\n params = np.asarray([-.05654133, -.21214282, .0878311, -.02991813, .22903632,\n .06210226, .06799715, .08407035, .18532336])\n bse = [0.0062541, 0.0231818, 0.0036942, 0.0034796, 0.0305176, 0.0012397,\n 0.0198008, 0.0368707, 0.0766506]\n lnalpha = .31221786\n mod.loglike(np.r_[params,np.exp(lnalpha)])\n poiss_res = Poisson(data.endog, data.exog.view((float,9))).fit()\n func = lambda x: -mod.loglike(x)\n grad = lambda x: -mod.score(x)\n from scipy import optimize\n# res1 = optimize.fmin_l_bfgs_b(func, np.r_[poiss_res.params,.1],\n# approx_grad=True)\n res1 = optimize.fmin_bfgs(func, np.r_[poiss_res.params,.1], fprime=grad)\n from statsmodels.tools.numdiff import approx_hess_cs\n# np.sqrt(np.diag(-np.linalg.inv(approx_hess_cs(np.r_[params,lnalpha], mod.loglike))))\n#NOTE: this is the hessian in terms of alpha _not_ lnalpha\n hess_arr = mod.hessian(res1)\n",
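The entry above ends the count-model file (Poisson, NegativeBinomial and their results/wrapper classes). As a bridge before the next entry, here is a minimal usage sketch, not taken from the source file: it assumes only the public statsmodels API (sm.Poisson, sm.NegativeBinomial with loglike_method='nb2') and uses simulated data with illustrative coefficients, showing how the estimators defined above are typically fit and how the ancillary parameter alpha is reported.

# Minimal, hypothetical usage sketch for the count models defined above
# (simulated data; names and values are illustrative, not from the source file).
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 500
x = sm.add_constant(rng.normal(size=(n, 2)))        # design matrix with intercept
mu = np.exp(x @ np.array([0.5, 0.3, -0.2]))         # log-linear mean
y = rng.poisson(mu)                                 # count outcome

# Poisson fit; NegativeBinomial.fit above uses a Poisson fit as its starting values.
poisson_res = sm.Poisson(y, x).fit(disp=0)

# NB2 fit: the last element of params is alpha. With the default 'bfgs' method the
# model optimizes lnalpha internally (_transparams) and back-transforms to alpha.
nb2_res = sm.NegativeBinomial(y, x, loglike_method='nb2').fit(disp=0)
print(poisson_res.params)
print(nb2_res.params[:-1], nb2_res.params[-1])      # slope coefficients, alpha
print(nb2_res.lnalpha, nb2_res.lnalpha_std_err)     # exposed by NegativeBinomialResults

The lnalpha and lnalpha_std_err attributes come from the NegativeBinomialResults class shown above; everything else in the sketch is standard statsmodels usage and is intended only as orientation for reading the next entry.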
"\"\"\"\nGeneralized linear models currently supports estimation using the one-parameter\nexponential families\n\nReferences\n----------\nGill, Jeff. 2000. Generalized Linear Models: A Unified Approach.\n SAGE QASS Series.\n\nGreen, PJ. 1984. \"Iteratively reweighted least squares for maximum\n likelihood estimation, and some robust and resistant alternatives.\"\n Journal of the Royal Statistical Society, Series B, 46, 149-192.\n\nHardin, J.W. and Hilbe, J.M. 2007. \"Generalized Linear Models and\n Extensions.\" 2nd ed. Stata Press, College Station, TX.\n\nMcCullagh, P. and Nelder, J.A. 1989. \"Generalized Linear Models.\" 2nd ed.\n Chapman & Hall, Boca Rotan.\n\"\"\"\n\nimport numpy as np\nfrom . import families\nfrom statsmodels.tools.decorators import cache_readonly, resettable_cache\n\nimport statsmodels.base.model as base\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.compat.numpy import np_matrix_rank\n\nfrom statsmodels.graphics._regressionplots_doc import (\n _plot_added_variable_doc,\n _plot_partial_residuals_doc,\n _plot_ceres_residuals_doc)\n\n# need import in module instead of lazily to copy `__doc__`\nfrom . import _prediction as pred\n\nfrom statsmodels.tools.sm_exceptions import (PerfectSeparationError,\n DomainWarning)\n\n__all__ = ['GLM']\n\n\ndef _check_convergence(criterion, iteration, atol, rtol):\n return np.allclose(criterion[iteration], criterion[iteration + 1],\n atol=atol, rtol=rtol)\n\n\nclass GLM(base.LikelihoodModel):\n __doc__ = \"\"\"\n Generalized Linear Models class\n\n GLM inherits from statsmodels.base.model.LikelihoodModel\n\n Parameters\n -----------\n endog : array-like\n 1d array of endogenous response variable. This array can be 1d or 2d.\n Binomial family models accept a 2d array with two columns. If\n supplied, each observation is expected to be [success, failure].\n exog : array-like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user (models specified using a formula\n include an intercept by default). See `statsmodels.tools.add_constant`.\n family : family class instance\n The default is Gaussian. To specify the binomial distribution\n family = sm.family.Binomial()\n Each family can take a link instance as an argument. See\n statsmodels.family.family for more information.\n offset : array-like or None\n An offset to be included in the model. If provided, must be\n an array whose length is the number of rows in exog.\n exposure : array-like or None\n Log(exposure) will be added to the linear prediction in the model. Exposure\n is only valid if the log link is used. If provided, it must be an array\n with the same length as endog.\n freq_weights : array-like\n 1d array of frequency weights. The default is None. 
If None is selected\n or a blank value, then the algorithm will replace with an array of 1's\n with length equal to the endog.\n WARNING: Using weights is not verified yet for all possible options\n and results, see Notes.\n %(extra_params)s\n\n Attributes\n -----------\n df_model : float\n `p` - 1, where `p` is the number of regressors including the intercept.\n df_resid : float\n The number of observation `n` minus the number of regressors `p`.\n endog : array\n See Parameters.\n exog : array\n See Parameters.\n family : family class instance\n A pointer to the distribution family of the model.\n freq_weights : array\n See Parameters.\n mu : array\n The estimated mean response of the transformed variable.\n n_trials : array\n See Parameters.\n normalized_cov_params : array\n `p` x `p` normalized covariance of the design / exogenous data.\n pinv_wexog : array\n For GLM this is just the pseudo inverse of the original design.\n scale : float\n The estimate of the scale / dispersion. Available after fit is called.\n scaletype : str\n The scaling used for fitting the model. Available after fit is called.\n weights : array\n The value of the weights after the last iteration of fit.\n\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.scotland.load()\n >>> data.exog = sm.add_constant(data.exog)\n\n Instantiate a gamma family model with the default link function.\n\n >>> gamma_model = sm.GLM(data.endog, data.exog,\n ... family=sm.families.Gamma())\n\n >>> gamma_results = gamma_model.fit()\n >>> gamma_results.params\n array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,\n -0.00000015, -0.00051868, -0.00000243])\n >>> gamma_results.scale\n 0.0035842831734919055\n >>> gamma_results.deviance\n 0.087388516416999198\n >>> gamma_results.pearson_chi2\n 0.086022796163805704\n >>> gamma_results.llf\n -83.017202161073527\n\n See also\n --------\n statsmodels.genmod.families.family.Family\n :ref:`families`\n :ref:`links`\n\n Notes\n -----\n Only the following combinations make sense for family and link ::\n\n + ident log logit probit cloglog pow opow nbinom loglog logc\n Gaussian | x x x\n inv Gaussian | x x x\n binomial | x x x x x x x x x\n Poission | x x x\n neg binomial | x x x x\n gamma | x x x\n\n Not all of these link functions are currently available.\n\n Endog and exog are references so that if the data they refer to are already\n arrays and these arrays are changed, endog and exog will change.\n\n Using frequency weights: Frequency weights produce the same results as repeating\n observations by the frequencies (if those are integers). This is verified for all\n basic results with nonrobust or heteroscedasticity robust ``cov_type``. Other\n robust covariance types have not yet been verified, and at least the small sample\n correction is currently not based on the correct total frequency count.\n It has not yet been decided whether all the different types of residuals will be\n based on weighted residuals. Currently, residuals are not weighted.\n\n\n **Attributes**\n\n df_model : float\n Model degrees of freedom is equal to p - 1, where p is the number\n of regressors. Note that the intercept is not reported as a\n degree of freedom.\n df_resid : float\n Residual degrees of freedom is equal to the number of observation n\n minus the number of regressors p.\n endog : array\n See above. 
Note that `endog` is a reference to the data so that if\n data is already an array and it is changed, then `endog` changes\n as well.\n exposure : array-like\n Include ln(exposure) in model with coefficient constrained to 1. Can\n only be used if the link is the logarithm function.\n exog : array\n See above. Note that `exog` is a reference to the data so that if\n data is already an array and it is changed, then `exog` changes\n as well.\n freq_weights : array\n See above. Note that `freq_weights` is a reference to the data so that\n if data i already an array and it is changed, then `freq_weights`\n changes as well.\n iteration : int\n The number of iterations that fit has run. Initialized at 0.\n family : family class instance\n The distribution family of the model. Can be any family in\n statsmodels.families. Default is Gaussian.\n mu : array\n The mean response of the transformed variable. `mu` is the value of\n the inverse of the link function at lin_pred, where lin_pred is the\n linear predicted value of the WLS fit of the transformed variable.\n `mu` is only available after fit is called. See\n statsmodels.families.family.fitted of the distribution family for more\n information.\n n_trials : array\n See above. Note that `n_trials` is a reference to the data so that if\n data is already an array and it is changed, then `n_trials` changes\n as well. `n_trials` is the number of binomial trials and only available\n with that distribution. See statsmodels.families.Binomial for more\n information.\n normalized_cov_params : array\n The p x p normalized covariance of the design / exogenous data.\n This is approximately equal to (X.T X)^(-1)\n offset : array-like\n Include offset in model with coefficient constrained to 1.\n pinv_wexog : array\n The pseudoinverse of the design / exogenous data array. Note that\n GLM has no whiten method, so this is just the pseudo inverse of the\n design.\n The pseudoinverse is approximately equal to (X.T X)^(-1)X.T\n scale : float\n The estimate of the scale / dispersion of the model fit. Only\n available after fit is called. See GLM.fit and GLM.estimate_scale\n for more information.\n scaletype : str\n The scaling used for fitting the model. This is only available after\n fit is called. The default is None. See GLM.fit for more information.\n weights : array\n The value of the weights after the last iteration of fit. Only\n available after fit is called. 
See statsmodels.families.family for\n the specific distribution weighting functions.\n \"\"\" % {'extra_params' : base._missing_param_doc}\n\n def __init__(self, endog, exog, family=None, offset=None,\n exposure=None, freq_weights=None, missing='none', **kwargs):\n\n if (family is not None) and not isinstance(family.link, tuple(family.safe_links)):\n import warnings\n warnings.warn(\"The %s link function does not respect the domain of the %s family.\" %\n (family.link.__class__.__name__, family.__class__.__name__),\n DomainWarning)\n\n if exposure is not None:\n exposure = np.log(exposure)\n if offset is not None: # this should probably be done upstream\n offset = np.asarray(offset)\n\n self.freq_weights = freq_weights\n\n super(GLM, self).__init__(endog, exog, missing=missing,\n offset=offset, exposure=exposure,\n freq_weights=freq_weights, **kwargs)\n self._check_inputs(family, self.offset, self.exposure, self.endog,\n self.freq_weights)\n if offset is None:\n delattr(self, 'offset')\n if exposure is None:\n delattr(self, 'exposure')\n\n self.nobs = self.endog.shape[0]\n\n #things to remove_data\n self._data_attr.extend(['weights', 'pinv_wexog', 'mu', 'freq_weights',\n '_offset_exposure', 'n_trials'])\n # register kwds for __init__, offset and exposure are added by super\n self._init_keys.append('family')\n\n self._setup_binomial()\n\n # Construct a combined offset/exposure term. Note that\n # exposure has already been logged if present.\n offset_exposure = 0.\n if hasattr(self, 'offset'):\n offset_exposure = self.offset\n if hasattr(self, 'exposure'):\n offset_exposure = offset_exposure + self.exposure\n self._offset_exposure = offset_exposure\n\n self.scaletype = None\n\n\n def initialize(self):\n \"\"\"\n Initialize a generalized linear model.\n \"\"\"\n # TODO: intended for public use?\n self.history = {'fittedvalues' : [],\n 'params' : [np.inf],\n 'deviance' : [np.inf]}\n\n self.pinv_wexog = np.linalg.pinv(self.exog)\n self.normalized_cov_params = np.dot(self.pinv_wexog,\n np.transpose(self.pinv_wexog))\n\n self.df_model = np_matrix_rank(self.exog) - 1\n\n\n if (self.freq_weights is not None) and \\\n (self.freq_weights.shape[0] == self.endog.shape[0]):\n self.wnobs = self.freq_weights.sum()\n self.df_resid = self.wnobs - self.df_model - 1\n else:\n self.wnobs = self.exog.shape[0]\n self.df_resid = self.exog.shape[0] - self.df_model - 1\n\n def _check_inputs(self, family, offset, exposure, endog, freq_weights):\n\n # Default family is Gaussian\n if family is None:\n family = families.Gaussian()\n self.family = family\n\n if exposure is not None:\n if not isinstance(self.family.link, families.links.Log):\n raise ValueError(\"exposure can only be used with the log \"\n \"link function\")\n elif exposure.shape[0] != endog.shape[0]:\n raise ValueError(\"exposure is not the same length as endog\")\n\n if offset is not None:\n if offset.shape[0] != endog.shape[0]:\n raise ValueError(\"offset is not the same length as endog\")\n\n if freq_weights is not None:\n if freq_weights.shape[0] != endog.shape[0]:\n raise ValueError(\"freq weights not the same length as endog\")\n if len(freq_weights.shape) > 1:\n raise ValueError(\"freq weights has too many dimensions\")\n\n # internal flag to store whether freq_weights were not None\n self._has_freq_weights = (self.freq_weights is not None)\n if self.freq_weights is None:\n self.freq_weights = np.ones((endog.shape[0]))\n # TODO: check do we want to keep None as sentinel for freq_weights\n\n if np.shape(self.freq_weights) == () and 
self.freq_weights > 1:\n self.freq_weights = (self.freq_weights *\n np.ones((endog.shape[0])))\n\n def _get_init_kwds(self):\n # this is a temporary fixup because exposure has been transformed\n # see #1609, copied from discrete_model.CountModel\n kwds = super(GLM, self)._get_init_kwds()\n if 'exposure' in kwds and kwds['exposure'] is not None:\n kwds['exposure'] = np.exp(kwds['exposure'])\n return kwds\n\n def loglike_mu(self, mu, scale=1.):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n return self.family.loglike(mu, self.endog, self.exog,\n self.freq_weights, scale)\n\n def loglike(self, params, scale=None):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n lin_pred = np.dot(self.exog, params) + self._offset_exposure\n expval = self.family.link.inverse(lin_pred)\n if scale is None:\n scale = self.estimate_scale(expval)\n llf = self.family.loglike(self.endog, expval, self.freq_weights,\n scale)\n return llf\n\n def score_obs(self, params, scale=None):\n \"\"\"score first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n\n \"\"\"\n\n score_factor = self.score_factor(params, scale=scale)\n return score_factor[:, None] * self.exog\n\n\n def score(self, params, scale=None):\n \"\"\"score, first derivative of the loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score : ndarray_1d\n The first derivative of the loglikelihood function calculated as\n the sum of `score_obs`\n\n \"\"\"\n return self.score_obs(params, scale=scale).sum(0)\n\n\n def score_factor(self, params, scale=None):\n \"\"\"weights for score for each observation\n\n This can be considered as score residuals.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_factor : ndarray_1d\n A 1d weight vector used in the calculation of the score_obs.\n The score_obs are obtained by `score_factor[:, None] * exog`\n\n \"\"\"\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n score_factor = (self.endog - mu) / self.family.link.deriv(mu)\n score_factor /= self.family.variance(mu)\n score_factor *= self.freq_weights\n\n if not scale == 1:\n score_factor /= scale\n\n return score_factor\n\n\n def hessian_factor(self, params, scale=None, observed=True):\n \"\"\"Weights for calculating Hessian\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, 
then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned. If false then the\n expected information matrix is returned.\n\n Returns\n -------\n hessian_factor : ndarray, 1d\n A 1d weight vector used in the calculation of the Hessian.\n The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`\n \"\"\"\n\n # calculating eim_factor\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n eim_factor = 1 / (self.family.link.deriv(mu)**2 *\n self.family.variance(mu))\n eim_factor *= self.freq_weights * self.n_trials\n\n if not observed:\n if not scale == 1:\n eim_factor /= scale\n return eim_factor\n\n # calculating oim_factor, eim_factor is with scale=1\n\n score_factor = self.score_factor(params, scale=1.)\n if eim_factor.ndim > 1 or score_factor.ndim > 1:\n raise RuntimeError('something wrong')\n\n tmp = self.family.variance(mu) * self.family.link.deriv2(mu)\n tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)\n\n tmp = score_factor * eim_factor * tmp\n # correct for duplicatee freq_weights in oim_factor and score_factor\n tmp /= self.freq_weights\n oim_factor = eim_factor * (1 + tmp)\n\n if tmp.ndim > 1:\n raise RuntimeError('something wrong')\n\n if not scale == 1:\n oim_factor /= scale\n\n return oim_factor\n\n\n def hessian(self, params, scale=None, observed=True):\n \"\"\"Hessian, second derivative of loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned. If false then the\n expected information matrix is returned.\n\n Returns\n -------\n hessian : ndarray\n Hessian, i.e. observed information, or expected information matrix.\n \"\"\"\n\n factor = self.hessian_factor(params, scale=scale, observed=observed)\n hess = -np.dot(self.exog.T * factor, self.exog)\n return hess\n\n\n def information(self, params, scale=None):\n \"\"\"\n Fisher information matrix.\n \"\"\"\n return self.hessian(params, scale=scale, observed=False)\n\n\n def score_test(self, params_constrained, k_constraints=None,\n exog_extra=None, observed=True):\n \"\"\"score test for restrictions or for omitted variables\n\n The covariance matrix for the score is based on the Hessian, i.e.\n observed information matrix or optionally on the expected information\n matrix..\n\n Parameters\n ----------\n params_constrained : array_like\n estimated parameter of the restricted model. This can be the\n parameter estimate for the current when testing for omitted\n variables.\n k_constraints : int or None\n Number of constraints that were used in the estimation of params\n restricted relative to the number of exog in the model.\n This must be provided if no exog_extra are given. If exog_extra is\n not None, then k_constraints is assumed to be zero if it is None.\n exog_extra : None or array_like\n Explanatory variables that are jointly tested for inclusion in the\n model, i.e. omitted variables.\n observed : bool\n If True, then the observed Hessian is used in calculating the\n covariance matrix of the score. 
If false then the expected\n information matrix is used.\n\n Returns\n -------\n chi2_stat : float\n chisquare statistic for the score test\n p-value : float\n P-value of the score test based on the chisquare distribution.\n df : int\n Degrees of freedom used in the p-value calculation. This is equal\n to the number of constraints.\n\n Notes\n -----\n not yet verified for case with scale not equal to 1.\n\n \"\"\"\n\n if exog_extra is None:\n if k_constraints is None:\n raise ValueError('if exog_extra is None, then k_constraints'\n 'needs to be given')\n\n score = self.score(params_constrained)\n hessian = self.hessian(params_constrained, observed=observed)\n\n else:\n #exog_extra = np.asarray(exog_extra)\n if k_constraints is None:\n k_constraints = 0\n\n ex = np.column_stack((self.exog, exog_extra))\n k_constraints += ex.shape[1] - self.exog.shape[1]\n\n score_factor = self.score_factor(params_constrained)\n score = (score_factor[:, None] * ex).sum(0)\n hessian_factor = self.hessian_factor(params_constrained,\n observed=observed)\n hessian = -np.dot(ex.T * hessian_factor, ex)\n\n\n from scipy import stats\n # TODO check sign, why minus?\n chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))\n pval = stats.chi2.sf(chi2stat, k_constraints)\n # return a stats results instance instead? Contrast?\n return chi2stat, pval, k_constraints\n\n\n def _update_history(self, tmp_result, mu, history):\n \"\"\"\n Helper method to update history during iterative fit.\n \"\"\"\n history['params'].append(tmp_result.params)\n history['deviance'].append(self.family.deviance(self.endog, mu,\n self.freq_weights))\n return history\n\n def estimate_scale(self, mu):\n \"\"\"\n Estimates the dispersion/scale.\n\n Type of scale can be chose in the fit method.\n\n Parameters\n ----------\n mu : array\n mu is the mean response estimate\n\n Returns\n -------\n Estimate of scale\n\n Notes\n -----\n The default scale for Binomial and Poisson families is 1. The default\n for the other families is Pearson's Chi-Square estimate.\n\n See also\n --------\n statsmodels.genmod.generalized_linear_model.GLM.fit for more information\n \"\"\"\n if not self.scaletype:\n if isinstance(self.family, (families.Binomial, families.Poisson)):\n return 1.\n else:\n resid = self.endog - mu\n return ((self.freq_weights * (np.power(resid, 2) /\n self.family.variance(mu))).sum() /\n (self.df_resid))\n\n if isinstance(self.scaletype, float):\n return np.array(self.scaletype)\n\n if isinstance(self.scaletype, str):\n if self.scaletype.lower() == 'x2':\n resid = self.endog - mu\n return ((self.freq_weights * (np.power(resid, 2) /\n self.family.variance(mu))).sum() /\n (self.df_resid))\n elif self.scaletype.lower() == 'dev':\n return (self.family.deviance(self.endog, mu,\n self.freq_weights) /\n (self.df_resid))\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n\n def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):\n \"\"\"\n Tweedie specific function to estimate scale and the variance parameter.\n The variance parameter is also referred to as p, xi, or shape.\n\n Parameters\n ----------\n mu : array-like\n Fitted mean response variable\n method : str, defaults to 'brentq'\n Scipy optimizer used to solve the Pearson equation. 
Only brentq\n currently supported.\n low : float, optional\n Low end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 1.01.\n high : float, optional\n High end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 5.\n\n Returns\n -------\n power : float\n The estimated shape or power\n \"\"\"\n if method == 'brentq':\n from scipy.optimize import brentq\n\n def psi_p(power, mu):\n scale = ((self.freq_weights * (self.endog - mu) ** 2 /\n (mu ** power)).sum() / self.df_resid)\n return (np.sum(self.freq_weights * ((self.endog - mu) ** 2 /\n (scale * (mu ** power)) - 1) *\n np.log(mu)) / self.freq_weights.sum())\n power = brentq(psi_p, low, high, args=(mu))\n else:\n raise NotImplementedError('Only brentq can currently be used')\n return power\n\n def predict(self, params, exog=None, exposure=None, offset=None,\n linear=False):\n \"\"\"\n Return predicted values for a design matrix\n\n Parameters\n ----------\n params : array-like\n Parameters / coefficients of a GLM.\n exog : array-like, optional\n Design / exogenous data. Is exog is None, model exog is used.\n exposure : array-like, optional\n Exposure time values, only can be used with the log link\n function. See notes for details.\n offset : array-like, optional\n Offset values. See notes for details.\n linear : bool\n If True, returns the linear predicted values. If False,\n returns the value of the inverse of the model's link function at\n the linear predicted values.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n Any `exposure` and `offset` provided here take precedence over\n the `exposure` and `offset` used in the model fit. If `exog`\n is passed as an argument here, then any `exposure` and\n `offset` values in the fit will be ignored.\n\n Exposure values must be strictly positive.\n \"\"\"\n\n # Use fit offset if appropriate\n if offset is None and exog is None and hasattr(self, 'offset'):\n offset = self.offset\n elif offset is None:\n offset = 0.\n\n if exposure is not None and not isinstance(self.family.link,\n families.links.Log):\n raise ValueError(\"exposure can only be used with the log link function\")\n\n # Use fit exposure if appropriate\n if exposure is None and exog is None and hasattr(self, 'exposure'):\n # Already logged\n exposure = self.exposure\n elif exposure is None:\n exposure = 0.\n else:\n exposure = np.log(exposure)\n\n if exog is None:\n exog = self.exog\n\n linpred = np.dot(exog, params) + offset + exposure\n if linear:\n return linpred\n else:\n return self.family.fitted(linpred)\n\n def get_distribution(self, params, scale=1, exog=None, exposure=None,\n offset=None):\n \"\"\"\n Returns a random number generator for the predictive distribution.\n\n Parameters\n ----------\n params : array-like\n The model parameters.\n scale : scalar\n The scale parameter.\n exog : array-like\n The predictor variable matrix.\n\n Returns a frozen random number generator object. Use the\n ``rvs`` method to generate random values.\n\n Notes\n -----\n Due to the behavior of ``scipy.stats.distributions objects``,\n the returned random number generator must be called with\n ``gen.rvs(n)`` where ``n`` is the number of observations in\n the data set used to fit the model. 
If any other value is\n used for ``n``, misleading results will be produced.\n \"\"\"\n\n fit = self.predict(params, exog, exposure, offset, linear=False)\n\n import scipy.stats.distributions as dist\n\n if isinstance(self.family, families.Gaussian):\n return dist.norm(loc=fit, scale=np.sqrt(scale))\n\n elif isinstance(self.family, families.Binomial):\n return dist.binom(n=1, p=fit)\n\n elif isinstance(self.family, families.Poisson):\n return dist.poisson(mu=fit)\n\n elif isinstance(self.family, families.Gamma):\n alpha = fit / float(scale)\n return dist.gamma(alpha, scale=scale)\n\n else:\n raise ValueError(\"get_distribution not implemented for %s\" % self.family.name)\n\n def _setup_binomial(self):\n # this checks what kind of data is given for Binomial.\n # family will need a reference to endog if this is to be removed from\n # preprocessing\n self.n_trials = np.ones((self.endog.shape[0])) # For binomial\n if isinstance(self.family, families.Binomial):\n tmp = self.family.initialize(self.endog, self.freq_weights)\n self.endog = tmp[0]\n self.n_trials = tmp[1]\n\n def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,\n full_output=True, disp=False, max_start_irls=3, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family.\n\n Parameters\n ----------\n start_params : array-like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is family-specific and is given by the\n ``family.starting_mu(endog)``. If start_params is given then the\n initial mean will be calculated as ``np.dot(exog, start_params)``.\n maxiter : int, optional\n Default is 100.\n method : string\n Default is 'IRLS' for iteratively reweighted least squares.\n Otherwise gradient optimization is used.\n tol : float\n Convergence tolerance. Default is 1e-8.\n scale : string or float, optional\n `scale` can be 'X2', 'dev', or a float\n The default value is None, which uses `X2` for Gamma, Gaussian,\n and Inverse Gaussian.\n `X2` is Pearson's chi-square divided by `df_resid`.\n The default is 1 for the Binomial and Poisson families.\n `dev` is the deviance divided by df_resid\n cov_type : string\n The type of parameter estimate covariance matrix to compute.\n cov_kwds : dict-like\n Extra arguments for calculating the covariance of the parameter\n estimates.\n use_t : bool\n If True, the Student t-distribution is used for inference.\n full_output : bool, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n Not used if methhod is IRLS.\n disp : bool, optional\n Set to True to print convergence messages. Not used if method is\n IRLS.\n max_start_irls : int\n The number of IRLS iterations used to obtain starting\n values for gradient optimization. Only relevant if\n `method` is set to something other than 'IRLS'.\n\n If IRLS fitting used, the following additional parameters are\n available:\n\n atol : float, optional\n The absolute tolerance criterion that must be satisfied. Defaults\n to ``tol``. Convergence is attained when:\n :math:`rtol * prior + atol > abs(current - prior)`\n rtol : float, optional\n The relative tolerance criterion that must be satisfied. Defaults\n to 0 which means ``rtol`` is not used. 
Convergence is attained\n when:\n :math:`rtol * prior + atol > abs(current - prior)`\n tol_criterion : str, optional\n Defaults to ``'deviance'``. Can optionally be ``'params'``.\n \"\"\"\n self.scaletype = scale\n\n if method.lower() == \"irls\":\n return self._fit_irls(start_params=start_params, maxiter=maxiter,\n tol=tol, scale=scale, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t, **kwargs)\n else:\n return self._fit_gradient(start_params=start_params,\n method=method,\n maxiter=maxiter,\n tol=tol, scale=scale,\n full_output=full_output,\n disp=disp, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t,\n max_start_irls=max_start_irls,\n **kwargs)\n\n def _fit_gradient(self, start_params=None, method=\"newton\",\n maxiter=100, tol=1e-8, full_output=True,\n disp=True, scale=None, cov_type='nonrobust',\n cov_kwds=None, use_t=None, max_start_irls=3,\n **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family iteratively\n using the scipy gradient optimizers.\n \"\"\"\n\n if (max_start_irls > 0) and (start_params is None):\n irls_rslt = self._fit_irls(start_params=start_params, maxiter=max_start_irls,\n tol=tol, scale=scale, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t, **kwargs)\n start_params = irls_rslt.params\n\n rslt = super(GLM, self).fit(start_params=start_params, tol=tol,\n maxiter=maxiter, full_output=full_output,\n method=method, disp=disp, **kwargs)\n\n mu = self.predict(rslt.params)\n scale = self.estimate_scale(mu)\n\n glm_results = GLMResults(self, rslt.params,\n rslt.normalized_cov_params / scale,\n scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n # TODO: iteration count is not always available\n history = {'iteration': 0}\n if full_output:\n glm_results.mle_retvals = rslt.mle_retvals\n if 'iterations' in rslt.mle_retvals:\n history['iteration'] = rslt.mle_retvals['iterations']\n glm_results.method = method\n glm_results.fit_history = history\n\n return GLMResultsWrapper(glm_results)\n\n\n def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None,\n use_t=None, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family using\n iteratively reweighted least squares (IRLS).\n \"\"\"\n atol = kwargs.get('atol')\n rtol = kwargs.get('rtol', 0.)\n tol_criterion = kwargs.get('tol_criterion', 'deviance')\n atol = tol if atol is None else atol\n\n endog = self.endog\n wlsexog = self.exog\n if start_params is None:\n start_params = np.zeros(self.exog.shape[1], np.float)\n mu = self.family.starting_mu(self.endog)\n lin_pred = self.family.predict(mu)\n else:\n lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure\n mu = self.family.fitted(lin_pred)\n dev = self.family.deviance(self.endog, mu, self.freq_weights)\n if np.isnan(dev):\n raise ValueError(\"The first guess on the deviance function \"\n \"returned a nan. 
This could be a boundary \"\n \" problem and should be reported.\")\n\n # first guess on the deviance is assumed to be scaled by 1.\n # params are none to start, so they line up with the deviance\n history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])\n converged = False\n criterion = history[tol_criterion]\n # This special case is used to get the likelihood for a specific\n # params vector.\n if maxiter == 0:\n mu = self.family.fitted(lin_pred)\n self.scale = self.estimate_scale(mu)\n wls_results = lm.RegressionResults(self, start_params, None)\n iteration = 0\n for iteration in range(maxiter):\n self.weights = (self.freq_weights * self.n_trials *\n self.family.weights(mu))\n wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)\n - self._offset_exposure)\n wls_results = lm.WLS(wlsendog, wlsexog, self.weights).fit()\n lin_pred = np.dot(self.exog, wls_results.params) + self._offset_exposure\n mu = self.family.fitted(lin_pred)\n history = self._update_history(wls_results, mu, history)\n self.scale = self.estimate_scale(mu)\n if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):\n msg = \"Perfect separation detected, results not available\"\n raise PerfectSeparationError(msg)\n converged = _check_convergence(criterion, iteration + 1, atol,\n rtol)\n if converged:\n break\n self.mu = mu\n\n glm_results = GLMResults(self, wls_results.params,\n wls_results.normalized_cov_params,\n self.scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n glm_results.method = \"IRLS\"\n history['iteration'] = iteration + 1\n glm_results.fit_history = history\n glm_results.converged = converged\n return GLMResultsWrapper(glm_results)\n\n\n def fit_regularized(self, method=\"elastic_net\", alpha=0.,\n start_params=None, refit=False, **kwargs):\n \"\"\"\n Return a regularized fit to a linear regression model.\n\n Parameters\n ----------\n method :\n Only the `elastic_net` approach is currently implemented.\n alpha : scalar or array-like\n The penalty weight. If a scalar, the same penalty weight\n applies to all variables in the model. If a vector, it\n must have the same length as `params`, and contains a\n penalty weight for each coefficient.\n start_params : array-like\n Starting values for `params`.\n refit : bool\n If True, the model is refit using only the variables that\n have non-zero coefficients in the regularized fit. The\n refitted model is not regularized.\n\n Returns\n -------\n An array, or a GLMResults object of the same type returned by `fit`.\n\n Notes\n -----\n The penalty is the ``elastic net`` penalty, which is a\n combination of L1 and L2 penalties.\n\n The function that is minimized is: \n \n .. math::\n\n -loglike/n + alpha*((1-L1\\_wt)*|params|_2^2/2 + L1\\_wt*|params|_1)\n\n where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.\n\n Post-estimation results are based on the same data used to\n select variables, hence may be subject to overfitting biases.\n\n The elastic_net method uses the following keyword arguments:\n\n maxiter : int\n Maximum number of iterations\n L1_wt : float\n Must be in [0, 1]. 
The L1 penalty has weight L1_wt and the\n L2 penalty has weight 1 - L1_wt.\n cnvrg_tol : float\n Convergence threshold for line searches\n zero_tol : float\n Coefficients below this threshold are treated as zero.\n \"\"\"\n from statsmodels.base.elastic_net import fit_elasticnet\n\n if method != \"elastic_net\":\n raise ValueError(\"method for fit_regularied must be elastic_net\")\n\n defaults = {\"maxiter\" : 50, \"L1_wt\" : 1, \"cnvrg_tol\" : 1e-10,\n \"zero_tol\" : 1e-10}\n defaults.update(kwargs)\n\n result = fit_elasticnet(self, method=method,\n alpha=alpha,\n start_params=start_params,\n refit=refit,\n **defaults)\n\n self.mu = self.predict(result.params)\n self.scale = self.estimate_scale(self.mu)\n\n return result\n\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"fit the model subject to linear equality constraints\n\n The constraints are of the form `R params = q`\n where R is the constraint_matrix and q is the vector of\n constraint_values.\n\n The estimation creates a new model with transformed design matrix,\n exog, and converts the results back to the original parameterization.\n\n\n Parameters\n ----------\n constraints : formula expression or tuple\n If it is a tuple, then the constraint needs to be given by two\n arrays (constraint_matrix, constraint_value), i.e. (R, q).\n Otherwise, the constraints can be given as strings or list of\n strings.\n see t_test for details\n start_params : None or array_like\n starting values for the optimization. `start_params` needs to be\n given in the original parameter space and are internally\n transformed.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the transformed model.\n\n Returns\n -------\n results : Results instance\n\n \"\"\"\n\n from patsy import DesignInfo\n from statsmodels.base._constraints import fit_constrained\n\n # same pattern as in base.LikelihoodModel.t_test\n lc = DesignInfo(self.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n # TODO: add start_params option, need access to tranformation\n # fit_constrained needs to do the transformation\n params, cov, res_constr = fit_constrained(self, R, q,\n start_params=start_params,\n fit_kwds=fit_kwds)\n #create dummy results Instance, TODO: wire up properly\n res = self.fit(start_params=params, maxiter=0) # we get a wrapper back\n res._results.params = params\n res._results.normalized_cov_params = cov\n k_constr = len(q)\n res._results.df_resid += k_constr\n res._results.df_model -= k_constr\n res._results.constraints = lc\n res._results.k_constr = k_constr\n res._results.results_constrained = res_constr\n # TODO: the next is not the best. history should bin in results\n res._results.model.history = res_constr.model.history\n return res\n\n\nclass GLMResults(base.LikelihoodModelResults):\n \"\"\"\n Class to contain GLM results.\n\n GLMResults inherits from statsmodels.LikelihoodModelResults\n\n Parameters\n ----------\n See statsmodels.LikelihoodModelReesults\n\n Returns\n -------\n **Attributes**\n\n aic : float\n Akaike Information Criterion\n -2 * `llf` + 2*(`df_model` + 1)\n bic : float\n Bayes Information Criterion\n `deviance` - `df_resid` * log(`nobs`)\n deviance : float\n See statsmodels.families.family for the distribution-specific deviance\n functions.\n df_model : float\n See GLM.df_model\n df_resid : float\n See GLM.df_resid\n fit_history : dict\n Contains information about the iterations. 
Its keys are `iterations`,\n `deviance` and `params`.\n fittedvalues : array\n Linear predicted values for the fitted model.\n dot(exog, params)\n llf : float\n Value of the loglikelihood function evalued at params.\n See statsmodels.families.family for distribution-specific\n loglikelihoods.\n model : class instance\n Pointer to GLM model instance that called fit.\n mu : array\n See GLM docstring.\n nobs : float\n The number of observations n.\n normalized_cov_params : array\n See GLM docstring\n null_deviance : float\n The value of the deviance function for the model fit with a constant\n as the only regressor.\n params : array\n The coefficients of the fitted model. Note that interpretation\n of the coefficients often depends on the distribution family and the\n data.\n pearson_chi2 : array\n Pearson's Chi-Squared statistic is defined as the sum of the squares\n of the Pearson residuals.\n pinv_wexog : array\n See GLM docstring.\n pvalues : array\n The two-tailed p-values for the parameters.\n resid_anscombe : array\n Anscombe residuals. See statsmodels.families.family for distribution-\n specific Anscombe residuals.\n resid_deviance : array\n Deviance residuals. See statsmodels.families.family for distribution-\n specific deviance residuals.\n resid_pearson : array\n Pearson residuals. The Pearson residuals are defined as\n (`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution\n specific variance function. See statsmodels.families.family and\n statsmodels.families.varfuncs for more information.\n resid_response : array\n Respnose residuals. The response residuals are defined as\n `endog` - `fittedvalues`\n resid_working : array\n Working residuals. The working residuals are defined as\n `resid_response`/link'(`mu`). See statsmodels.family.links for the\n derivatives of the link functions. They are defined analytically.\n scale : float\n The estimate of the scale / dispersion for the model fit.\n See GLM.fit and GLM.estimate_scale for more information.\n stand_errors : array\n The standard errors of the fitted GLM. 
#TODO still named bse\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModelResults\n \"\"\"\n\n def __init__(self, model, params, normalized_cov_params, scale,\n cov_type='nonrobust', cov_kwds=None, use_t=None):\n super(GLMResults, self).__init__(model, params,\n normalized_cov_params=\n normalized_cov_params, scale=scale)\n self.family = model.family\n self._endog = model.endog\n self.nobs = model.endog.shape[0]\n self._freq_weights = model.freq_weights\n if isinstance(self.family, families.Binomial):\n self._n_trials = self.model.n_trials\n else:\n self._n_trials = 1\n self.df_resid = model.df_resid\n self.df_model = model.df_model\n self.pinv_wexog = model.pinv_wexog\n self._cache = resettable_cache()\n # are these intermediate results needed or can we just\n # call the model's attributes?\n\n # for remove data and pickle without large arrays\n self._data_attr.extend(['results_constrained', '_freq_weights'])\n self.data_in_cache = getattr(self, 'data_in_cache', [])\n self.data_in_cache.extend(['null', 'mu'])\n self._data_attr_model = getattr(self, '_data_attr_model', [])\n self._data_attr_model.append('mu')\n\n # robust covariance\n from statsmodels.base.covtype import get_robustcov_results\n if use_t is None:\n self.use_t = False # TODO: class default\n else:\n self.use_t = use_t\n\n # temporary warning\n ct = (cov_type == 'nonrobust') or (cov_type.startswith('HC'))\n if self.model._has_freq_weights and not ct:\n import warnings\n from statsmodels.tools.sm_exceptions import SpecificationWarning\n warnings.warn('cov_type not fully supported with freq_weights',\n SpecificationWarning)\n\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description' : 'Standard Errors assume that the ' +\n 'covariance matrix of the errors is correctly ' +\n 'specified.'}\n\n else:\n if cov_kwds is None:\n cov_kwds = {}\n get_robustcov_results(self, cov_type=cov_type, use_self=True,\n use_t=use_t, **cov_kwds)\n\n @cache_readonly\n def resid_response(self):\n return self._n_trials * (self._endog-self.mu)\n\n @cache_readonly\n def resid_pearson(self):\n return (np.sqrt(self._n_trials) * (self._endog-self.mu) /\n np.sqrt(self.family.variance(self.mu)))\n\n @cache_readonly\n def resid_working(self):\n # Isn't self.resid_response is already adjusted by _n_trials?\n val = (self.resid_response * self.family.link.deriv(self.mu))\n val *= self._n_trials\n return val\n\n @cache_readonly\n def resid_anscombe(self):\n return self.family.resid_anscombe(self._endog, self.fittedvalues)\n\n @cache_readonly\n def resid_deviance(self):\n return self.family.resid_dev(self._endog, self.fittedvalues)\n\n @cache_readonly\n def pearson_chi2(self):\n chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)\n chisq *= self._freq_weights\n chisqsum = np.sum(chisq)\n return chisqsum\n\n\n @cache_readonly\n def fittedvalues(self):\n return self.mu\n\n\n @cache_readonly\n def mu(self):\n return self.model.predict(self.params)\n\n\n @cache_readonly\n def null(self):\n endog = self._endog\n model = self.model\n exog = np.ones((len(endog), 1))\n kwargs = {}\n if hasattr(model, 'offset'):\n kwargs['offset'] = model.offset\n if hasattr(model, 'exposure'):\n kwargs['exposure'] = model.exposure\n if len(kwargs) > 0:\n return GLM(endog, exog, family=self.family, **kwargs).fit().fittedvalues\n else:\n wls_model = lm.WLS(endog, exog,\n weights=self._freq_weights * self._n_trials)\n return wls_model.fit().fittedvalues\n\n @cache_readonly\n def deviance(self):\n return 
self.family.deviance(self._endog, self.mu, self._freq_weights)\n\n @cache_readonly\n def null_deviance(self):\n return self.family.deviance(self._endog, self.null, self._freq_weights)\n\n @cache_readonly\n def llnull(self):\n return self.family.loglike(self._endog, self.null,\n self._freq_weights, scale=self.scale)\n\n @cache_readonly\n def llf(self):\n _modelfamily = self.family\n val = _modelfamily.loglike(self._endog, self.mu,\n self._freq_weights, scale=self.scale)\n return val\n\n @cache_readonly\n def aic(self):\n return -2 * self.llf + 2*(self.df_model+1)\n\n @cache_readonly\n def bic(self):\n return (self.deviance -\n (self.model.wnobs - self.df_model - 1) *\n np.log(self.model.wnobs))\n\n\n def get_prediction(self, exog=None, exposure=None, offset=None,\n transform=True, linear=False,\n row_labels=None):\n\n import statsmodels.regression._prediction as linpred\n\n pred_kwds = {'exposure': exposure, 'offset': offset, 'linear': True}\n\n # two calls to a get_prediction duplicates exog generation if patsy\n res_linpred = linpred.get_prediction(self, exog=exog, transform=transform,\n row_labels=row_labels, pred_kwds=pred_kwds)\n\n pred_kwds['linear'] = False\n res = pred.get_prediction_glm(self, exog=exog, transform=transform,\n row_labels=row_labels,\n linpred=res_linpred,\n link=self.model.family.link,\n pred_kwds=pred_kwds)\n\n return res\n\n\n get_prediction.__doc__ = pred.get_prediction_glm.__doc__\n\n\n def remove_data(self):\n #GLM has alias/reference in result instance\n self._data_attr.extend([i for i in self.model._data_attr\n if not '_data.' in i])\n super(self.__class__, self).remove_data()\n\n #TODO: what are these in results?\n self._endog = None\n self._freq_weights = None\n self._n_trials = None\n\n remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__\n\n def plot_added_variable(self, focus_exog, resid_type=None,\n use_glm_weights=True, fit_kwargs=None,\n ax=None):\n # Docstring attached below\n\n from statsmodels.graphics.regressionplots import plot_added_variable\n\n fig = plot_added_variable(self, focus_exog,\n resid_type=resid_type,\n use_glm_weights=use_glm_weights,\n fit_kwargs=fit_kwargs, ax=ax)\n\n return fig\n\n plot_added_variable.__doc__ = _plot_added_variable_doc % {\n 'extra_params_doc' : ''}\n\n def plot_partial_residuals(self, focus_exog, ax=None):\n # Docstring attached below\n\n from statsmodels.graphics.regressionplots import plot_partial_residuals\n\n return plot_partial_residuals(self, focus_exog, ax=ax)\n\n plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {\n 'extra_params_doc' : ''}\n\n def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,\n ax=None):\n # Docstring attached below\n\n from statsmodels.graphics.regressionplots import plot_ceres_residuals\n\n return plot_ceres_residuals(self, focus_exog, frac,\n cond_means=cond_means, ax=ax)\n\n plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {\n 'extra_params_doc' : ''}\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"\n Summarize the Regression Results\n\n Parameters\n -----------\n yname : string, optional\n Default is `y`\n xname : list of strings, optional\n Default is `var_##` for ## in p the number of regressors\n title : string, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Model Family:', [self.family.__class__.__name__]),\n ('Link Function:', [self.family.link.__class__.__name__]),\n ('Method:', [self.method]),\n ('Date:', None),\n ('Time:', None),\n ('No. Iterations:',\n [\"%d\" % self.fit_history['iteration']]),\n ]\n\n top_right = [('No. Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None),\n ('Scale:', [self.scale]),\n ('Log-Likelihood:', None),\n ('Deviance:', [\"%#8.5g\" % self.deviance]),\n ('Pearson chi2:', [\"%#6.3g\" % self.pearson_chi2])\n ]\n\n if title is None:\n title = \"Generalized Linear Model Regression Results\"\n\n #create summary tables\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right, # [],\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n if hasattr(self, 'constraints'):\n smry.add_extra_txt(['Model has been estimated subject to linear '\n 'equality constraints.'])\n\n #diagnostic table is not used yet:\n #smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n # yname=yname, xname=xname,\n # title=\"\")\n\n return smry\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n\n \"\"\"Experimental summary for regression Results\n\n Parameters\n -----------\n yname : string\n Name of the dependent variable (optional)\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary\n results\n\n \"\"\"\n self.method = 'IRLS'\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n if hasattr(self, 'constraints'):\n smry.add_text('Model has been estimated subject to linear '\n 'equality constraints.')\n\n return smry\n\n\nclass GLMResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\n 'resid_anscombe' : 'rows',\n 'resid_deviance' : 'rows',\n 'resid_pearson' : 'rows',\n 'resid_response' : 'rows',\n 'resid_working' : 'rows'\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(GLMResultsWrapper, GLMResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n data = sm.datasets.longley.load()\n #data.exog = add_constant(data.exog)\n GLMmod = GLM(data.endog, data.exog).fit()\n GLMT = GLMmod.summary(returns='tables')\n## GLMT[0].extend_right(GLMT[1])\n## print(GLMT[0])\n## print(GLMT[2])\n GLMTp = GLMmod.summary(title='Test GLM')\n\n \"\"\"\nFrom Stata\n. 
webuse beetle\n. glm r i.beetle ldose, family(binomial n) link(cloglog)\n\nIteration 0: log likelihood = -79.012269\nIteration 1: log likelihood = -76.94951\nIteration 2: log likelihood = -76.945645\nIteration 3: log likelihood = -76.945645\n\nGeneralized linear models No. of obs = 24\nOptimization : ML Residual df = 20\n Scale parameter = 1\nDeviance = 73.76505595 (1/df) Deviance = 3.688253\nPearson = 71.8901173 (1/df) Pearson = 3.594506\n\nVariance function: V(u) = u*(1-u/n) [Binomial]\nLink function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]\n\n AIC = 6.74547\nLog likelihood = -76.94564525 BIC = 10.20398\n\n------------------------------------------------------------------------------\n | OIM\n r | Coef. Std. Err. z P>|z| [95% Conf. Interval]\n-------------+----------------------------------------------------------------\n beetle |\n 2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783\n 3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867\n |\n ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658\n _cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116\n------------------------------------------------------------------------------\n\"\"\"\n\n #NOTE: wfs dataset has been removed due to a licensing issue\n # example of using offset\n #data = sm.datasets.wfs.load()\n # get offset\n #offset = np.log(data.exog[:,-1])\n #exog = data.exog[:,:-1]\n\n # convert dur to dummy\n #exog = sm.tools.categorical(exog, col=0, drop=True)\n # drop reference category\n # convert res to dummy\n #exog = sm.tools.categorical(exog, col=0, drop=True)\n # convert edu to dummy\n #exog = sm.tools.categorical(exog, col=0, drop=True)\n # drop reference categories and add intercept\n #exog = sm.add_constant(exog[:,[1,2,3,4,5,7,8,10,11,12]])\n\n #endog = np.round(data.endog)\n #mod = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()\n\n #res1 = GLM(endog, exog, family=sm.families.Poisson(),\n # offset=offset).fit(tol=1e-12, maxiter=250)\n #exposuremod = GLM(endog, exog, family=sm.families.Poisson(),\n # exposure = data.exog[:,-1]).fit(tol=1e-12,\n # maxiter=250)\n #assert(np.all(res1.params == exposuremod.params))\n",
"\"\"\"\nRobust linear models with support for the M-estimators listed under\n:ref:`norms <norms>`.\n\nReferences\n----------\nPJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York. 1981.\n\nPJ Huber. 1973, 'The 1972 Wald Memorial Lectures: Robust Regression:\n Asymptotics, Conjectures, and Monte Carlo.' The Annals of Statistics,\n 1.5, 799-821.\n\nR Venables, B Ripley. 'Modern Applied Statistics in S' Springer, New York,\n 2002.\n\"\"\"\nfrom statsmodels.compat.python import string_types\nimport numpy as np\nimport scipy.stats as stats\n\nfrom statsmodels.tools.decorators import (cache_readonly,\n resettable_cache)\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.robust.norms as norms\nimport statsmodels.robust.scale as scale\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.compat.numpy import np_matrix_rank\n\n__all__ = ['RLM']\n\ndef _check_convergence(criterion, iteration, tol, maxiter):\n return not (np.any(np.fabs(criterion[iteration] -\n criterion[iteration-1]) > tol) and iteration < maxiter)\n\nclass RLM(base.LikelihoodModel):\n __doc__ = \"\"\"\n Robust Linear Models\n\n Estimate a robust linear model via iteratively reweighted least squares\n given a robust criterion estimator.\n\n %(params)s\n M : statsmodels.robust.norms.RobustNorm, optional\n The robust criterion function for downweighting outliers.\n The current options are LeastSquares, HuberT, RamsayE, AndrewWave,\n TrimmedMean, Hampel, and TukeyBiweight. The default is HuberT().\n See statsmodels.robust.norms for more information.\n %(extra_params)s\n\n Notes\n -----\n\n **Attributes**\n\n df_model : float\n The degrees of freedom of the model. The number of regressors p less\n one for the intercept. Note that the reported model degrees\n of freedom does not count the intercept as a regressor, though\n the model is assumed to have an intercept.\n df_resid : float\n The residual degrees of freedom. The number of observations n\n less the number of regressors p. Note that here p does include\n the intercept as using a degree of freedom.\n endog : array\n See above. Note that endog is a reference to the data so that if\n data is already an array and it is changed, then `endog` changes\n as well.\n exog : array\n See above. Note that endog is a reference to the data so that if\n data is already an array and it is changed, then `endog` changes\n as well.\n M : statsmodels.robust.norms.RobustNorm\n See above. Robust estimator instance instantiated.\n nobs : float\n The number of observations n\n pinv_wexog : array\n The pseudoinverse of the design / exogenous data array. 
Note that\n RLM has no whiten method, so this is just the pseudo inverse of the\n design.\n normalized_cov_params : array\n The p x p normalized covariance of the design / exogenous data.\n This is approximately equal to (X.T X)^(-1)\n\n\n Examples\n ---------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.stackloss.load()\n >>> data.exog = sm.add_constant(data.exog)\n >>> rlm_model = sm.RLM(data.endog, data.exog, \\\n M=sm.robust.norms.HuberT())\n\n >>> rlm_results = rlm_model.fit()\n >>> rlm_results.params\n array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])\n >>> rlm_results.bse\n array([ 0.11100521, 0.30293016, 0.12864961, 9.79189854])\n >>> rlm_results_HC2 = rlm_model.fit(cov=\"H2\")\n >>> rlm_results_HC2.params\n array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])\n >>> rlm_results_HC2.bse\n array([ 0.11945975, 0.32235497, 0.11796313, 9.08950419])\n >>> mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.Hampel())\n >>> rlm_hamp_hub = mod.fit(scale_est=sm.robust.scale.HuberScale())\n >>> rlm_hamp_hub.params\n array([ 0.73175452, 1.25082038, -0.14794399, -40.27122257])\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc}\n\n def __init__(self, endog, exog, M=norms.HuberT(), missing='none',\n **kwargs):\n self.M = M\n super(base.LikelihoodModel, self).__init__(endog, exog,\n missing=missing, **kwargs)\n self._initialize()\n #things to remove_data\n self._data_attr.extend(['weights', 'pinv_wexog'])\n\n def _initialize(self):\n \"\"\"\n Initializes the model for the IRLS fit.\n\n Resets the history and number of iterations.\n \"\"\"\n self.pinv_wexog = np.linalg.pinv(self.exog)\n self.normalized_cov_params = np.dot(self.pinv_wexog,\n np.transpose(self.pinv_wexog))\n self.df_resid = (np.float(self.exog.shape[0] -\n np_matrix_rank(self.exog)))\n self.df_model = np.float(np_matrix_rank(self.exog)-1)\n self.nobs = float(self.endog.shape[0])\n\n def score(self, params):\n raise NotImplementedError\n\n def information(self, params):\n raise NotImplementedError\n\n def predict(self, params, exog=None):\n \"\"\"\n Return linear predicted values from a design matrix.\n\n Parameters\n ----------\n params : array-like, optional after fit has been called\n Parameters of a linear model\n exog : array-like, optional.\n Design / exogenous data. 
Model exog is used if None.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n If the model as not yet been fit, params is not optional.\n \"\"\"\n #copied from linear_model\n if exog is None:\n exog = self.exog\n return np.dot(exog, params)\n\n def loglike(self, params):\n raise NotImplementedError\n\n def deviance(self, tmp_results):\n \"\"\"\n Returns the (unnormalized) log-likelihood from the M estimator.\n \"\"\"\n return self.M((self.endog - tmp_results.fittedvalues) /\n tmp_results.scale).sum()\n\n def _update_history(self, tmp_results, history, conv):\n history['params'].append(tmp_results.params)\n history['scale'].append(tmp_results.scale)\n if conv == 'dev':\n history['deviance'].append(self.deviance(tmp_results))\n elif conv == 'sresid':\n history['sresid'].append(tmp_results.resid/tmp_results.scale)\n elif conv == 'weights':\n history['weights'].append(tmp_results.model.weights)\n return history\n\n def _estimate_scale(self, resid):\n \"\"\"\n Estimates the scale based on the option provided to the fit method.\n \"\"\"\n if isinstance(self.scale_est, str):\n if self.scale_est.lower() == 'mad':\n return scale.mad(resid, center=0)\n if self.scale_est.lower() == 'stand_mad':\n return scale.mad(resid)\n else:\n raise ValueError(\"Option %s for scale_est not understood\" %\n self.scale_est)\n elif isinstance(self.scale_est, scale.HuberScale):\n return self.scale_est(self.df_resid, self.nobs, resid)\n else:\n return scale.scale_est(self, resid)**2\n\n def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',\n update_scale=True, conv='dev'):\n \"\"\"\n Fits the model using iteratively reweighted least squares.\n\n The IRLS routine runs until the specified objective converges to `tol`\n or `maxiter` has been reached.\n\n Parameters\n ----------\n conv : string\n Indicates the convergence criteria.\n Available options are \"coefs\" (the coefficients), \"weights\" (the\n weights in the iteration), \"sresid\" (the standardized residuals),\n and \"dev\" (the un-normalized log-likelihood for the M\n estimator). The default is \"dev\".\n cov : string, optional\n 'H1', 'H2', or 'H3'\n Indicates how the covariance matrix is estimated. Default is 'H1'.\n See rlm.RLMResults for more information.\n init : string\n Specifies method for the initial estimates of the parameters.\n Default is None, which means that the least squares estimate\n is used. Currently it is the only available choice.\n maxiter : int\n The maximum number of iterations to try. Default is 50.\n scale_est : string or HuberScale()\n 'mad' or HuberScale()\n Indicates the estimate to use for scaling the weights in the IRLS.\n The default is 'mad' (median absolute deviation. Other options are\n 'HuberScale' for Huber's proposal 2. Huber's proposal 2 has\n optional keyword arguments d, tol, and maxiter for specifying the\n tuning constant, the convergence tolerance, and the maximum number\n of iterations. See statsmodels.robust.scale for more information.\n tol : float\n The convergence tolerance of the estimate. Default is 1e-8.\n update_scale : Bool\n If `update_scale` is False then the scale estimate for the\n weights is held constant over the iteration. Otherwise, it\n is updated for each fit in the iteration. 
Default is True.\n\n Returns\n -------\n results : object\n statsmodels.rlm.RLMresults\n \"\"\"\n if not cov.upper() in [\"H1\",\"H2\",\"H3\"]:\n raise ValueError(\"Covariance matrix %s not understood\" % cov)\n else:\n self.cov = cov.upper()\n conv = conv.lower()\n if not conv in [\"weights\",\"coefs\",\"dev\",\"sresid\"]:\n raise ValueError(\"Convergence argument %s not understood\" \\\n % conv)\n self.scale_est = scale_est\n\n wls_results = lm.WLS(self.endog, self.exog).fit()\n if not init:\n self.scale = self._estimate_scale(wls_results.resid)\n\n history = dict(params = [np.inf], scale = [])\n if conv == 'coefs':\n criterion = history['params']\n elif conv == 'dev':\n history.update(dict(deviance = [np.inf]))\n criterion = history['deviance']\n elif conv == 'sresid':\n history.update(dict(sresid = [np.inf]))\n criterion = history['sresid']\n elif conv == 'weights':\n history.update(dict(weights = [np.inf]))\n criterion = history['weights']\n\n # done one iteration so update\n history = self._update_history(wls_results, history, conv)\n iteration = 1\n converged = 0\n while not converged:\n self.weights = self.M.weights(wls_results.resid/self.scale)\n wls_results = lm.WLS(self.endog, self.exog,\n weights=self.weights).fit()\n if update_scale is True:\n self.scale = self._estimate_scale(wls_results.resid)\n history = self._update_history(wls_results, history, conv)\n iteration += 1\n converged = _check_convergence(criterion, iteration, tol, maxiter)\n results = RLMResults(self, wls_results.params,\n self.normalized_cov_params, self.scale)\n\n history['iteration'] = iteration\n results.fit_history = history\n results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,\n norm=self.M.__class__.__name__, conv=conv)\n #norm is not changed in fit, no old state\n\n #doing the next causes exception\n #self.cov = self.scale_est = None #reset for additional fits\n #iteration and history could contain wrong state with repeated fit\n return RLMResultsWrapper(results)\n\nclass RLMResults(base.LikelihoodModelResults):\n \"\"\"\n Class to contain RLM results\n\n Returns\n -------\n **Attributes**\n\n bcov_scaled : array\n p x p scaled covariance matrix specified in the model fit method.\n The default is H1. H1 is defined as\n ``k**2 * (1/df_resid*sum(M.psi(sresid)**2)*scale**2)/\n ((1/nobs*sum(M.psi_deriv(sresid)))**2) * (X.T X)^(-1)``\n\n where ``k = 1 + (df_model +1)/nobs * var_psiprime/m**2``\n where ``m = mean(M.psi_deriv(sresid))`` and\n ``var_psiprime = var(M.psi_deriv(sresid))``\n\n H2 is defined as\n ``k * (1/df_resid) * sum(M.psi(sresid)**2) *scale**2/\n ((1/nobs)*sum(M.psi_deriv(sresid)))*W_inv``\n\n H3 is defined as\n ``1/k * (1/df_resid * sum(M.psi(sresid)**2)*scale**2 *\n (W_inv X.T X W_inv))``\n\n where `k` is defined as above and\n ``W_inv = (M.psi_deriv(sresid) exog.T exog)^(-1)``\n\n See the technical documentation for cleaner formulae.\n bcov_unscaled : array\n The usual p x p covariance matrix with scale set equal to 1. It\n is then just equivalent to normalized_cov_params.\n bse : array\n An array of the standard errors of the parameters. The standard\n errors are taken from the robust covariance matrix specified in the\n argument to fit.\n chisq : array\n An array of the chi-squared values of the paramter estimates.\n df_model\n See RLM.df_model\n df_resid\n See RLM.df_resid\n fit_history : dict\n Contains information about the iterations. 
Its keys are `deviance`,\n `params`, `iteration` and the convergence criteria specified in\n `RLM.fit`, if different from `deviance` or `params`.\n fit_options : dict\n Contains the options given to fit.\n fittedvalues : array\n The linear predicted values. dot(exog, params)\n model : statsmodels.rlm.RLM\n A reference to the model instance\n nobs : float\n The number of observations n\n normalized_cov_params : array\n See RLM.normalized_cov_params\n params : array\n The coefficients of the fitted model\n pinv_wexog : array\n See RLM.pinv_wexog\n pvalues : array\n The p values associated with `tvalues`. Note that `tvalues` are assumed to be distributed\n standard normal rather than Student's t.\n resid : array\n The residuals of the fitted model. endog - fittedvalues\n scale : float\n The type of scale is determined in the arguments to the fit method in\n RLM. The reported scale is taken from the residuals of the weighted\n least squares in the last IRLS iteration if update_scale is True. If\n update_scale is False, then it is the scale given by the first OLS\n fit before the IRLS iterations.\n sresid : array\n The scaled residuals.\n tvalues : array\n The \"t-statistics\" of params. These are defined as params/bse where bse are taken\n from the robust covariance matrix specified in the argument to fit.\n weights : array\n The reported weights are determined by passing the scaled residuals\n from the last weighted least squares fit in the IRLS algortihm.\n\n See also\n --------\n statsmodels.base.model.LikelihoodModelResults\n \"\"\"\n\n\n def __init__(self, model, params, normalized_cov_params, scale):\n super(RLMResults, self).__init__(model, params,\n normalized_cov_params, scale)\n self.model = model\n self.df_model = model.df_model\n self.df_resid = model.df_resid\n self.nobs = model.nobs\n self._cache = resettable_cache()\n #for remove_data\n self.data_in_cache = ['sresid']\n\n self.cov_params_default = self.bcov_scaled\n #TODO: \"pvals\" should come from chisq on bse?\n\n @cache_readonly\n def fittedvalues(self):\n return np.dot(self.model.exog, self.params)\n\n @cache_readonly\n def resid(self):\n return self.model.endog - self.fittedvalues # before bcov\n\n @cache_readonly\n def sresid(self):\n return self.resid/self.scale\n\n @cache_readonly\n def bcov_unscaled(self):\n return self.normalized_cov_params\n\n @cache_readonly\n def weights(self):\n return self.model.weights\n\n @cache_readonly\n def bcov_scaled(self):\n model = self.model\n m = np.mean(model.M.psi_deriv(self.sresid))\n var_psiprime = np.var(model.M.psi_deriv(self.sresid))\n k = 1 + (self.df_model+1)/self.nobs * var_psiprime/m**2\n\n if model.cov == \"H1\":\n return k**2 * (1/self.df_resid*\\\n np.sum(model.M.psi(self.sresid)**2)*self.scale**2)\\\n /((1/self.nobs*np.sum(model.M.psi_deriv(self.sresid)))**2)\\\n *model.normalized_cov_params\n else:\n W = np.dot(model.M.psi_deriv(self.sresid)*model.exog.T,\n model.exog)\n W_inv = np.linalg.inv(W)\n # [W_jk]^-1 = [SUM(psi_deriv(Sr_i)*x_ij*x_jk)]^-1\n # where Sr are the standardized residuals\n if model.cov == \"H2\":\n # These are correct, based on Huber (1973) 8.13\n return k*(1/self.df_resid)*np.sum(\\\n model.M.psi(self.sresid)**2)*self.scale**2\\\n /((1/self.nobs)*np.sum(\\\n model.M.psi_deriv(self.sresid)))*W_inv\n elif model.cov == \"H3\":\n return k**-1*1/self.df_resid*np.sum(\\\n model.M.psi(self.sresid)**2)*self.scale**2\\\n *np.dot(np.dot(W_inv, np.dot(model.exog.T,model.exog)),\\\n W_inv)\n\n @cache_readonly\n def pvalues(self):\n return 
stats.norm.sf(np.abs(self.tvalues))*2\n\n @cache_readonly\n def bse(self):\n return np.sqrt(np.diag(self.bcov_scaled))\n\n @cache_readonly\n def chisq(self):\n return (self.params/self.bse)**2\n\n def remove_data(self):\n super(self.__class__, self).remove_data()\n #self.model.history['sresid'] = None\n #self.model.history['weights'] = None\n\n remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__\n\n def summary(self, yname=None, xname=None, title=0, alpha=.05,\n return_fmt='text'):\n \"\"\"\n This is for testing the new summary setup\n \"\"\"\n from statsmodels.iolib.summary import (summary_top,\n summary_params, summary_return)\n\n## left = [(i, None) for i in (\n## 'Dependent Variable:',\n## 'Model type:',\n## 'Method:',\n##\t\t\t'Date:',\n## 'Time:',\n## 'Number of Obs:',\n## 'df resid',\n##\t\t 'df model',\n## )]\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['IRLS']),\n ('Norm:', [self.fit_options['norm']]),\n ('Scale Est.:', [self.fit_options['scale_est']]),\n ('Cov Type:', [self.fit_options['cov']]),\n ('Date:', None),\n ('Time:', None),\n ('No. Iterations:', [\"%d\" % self.fit_history['iteration']])\n ]\n top_right = [('No. Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None)\n ]\n\n if not title is None:\n title = \"Robust linear Model Regression Results\"\n\n #boiler plate\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n #diagnostic table is not used yet\n# smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n# yname=yname, xname=xname,\n# title=\"\")\n\n#add warnings/notes, added to text format only\n etext =[]\n wstr = \\\n'''If the model instance has been used for another fit with different fit\nparameters, then the fit options might not be the correct ones anymore .'''\n etext.append(wstr)\n\n if etext:\n smry.add_extra_txt(etext)\n\n return smry\n\n\n def summary2(self, xname=None, yname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental summary function for regression results\n\n Parameters\n -----------\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n yname : string\n Name of the dependent variable (optional)\n title : string, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n\n return smry\n\n\nclass RLMResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(RLMResultsWrapper, RLMResults)\n\nif __name__==\"__main__\":\n#NOTE: This is to be removed\n#Delivery Time Data is taken from Montgomery and Peck\n import statsmodels.api as sm\n\n#delivery time(minutes)\n endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,\n 79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,\n 9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])\n\n#number of cases, distance (Feet)\n exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,\n 7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,\n 605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,\n 150]])\n exog = exog.T\n exog = sm.add_constant(exog)\n\n# model_ols = models.regression.OLS(endog, exog)\n# results_ols = model_ols.fit()\n\n# model_ramsaysE = RLM(endog, exog, M=norms.RamsayE())\n# results_ramsaysE = model_ramsaysE.fit(update_scale=False)\n\n# model_andrewWave = RLM(endog, exog, M=norms.AndrewWave())\n# results_andrewWave = model_andrewWave.fit(update_scale=False)\n\n# model_hampel = RLM(endog, exog, M=norms.Hampel(a=1.7,b=3.4,c=8.5)) # convergence problems with scale changed, not with 2,4,8 though?\n# results_hampel = model_hampel.fit(update_scale=False)\n\n#######################\n### Stack Loss Data ###\n#######################\n from statsmodels.datasets.stackloss import load\n data = load()\n data.exog = sm.add_constant(data.exog)\n#############\n### Huber ###\n#############\n# m1_Huber = RLM(data.endog, data.exog, M=norms.HuberT())\n# results_Huber1 = m1_Huber.fit()\n# m2_Huber = RLM(data.endog, data.exog, M=norms.HuberT())\n# results_Huber2 = m2_Huber.fit(cov=\"H2\")\n# m3_Huber = RLM(data.endog, data.exog, M=norms.HuberT())\n# results_Huber3 = m3_Huber.fit(cov=\"H3\")\n##############\n### Hampel ###\n##############\n# m1_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())\n# results_Hampel1 = m1_Hampel.fit()\n# m2_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())\n# results_Hampel2 = m2_Hampel.fit(cov=\"H2\")\n# m3_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())\n# results_Hampel3 = m3_Hampel.fit(cov=\"H3\")\n################\n### Bisquare ###\n################\n# m1_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())\n# results_Bisquare1 = m1_Bisquare.fit()\n# m2_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())\n# results_Bisquare2 = m2_Bisquare.fit(cov=\"H2\")\n# m3_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())\n# results_Bisquare3 = m3_Bisquare.fit(cov=\"H3\")\n\n\n##############################################\n# Huber's Proposal 2 scaling #\n##############################################\n\n################\n### Huber'sT ###\n################\n m1_Huber_H = RLM(data.endog, data.exog, M=norms.HuberT())\n results_Huber1_H 
= m1_Huber_H.fit(scale_est=scale.HuberScale())\n# m2_Huber_H\n# m3_Huber_H\n# m4 = RLM(data.endog, data.exog, M=norms.HuberT())\n# results4 = m1.fit(scale_est=\"Huber\")\n# m5 = RLM(data.endog, data.exog, M=norms.Hampel())\n# results5 = m2.fit(scale_est=\"Huber\")\n# m6 = RLM(data.endog, data.exog, M=norms.TukeyBiweight())\n# results6 = m3.fit(scale_est=\"Huber\")\n\n\n\n\n# print \"\"\"Least squares fit\n#%s\n#Huber Params, t = 2.\n#%s\n#Ramsay's E Params\n#%s\n#Andrew's Wave Params\n#%s\n#Hampel's 17A Function\n#%s\n#\"\"\" % (results_ols.params, results_huber.params, results_ramsaysE.params,\n# results_andrewWave.params, results_hampel.params)\n\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"pandas.core.api.get_dummies",
"numpy.concatenate",
"numpy.all",
"numpy.max",
"numpy.exp",
"scipy.stats.norm._cdf",
"numpy.allclose",
"scipy.stats.chisqprob",
"numpy.eye",
"scipy.special.digamma",
"numpy.finfo",
"numpy.size",
"numpy.repeat",
"numpy.zeros",
"numpy.log",
"numpy.nonzero",
"scipy.optimize.fmin_bfgs",
"numpy.linalg.inv",
"scipy.stats.norm._pdf",
"numpy.genfromtxt",
"numpy.atleast_2d",
"numpy.append",
"scipy.stats.poisson.pmf",
"numpy.transpose",
"scipy.special.gammaln",
"scipy.special.polygamma",
"numpy.array",
"numpy.sum",
"numpy.histogram2d",
"numpy.abs",
"numpy.triu_indices",
"numpy.tile",
"numpy.ones",
"numpy.empty"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.asarray",
"scipy.stats.distributions.poisson",
"numpy.exp",
"scipy.stats.distributions.binom",
"scipy.stats.distributions.gamma",
"numpy.allclose",
"numpy.column_stack",
"numpy.zeros",
"numpy.log",
"numpy.power",
"numpy.isnan",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.linalg.solve",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.shape",
"scipy.optimize.brentq",
"scipy.stats.chi2.sf"
],
[
"numpy.diag",
"numpy.dot",
"numpy.abs",
"numpy.linalg.inv",
"numpy.linalg.pinv",
"numpy.transpose",
"numpy.array",
"numpy.fabs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [
"0.18",
"0.19"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
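The `code` cell of the row above embeds the statsmodels GLM implementation, including the IRLS fitting loop and the elastic-net `fit_regularized` path with penalty `-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)`. As a minimal sketch of how that interface is typically exercised (synthetic data, the Poisson family choice, and all parameter values below are illustrative assumptions, not taken from this dataset):

```python
# Minimal sketch: drives the GLM fit / fit_regularized entry points implemented
# in the code cell above. Data, family, alpha and L1_wt are arbitrary assumptions.
import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
n = 200
X = sm.add_constant(rng.normal(size=(n, 2)))   # design matrix with intercept
beta_true = np.array([0.5, 1.0, -0.7])         # hypothetical coefficients
y = rng.poisson(np.exp(X @ beta_true))         # Poisson response

model = sm.GLM(y, X, family=sm.families.Poisson())

# Default IRLS fit (the _fit_irls path shown in the code cell).
res = model.fit()
print(res.params, res.deviance)

# Elastic-net regularized fit (the fit_regularized path); L1_wt=1 is a pure L1 penalty.
res_en = model.fit_regularized(method="elastic_net", alpha=0.01, L1_wt=1.0)
print(res_en.params)
```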
volpatto/UQpy | [
"acbe1d6e655e98917f56b324f019881ea9ccca82"
] | [
"example/Bayesian/More advanced examples with FE models - Sfepy/material_homogenization.py"
] | [
"#!/usr/bin/env python\n\n# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\nsys.path.append('.')\n\nimport matplotlib as mlp\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PolyCollection\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n\nimport numpy as np\n\nfrom sfepy.base.base import Struct, output\nfrom sfepy.base.log import Log\nfrom sfepy import data_dir\n\nclass MaterialSimulator(object):\n\n @staticmethod\n def create_app(filename, is_homog=False, **kwargs):\n from sfepy.base.conf import ProblemConf, get_standard_keywords\n from sfepy.homogenization.homogen_app import HomogenizationApp\n from sfepy.applications import PDESolverApp\n\n required, other = get_standard_keywords()\n if is_homog:\n required.remove('equations')\n\n conf = ProblemConf.from_file(filename, required, other,\n define_args=kwargs)\n options = Struct(output_filename_trunk=None,\n save_ebc=False,\n save_ebc_nodes=False,\n save_regions=False,\n save_regions_as_groups=False,\n save_field_meshes=False,\n solve_not=False,\n )\n output.set_output(filename='sfepy_log.txt', quiet=True)\n\n if is_homog:\n app = HomogenizationApp(conf, options, 'material_opt_micro:')\n\n else:\n app = PDESolverApp(conf, options, 'material_opt_macro:')\n\n app.conf.opt_data = {}\n opts = conf.options\n if hasattr(opts, 'parametric_hook'): # Parametric study.\n parametric_hook = conf.get_function(opts.parametric_hook)\n app.parametrize(parametric_hook)\n\n return app\n\n def __init__(self, macro_fn, micro_fn, phis, plot_meshes_bool=False):\n self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)\n self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)\n self.phis = phis\n self.plot_meshes_bool = plot_meshes_bool\n\n @staticmethod\n def rotate_mat(D, angle):\n s = np.sin(angle)\n c = np.cos(angle)\n s2 = s**2\n c2 = c**2\n sc = s * c\n T = np.array([[c2, 0, s2, 0, 2*sc,0],\n [0, 1, 0, 0, 0, 0],\n [s2, 0, c2, 0, -2*sc, 0],\n [0, 0, 0, c, 0, -s],\n [-sc, 0, sc, 0, c2 - s2, 0],\n [0, 0, 0, s, 0, c]])\n\n return np.dot(np.dot(T, D), T.T)\n\n def plot_meshes(self):\n # plot mesh for micro problem\n pb = self.micro_app.problem\n coors = pb.domain.mesh.coors\n #print(set(coors[:,2]))\n graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])\n graph_slice = np.zeros((graph.shape[0], 4))\n for j in range(graph.shape[0]):\n graph_slice[j,:] = graph[j,coors[graph[j,:],2] == 0]\n cells_matrix = pb.domain.regions['Ym'].get_cells()\n cells_fibers = pb.domain.regions['Yf'].get_cells()\n fig = plt.figure(figsize = (12, 5))\n ax = fig.add_subplot(121)\n pc = PolyCollection(verts=coors[graph[cells_matrix,0:4],:2], facecolors='white', \n edgecolors='black')\n ax.add_collection(pc)\n pc = PolyCollection(verts=coors[graph[cells_fibers,0:4],:2], facecolors='gray', \n edgecolors='black')\n ax.add_collection(pc)\n ax.axis('equal')\n ax.set_title('2D plot of microstructure')\n ax = fig.add_subplot(122, projection='3d')\n for e in range(graph.shape[0]):\n if e in cells_fibers:\n color = 'gray'\n else:\n color = 'white'\n tupleList = coors[graph[e,:],:]\n vertices = [[0, 1, 2, 3], [4, 5, 6, 7], \n [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]\n verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] \n for ix in range(len(vertices))]\n pc3d = Poly3DCollection(verts=verts, facecolors=color, \n edgecolors='black', linewidths=1, alpha=0.5)\n 
ax.add_collection3d(pc3d)\n ax.set_title('3D plot of microstructure')\n plt.show(fig)\n \n # plot mesh for macro problem\n pb = self.macro_app.problem\n coors = pb.domain.mesh.coors\n graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])\n fig2 = plt.figure(figsize=(5,6))\n ax = fig2.add_subplot(111, projection='3d')\n for e in range(graph.shape[0]):\n tupleList = coors[graph[e,:],:]\n vertices = [[0, 1, 2, 3], [4, 5, 6, 7], \n [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]\n verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] \n for ix in range(len(vertices))]\n pc3d = Poly3DCollection(verts=verts, facecolors='white', \n edgecolors='black', linewidths=1, alpha=0.5)\n ax.add_collection3d(pc3d)\n ax.set_xlim3d(-0.03, 0.03)\n ax.set_ylim3d(-0.01, 0.01)\n ax.set_zlim3d(-0.01, 0.1)\n ax.set_title('3D plot of macro system')\n plt.show(fig2)\n return None\n\n def mat_eval(self, x):\n mic_od = self.micro_app.conf.opt_data\n mac_od = self.macro_app.conf.opt_data\n\n mic_od['coefs'] = {}\n mic_od['mat_params'] = x_norm2real(x)\n self.micro_app()\n\n D = mic_od['D_homog']\n comp_k = []\n for phi in self.phis:\n #print('phi = %d' % phi)\n\n mac_od['D_homog'] = self.rotate_mat(D, np.deg2rad(phi))\n self.macro_app()\n\n comp_k.append(mac_od['k'])\n\n # added by Audrey: get a plot of a slice of the mesh\n if self.plot_meshes_bool:\n self.plot_meshes()\n \n return comp_k\n\ndef bounds():\n x_L = [120e9, 0.2, 2e9, 0.2]\n x_U = [200e9, 0.45, 8e9, 0.45]\n return x_L, x_U\n\ndef x_norm2real(x):\n x_L, x_U = np.array(bounds())\n return x * (x_U - x_L) + x_L\n\ndef x_real2norm(x):\n x_L, x_U = np.array(bounds())\n return (x - x_L) / (x_U - x_L)\n\nmicro_filename = data_dir + '/examples/homogenization/' + 'homogenization_opt.py'\nmacro_filename = data_dir + '/examples/homogenization/' + 'linear_elasticity_opt.py'\n\ndef one_simulation(x0, plot_meshes_bool=False):\n \"\"\"\n This function is the main callable here: it takes in as input the parameter vector, \n here x0=[E_fiber, nu_fiber, E_matrix, nu_matrix], and returns the simulated output \n (here slope of the force-elongation curve obtained during a tensile test), to be compared\n with the measured data.\n \"\"\"\n x0 = x0.reshape((-1, ))\n phis = [0, 30, 60, 90]\n #exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis,\n plot_meshes_bool=plot_meshes_bool)\n qoi = ms.mat_eval(x0)\n return qoi\n\ndef one_simulation_2params(x0, plot_meshes_bool=False):\n x0 = x0.reshape((-1, ))\n x0 = np.array([x0[0], 0.45, x0[1], 0.])\n phis = [0, 30, 60, 90]\n #exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis, plot_meshes_bool=plot_meshes_bool)\n\n qoi = ms.mat_eval(x0)\n return qoi\n\ndef one_simulation_2params_rvs(x0, plot_meshes_bool=False):\n x0 = x0.reshape((-1, ))\n x0 = np.array([x0[0], 0.45, x0[1], 0.])\n phis = [0, 30, 60, 90]\n ms = MaterialSimulator(macro_filename, micro_filename,\n phis,\n plot_meshes_bool=plot_meshes_bool)\n\n qoi = ms.mat_eval(x0)\n qoi = np.tile(np.array(qoi), 100)\n return qoi\n"
] | [
[
"numpy.dot",
"numpy.cos",
"numpy.sin",
"numpy.deg2rad",
"matplotlib.collections.PolyCollection",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
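The `code` cell in the row above maps material parameters between a normalized unit hypercube and physical bounds through `bounds`, `x_norm2real` and `x_real2norm` before handing them to the sfepy simulator. A standalone sketch of that transformation, with the same bounds reproduced purely for illustration (no sfepy dependency; not part of the dataset row):

```python
# Standalone sketch of the normalization helpers from the row above;
# bounds are copied from the embedded script for illustration only.
import numpy as np

def bounds():
    # [E_fiber, nu_fiber, E_matrix, nu_matrix] lower/upper limits
    x_L = [120e9, 0.2, 2e9, 0.2]
    x_U = [200e9, 0.45, 8e9, 0.45]
    return x_L, x_U

def x_norm2real(x):
    x_L, x_U = np.array(bounds())
    return x * (x_U - x_L) + x_L

def x_real2norm(x):
    x_L, x_U = np.array(bounds())
    return (x - x_L) / (x_U - x_L)

x_norm = np.array([0.5, 0.5, 0.5, 0.5])            # midpoint of the box
x_real = x_norm2real(x_norm)                        # -> [1.6e11, 0.325, 5e9, 0.325]
assert np.allclose(x_real2norm(x_real), x_norm)     # round-trip is the identity
print(x_real)
```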
robotsorcerer/LevelSetPy | [
"54064ee7fd0144e0d658dd4f6121cbc1fda664b9",
"54064ee7fd0144e0d658dd4f6121cbc1fda664b9",
"54064ee7fd0144e0d658dd4f6121cbc1fda664b9"
] | [
"ExplicitIntegration/Integration/ode_cfl_1.py",
"Tensors/tensor_utils.py",
"Grids/cells_grid.py"
] | [
"__all__ = [\"odeCFL1\"]\n\nimport cupy as cp\nimport numpy as np\nfrom LevelSetPy.Utilities import *\nfrom .ode_cfl_set import odeCFLset\nfrom .ode_cfl_call import odeCFLcallPostTimestep\n\ndef odeCFL1(schemeFunc, tspan, y0, options=None, schemeData=None):\n \"\"\"\n odeCFL1: integrate a CFL constrained ODE (eg a PDE by method of lines).\n\n [ t, y, schemeData ] = odeCFL1(schemeFunc, tspan, y0, options, schemeData)\n\n Integrates a system forward in time by CFL constrained timesteps\n using a first order forward Euler scheme\n (which happens to be the first order TVD RK scheme).\n\n parameters:\n schemeFunc\t Function handle to a CFL constrained ODE system\n (typically an approximation to an HJ term, see below).\n tspan Range of time over which to integrate (see below).\n y0 Initial condition vector\n (typically the data array in vector form).\n options An option structure generated by odeCFLset\n (use [] as a placeholder if necessary).\n schemeData Structure passed through to schemeFunc.\n\n\n t Output time(s) (see below).\n y Output state (see below).\n schemeData Output version of schemeData (see below).\n\n A CFL constrained ODE system is described by a function with prototype\n\n [ ydot, stepBound, schemeData ] = schemeFunc(t, y, schemeData)\n\n where t is the current time, y the current state vector and\n schemeData is passed directly through. The output stepBound\n is the maximum allowed time step that will be taken by this function\n (typically the option parameter factorCFL will choose a smaller step size).\n\n The time interval tspan may be given as\n 1) A two entry vector [ t0 tf ], in which case the output will\n be scalar t = tf and a row vector y = y(tf).\n 2) A vector with three or more entries, in which case the output will\n be column vector t = tspan and each row of y will be the solution\n at one of the times in tspan. Unlike Matlab's ode suite routines,\n this version just repeatedly calls version (1), so it is not\n particularly efficient.\n\n Depending on the options specified, the final time may not be reached.\n If integration terminates early, then t (in tspan case (1)) or t(end)\n (in tspan case(2)) will contain the final time reached.\n\n Note that using this routine for integrating HJ PDEs will usually\n require that the data array be turned into a vector before the call\n and reshaped into an array after the call. Option (2) for tspan should\n not be used in this case because of the excessive memory requirements\n for storing solutions at multiple timesteps.\n\n The output version of schemeData will normally be identical to the inp.t\n version, and therefore can be ignored. However, it is possible for\n schemeFunc or a PostTimestep routine (see odeCFLset) to modify the\n structure during integration, and the version of schemeData at tf is\n returned in this output argument.\n\n\n Copyright 2005 Ian M. Mitchell ([email protected]).\n This software is used, copied and distributed under the licensing\n agreement contained in the file LICENSE in the top directory of\n the distribution.\n\n Ian Mitchell, 5/14/03.\n Calling parameters modified to more closely match Matlab's ODE suite\n Ian Mitchell, 2/6/04.\n Modified to allow vector level sets. 
Ian Mitchell, 11/23/04.\n Modified to add terminalEvent option, Ian Mitchell, 1/30/05.\n\n Lekan Molu, 08/21/2021\n \"\"\"\n small = 100 * eps\n #---------------------------------------------------------------------------\n # Make sure we have the default options settings\n if not options:\n options = odeCFLset()\n\n # Number of timesteps to be returned.\n numT = len(tspan)\n #---------------------------------------------------------------------------\n # If we were asked to integrate forward to a final time.\n if(numT == 2):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Is this a vector level set integration?\n if(iscell(y0)):\n numY = len(y0)\n # We need a cell vector form of schemeFunc.\n if(iscell(schemeFunc)):\n schemeFuncCell = schemeFunc\n else:\n schemeFuncCell = [schemeFunc for i in range(numY)]\n else:\n # Set numY, but be careful: ((numY == 1) & iscell(y0)) is possible.\n numY = 1\n # We need a cell vector form of schemeFunc.\n schemeFuncCell = schemeFunc\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n t = tspan[0]\n steps = 0; startTime = cputime(); stepBound = np.zeros((numY), dtype=np.float64)\n ydot = cell(numY, 1); y = copy.copy(y0)\n\n while(tspan[1] - t >= small * np.abs(tspan[1])):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # First substep: Forward Euler from t_n to t_{n+1}.\n\n # Approximate the derivative and CFL restriction.\n for i in range(numY):\n ydot[i], stepBound[i], schemeData = schemeFuncCell[i](t, y, schemeData)\n # If this is a vector level set, rotate the lists of vector arguments.\n if(iscell(y)):\n y = y[1:]\n\n if(iscell(schemeData)):\n schemeData = schemeData[1:]\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Determine CFL bound on timestep, but not beyond the final time.\n # For vector level sets, use the most restrictive stepBound.\n # We'll use this fixed timestep for both substeps.\n deltaT = np.min(np.hstack((options.factorCFL*stepBound, \\\n tspan[1] - t, options.maxStep)))\n # If there is a terminal event function registered, we need\n # to maintain the info from the last timestep.\n if options.terminalEvent:\n yOld , tOld = y, t\n # Update time.\n t += deltaT\n # Update level set functions.\n if(iscell(y)):\n for i in range(numY):\n y1[i] +=(deltaT * ydot[i])\n else:\n y1 = y + deltaT * ydot[0]\n steps += 1\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # If there is one or more post-timestep routines, call them.\n if options.postTimestep:\n y, schemeData = odeCFLcallPostTimestep(t, y, schemeData, options)\n\n # If we are in single step mode, then do not repeat.\n if(strcmp(options.singleStep, 'on')):\n break\n\n # If there is a terminal event function, establish initial sign\n # of terminal event vector.\n if options.terminalEvent:\n eventValue, schemeData = options.terminalEvent(t, y, tOld, yOld, schemeData)\n\n if((steps > 1) and np.any(np.sign(eventValue) != np.sign(eventValueOld))):\n break\n else:\n eventValueOld = eventValue\n\n endTime = cputime()\n if(strcmp(options.stats, 'on')):\n info(f'{steps} steps in {(endTime-startTime):.2} seconds from {tspan[0]} to {t}.')\n elif(numT > 2):\n # If we were asked for the solution at multiple timesteps.\n t, y, schemeData = odeCFLmultipleSteps(schemeFunc, tspan, y0, options, schemeData)\n else:\n # Malformed time span.\n error('tspan must contain at least two entries')\n\n\n return t, y, schemeData\n",
"__all__ = ['dims_check', 'get_size', 'use_gpu']\n\n__author__ \t\t= \"Lekan Molu\"\n__copyright__ \t= \"2021, Tensor Algebra\"\n__license__ \t= \"Molux Licence\"\n__maintainer__ \t= \"Lekan Molu\"\n__email__ \t\t= \"[email protected]\"\n__status__ \t\t= \"Finished\"\n\n\nimport copy\nimport cupy as cp\nimport numpy as np\nfrom LevelSetPy.Utilities import isscalar, error\n\nuse_gpu = True if cp.is_available else False\n\ndef dims_check(dims=None, N=None, M=None):\n \"\"\"\n This preprocesses dimensions of a tensor\n\n Signature:\n newdims, _ = dimscheck(dims, N): Check that the specified dimensions are valid for a tensor\n\n \"\"\"\n if dims is None:\n dims = np.arange(N)\n\n if isscalar(dims):\n dims = np.array([dims])\n\n if np.max(dims)<0:\n tf = np.isin(-dims, range(N)).astype(np.int64)\n tf = np.array([tf]) if isscalar(tf) else tf\n\n\n if min(tf)==0:\n error(\"Invalid dimension specified.\")\n dims = list(set(range(N)).difference(-dims))\n\n tf = np.isin(dims, range(N)).astype(np.int64)\n tf = np.array([tf]) if isscalar(tf) else tf\n\n if min(tf)==0:\n error(\"Invalid dimension specified.\")\n\n P = len(dims)\n\n sorted_dims = np.sort(dims)\n sorted_dims_idx = np.argsort(dims)\n\n if M > N: raise ValueError(\"We cannot have more multiplicands than dimensions\")\n\n\n if (M != N) and (M != P):\n raise ValueError(\"Invalid number of multiplicands\")\n\n if P==M:\n \"\"\"\n Number of items in dims and number of multiplicands\n are equal; therefore, index in order of how sorted_dims\n was sorted.\n \"\"\"\n vidx = copy.copy(sorted_dims_idx)\n else:\n \"\"\"\n Number of multiplicands is equal to the number of\n dimensions in the tensor; therefore, index multiplicands by\n dimensions specified in dims argument.\n \"\"\"\n vidx = copy.copy(sorted_dims)\n\n return sorted_dims, vidx\n\ndef get_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n",
"__all__ = [\n \"cells_from_grid\"\n]\n\nimport copy\nimport numpy as np\nfrom LevelSetPy.Utilities import *\nfrom LevelSetPy.Grids import getOGPBounds, createGrid\nfrom .cell_neighs import neighbors\n\n\ndef cells_from_grid(g, bounds, padding=None):\n \"\"\"\n gs = cells_from_grid(g, bounds, padding)\n Splits the grid into smaller grids, each with specified bounds.\n Optionally, padding can be specified so that the grids overlap\n\n Inp.ts:\n g - original grid\n bounds - list of bounds of the smaller grids. This should be a g.dim\n dimensional matrix that specifies the \"grid\" of bounds.\n For example, suppose the original grid is a [-1, 1]^2 grid in 2D.\n Then, the following bounds would split it into [-1, 0]^2, [0, 1]^2,\n [-1, 0] x [0, 1], and [0, 1] x [-1, 0] grids:\n bounds = {[-1, 0, 1], [-1, 0, 1]};\n padding - amount of overlap between two adjacent subgrids\n\n Output:\n gs - subgrids\n\n Author: Lekan Molu, September 04, 2021\n \"\"\"\n if padding is None:\n padding = np.zeros((g.dim, 1))\n\n assert isinstance(bounds, list), 'bounds must be a list or list of lists'\n ## Create a grid for the bounds\n if g.dim > 1:\n bounds_grid = np.meshgrid(*bounds, sparse=False, indexing='ij');\n else:\n # indexing and sparse flags have no effect in 1D case\n bounds_grid = np.meshgrid(bounds, indexing='ij')[0]\n\n ## Create grids based on the bound grid\n temp = size(bounds_grid[0])\n temparr = np.array((temp))\n gs = np.zeros(temparr-(temparr>1).astype(np.int64))\n\n ii = cell(g.dim, 1)\n gss = []\n partition = {}\n for i in range(numel(gs)):\n ii = np.asarray(np.unravel_index(i, size(gs), order='F'))\n iip = copy.copy(ii)\n # print('iip: ', iip)\n for j in range(g.dim):\n iip[j] += 1\n grid_min = []\n grid_max = []\n # turn'em to indices (tuples) to aid dynamic\n # indexing (see: https://numpy.org/doc/stable/user/basics.indexing.html)\n ii, iip = tuple(ii), tuple(iip)\n for j in range(g.dim):\n grid_min.append(bounds_grid[j][ii])\n grid_max.append(bounds_grid[j][iip])\n grid_min, grid_max = np.vstack((grid_min)), np.vstack((grid_max))\n #print(f'grid_min: {grid_min.shape}, grid_max: {grid_max.shape}')\n grid_min, grid_max, N = getOGPBounds(g, grid_min, grid_max, padding);\n\n # create cell within grid\n celi = createGrid(grid_min, grid_max, N, process=True)\n celi.neighs = neighbors(ii, gs.shape) # neighbors of this cell\n celi.idx = ii # index of this cell within the grid subgrd\n celi.gshape = gs.shape # shape of containing grid\n gss.append(celi)\n # partition[ii]=celi\n\n # result=dict(cellshape = gs.shape, cells=gss, partition=partition)\n return gss\n"
] | [
[
"numpy.sign",
"numpy.hstack",
"numpy.zeros",
"numpy.abs"
],
[
"numpy.arange",
"numpy.sort",
"numpy.max",
"numpy.argsort",
"numpy.array"
],
[
"numpy.vstack",
"numpy.array",
"numpy.zeros",
"numpy.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siqim/Machine-Learning-with-Graphs | [
"697d83bb206be0825ebaf0dad128b9eb24908705"
] | [
"examples/dataset.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on December 30, 2020\n\n@author: Siqi Miao\n\"\"\"\n\nimport torch\nfrom torch_sparse import SparseTensor\nimport torch_geometric.transforms as T\n\nfrom pathlib2 import Path\nimport scipy.io as sio\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom skmultilearn.model_selection import iterative_train_test_split\nfrom ogb.nodeproppred import PygNodePropPredDataset, Evaluator\n\n\nclass Dataset(object):\n def __init__(self, root, name, make_edge_index=False):\n\n self.root = root\n self.name = name\n self.make_edge_index = make_edge_index\n\n self.num_classes = None\n self.split_idx = None\n self.x = None\n self.y = None\n self.adj_t = None\n self.edge_index = None\n self.num_nodes = None\n self.criterion = None\n self.metric = None\n\n self.heterophily_dataset = ['chameleon', 'actor']\n\n if name == 'ogb':\n self.setup_ogb()\n elif name == 'wiki':\n self.setup_wiki()\n elif name in self.heterophily_dataset:\n self.setup_geom()\n else:\n raise KeyboardInterrupt\n\n def setup_ogb(self):\n\n dataset = PygNodePropPredDataset(name='ogbn-arxiv', root=self.root, transform=T.ToSparseTensor())\n data = dataset[0]\n\n self.metric = 'Accuracy'\n self.num_classes = dataset.num_classes\n self.split_idx = dataset.get_idx_split()\n\n self.x = data.x\n self.y = data.y\n self.adj_t = data.adj_t.to_symmetric()\n self.num_nodes = data.num_nodes\n\n if self.make_edge_index:\n row = self.adj_t.storage.row()\n col = self.adj_t.storage.col()\n self.edge_index = torch.stack((row, col), dim=0)\n\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def setup_wiki(self):\n\n mat = sio.loadmat(self.root / 'wiki' / 'POS.mat')\n\n self.metric = 'MicroF1'\n self.num_nodes = 4777\n self.num_classes = 40\n\n adj_t = mat['network'].tocoo()\n self.adj_t = SparseTensor(row=torch.LongTensor(adj_t.row), col=torch.LongTensor(adj_t.col),\n sparse_sizes=(self.num_nodes, self.num_nodes))\n\n if self.make_edge_index:\n row = self.adj_t.storage.row()\n col = self.adj_t.storage.col()\n self.edge_index = torch.stack((row, col), dim=0)\n\n self.y = torch.from_numpy(mat['group'].todense()).float()\n idx = torch.arange(self.y.shape[0]).view(-1, 1)\n train_idx, _, test_idx, _ = iterative_train_test_split(idx, self.y, test_size=0.1)\n self.split_idx = {'train': train_idx.view(-1), 'valid': test_idx.view(-1), 'test': test_idx.view(-1)}\n\n self.criterion = torch.nn.BCEWithLogitsLoss() # for multi-label classification\n\n def setup_geom(self):\n edge_file = self.root / self.name / 'out1_graph_edges.txt'\n feature_label_file = self.root / self.name / 'out1_node_feature_label.txt'\n\n self.metric = 'Accuracy'\n\n edges = edge_file.open('r').readlines()[1:]\n edges = torch.LongTensor([(lambda x: [int(x[0]), int(x[1])])(edge.strip().split('\\t')) for edge in edges])\n self.num_nodes = torch.max(edges).item() + 1\n self.adj_t = SparseTensor(row=torch.LongTensor(edges[:, 0]), col=torch.LongTensor(edges[:, 1]),\n sparse_sizes=(self.num_nodes, self.num_nodes))\n # self.adj_t = self.adj_t.to_symmetric()\n\n if self.make_edge_index:\n self.edge_index = edges.t()\n\n idx = []\n x = []\n y = []\n xy = feature_label_file.open('r').readlines()[1:]\n for line in xy:\n node_id, feature, label = line.strip().split('\\t')\n idx.append(int(node_id))\n\n if self.name == 'actor':\n one_hot = torch.zeros(932)\n pos_with_ones = list(map(int, feature.split(',')))\n one_hot[pos_with_ones] = 1\n x.append(one_hot.int().tolist())\n else:\n x.append(list(map(int, 
feature.split(','))))\n y.append(int(label))\n\n _, indices = torch.sort(torch.LongTensor(idx))\n self.x = torch.LongTensor(x)[indices]\n self.y = torch.LongTensor(y).view(-1, 1)[indices]\n self.num_classes = torch.max(self.y).item() + 1\n\n idx = torch.arange(self.y.shape[0]).view(-1, 1)\n train_idx, val_test_idx = train_test_split(idx, test_size=0.4, stratify=self.y)\n val_idx, test_idx = train_test_split(val_test_idx, test_size=0.5, stratify=self.y[val_test_idx.squeeze()])\n self.split_idx = {'train': train_idx.view(-1), 'valid': val_idx.view(-1), 'test': test_idx.view(-1)}\n\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def eval(self, y_true, logits, split_idx):\n\n if self.name == 'ogb':\n evaluator = Evaluator(name='ogbn-arxiv')\n y_pred = logits.argmax(dim=1, keepdim=True)\n train_acc = evaluator.eval({\n 'y_true': y_true[split_idx['train']],\n 'y_pred': y_pred[split_idx['train']],\n })['acc']\n valid_acc = evaluator.eval({\n 'y_true': y_true[split_idx['valid']],\n 'y_pred': y_pred[split_idx['valid']],\n })['acc']\n test_acc = evaluator.eval({\n 'y_true': y_true[split_idx['test']],\n 'y_pred': y_pred[split_idx['test']],\n })['acc']\n return train_acc, valid_acc, test_acc\n\n elif self.name == 'wiki':\n y_pred = torch.sigmoid(logits) > 0.5\n train_f1 = f1_score(y_true[split_idx['train']], y_pred[split_idx['train']], average='micro')\n valid_f1 = f1_score(y_true[split_idx['valid']], y_pred[split_idx['valid']], average='micro')\n test_f1 = f1_score(y_true[split_idx['test']], y_pred[split_idx['test']], average='micro')\n return train_f1, valid_f1, test_f1\n\n elif self.name in self.heterophily_dataset:\n y_pred = logits.argmax(dim=1, keepdim=True)\n train_acc = accuracy_score(y_true[split_idx['train']], y_pred[split_idx['train']])\n valid_acc = accuracy_score(y_true[split_idx['valid']], y_pred[split_idx['valid']])\n test_acc = accuracy_score(y_true[split_idx['test']], y_pred[split_idx['test']])\n return train_acc, valid_acc, test_acc\n\n\nif __name__ == '__main__':\n data = Dataset(root=Path('../dataset'), name='ogb', make_edge_index=True)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.sigmoid",
"torch.max",
"torch.zeros",
"scipy.io.loadmat",
"sklearn.model_selection.train_test_split",
"torch.nn.BCEWithLogitsLoss",
"torch.arange",
"torch.stack",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
yinonbaron/biomass_distribution | [
"783a8d2f59754bde9b0ea802512b131abbe7d8a0",
"783a8d2f59754bde9b0ea802512b131abbe7d8a0"
] | [
"plants/non_wood_biomass/non_wood_biomass.py",
"bacteria_archaea/marine/marine_prok_biomass_estimate.py"
] | [
"\n# coding: utf-8\n\n# # Estimating the fraction of plant biomass which is not woody\n# To estimate the total non-woody plant biomass, we rely on two methods. The first is to estimate the global average leaf and root mass fractions, and the second is by estimating the total biomass of roots and leaves.\n# \n# ## Method1 - fraction of leaves and roots\n# To estimate the global average leaf and root mass fractions, we rely on a recent meta-analysis which collected data on the lead, shoot and root mass fractions in several different biomes ([Poorter et al.](http://dx.doi.org/10.1111/j.1469-8137.2011.03952.x)). Here are the mean leaf, shoot, and root mass fractions in each biome:\n\n# In[1]:\n\n# Load dependencies\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import gmean\nimport sys\nsys.path.insert(0,'../../statistics_helper/')\nfrom fraction_helper import *\n\n# Load data from Poorter et al.\nfractions = pd.read_excel('non_wood_biomass_data.xlsx','Poorter',skiprows=1,index_col=0)\nfractions\n\n\n# We calculate weighted mean of leaf and root mass fractions. We use the fraction of total plant biomass in each biome as our weights from [Erb et al.](http://dx.doi.org/10.1038/ngeo2782) for the weighted mean. Here is the data from Erb et al.:\n\n# In[2]:\n\n# Load data on the total plant biomass in each biome from Erb et al.\nbiomes = pd.read_excel('non_wood_biomass_data.xlsx','Erb',skiprows=1)\nbiomes\n\n\n# The specific biomes in Erb et al. are not fully matching the biomes in Poorter et al., and thus we traslate between the biomes in the two studies:\n\n# In[3]:\n\n# Calculate the sum of the mass fractions of leaves and roots\nnon_wood_frac = (fractions['LMF']+fractions['RMF'])/fractions.sum(axis=1)\n\n# Calculate the total biomass of each biome by the biomes reported in Poorter et al.\ntot_biomass = biomes.groupby('Categories included in Poorter').sum()\n\n# For the temperate steppe, desert and mountain, we use the mean values from grassland and shrubland in Poorter et al.\nnon_wood_frac.loc['Grassland, shrubland'] = frac_mean(np.array([non_wood_frac.loc['Grassland'],non_wood_frac.loc['Shrubland']]))\n\n\n# Set the non-woody fraction as a column in the biome data\ntot_biomass['Non wood fraction'] = non_wood_frac\n\n# Calculate the weighed average of the non-woody biomass fraction\nmean_non_wood_frac = np.average(tot_biomass['Non wood fraction'], weights= tot_biomass['Total biomass [Gt C]'])\nprint('Our global average for non-woody mass fraction is ≈%.0f percent' %(mean_non_wood_frac*100))\n\n\n# Our estimate of the total non-woody plant biomass is the product of our best estimate of the total plant biomass and our estimate of the global average non-woody mass fraction:\n\n# In[4]:\n\n\n# Our best estimate for the total biomass\ntot_plant_biomass = 450e15\n\n# Multiply our estimate for the non-woody mass fraction by our estimate\n# of the total plant biomass\nmethod1_non_wood_biomass = mean_non_wood_frac*tot_plant_biomass\n\nprint('Our best estimate for the total non-wood plant biomass based on the fraction of roots and leaves is ≈%.0f Gt C' %(method1_non_wood_biomass/1e15))\n\n\n# ## Method2 - total biomass of leaves and roots\n# Our second method for estimating the total non-woody plant biomass is based on estimating the total biomass of roots and leaves. 
For roots, we rely on the estimate made by [Jackson et al.](http://dx.doi.org/10.1007/BF00333714):\n\n# In[5]:\n\nroots_jackson = 146e15\n\n\n# To estimate the total biomass of leaves, we rely on biome averages on the leaf area index (LAI) from [Asner et al.](http://dx.doi.org/10.1046/j.1466-822X.2003.00026.x). Here is the data from Asner et al.:\n\n# In[6]:\n\nbiome_LAI = pd.read_excel('non_wood_biomass_data.xlsx','Asner',skiprows=1,index_col=0)\nbiome_LAI\n\n\n# We use data on the area on each biome from the book \"Biogeochemistry\", and multiply the LAI in each biome by the total area of each biome to estimate the global leaf area:\n\n# In[7]:\n\n# Load biome area data\nbiome_area = pd.read_excel('non_wood_biomass_data.xlsx','Biome area',skiprows=1,index_col=0)\n\n# Calculate the mean LAI for boreal forests\nbiome_LAI.loc['Boreal forest'] = gmean(biome_LAI.loc[['Boreal DBL','Boreal ENL']])\n\n# Calculate the mean LAI for temperate forests\nbiome_LAI.loc['Temperate forest'] = gmean(biome_LAI.loc[['Temperate DBL','Temperate EBL','Temperate ENL']])\n\n# Calculate the mean LAI for tropical forests\nbiome_LAI.loc['Tropical forest'] = gmean(biome_LAI.loc[['Tropical DBL','Tropical EBL']])\n\n# Calculate the mean LAI for temperate grasslands\nbiome_LAI.loc['Temperate grassland'] = biome_LAI.loc['Grassland']\n\n# Calculate the mean LAI for tropical savanna\nbiome_LAI.loc['Tropical savanna'] = gmean(biome_LAI.loc[['Grassland','Shrubland']])\n\n# Multiply the mean LAI in each biome by the total area of each biome\ntot_leaf_area = (biome_LAI['LAI [m^2 m^-2]']*biome_area['Area [m^2]']).sum()\nprint('Our estimate for the total leaf area is ≈%.1e m^2' % tot_leaf_area)\n\n\n# To convert the total leaf area into total biomass of leaves, we use an estimate for the average leaf mass per area (LMA) from the Glopnet database [Wright et al.](http://dx.doi.org/10.1038/nature02403):\n\n# In[8]:\n\n# Load the glopnet data\nglopnet_data = pd.read_excel('non_wood_biomass_data.xlsx','glopnet_data')\n\n# Calculate the geometric mean of the LMA\ngeomean_LMA = 10**glopnet_data.loc[glopnet_data['GF']=='T',['log LMA']].mean()\n\n# Convert the global leaf area to global leaf biomass\ntot_leaf_biomass = tot_leaf_area*geomean_LMA/2\n\nprint('Our estimate for the global leaf biomass is ≈%.1f Gt C' %(tot_leaf_biomass/1e15))\n\n\n# We sum our estimates for the total biomass of roots and leaves to produce our estimate of the total non-woody plant biomass:\n\n# In[9]:\n\nmethod2_non_wood_biomass = tot_leaf_biomass + roots_jackson\nprint('Our best estimate for the total non-wood plant biomass based on estimates of the total biomass of roots and leaves is ≈%.0f Gt C' %(method2_non_wood_biomass/1e15))\n\n\n# We use the geometric mean of our estimates from the two methods as our best estimate for the total non-woody plant biomass:\n\n# In[10]:\n\nbest_non_wood_biomass = gmean([method1_non_wood_biomass,method2_non_wood_biomass])\nprint('Our best estimate for the total non-wood plant biomass is ≈%.0f Gt C' %(best_non_wood_biomass/1e15))\n\n\n# # Estimating the total belowground plant biomass\n# To estimate the total belowground plant biomass, we use the same procedure as for estimating the total non-woody plant biomass. 
We rely on two methods - the first is based on calculating the mean root mass fraction.\n# ## Method1 - fraction of roots\n# To estimate the global average root mass fractions, we rely on a recent meta-analysis which collected data on the lead, shoot and root mass fractions in several different biomes ([Poorter et al.](http://dx.doi.org/10.1111/j.1469-8137.2011.03952.x)). We calculate the global average root mass fraction by taking into account the relative plant biomass present in each biome, based on data from [Erb et al.](http://dx.doi.org/10.1038/ngeo2782).\n\n# In[11]:\n\n# Calculate the root mass fraction in each biome based on data from Poorter et al.\nroot_frac = (fractions['RMF'])/fractions.sum(axis=1)\n\n# For the temperate steppe, desert and mountain, we use the mean values from grassland and shrubland in Poorter et al.\nroot_frac.loc['Grassland, shrubland'] = frac_mean(np.array([root_frac.loc['Grassland'],root_frac.loc['Shrubland']]))\n\n\n# Set the root fraction as a column in the biome data\ntot_biomass['Root fraction'] = root_frac\n\n# Calculate the weighted average root mass fraction\nmean_root_frac = np.average(tot_biomass['Root fraction'], weights= tot_biomass['Total biomass [Gt C]'])\n\nprint('Our estimate for the global average root mass fraction is ≈%.1f percent' %(mean_root_frac*100))\n\n\n# To estimate the total biomass of roots, we multiply the global average root mass fraction by our best estimate for the total plant biomass:\n\n# In[12]:\n\nmethod1_root_biomass = mean_root_frac*tot_plant_biomass\n\nprint('Our estimate of the total root biomass based on the global average root mass fraction is ≈%0.1f Gt C' %(method1_root_biomass/1e15))\n\n\n# As a second source for estimating the global biomass of roots, we rely on the estimate in [Jackson et al.](http://dx.doi.org/10.1007/BF00333714). We use the geometric mean of the estimate from the two methods as our best estimate of the total biomass of roots, which we use as our best estimate for the total belowground plant biomass:\n\n# In[13]:\n\nbest_root_biomass = gmean([method1_root_biomass,roots_jackson])\n\nprint('Our best estimate for the total belowground plant biomass is ≈%0.1f Gt C' %(best_root_biomass/1e15))\n\n",
"\n# coding: utf-8\n\n# # Estimating the total biomass of marine archaea and bacteria\n# \n# We use our best estimates for the total number of marine prokaryotes, the carbon content of marine prokaryotes and the fraction of marine archaea and bacteria out of the total population of marine prokaryotes to estimate the total biomass of marine bacteria and archaea\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\npd.options.display.float_format = '{:,.1e}'.format\nimport sys\nsys.path.insert(0, '../../statistics_helper')\nfrom CI_helper import *\nresults = pd.read_excel('marine_prok_biomass_estimate.xlsx')\n\n\n# These are our best estimates for the different parameters required for the estimate, along with the associated uncertainties\n\n# In[2]:\n\nresults.head()\n\n\n# We multiply all the relevant parameters to arrive at our best estimate for the biomass of marine archaea and bacteria, and propagate the uncertainties associated with each parameter to calculate the uncertainty associated with the estimate for the total biomass\n\n# In[3]:\n\n# Calculate the total biomass of marine archaea and bacteria\ntotal_arch_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][2]\ntotal_bac_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][3]\n\nprint('Our best estimate for the total biomass of marine archaea is %.1f Gt C' %(total_arch_biomass/1e15))\nprint('Our best estimate for the total biomass of marine bacteria is %.1f Gt C' %(total_bac_biomass/1e15))\n\n# Propagate the uncertainty in the total biomass of bacteria and archaea\nprok_biomass_CI = CI_sum_prop(estimates=np.array([results['Value'][0]*results['Value'][1], results['Value'][0]*results['Value'][1]*results['Value'][4]]), mul_CIs=np.array([CI_prod_prop(results['Uncertainty'][:2]),results['Uncertainty'][4]]))\n\n# Propagate the uncertainty associated with each parameter to the final estimate\narch_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][2]]))\nbac_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][3]]))\n\nprint('The uncertainty associated with the estimate for the biomass of archaea is %.1f-fold' %arch_biomass_uncertainty)\nprint('The uncertainty associated with the estimate for the biomass of bacteria is %.1f-fold' %bac_biomass_uncertainty)\n\n"
] | [
[
"scipy.stats.gmean",
"numpy.array",
"pandas.read_excel",
"numpy.average"
],
[
"numpy.array",
"pandas.read_excel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Davidyz/AutoStacker | [
"9f637891b9379b166e41597bcd44a8011561beea"
] | [
"modules/algo.py"
] | [
"import numpy as np\n\nfrom modules.imageRW import Image\nfrom typing import Iterator, Optional, List\nfrom __future__ import annotations\n\nclass InputException(Exception):\n pass\n\ndef mean(images: Iterator[Image], group_size: int) -> Iterator[Image|None]:\n stackImage: Image|None = None\n while True:\n try:\n i = next(images)\n for j in range(group_size):\n if stackImage:\n stackImage += i / group_size\n else:\n stackImage = np.zeros(i.shape, dtype=np.uint32).view(Image)\n stackImage += i.copy() / group_size\n stackImage.setExif(i.exif)\n yield stackImage\n stackImage = None\n except StopIteration:\n break\n\ndef maxBright(images: Iterator[Image], group_size: int) -> Iterator[Image|None]:\n stackImage: Image|None = None\n\n while True:\n try:\n i = next(images)\n for j in range(group_size):\n if stackImage:\n stackImage = np.array(np.maximum(stackImage, i)).view(Image)\n else:\n stackImage = np.zeros(i.shape, dtype=np.uint32).view(Image)\n stackImage = np.array(np.maximum(stackImage, i)).view(Image)\n stackImage.setExif(i.exif)\n yield stackImage\n stackImage = None\n except StopIteration:\n break\n\ndef mode(images: Iterator[Image]) -> List[Image]:\n modeArray = []\n return modeArray\n\nALGORITHMS = {'mean': mean,\n 'max': maxBright}\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.maximum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zoi-mibtp/pyDNase | [
"047d2f89af6109a530505b370782c4841d710cbf"
] | [
"pyDNase/scripts/dnase_average_profile.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport pyDNase\nimport numpy as np\nimport matplotlib as mpl\nfrom clint.textui import progress, puts\n#Required for headless operation\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\nparser = argparse.ArgumentParser(description='Plots average profile of DNase (or Tn5 for ATAC-seq) activity surrounding a list of regions in a BED file')\nparser.add_argument(\"-w\", \"--window_size\", help=\"Size of flanking area around centre of the regions to plot (default: 100)\",default=100,type=int)\nparser.add_argument(\"-bf\", \"--bias-file\", help=\"Location of the sorted, index\",default = None,type=str)\nparser.add_argument(\"-i\",action=\"store_true\", help=\"Ignores any strand information in BED file and plots data relative to reference strand\",default=False)\nparser.add_argument(\"-c\",action=\"store_true\", help=\"Combine the strand information into one graph\",default=False)\nparser.add_argument(\"-n\",action=\"store_true\", help=\"Normalise cut counts to a fraction peaks\",default=False)\nparser.add_argument(\"-b\",action=\"store_true\", help=\"Normalise for cutting bias\",default=False)\nparser.add_argument(\"-A\",action=\"store_true\", help=\"ATAC-seq mode\",default=False)\nparser.add_argument(\"regions\", help=\"BED file of the regions you want to generate the average profile for\")\nparser.add_argument(\"reads\", help=\"The BAM file containing the DNase-seq data\")\nparser.add_argument(\"output\", help=\"filename to write the output to\")\nargs = parser.parse_args()\n\nreads = pyDNase.BAMHandler(args.reads,ATAC=args.A)\nif args.b:\n if args.bias_file != None:\n freads = pyDNase.BAMHandlerWithBias(pyDNase.FASTAHandler(args.bias_file),args.reads,ATAC=args.A)\n else:\n raise ValueError(\"No FASTA file provided for bias correction!\")\nregions = pyDNase.GenomicIntervalSet(args.regions)\n\n\n\n#Set all strands to positive if \"ignore strands\" is enabled\nif args.i:\n for each in regions:\n each.strand = \"+\"\n\nputs(\"Resizing Regions to {0}\".format(args.window_size))\nregions.resizeRegions(args.window_size)\n\nfw = []\nrv = []\nputs(\"Reading Data from BAM file...\")\nfor each in progress.bar(regions):\n if sum(reads[each][\"+\"]) and sum(reads[each][\"-\"]):\n if args.b:\n try:\n fw.append(np.divide(reads[each][\"+\"],freads[each][\"+\"]))\n rv.append(np.divide(reads[each][\"-\"],freads[each][\"-\"]))\n except Exception:\n pass\n else:\n fw.append(reads[each][\"+\"])\n rv.append(reads[each][\"-\"])\n\nif args.n:\n fw = [list(map(float,i))for i in fw]\n rv = [list(map(float,i)) for i in rv]\n fw = [np.divide(np.subtract(i, min(i)), np.subtract(max(i) , min(i))) for i in fw]\n rv = [np.divide(np.subtract(i, min(i)), np.subtract(max(i) , min(i))) for i in rv]\n\nif args.c:\n plt.plot(np.add(np.mean(fw,axis=0),np.mean(rv,axis=0)),c=\"red\")\nelse:\n plt.plot(np.mean(fw,axis=0),c=\"red\")\n plt.plot(np.mean(rv,axis=0),c=\"blue\")\n\n#Pad the axis out reads bit\nrcParams['xtick.major.pad'] = 20 \nrcParams['ytick.major.pad'] = 20\n\n#Sort out the X axis ticks\nticks = [0,args.window_size,args.window_size*2]\nlabels = [-args.window_size,0,args.window_size]\nplt.xticks(ticks, labels)\n\n#Make the yaxis start from 0\nplt.gca().set_ylim(0)\n\n#Makes ticks only appear on the left hand side\nplt.gca().yaxis.set_ticks_position('left')\n\n#Remove top and right borders\nplt.gca().spines['top'].set_visible(False)\nplt.gca().spines['right'].set_visible(False)\n\nplt.gca().tick_params(axis='both', which='major', labelsize=28, 
pad=12)\n\nif args.bias_file:\n plt.gca().set_ylabel('Average DNase Activity\\n (Observed/Expected)',size=\"32\", multialignment='center')\nelse:\n if args.A:\n plt.gca().set_ylabel('Average Tn5 integrations',size=\"26\", multialignment='center')\n else:\n plt.gca().set_ylabel('Average DNase activity',size=\"26\", multialignment='center')\nplt.savefig(args.output,bbox_inches='tight')\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"numpy.mean",
"matplotlib.pyplot.xticks",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
remichartier/014_selfDrivingCarND_BehavioralCloningProject | [
"1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff",
"1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff"
] | [
"archiveOldVersions/generator_v02.py",
"archiveOldVersions/model_v10.py"
] | [
"#!/usr/bin/env python\n\n# History\n# v01 : adaptation from the one given by Udacity to work\n# v02 : adapt to commonFunctions_v10.py to use generator.\n# Start adding again everything from model_v12.py (image augmentation)\n\nimport os\nimport csv\nimport cv2\nimport numpy as np\nimport sklearn\n\nfrom math import ceil\nfrom random import shuffle\nfrom sklearn.model_selection import train_test_split\n\nfrom commonFunctions_v10 import get_lines_logfile \nfrom commonFunctions_v10 import get_info_from_lines\nfrom commonFunctions_v10 import flip_horizontally\n\nSTEER_CORRECTION_FACTOR = 0.2 # to tune up for left and right images/measurements\n\n# Set our batch size for fit generator\nbatch_len= 6\n\n# Reading CSV file, extracting lines.\nsamples = get_lines_logfile()\n\ntrain_samples, validation_samples = train_test_split(samples[1:], test_size=0.2)\n\n\ndef generator(samples, batch_size=batch_len):\n num_samples = len(samples)\n # print('num_samples : {}'.format(num_samples))\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n # correction : should go only until min(num_samples,offset+batch_size)\n batch_samples = samples[offset: min(num_samples,offset+batch_size)]\n\n # here will get both center, left, right images + their measurements.\n # if batch_size = 32 --> 32*3 = 96 images ....\n images, angles = get_info_from_lines(batch_samples,STEER_CORRECTION_FACTOR,nb_images=None)\n # data augmentation flip horizontally image + inverse measurements\n augm_images, augm_measurements = flip_horizontally(images,angles)\n images.extend(augm_images)\n angles.extend(augm_measurements)\n \n # Nvidia : need to convert images in YUV ...\n images = RGB2YUV(images)\n \n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# Set our batch size (*3 due to image center + left + right ....), then *2 due to flip of each images\nbatch_size=batch_len*3*2 #6*3*2 = 36 ....\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation, Dropout\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=(160,320,3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, \n steps_per_epoch=ceil(len(train_samples)/batch_size), \n validation_data=validation_generator, \n validation_steps=ceil(len(validation_samples)/batch_size), \n epochs=5, verbose=1)",
"#!/usr/bin/env python\nimport numpy as np\nimport cv2\n\nfrom commonFunctions_v07 import get_info_from_logfile\nfrom commonFunctions_v07 import flip_horizontally\nfrom commonFunctions_v07 import visualize_loss_history\nfrom commonFunctions_v07 import RGB2YUV\n\n# History\n# v01 : Start\n# v02 : add nb_images to read parameter\n# v03 : add normalization + mean centering data to 0\n# v04 : data augmentation flip horizontally image + inverse measurements\n# v05 : use left/right images + measurements with Steering error correction\n# v06 : cropping images\n# v07 : add a generator to load data and preprocess it on the fly, in batchsize portions \n# to feed into your Behavioral Cloning model .\n# v08 : Adding loss viusalization tool\n# v09 : Re-start from v06 as fit_generator and need to add generator obsolete.\n# Latest Keras.Model.fit integrates a generator in itself.\n# ie v09 : Visualize loss history\n# v10 : choose better model for self driving cars and for this simulation.\n# Trying https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars\n\nSTEER_CORRECTION_FACTOR = 0.2 # to tune up for left and right images/measurements\n\n# Set our batch size for fit generator\nbatch_size=32\n\n# get images + steering angle measurements\nimages, measurements = get_info_from_logfile(STEER_CORRECTION_FACTOR,nb_images=100)\n\n# data augmentation flip horizontally image + inverse measurements\naugm_images, augm_measurements = flip_horizontally(images,measurements)\nimages.extend(augm_images)\nmeasurements.extend(augm_measurements)\n\n# Nvidia : need to convert images in YUV ...\nimages = RGB2YUV(images)\n\nprint('converting images to np arrays. Please wait ...')\nX_train = np.array(images)\ny_train = np.array(measurements)\nprint('converting images to np arrays. Done')\n\n#print(f'X_train shape : {X_train.shape}')\n#print(f'images shape : {im.shape}')\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation, Dropout\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping\nfrom keras.layers.convolutional import Conv2D\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: ((x/255) - 0.5),input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\n\n# Nvidia : strided convolutions in the first three convolutional layers with a 2×2 stride and a 5×5 kernel\n# The input image is split into YUV planes and passed to the network.\nmodel.add(Conv2D(filters=24,kernel_size=5,strides=2,padding=\"valid\"))\nmodel.add(Conv2D(filters=36,kernel_size=5,strides=2,padding=\"valid\"))\nmodel.add(Conv2D(filters=48,kernel_size=5,strides=2,padding=\"valid\"))\n# and a non-strided convolution with a 3×3 kernel size in the final two convolutional layers.\nmodel.add(Conv2D(filters=64,kernel_size=3,strides=1,padding=\"valid\"))\nmodel.add(Conv2D(filters=64,kernel_size=3,strides=1,padding=\"valid\"))\n# follow the five convolutional layers with three fully connected layers, \n# leading to a final output control value which is the inverse-turning-radius. 
\nmodel.add(Dropout(0.5))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dropout(0.5))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50))\nmodel.add(Dropout(0.5))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10))\nmodel.add(Dropout(0.5))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\n# Callbacks to save best model and prevent overfit by early stopping \ncheckpoint = ModelCheckpoint(filepath='bestModelFolder/model.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True)\nstopper = EarlyStopping(monitor='val_loss', min_delta=0.0003, patience=3)\n# model.fit(callbacks=[checkpoint, stopper])\nhistory_object = model.fit(X_train,y_train, batch_size, validation_split=0.2, shuffle = True, epochs=10, callbacks=[checkpoint, stopper])\n\n'''\nfit(\n x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None,\n validation_split=0.0, validation_data=None, shuffle=True, class_weight=None,\n sample_weight=None, initial_epoch=0, steps_per_epoch=None,\n validation_steps=None, validation_batch_size=None, validation_freq=1,\n max_queue_size=10, workers=1, use_multiprocessing=False\n)\n'''\n\nmodel.save('model.h5')\n\n# save picture lossHistory.png\nvisualize_loss_history(history_object)\n\n"
] | [
[
"sklearn.utils.shuffle",
"numpy.array",
"sklearn.model_selection.train_test_split"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DanPorter/babelscan | [
"71fa43f13a8318efbcdb412c4fca533d4b6f9ec9",
"71fa43f13a8318efbcdb412c4fca533d4b6f9ec9"
] | [
"babelscan_unit_test.py",
"babelscan/fitting.py"
] | [
"\"\"\"\nUnit test for babelscan\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport babelscan\n\n\nprint('####################################################')\nprint('############## babelscan unit tests ################')\nprint('####################################################')\nprint('\\n')\nprint(babelscan.module_info())\n\nfile = r\"C:\\Users\\dgpor\\Dropbox\\Python\\ExamplePeaks\\810002.nxs\" # eta scan with pilatus\ncv_file = r\"C:\\Users\\dgpor\\Dropbox\\Python\\ExamplePeaks\\857991.nxs\" # trajectory scan/ cvscan/ kthZebra\nim_file = r'C:\\\\Users\\\\dgpor\\\\OneDrive - Diamond Light Source Ltd\\\\I16\\\\Nexus_Format\\\\example_nexus\\\\872996.nxs' # hkl scan with data\ndat_file = r'C:\\\\Users\\\\dgpor\\\\OneDrive - Diamond Light Source Ltd\\\\I16\\\\Nexus_Format\\\\example_nexus\\\\872996.dat'\ndatadir = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\" # eta scan with pilatus\nrsmap = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\\872996-pilatus3_100k-files\\rsmap_872996_201215_101906.nxs\"\ni10_file = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\I10_nexus\\i10-578596.nxs\"\ni06_file = r\"C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\I06_example\\227980.dat\"\n\n\nprint('\\n\\n############ File Type Tests ##############')\nprint('standard I16 eta scan:')\nscan = babelscan.file_loader(file)\nprint(scan)\nprint('\\nI16 CV scan:')\nscan = babelscan.file_loader(cv_file)\nprint(scan)\nprint('\\nI16 hkl scan:')\nscan = babelscan.file_loader(im_file)\nprint(scan)\nprint('\\nI16 .dat file:')\nscan = babelscan.file_loader(dat_file)\nprint(scan)\nprint('\\nI16 rsmap file:')\nscan = babelscan.file_loader(rsmap)\nprint(scan)\nprint('\\nI10 Nexus file:')\nscan = babelscan.file_loader(i10_file)\nprint(scan)\nprint('\\nI06 .dat file:')\nscan = babelscan.file_loader(i06_file, scan_command_name='command')\nprint(scan)\n\n\nprint('\\n\\n############ Missing count_time Tests ##############')\nscan = babelscan.file_loader(file, debug='all')\nscan.add2namespace(['count_time', 'counttime', 'Time', 't'], None, 'count_time')\nprint(scan)\nprint('\\n\\n')\nprint(scan('count_time'))\nprint('\\n\\n')\nprint(scan('nroi[31,31]'))\n\n\nprint('\\n\\n############### FolderMonitor Tests ################')\nexp = babelscan.FolderMonitor(datadir)\nscan = exp.scan(0)\nprint(scan)\n\n\nprint('\\n\\n##################### Plot Tests ###################')\nscan = exp.scan(794940)\nx, y, dy, xlab, ylab = scan.get_plot_data('axes', 'nroi_peak[31,31]', '/count_time/Transmission', 'np.sqrt(x+0.1)')\n\nplt.figure()\nplt.errorbar(x, y, dy, fmt='-o')\nplt.xlabel(xlab)\nplt.ylabel(ylab)\nplt.title(scan.title())\n\nscan.plot.plot_image('sum', clim=[0, 100])\nplt.show()\n\nprint('\\n\\n##################### Fit Tests ###################')\nscan = exp(877619) # merlin\nscan.fit('axes', 'nroi_peak[31, 31]')\nscan.plot('axes', ['nroi_peak[31, 31]', 'fit'])\nprint(scan.string('amplitude'))\n\nscan = exp.scan(794940) # multipeak\nscan.fit.multi_peak_fit(npeaks=2)\nscan.plot('axes', ['signal', 'fit', 'p1_fit', 'p2_fit', 'bkg_fit'])\nplt.show()\n\nprint('\\n\\n################# MultiScan Tests ##################')\nscan_range = range(794932, 794947, 1) # datadir, sperp, spara, eta scans\nscans = exp.scans(scan_range, ['sperp', 'spara'])\nprint(scans)\n\n\nprint('\\n\\n################### Volume Tests ###################')\nscan = 
babelscan.file_loader(im_file)\nvolume = scan.volume()\nprint('%r, %s' % (scan, scan.find_image()))\nprint(volume)\nprint(np.max(volume))\nprint(volume.peak_search())\n\nscan1 = babelscan.file_loader(dat_file)\nvolume1 = scan1.volume()\nprint('\\n%r' % scan1)\nprint(volume1)\nprint(np.max(volume1))\nprint(volume1.peak_search())\n\nscan2 = babelscan.file_loader(file)\nvolume2 = scan2.volume()\nprint('\\n%r, %s' % (scan2, scan2.find_image()))\nprint(volume2)\nprint(np.max(volume2))\nprint(volume2.peak_search())\n\nscan3 = babelscan.file_loader(rsmap)\nvolume3 = scan3.volume()\nprint('\\n%r, %s' % (scan3, scan3.find_image()))\nprint(volume3)\nprint(np.max(volume3))\nprint(volume3.peak_search())\n\n# Volume plot\nvolume2.plot()\nam = np.array(volume2.argmax())\nprint('Volume argmax:', am, am - (10, 10, 10), am + (10, 10, 10))\nfrom babelscan.plotting_matplotlib import create_axes, labels\nax = create_axes()\nvolume2.plot.cut(am-(10,10,10), am+(10,10,10), axes=ax)\nlabels('Volume', 'pixels', 'value', legend=True, axes=ax)\nplt.show()\n\n\nprint('\\n\\n#################### Time Tests ####################')\nallscan = exp.allscannumbers()\nfor scn in allscan:\n scan = exp.scan(scn)\n scan.options(start_time_name=['start_time', 'TimeSec'], end_time_name=['end_time', 'TimeSec'])\n scan.add2namespace(['counttime', 'Time', 't'], other_names='count_time', default_value=0)\n start_time = scan.time_start()\n duration = scan.duration()\n print('#%s start: %s, duration: %s' % (scn, start_time, duration))\n\n\nprint('\\n\\n#################### .dat Tests ####################')\nexp.set_format('%d.dat')\nallscan = exp.allscannumbers()\nfor scn in allscan:\n scan = exp.scan(scn)\n scan.options(start_time_name=['start_time', 'TimeSec'], end_time_name=['end_time', 'TimeSec'])\n scan.add2namespace(['counttime', 'Time', 't'], other_names='count_time', default_value=0)\n start_time = scan.time_start()\n duration = scan.duration()\n print(scan)\n print('#%s start: %s, duration: %s' % (scn, start_time, duration))\n\n\nprint('\\n\\n########## More FolderMonitor Tests ################')\nexp = babelscan.FolderMonitor(datadir)\n# Add options\nexp.options(\n str_list=['scan_number', 'scan_command', 'axes', 'signal', 'start_time', 'end_time', 'count_time'],\n start_time_name=['start_time', 'TimeSec'],\n end_time_name=['end_time', 'TimeSec'],\n names={'count_time': ['Time', 'counttime', 't']},\n defaults={'count_time': 0, 'start_time': None, 'end_time': None}\n)\nallfiles = exp.allscanfiles()\nfor f in allfiles:\n print(exp.scan(f))\n",
"\"\"\"\nFitting functions using lmfit\n\nSee: https://lmfit.github.io/lmfit-py/builtin_models.html\n\nUse of peakfit:\nfrom fitting import peakfit\nfit = peakfit(xdata, ydata) # returns lmfit object\nprint(fit)\nfit.plot()\n\"\"\"\n\nimport numpy as np\nfrom lmfit.models import GaussianModel, LorentzianModel, VoigtModel, PseudoVoigtModel, LinearModel, ExponentialModel\n\n# https://lmfit.github.io/lmfit-py/builtin_models.html#peak-like-models\nMODELS = {\n 'gaussian': GaussianModel,\n 'lorentz': LorentzianModel,\n 'voight': VoigtModel,\n 'pvoight': PseudoVoigtModel,\n 'linear': LinearModel,\n 'exponential': ExponentialModel\n}\n\nPEAK_MODELS = {\n 'gaussian': ['gaussian', 'gauss'],\n 'voight': ['voight', 'voight model'],\n 'pvoight': ['pseudovoight', 'pvoight'],\n 'lorentz': ['lorentz', 'lorentzian', 'lor'],\n}\n\nBACKGROUND_MODELS = {\n 'linear': ['flat', 'slope', 'linear', 'line', 'straight'],\n 'exponential': ['exponential', 'curve']\n}\n\n# https://lmfit.github.io/lmfit-py/fitting.html#fit-methods-table\nMETHODS = {\n 'leastsq': 'Levenberg-Marquardt',\n 'nelder': 'Nelder-Mead',\n 'lbfgsb': 'L-BFGS-B',\n 'powell': 'Powell',\n 'cg': 'Conjugate Gradient',\n 'newton': 'Newton-CG',\n 'cobyla': 'COBYLA',\n 'bfgsb': 'BFGS',\n 'tnc': 'Truncated Newton',\n 'trust-ncg': 'Newton CG trust-region',\n 'trust-exact': 'Exact trust-region',\n 'trust-krylov': 'Newton GLTR trust-region',\n 'trust-constr': 'Constrained trust-region',\n 'dogleg': 'Dogleg',\n 'slsqp': 'Sequential Linear Squares Programming',\n 'differential_evolution': 'Differential Evolution',\n 'brute': 'Brute force method',\n 'basinhopping': 'Basinhopping',\n 'ampgo': 'Adaptive Memory Programming for Global Optimization',\n 'shgo': 'Simplicial Homology Global Ooptimization',\n 'dual_annealing': 'Dual Annealing',\n 'emcee': 'Maximum likelihood via Monte-Carlo Markov Chain',\n}\n\n\ndef error_func(y):\n \"\"\"Default error function\"\"\"\n return np.sqrt(np.abs(y) + 1)\n\n\ndef peak_ratio(y, yerror=None):\n \"\"\"\n Return the ratio signal / error for given dataset\n From Blessing, J. Appl. Cryst. (1997). 
30, 421-426 Equ: (1) + (6)\n peak_ratio = (sum((y-bkg)/dy^2)/sum(1/dy^2)) / sqrt(i/sum(1/dy^2))\n :param y: array of y data\n :param yerror: array of errors on data, or None to calcualte np.sqrt(y+0.001)\n :return: float ratio signal / err\n \"\"\"\n if yerror is None:\n yerror = error_func(y)\n bkg = np.min(y)\n wi = 1 / yerror ** 2\n signal = np.sum(wi * (y - bkg)) / np.sum(wi)\n err = np.sqrt(len(y) / np.sum(wi))\n return signal / err\n\n\ndef gen_weights(yerrors=None):\n \"\"\"\n Generate weights for fitting routines\n :param yerrors: array(n) or None\n :return: array(n) or None\n \"\"\"\n if yerrors is None or np.all(np.abs(yerrors) < 0.001):\n weights = None\n else:\n yerrors = np.asarray(yerrors, dtype=float)\n yerrors[yerrors < 1] = 1.0\n weights = 1 / yerrors\n weights = np.abs(np.nan_to_num(weights))\n return weights\n\n\ndef gauss(x, y=None, height=1, cen=0, fwhm=0.5, bkg=0):\n \"\"\"\n Define Gaussian distribution in 1 or 2 dimensions\n From http://fityk.nieto.pl/model.html\n x = [1xn] array of values, defines size of gaussian in dimension 1\n y = None* or [1xm] array of values, defines size of gaussian in dimension 2\n height = peak height\n cen = peak centre\n fwhm = peak full width at half-max\n bkg = background\n \"\"\"\n\n if y is None:\n y = cen\n\n x = np.asarray(x, dtype=np.float).reshape([-1])\n y = np.asarray(y, dtype=np.float).reshape([-1])\n X, Y = np.meshgrid(x, y)\n g = height * np.exp(-np.log(2) * (((X - cen) ** 2 + (Y - cen) ** 2) / (fwhm / 2) ** 2)) + bkg\n\n if len(y) == 1:\n g = g.reshape([-1])\n return g\n\n\ndef group_adjacent(values, close=10):\n \"\"\"\n Average adjacent values in array, return grouped array and indexes to return groups to original array\n E.G.\n grp, idx = group_adjacent([1,2,3,10,12,31], close=3)\n grp -> [2, 11, 31]\n idx -> [[0,1,2], [3,4], [5]]\n\n :param values: array of values to be grouped\n :param close: float\n :return grouped_values: float array(n) of grouped values\n :return indexes: [n] list of lists, each item relates to an averaged group, with indexes from values\n \"\"\"\n # Check distance between good peaks\n dist_chk = []\n dist_idx = []\n gx = 0\n dist = [values[gx]]\n idx = [gx]\n while gx < len(values) - 1:\n gx += 1\n if (values[gx] - values[gx - 1]) < close:\n dist += [values[gx]]\n idx += [gx]\n # print('Close %2d %2d %2d %s' % (gx, indexes[gx], indexes[gx-1], dist))\n else:\n dist_chk += [np.mean(dist)]\n dist_idx += [idx]\n dist = [values[gx]]\n idx = [gx]\n # print('Next %2d %2d %2d %s' % (gx, indexes[gx], indexes[gx-1], dist_chk))\n dist_chk += [np.mean(dist)]\n dist_idx += [idx]\n # print('Last %2d %2d %2d %s' % (gx, indexes[gx], indexes[gx-1], dist_chk))\n return np.array(dist_chk), dist_idx\n\n\ndef local_maxima_1d(y):\n \"\"\"\n Find local maxima in 1d array\n Returns points with central point higher than neighboring points\n Copied from scipy.signal._peak_finding_utils\n https://github.com/scipy/scipy/blob/v1.7.1/scipy/signal/_peak_finding_utils.pyx\n :param y: list or array\n :return: array of peak indexes\n \"\"\"\n y = np.asarray(y, dtype=float).reshape(-1)\n\n # Preallocate, there can't be more maxima than half the size of `y`\n midpoints = np.empty(y.shape[0] // 2, dtype=np.intp)\n m = 0 # Pointer to the end of valid area in allocated arrays\n i = 1 # Pointer to current sample, first one can't be maxima\n i_max = y.shape[0] - 1 # Last sample can't be maxima\n while i < i_max:\n # Test if previous sample is smaller\n if y[i - 1] < y[i]:\n i_ahead = i + 1 # Index to look ahead of current sample\n\n # 
Find next sample that is unequal to x[i]\n while i_ahead < i_max and y[i_ahead] == y[i]:\n i_ahead += 1\n\n # Maxima is found if next unequal sample is smaller than x[i]\n if y[i_ahead] < y[i]:\n left_edge = i\n right_edge = i_ahead - 1\n midpoints[m] = (left_edge + right_edge) // 2\n m += 1\n # Skip samples that can't be maximum\n i = i_ahead\n i += 1\n return midpoints[:m]\n\n\ndef find_local_maxima(y, yerror=None):\n \"\"\"\n Find local maxima in 1d arrays, returns index of local maximums, plus\n estimation of the peak power for each maxima and a classification of whether the maxima is greater than\n the standard deviation of the error\n E.G.\n index, power, isgood = find_local_maxima(ydata)\n maxima = ydata[index[isgood]]\n maxima_power = power[isgood]\n Peak Power:\n peak power for each maxima is calculated using the peak_ratio algorithm for each maxima and adjacent points\n Good Peaks:\n Maxima are returned Good if: power > (max(y) - min(y)) / std(yerror)\n :param y: array(n) of data\n :param yerror: array(n) of errors on data, or None to use default error function (sqrt(abs(y)+1))\n :return index: array(m<n) of indexes in y of maxima\n :return power: array(m) of estimated peak power for each maxima\n :return isgood: bool array(m) where True elements have power > power of the array\n \"\"\"\n\n if yerror is None or np.all(np.abs(yerror) < 0.1):\n yerror = error_func(y)\n yerror[yerror < 1] = 1.0\n bkg = np.min(y)\n wi = 1 / yerror ** 2\n\n index = local_maxima_1d(y)\n # average nearest 3 points to peak\n power = np.array([np.sum(wi[m-1:m+2] * (y[m-1:m+2] - bkg)) / np.sum(wi[m-1:m+2]) for m in index])\n # Determine if peak is good\n isgood = power > (np.max(y) - np.min(y)) / (np.std(yerror) + 1)\n return index, power, isgood\n\n\ndef find_peaks(y, yerror=None, min_peak_power=None, peak_distance_idx=6):\n \"\"\"\n Find peak shaps in linear-spaced 1d arrays with poisson like numerical values\n E.G.\n index, power = find_peaks(ydata, yerror, min_peak_power=None, peak_distance_idx=10)\n peak_centres = xdata[index] # ordered by peak strength\n :param y: array(n) of data\n :param yerror: array(n) of errors on data, or None to use default error function (sqrt(abs(y)+1))\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :return index: array(m) of indexes in y of peaks that satisfy conditions\n :return power: array(m) of estimated power of each peak\n \"\"\"\n # Get all peak positions\n midpoints, peak_signals, chk = find_local_maxima(y, yerror)\n\n if min_peak_power is None:\n good_peaks = chk\n else:\n good_peaks = peak_signals >= min_peak_power\n\n # select indexes of good peaks\n peaks_idx = midpoints[good_peaks]\n peak_power = peak_signals[good_peaks]\n if len(peaks_idx) == 0:\n return peaks_idx, peak_power\n\n # Average peaks close to each other\n group_idx, group_signal_idx = group_adjacent(peaks_idx, peak_distance_idx)\n peaks_idx = np.round(group_idx).astype(int)\n peak_power = np.array([np.sum(peak_power[ii]) for ii in group_signal_idx])\n\n # sort peak order by strength\n power_sort = np.argsort(peak_power)\n return peaks_idx[power_sort], peak_power[power_sort]\n\n\ndef peak_results(res):\n \"\"\"\n Generate totals dict\n :param res: lmfit_result\n :return: {totals: (value, error)}\n \"\"\"\n peak_prefx = [mod.prefix for mod in res.components if 'bkg' not in mod.prefix]\n npeaks = len(peak_prefx)\n nn = 1 / len(peak_prefx) if len(peak_prefx) > 0 else 1\n comps = res.eval_components()\n fit_dict = {\n 'lmfit': res,\n 'npeaks': npeaks,\n 'chisqr': res.chisqr,\n 'xdata': res.userkws['x'],\n 'ydata': res.data,\n 'weights': res.weights,\n 'yerror': 1 / res.weights if res.weights is not None else 0 * res.data,\n 'yfit': res.best_fit,\n }\n for comp_prefx, comp in comps.items():\n fit_dict['%sfit' % comp_prefx] = comp\n for pname, param in res.params.items():\n ename = 'stderr_' + pname\n fit_dict[pname] = param.value\n fit_dict[ename] = param.stderr\n totals = {\n 'amplitude': np.sum([res.params['%samplitude' % pfx].value for pfx in peak_prefx]),\n 'center': np.mean([res.params['%scenter' % pfx].value for pfx in peak_prefx]),\n 'sigma': np.mean([res.params['%ssigma' % pfx].value for pfx in peak_prefx]),\n 'height': np.mean([res.params['%sheight' % pfx].value for pfx in peak_prefx]),\n 'fwhm': np.mean([res.params['%sfwhm' % pfx].value for pfx in peak_prefx]),\n 'background': np.mean(comps['bkg_']),\n 'stderr_amplitude': np.sqrt(np.sum([res.params['%samplitude' % pfx].stderr ** 2 for pfx in peak_prefx])),\n 'stderr_center': np.sqrt(np.sum([res.params['%scenter' % pfx].stderr ** 2 for pfx in peak_prefx])) * nn,\n 'stderr_sigma': np.sqrt(np.sum([res.params['%ssigma' % pfx].stderr ** 2 for pfx in peak_prefx])) * nn,\n 'stderr_height': np.sqrt(np.sum([res.params['%sheight' % pfx].stderr ** 2 for pfx in peak_prefx])) * nn,\n 'stderr_fwhm': np.sqrt(np.sum([res.params['%sfwhm' % pfx].stderr ** 2 for pfx in peak_prefx])) * nn,\n }\n fit_dict.update(totals)\n return fit_dict\n\n\ndef modelfit(xvals, yvals, yerrors=None, model=None, initial_parameters=None, fix_parameters=None,\n method='leastsq', print_result=False, plot_result=False):\n \"\"\"\n Fit x,y data to a model from lmfit\n E.G.:\n res = peakfit(x, y, model='Gauss')\n print(res.fit_report())\n res.plot()\n val = res.params['amplitude'].value\n err = res.params['amplitude'].stderr\n\n Model:\n from lmfit import models\n model1 = model.GaussianModel()\n model2 = model.LinearModel()\n model = model1 + model2\n res = model.fit(y, x=x)\n\n Provide initial guess:\n res = modelfit(x, y, model=VoightModel(), initial_parameters={'center':1.23})\n\n Fix parameter:\n res = modelfit(x, y, model=VoightModel(), fix_parameters={'sigma': 
fwhm/2.3548200})\n\n :param xvals: array(n) position data\n :param yvals: array(n) intensity data\n :param yerrors: None or array(n) - error data to pass to fitting function as weights: 1/errors^2\n :param model: lmfit.Model\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param method: str method name, from lmfit fitting methods\n :param print_result: if True, prints the fit results using fit.fit_report()\n :param plot_result: if True, plots the results using fit.plot()\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n\n xvals = np.asarray(xvals, dtype=float).reshape(-1)\n yvals = np.asarray(yvals, dtype=float).reshape(-1)\n weights = gen_weights(yerrors)\n\n if initial_parameters is None:\n initial_parameters = {}\n if fix_parameters is None:\n fix_parameters = {}\n\n if model is None:\n model = GaussianModel() + LinearModel()\n\n pars = model.make_params()\n\n # user input parameters\n for ipar, ival in initial_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=True)\n for ipar, ival in fix_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=False)\n\n res = model.fit(yvals, pars, x=xvals, weights=weights, method=method)\n\n if print_result:\n print(res.fit_report())\n if plot_result:\n res.plot()\n return res\n\n\ndef peakfit(xvals, yvals, yerrors=None, model='Voight', background='slope',\n initial_parameters=None, fix_parameters=None, method='leastsq', print_result=False, plot_result=False):\n \"\"\"\n Fit x,y data to a peak model using lmfit\n E.G.:\n res = peakfit(x, y, model='Gauss')\n print(res.fit_report())\n res.plot()\n val = res.params['amplitude'].value\n err = res.params['amplitude'].stderr\n\n Peak Models:\n Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'\n Background Models:\n Choice of background model: 'slope', 'exponential'\n\n Peak Parameters:\n 'amplitude', 'center', 'sigma', pvoight only: 'fraction'\n output only: 'fwhm', 'height'\n Background parameters:\n 'bkg_slope', 'bkg_intercept', or for exponential: 'bkg_amplitude', 'bkg_decay'\n\n Provide initial guess:\n res = peakfit(x, y, model='Voight', initial_parameters={'center':1.23})\n\n Fix parameter:\n res = peakfit(x, y, model='gauss', fix_parameters={'sigma': fwhm/2.3548200})\n\n :param xvals: array(n) position data\n :param yvals: array(n) intensity data\n :param yerrors: None or array(n) - error data to pass to fitting function as weights: 1/errors^2\n :param model: str, specify the peak model: 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param method: str method name, from lmfit fitting methods\n :param print_result: if True, prints the fit results using fit.fit_report()\n :param plot_result: if True, plots the results using fit.plot()\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n\n xvals = np.asarray(xvals, dtype=float).reshape(-1)\n yvals = np.asarray(yvals, dtype=float).reshape(-1)\n weights = gen_weights(yerrors)\n\n if initial_parameters is None:\n initial_parameters = {}\n if fix_parameters is None:\n fix_parameters = {}\n\n peak_mod = None\n bkg_mod = None\n for model_name, names in PEAK_MODELS.items():\n if model.lower() in names:\n peak_mod = MODELS[model_name]()\n for model_name, 
names in BACKGROUND_MODELS.items():\n if background.lower() in names:\n bkg_mod = MODELS[model_name](prefix='bkg_')\n\n pars = peak_mod.guess(yvals, x=xvals)\n pars += bkg_mod.make_params()\n # pars += bkg_mod.make_params(intercept=np.min(yvals), slope=0)\n # pars['gamma'].set(value=0.7, vary=True, expr='') # don't fix gamma\n\n # user input parameters\n for ipar, ival in initial_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=True)\n for ipar, ival in fix_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=False)\n\n mod = peak_mod + bkg_mod\n res = mod.fit(yvals, pars, x=xvals, weights=weights, method=method)\n\n if print_result:\n print(res.fit_report())\n if plot_result:\n res.plot()\n return res\n\n\ndef peak2dfit(xdata, ydata, image_data, initial_parameters=None, fix_parameters=None,\n print_result=False, plot_result=False):\n \"\"\"\n Fit Gaussian Peak in 2D\n *** requires lmfit > 1.0.3 ***\n Not yet finished!\n :param xdata:\n :param ydata:\n :param image_data:\n :param initial_parameters:\n :param fix_parameters:\n :param print_result:\n :param plot_result:\n :return:\n \"\"\"\n from lmfit.models import Gaussian2dModel # lmfit V1.0.3+\n print('Not yet finished...')\n pass\n\n\ndef generate_model(xvals, yvals, yerrors=None,\n npeaks=None, min_peak_power=None, peak_distance_idx=6,\n model='Gaussian', background='slope', initial_parameters=None, fix_parameters=None):\n \"\"\"\n Generate lmfit profile models\n See: https://lmfit.github.io/lmfit-py/builtin_models.html#example-3-fitting-multiple-peaks-and-using-prefixes\n E.G.:\n mod, pars = generate_model(x, y, npeaks=1, model='Gauss', backgroud='slope')\n\n Peak Search:\n The number of peaks and initial peak centers will be estimated using the find_peaks function. If npeaks is given,\n the largest npeaks will be used initially. 'min_peak_power' and 'peak_distance_idx' can be input to tailor the\n peak search results.\n If the peak search returns < npeaks, fitting parameters will initially choose npeaks equally distributed points\n\n Peak Models:\n Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'\n Background Models:\n Choice of background model: 'slope', 'exponential'\n\n :param xvals: array(n) position data\n :param yvals: array(n) intensity data\n :param yerrors: None or array(n) - error data to pass to fitting function as weights: 1/errors^2\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str or lmfit.Model, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n xvals = np.asarray(xvals, dtype=float).reshape(-1)\n yvals = np.asarray(yvals, dtype=float).reshape(-1)\n\n # Find peaks\n peak_idx, peak_pow = find_peaks(yvals, yerrors, min_peak_power, peak_distance_idx)\n peak_centers = {'p%d_center' % (n+1): xvals[peak_idx[n]] for n in range(len(peak_idx))}\n if npeaks is None:\n npeaks = len(peak_centers)\n\n if initial_parameters is None:\n initial_parameters = {}\n if fix_parameters is None:\n fix_parameters = {}\n\n peak_mod = None\n bkg_mod = None\n for model_name, names in PEAK_MODELS.items():\n if model.lower() in names:\n peak_mod = MODELS[model_name]\n for model_name, names in BACKGROUND_MODELS.items():\n if background.lower() in names:\n bkg_mod = MODELS[model_name]\n\n mod = bkg_mod(prefix='bkg_')\n for n in range(npeaks):\n mod += peak_mod(prefix='p%d_' % (n+1))\n\n pars = mod.make_params()\n\n # initial parameters\n min_wid = np.mean(np.diff(xvals))\n max_wid = xvals.max() - xvals.min()\n area = (yvals.max() - yvals.min()) * (3 * min_wid)\n percentile = np.linspace(0, 100, npeaks + 2)\n for n in range(1, npeaks+1):\n pars['p%d_amplitude' % n].set(value=area/npeaks, min=0)\n pars['p%d_sigma' % n].set(value=3*min_wid, min=min_wid, max=max_wid)\n pars['p%d_center' % n].set(value=np.percentile(xvals, percentile[n]), min=xvals.min(), max=xvals.max())\n # find_peak centers\n for ipar, ival in peak_centers.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=True)\n # user input parameters\n for ipar, ival in initial_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=True)\n for ipar, ival in fix_parameters.items():\n if ipar in pars:\n pars[ipar].set(value=ival, vary=False)\n return mod, pars\n\n\ndef generate_model_script(xvals, yvals, yerrors=None,\n npeaks=None, min_peak_power=None, peak_distance_idx=6,\n model='Gaussian', background='slope', initial_parameters=None, fix_parameters=None,\n include_babelscan=True):\n \"\"\"\n Generate script to create lmfit profile models\n E.G.:\n string = generate_mode_stringl(x, y, npeaks=1, model='Gauss', backgroud='slope')\n\n :param xvals: array(n) position data\n :param yvals: array(n) intensity data\n :param yerrors: None or array(n) - error data to pass to fitting function as weights: 1/errors^2\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str or lmfit.Model, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param include_babelscan: if False, only include lmfit imports\n :return: str\n \"\"\"\n\n data = \"xdata = np.array(%s)\\n\" % list(xvals)\n data += \"ydata = np.array(%s)\\n\" % list(yvals)\n if yerrors is None or np.all(np.abs(yerrors) < 0.001):\n data += 'yerrors = None\\n'\n data += 'weights = None\\n\\n'\n else:\n data += \"yerrors = np.array(%s)\\n\" % list(yerrors)\n data += \"yerrors[yerrors < 1] = 1.0\\n\"\n data += \"weights = 1 / yerrors\\n\\n\"\n\n if initial_parameters is None:\n initial_parameters = {}\n if fix_parameters is None:\n fix_parameters = {}\n params = \"initial = %s\\nfixed = %s\\n\" % (initial_parameters, fix_parameters)\n\n if include_babelscan:\n out = \"import numpy as np\\nfrom babelscan import fitting\\n\\n\"\n out += data\n out += '%s\\n' % params\n out += \"mod, pars = fitting.generate_model(xdata, ydata, yerrors,\\n\" \\\n \" npeaks=%s, min_peak_power=%s, peak_distance_idx=%s,\\n\" \\\n \" model='%s', background='%s',\\n\" \\\n \" initial_parameters=initial, fix_parameters=fixed)\\n\" % (\n npeaks, min_peak_power, peak_distance_idx, model, background\n )\n else:\n # Find peaks\n peak_idx, peak_pow = find_peaks(yvals, yerrors, min_peak_power, peak_distance_idx)\n peak_centers = {'p%d_center' % (n + 1): xvals[peak_idx[n]] for n in range(len(peak_idx))}\n for model_name, names in PEAK_MODELS.items():\n if model.lower() in names:\n peak_mod = MODELS[model_name]\n for model_name, names in BACKGROUND_MODELS.items():\n if background.lower() in names:\n bkg_mod = MODELS[model_name]\n peak_name = peak_mod.__name__\n bkg_name = bkg_mod.__name__\n\n out = \"import numpy as np\\nfrom lmfit import models\\n\\n\"\n out += data\n out += \"%speak_centers = %s\\n\\n\" % (params, peak_centers)\n out += \"mod = models.%s(prefix='bkg_')\\n\" % bkg_name\n out += \"for n in range(len(peak_centers)):\\n mod += models.%s(prefix='p%%d_' %% (n+1))\\n\" % peak_name\n out += \"pars = mod.make_params()\\n\\n\"\n out += \"# initial parameters\\n\"\n out += \"min_wid = np.mean(np.diff(xdata))\\n\"\n out += \"max_wid = xdata.max() - xdata.min()\\n\"\n out += \"area = (ydata.max() - ydata.min()) * (3 * min_wid)\\n\"\n out += \"for n in range(1, len(peak_centers)+1):\\n\"\n out += \" pars['p%d_amplitude' % n].set(value=area/len(peak_centers), min=0)\\n\"\n out += \" pars['p%d_sigma' % n].set(value=3*min_wid, min=min_wid, max=max_wid)\\n\"\n out += \"# find_peak centers\\n\"\n out += \"for ipar, ival in peak_centers.items():\\n\"\n out += \" if ipar in pars:\\n\"\n out += \" pars[ipar].set(value=ival, vary=True)\\n\"\n out += \"# user input parameters\\n\"\n out += \"for ipar, ival in initial.items():\\n\"\n out += \" if ipar in pars:\\n\"\n out += \" pars[ipar].set(value=ival, vary=True)\\n\"\n out += \"for ipar, ival in fixed.items():\\n\"\n out += \" if ipar in pars:\\n\"\n out += \" pars[ipar].set(value=ival, vary=False)\\n\\n\"\n out += \"# Fit data\\n\"\n out += \"res = mod.fit(ydata, pars, x=xdata, weights=weights, method='leastsqr')\\n\"\n out += \"print(res.fit_report())\\n\\n\"\n out += \"fig, grid = res.plot()\\n\"\n out += \"ax1, ax2 = 
fig.axes\\n\"\n out += \"comps = res.eval_components()\\n\"\n out += \"for component in comps.keys():\\n\"\n out += \" ax2.plot(xdata, comps[component], label=component)\\n\"\n out += \" ax2.legend()\\n\\n\"\n return out\n\n\ndef multipeakfit(xvals, yvals, yerrors=None,\n npeaks=None, min_peak_power=None, peak_distance_idx=10,\n model='Gaussian', background='slope', initial_parameters=None, fix_parameters=None, method='leastsq',\n print_result=False, plot_result=False):\n \"\"\"\n Fit x,y data to a model with multiple peaks using lmfit\n See: https://lmfit.github.io/lmfit-py/builtin_models.html#example-3-fitting-multiple-peaks-and-using-prefixes\n E.G.:\n res = multipeakfit(x, y, npeaks=None, model='Gauss', plot_result=True)\n val = res.params['p1_amplitude'].value\n err = res.params['p1_amplitude'].stderr\n\n Peak Search:\n The number of peaks and initial peak centers will be estimated using the find_peaks function. If npeaks is given,\n the largest npeaks will be used initially. 'min_peak_power' and 'peak_distance_idx' can be input to tailor the\n peak search results.\n If the peak search returns < npeaks, fitting parameters will initially choose npeaks equally distributed points\n\n Peak Models:\n Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'\n Background Models:\n Choice of background model: 'slope', 'exponential'\n\n Peak Parameters (%d=number of peak):\n Parameters in '.._parameters' dicts and in output results. Each peak (upto npeaks) has a set number of parameters:\n 'p%d_amplitude', 'p%d_center', 'p%d_dsigma', pvoight only: 'p%d_fraction'\n output only: 'p%d_fwhm', 'p%d_height'\n Background parameters:\n 'bkg_slope', 'bkg_intercept', or for exponential: 'bkg_amplitude', 'bkg_decay'\n\n Provide initial guess:\n res = multipeakfit(x, y, model='Voight', initial_parameters={'p1_center':1.23})\n\n Fix parameter:\n res = multipeakfit(x, y, model='gauss', fix_parameters={'p1_sigma': fwhm/2.3548200})\n\n :param xvals: array(n) position data\n :param yvals: array(n) intensity data\n :param yerrors: None or array(n) - error data to pass to fitting function as weights: 1/errors^2\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str or lmfit.Model, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param method: str method name, from lmfit fitting methods\n :param print_result: if True, prints the fit results using fit.fit_report()\n :param plot_result: if True, plots the results using fit.plot()\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n xvals = np.asarray(xvals, dtype=float).reshape(-1)\n yvals = np.asarray(yvals, dtype=float).reshape(-1)\n weights = gen_weights(yerrors)\n\n mod, pars = generate_model(xvals, yvals, yerrors,\n npeaks=npeaks, min_peak_power=min_peak_power, peak_distance_idx=peak_distance_idx,\n model=model, background=background,\n initial_parameters=initial_parameters, fix_parameters=fix_parameters)\n\n # Fit data against model using choosen method\n res = mod.fit(yvals, pars, x=xvals, weights=weights, method=method)\n\n if print_result:\n print(res.fit_report())\n if plot_result:\n fig, grid = res.plot()\n ax1, ax2 = fig.axes\n # Add peak components\n comps = res.eval_components(x=xvals)\n for component in comps.keys():\n ax2.plot(xvals, comps[component], label=component)\n ax2.legend()\n return res\n\n\n\"----------------------------------------------------------------------------------------------------------------------\"\n\"------------------------------------------------ ScanFitManager ------------------------------------------------------\"\n\"----------------------------------------------------------------------------------------------------------------------\"\n\n\nclass ScanFitManager:\n \"\"\"\n ScanFitManager\n Holds several functions for automatically fitting scan data\n\n fit = ScanFitManager(scan)\n fit.peak_ratio(yaxis) # calculates peak power\n fit.find_peaks(xaxis, yaxis) # automated peak finding routine\n fit.fit(xaxis, yaxis) # estimate & fit data against a peak profile model using lmfit\n fit.multi_peak_fit(xaxis, yaxis) # find peaks & fit multiprofile model using lmfit\n fit.model_fit(xaxis, yaxis, model, pars) # fit supplied model against data\n fit.fit_results() # return lmfit.ModelResult for last fit\n fit.fit_values() # return dict of fit values for last fit\n fit.fit_report() # return str of fit report\n fit.plot() # plot last lmfit results\n * xaxis, yaxis are str names of arrays in the scan namespace\n\n :param scan: babelscan.Scan\n \"\"\"\n\n def __init__(self, scan):\n self.scan = scan\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calls ScanFitManager.fit(...)\"\"\"\n return self.fit(*args, **kwargs)\n\n def __str__(self):\n return self.fit_report()\n\n def peak_ratio(self, yaxis='signal'):\n \"\"\"\n Return the ratio signal / error for given dataset\n From Blessing, J. Appl. Cryst. (1997). 
30, 421-426 Equ: (1) + (6)\n peak_ratio = (sum((y-bkg)/dy^2)/sum(1/dy^2)) / sqrt(i/sum(1/dy^2))\n :param yaxis: str name or address of array to plot on y axis\n :return: float ratio signal / err\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data('axes', yaxis, None, None)\n return peak_ratio(ydata, yerror)\n\n def find_peaks(self, xaxis='axes', yaxis='signal', min_peak_power=None, peak_distance_idx=6):\n \"\"\"\n Find peak shaps in linear-spaced 1d arrays with poisson like numerical values\n E.G.\n centres, index, power = self.find_peaks(xaxis, yaxis, min_peak_power=None, peak_distance_idx=10)\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param min_peak_power: float, only return peaks with power greater than this. If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :return centres: array(m) of peak centers in x, equiv. to xdata[index]\n :return index: array(m) of indexes in y of peaks that satisfy conditions\n :return power: array(m) of estimated power of each peak\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n index, power = find_peaks(ydata, yerror, min_peak_power, peak_distance_idx)\n return xdata[index], index, power\n\n def fit(self, xaxis='axes', yaxis='signal', model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None, method='leastsq', print_result=False, plot_result=False):\n \"\"\"\n Fit x,y data to a peak model using lmfit\n E.G.:\n res = self.fit('axes', 'signal', model='Gauss')\n print(res.fit_report())\n res.plot()\n val = res.params['amplitude'].value\n err = res.params['amplitude'].stderr\n\n Peak Models:\n Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'\n Background Models:\n Choice of background model: 'slope', 'exponential'\n\n Peak Parameters (%d=number of peak):\n 'amplitude', 'center', 'sigma', pvoight only: 'fraction'\n output only: 'fwhm', 'height'\n Background parameters:\n 'bkg_slope', 'bkg_intercept', or for exponential: 'bkg_amplitude', 'bkg_decay'\n\n Provide initial guess:\n res = self.fit(x, y, model='Voight', initial_parameters={'p1_center':1.23})\n\n Fix parameter:\n res = self.fit(x, y, model='gauss', fix_parameters={'p1_sigma': fwhm/2.3548200})\n\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param model: str, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param method: str method name, from lmfit fitting methods\n :param print_result: if True, prints the fit results using fit.fit_report()\n :param plot_result: if True, plots the results using fit.plot()\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n\n # lmfit\n res = peakfit(xdata, ydata, yerror, model=model, background=background,\n initial_parameters=initial_parameters, fix_parameters=fix_parameters, method=method)\n\n output = peak_results(res)\n self.scan.update_namespace(output)\n self.scan.add2namespace('lmfit', res, 'fit_result')\n self.scan.add2namespace('fit', res.best_fit, other_names=['fit_%s' % 
yname])\n\n if print_result:\n print(self.scan.title())\n print(res.fit_report())\n if plot_result:\n fig, grid = res.plot()\n fig.suptitle(self.scan.title(), fontsize=12)\n # plt.subplots_adjust(top=0.85, left=0.15)\n ax1, ax2 = fig.axes\n ax1.set_title('')\n ax2.set_xlabel(xname)\n ax2.set_ylabel(yname)\n return res\n\n def multi_peak_fit(self, xaxis='axes', yaxis='signal',\n npeaks=None, min_peak_power=None, peak_distance_idx=6,\n model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None, method='leastsq',\n print_result=False, plot_result=False):\n \"\"\"\n Fit x,y data to a peak model using lmfit\n E.G.:\n res = self.multi_peak_fit('axes', 'signal', npeaks=2, model='Gauss')\n print(res.fit_report())\n res.plot()\n val1 = res.params['p1_amplitude'].value\n val2 = res.params['p2_amplitude'].value\n\n Peak centers:\n Will attempt a fit using 'npeaks' peaks, with centers defined by defalult by the find_peaks function\n if 'npeaks' is None, the number of peaks found by find_peaks will determine npeaks\n if 'npeaks' is greater than the number of peaks found by find_peaks, initial peak centers are evenly\n distrubuted along xdata.\n\n Peak Models:\n Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'\n Background Models:\n Choice of background model: 'slope', 'exponential'\n\n Peak Parameters (%d=number of peak):\n 'p%d_amplitude', 'p%d_center', 'p%d_sigma', pvoight only: 'p%d_fraction'\n output only: 'p%d_fwhm', 'p%d_height'\n Background parameters:\n 'bkg_slope', 'bkg_intercept', or for exponential: 'bkg_amplitude', 'bkg_decay'\n Total parameters (always available, output only - sum/averages of all peaks):\n 'amplitude', 'center', 'sigma', 'fwhm', 'height', 'background'\n\n Provide initial guess:\n res = self.multi_peak_fit(x, y, model='Voight', initial_parameters={'p1_center':1.23})\n\n Fix parameter:\n res = self.multi_peak_fit(x, y, model='gauss', fix_parameters={'p1_sigma': fwhm/2.3548200})\n\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param method: str method name, from lmfit fitting methods\n :param print_result: if True, prints the fit results using fit.fit_report()\n :param plot_result: if True, plots the results using fit.plot()\n :return: lmfit.model.ModelResult < fit results object\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n\n # lmfit\n res = multipeakfit(xdata, ydata, yerror, npeaks=npeaks, min_peak_power=min_peak_power,\n peak_distance_idx=peak_distance_idx, model=model, background=background,\n initial_parameters=initial_parameters, fix_parameters=fix_parameters, method=method)\n\n output = peak_results(res)\n self.scan.update_namespace(output)\n self.scan.add2namespace('lmfit', res, 'fit_result')\n self.scan.add2namespace('fit', res.best_fit, other_names=['fit_%s' % yname])\n\n if print_result:\n print(self.scan.title())\n print(res.fit_report())\n print('Totals:')\n print('\\n'.join(self.scan.string(['amplitude', 'center', 'height', 'sigma', 'fwhm', 'background'])))\n if plot_result:\n fig, grid = res.plot()\n fig.suptitle(self.scan.title(), fontsize=12)\n # plt.subplots_adjust(top=0.85, left=0.15)\n ax1, ax2 = fig.axes\n ax1.set_title('')\n ax2.set_xlabel(xname)\n ax2.set_ylabel(yname)\n comps = res.eval_components()\n for component in comps.keys():\n ax2.plot(xdata, comps[component], label=component)\n ax2.legend()\n return res\n\n def modelfit(self, xaxis='axis', yaxis='signal', model=None, pars=None, method='leastsq',\n print_result=False, plot_result=False):\n \"\"\"\n Fit data from scan against lmfit model\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param model: lmfit.Model - object defining combination of models\n :param pars: lmfit.Parameters - object defining model parameters\n :param method: str name of fitting method to use\n :param print_result: bool, if True, print results.fit_report()\n :param plot_result: bool, if True, generate results.plot()\n :return: lmfit fit results\n\n Example:\n from lmfit.models import GaussianModel, LinearModel\n mod = GaussainModel(prefix='p1_') + LinearModel(prefix='bkg_')\n pars = mod.make_params()\n pars['p1_center'].set(value=np.mean(x), min=x.min(), max=x.max())\n res = scan.fit.modelfit('axis', 'signal', mod, pars)\n print(res.fit_report())\n res.plot()\n area = res.params['p1_amplitude'].value\n err = res.params['p1_amplitude'].stderr\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n\n # weights\n if yerror is None or np.all(np.abs(yerror) < 0.001):\n weights = None\n else:\n weights = 1 / np.square(yerror, dtype=float)\n weights = np.nan_to_num(weights)\n\n # Default model, pars\n if model is None:\n model = LinearModel()\n if pars is None:\n pars = model.guess(ydata, x=xdata)\n\n # lmfit\n res = model.fit(ydata, pars, x=xdata, weights=weights, method=method)\n\n self.scan.add2namespace('lmfit', res, 'fit_result')\n self.scan.add2namespace('fit', res.best_fit, other_names=['fit_%s' % yname])\n fit_dict = {}\n for pname, param in res.params.items():\n ename = 
'stderr_' + pname\n fit_dict[pname] = param.value\n fit_dict[ename] = param.stderr\n for name, value in fit_dict.items():\n self.scan.add2namespace(name, value)\n # Add peak components\n comps = res.eval_components(x=xdata)\n for component in comps.keys():\n self.scan.add2namespace('%sfit' % component, comps[component])\n\n if print_result:\n print(self.scan.title())\n print(res.fit_report())\n if plot_result:\n fig, grid = res.plot()\n # plt.suptitle(self.title(), fontsize=12)\n # plt.subplots_adjust(top=0.85, left=0.15)\n ax1, ax2 = fig.axes\n ax2.set_xlabel(xname)\n ax2.set_ylabel(yname)\n return res\n\n def gen_model(self, xaxis='axes', yaxis='signal',\n npeaks=None, min_peak_power=None, peak_distance_idx=6,\n model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None):\n \"\"\"\n Generate lmfit model and parameters\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :return: model, pars\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n mod, pars = generate_model(xdata, ydata,\n npeaks=npeaks, min_peak_power=min_peak_power, peak_distance_idx=peak_distance_idx,\n model=model, background=background,\n initial_parameters=initial_parameters, fix_parameters=fix_parameters)\n return mod, pars\n\n def gen_model_script(self, xaxis='axes', yaxis='signal',\n npeaks=None, min_peak_power=None, peak_distance_idx=6,\n model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None, include_babelscan=True):\n \"\"\"\n Generate script string of\n :param xaxis: str name or address of array to plot on x axis\n :param yaxis: str name or address of array to plot on y axis\n :param npeaks: None or int number of peaks to fit. None will guess the number of peaks\n :param min_peak_power: float, only return peaks with power greater than this. 
If None compare against std(y)\n :param peak_distance_idx: int, group adjacent maxima if closer in index than this\n :param model: str, specify the peak model 'Gaussian','Lorentzian','Voight'\n :param background: str, specify the background model: 'slope', 'exponential'\n :param initial_parameters: None or dict of initial values for parameters\n :param fix_parameters: None or dict of parameters to fix at positions\n :param include_babelscan: if False, only include imports for lmfit\n :return: str\n \"\"\"\n xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)\n out = generate_model_script(xdata, ydata, yerror,\n npeaks=npeaks, min_peak_power=min_peak_power,\n peak_distance_idx=peak_distance_idx,\n model=model, background=background,\n initial_parameters=initial_parameters, fix_parameters=fix_parameters,\n include_babelscan=include_babelscan)\n return out\n\n def fit_result(self, parameter_name=None):\n \"\"\"\n Returns parameter, error from the last run fit\n :param parameter_name: str, name from last fit e.g. 'amplitude', or None to return lmfit object\n :param\n :return:\n \"\"\"\n if not self.scan.isinnamespace('lmfit'):\n self.fit()\n lmfit = self.scan('lmfit')\n if parameter_name is None:\n return lmfit\n param = lmfit.params[parameter_name]\n return param.value, param.stderr\n\n def fit_values(self, fit_result=None):\n \"\"\"Return dict of values from last fit\"\"\"\n if fit_result is None:\n lmfit = self.fit_result()\n return peak_results(lmfit)\n\n def fit_report(self, fit_result=None):\n \"\"\"Return lmfit.ModelResult.fit_report()\"\"\"\n if fit_result is None:\n lmfit = self.fit_result()\n return lmfit.fit_report()\n\n def plot(self, fit_result=None):\n \"\"\"Plot fit results\"\"\"\n if fit_result is None:\n lmfit = self.fit_result()\n\n fig, grid = lmfit.plot()\n fig.suptitle(self.scan.title(), fontsize=12)\n # plt.subplots_adjust(top=0.85, left=0.15)\n ax1, ax2 = fig.axes\n ax1.set_title('')\n return fig\n\n\n\"----------------------------------------------------------------------------------------------------------------------\"\n\"---------------------------------------------- MultiScanFitManager ---------------------------------------------------\"\n\"----------------------------------------------------------------------------------------------------------------------\"\n\n\nclass MultiScanFitManager:\n \"\"\"\n MultiScanFitManager\n Enables fitting across multiple scans in a multiscan object\n\n fit = MultiScanFitManager(scans)\n fit.fit(xaxis, yaxis) # estimate & fit data against a peak profile model using lmfit\n fit.multi_peak_fit(xaxis, yaxis) # find peaks & fit multiprofile model using lmfit\n fit.model_fit(xaxis, yaxis, model, pars) # fit supplied model against data\n fit.fit_results() # return lmfit.ModelResult for last fit\n fit.fit_values() # return dict of fit values for last fit\n fit.fit_report() # return str of fit report\n fit.plot() # plot last lmfit results\n * xaxis, yaxis are str names of arrays in the scan namespace\n\n :param scan: babelscan.MultiScan\n \"\"\"\n\n def __init__(self, multiscan):\n self.multiscan = multiscan\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calls ScanFitManager.fit(...)\"\"\"\n return self.fit(*args, **kwargs)\n\n def fit(self, xaxis='axes', yaxis='signal', model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None, method='leastsq', print_result=False, plot_result=False):\n \"\"\"\n Automatic fitting of scan\n\n Use LMFit\n Pass fit_type = LMFit model\n return 
LMFit output\n \"\"\"\n out = [\n scan.fit(xaxis, yaxis, model, background, initial_parameters, fix_parameters, method,\n print_result, plot_result)\n for scan in self.multiscan\n ]\n return out\n\n def multi_peak_fit(self, xaxis='axes', yaxis='signal',\n npeaks=None, min_peak_power=None, peak_distance_idx=10,\n model='Gaussian', background='slope',\n initial_parameters=None, fix_parameters=None, method='leastsq',\n print_result=False, plot_result=False):\n \"\"\"\n Automatic fitting of scan\n\n Use LMFit\n Pass fit_type = LMFit model\n return LMFit output\n \"\"\"\n out = [\n scan.fit.multi_peak_fit(xaxis, yaxis,\n npeaks=npeaks, min_peak_power=min_peak_power, peak_distance_idx=peak_distance_idx,\n model=model, background=background, initial_parameters=initial_parameters,\n fix_parameters=fix_parameters, method=method,\n print_result=print_result, plot_result=plot_result)\n for scan in self.multiscan\n ]\n return out\n\n def model_fit(self, xaxis='axis', yaxis='signal', model=None, pars=None, method='leastsq',\n print_result=False, plot_result=False):\n \"\"\"\n Automatic fitting of scan against given lmfit Model\n \"\"\"\n out = [\n scan.fit.model_fit(xaxis, yaxis, model=model, pars=pars, method=method,\n print_result=print_result, plot_result=plot_result)\n for scan in self.multiscan\n ]\n return out\n\n"
] | [
[
"matplotlib.pyplot.figure",
"numpy.max",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.square",
"numpy.log",
"numpy.abs",
"numpy.linspace",
"numpy.min",
"numpy.asarray",
"numpy.nan_to_num",
"numpy.percentile",
"numpy.round",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.diff",
"numpy.argsort",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
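Editorial note, not part of the dataset record above: the fitting module stored in the previous row builds composite lmfit models by adding prefixed peak components ('p1_', 'p2_', ...) to a 'bkg_' background model, seeding centres from a peak search and fitting with weights derived from the y errors. The following is a minimal, self-contained sketch of that same pattern; the synthetic data, the two peak centres, and the Gaussian/linear model choice are assumptions made for illustration only, but the lmfit calls (GaussianModel, LinearModel, make_params, Model.fit) are standard lmfit API.

import numpy as np
from lmfit.models import GaussianModel, LinearModel

# Synthetic data: two Gaussian peaks on a sloped background (assumed example values)
x = np.linspace(0, 10, 201)
y = (5.0 * np.exp(-0.5 * ((x - 3.0) / 0.3) ** 2)
     + 3.0 * np.exp(-0.5 * ((x - 7.0) / 0.4) ** 2)
     + 0.2 * x + 1.0
     + np.random.default_rng(0).normal(0, 0.05, x.size))
yerror = np.sqrt(np.abs(y) + 1)   # same default error function as the module above
weights = 1 / yerror              # weights as used in generate_model_script above

# Compose the model the way generate_model() does: background + prefixed peaks
mod = LinearModel(prefix='bkg_')
for n in range(2):
    mod += GaussianModel(prefix='p%d_' % (n + 1))
pars = mod.make_params()

# Rough initial parameters, mirroring the heuristics in generate_model();
# the centres [3.0, 7.0] stand in for the output of a peak search
min_wid = np.mean(np.diff(x))
area = (y.max() - y.min()) * (3 * min_wid)
for n, centre in enumerate([3.0, 7.0], start=1):
    pars['p%d_amplitude' % n].set(value=area / 2, min=0)
    pars['p%d_sigma' % n].set(value=3 * min_wid, min=min_wid, max=x.max() - x.min())
    pars['p%d_center' % n].set(value=centre, min=x.min(), max=x.max())

res = mod.fit(y, pars, x=x, weights=weights, method='leastsq')
print(res.fit_report())
print('p1 area: %.3f +/- %.3f' % (res.params['p1_amplitude'].value,
                                  res.params['p1_amplitude'].stderr))

In lmfit the Gaussian 'amplitude' parameter is the integrated area (height and fwhm are derived), which is why the module above reports peak totals via the '%samplitude' parameters.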
li-phone/DetectionCompetition | [
"a917f16790ec30358e3cfe1aa6e327a2070a1235",
"a917f16790ec30358e3cfe1aa6e327a2070a1235"
] | [
"mmdet-v2/tools/third_party/useless/cocoutils/coco_check.py",
"review_code/modelarts_deploy/mmdet/mmdet/models/detectors/base.py"
] | [
"import os\nimport json\nimport cv2 as cv\nimport numpy as np\nfrom tqdm import tqdm\n\ntry:\n from pandas import json_normalize\nexcept:\n from pandas.io.json import json_normalize\n\n\ndef load_dict(fname):\n with open(fname, \"r\") as fp:\n o = json.load(fp, )\n return o\n\n\ndef save_dict(fname, d, mode='w', **kwargs):\n # 持久化写入\n with open(fname, mode, encoding='utf-8') as fp:\n # json.dump(d, fp, cls=NpEncoder, indent=1, separators=(',', ': '))\n json.dump(d, fp, **kwargs)\n\n\ndef get_segmentation(points):\n return [points[0], points[1], points[2] + points[0], points[1],\n points[2] + points[0], points[3] + points[1], points[0], points[3] + points[1]]\n\n\ndef check_coco(src, dst, img_dir=None, replace=True):\n if not replace:\n print('There is an existed {}.'.format(dst))\n return\n coco = load_dict(src)\n cats = json_normalize(coco['categories'])\n cats = cats.sort_values(by='id')\n coco['categories'] = cats.to_dict('records')\n\n imgs = json_normalize(coco['images'])\n if 'image_id' in list(imgs.columns):\n imgs = imgs.rename(columns={'image_id': 'id'})\n imgs['file_name'] = imgs['file_name'].apply(lambda x: os.path.basename(x))\n imgs = imgs.sort_values(by='id')\n coco['images'] = imgs.to_dict('records')\n\n if 'annotations' in coco:\n anns = json_normalize(coco['annotations'])\n else:\n ann_fakes = [\n {\"area\": 100, \"iscrowd\": 0, \"image_id\": image['id'], \"bbox\": [0, 0, 10, 10], \"category_id\": 1, \"id\": 1}\n for image in coco['images']\n ]\n anns = json_normalize(ann_fakes)\n anns['id'] = list(range(anns.shape[0]))\n anns = anns.to_dict('records')\n for v in anns:\n if 'segmentation' not in v:\n seg = get_segmentation(v['bbox'])\n v['segmentation'] = [[float(_) for _ in seg]]\n coco['annotations'] = anns\n # check image shape\n if img_dir is not None:\n for i, v in tqdm(enumerate(coco['images'])):\n if os.path.exists(os.path.join(img_dir, v['file_name'])):\n img_ = cv.imread(os.path.join(img_dir, v['file_name']))\n height_, width_, _ = img_.shape\n else:\n height_, width_, _ = None, None, 3\n v['width'] = width_\n v['height'] = height_\n save_dict(dst, coco)\n print('check_coco done!')\n return dst\n\n\ndef check_box(coco, save_name, img_dir):\n if isinstance(coco, str):\n coco = load_dict(coco)\n images = {v['id']: v for v in coco['images']}\n cat2label = {v['id']: v['name'] for v in coco['categories']}\n annotations = {v['id']: v for v in coco['annotations']}\n error_boxes = []\n for k, v in annotations.items():\n b = v['bbox']\n image = images[v['image_id']]\n if not (0 <= b[0] <= image['width'] and 0 <= b[1] <= image['height'] and b[2] > 0 and b[3] > 0 \\\n and 0 <= b[0] + b[2] <= image['width'] and 0 <= b[1] + b[3] <= image['height']):\n error_boxes.append(v['id'])\n from draw_box import DrawBox\n draw = DrawBox(len(cat2label))\n\n def save_coco():\n coco['annotations'] = [v for k, v in annotations.items()]\n save_dict(save_name, coco)\n print('save done!')\n\n def help():\n print('Q: quit, Z: save, X: delete, *: stride\\n' \\\n 'W: up, A: left, S: down, D: right\\n' \\\n 'L: box left, R: box right, T: box top, B: box bottom\\n')\n\n stride = 10\n while len(error_boxes) > 0:\n print('error boxes size: ', len(error_boxes))\n v = annotations[error_boxes[0]]\n b = v['bbox']\n b = [b[0], b[1], b[2] + b[0], b[3] + b[1]]\n image = images[v['image_id']]\n src_img = cv.imread(os.path.join(img_dir, image['file_name']))\n cv.namedWindow('Error_Box', cv.WINDOW_NORMAL)\n direction = 0\n while True:\n img = draw.draw_box(src_img, [b], [cat2label[v['category_id']]])\n show_img 
= np.array(img).copy()\n cv.imshow(\"Error_Box\", show_img)\n key = cv.waitKeyEx(0)\n if key == 104:\n help()\n break\n elif key == 56:\n try:\n s = float(input('please input number: '))\n stride = s\n print('stride', stride)\n except:\n print('please input number!')\n elif key == 113:\n error_boxes.pop(0)\n break\n elif key == 120:\n ann_id = error_boxes[0]\n annotations.pop(ann_id)\n error_boxes.pop(0)\n b = [b[0], b[1], b[2] - b[0], b[3] - b[1]]\n v['bbox'] = b\n save_coco()\n break\n elif key == 122:\n error_boxes.pop(0)\n b = [b[0], b[1], b[2] - b[0], b[3] - b[1]]\n v['bbox'] = b\n save_coco()\n break\n elif key == 108:\n direction = 0\n elif key == 116:\n direction = 1\n elif key == 114:\n direction = 2\n elif key == 98:\n direction = 3\n elif key == 97:\n b[direction] -= stride\n b[direction] = max(b[direction], 0)\n elif key == 119:\n b[direction] -= stride\n b[direction] = max(b[direction], 0)\n elif key == 100:\n b[direction] += stride\n b[direction] = min(b[direction], show_img.shape[1])\n elif key == 115:\n b[direction] += stride\n b[direction] = min(b[direction], show_img.shape[0])\n save_coco()\n print('check_box done!')\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser(description='Check ann_file')\n parser.add_argument('ann_file', help='annotation file or test image directory')\n parser.add_argument('save_name', help='save_name')\n parser.add_argument('img_dir', help='img_dir')\n parser.add_argument('--check_type', default='coco,box', help='check_type')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n check_type = args.check_type.split(',')\n if 'coco' in check_type:\n args.ann_file = check_coco(args.ann_file, args.save_name, args.img_dir)\n if 'box' in check_type:\n check_box(args.ann_file, args.save_name, args.img_dir)\n\n\nif __name__ == '__main__':\n main()\n",
"from abc import ABCMeta, abstractmethod\n\nimport mmcv\nimport numpy as np\n\n\n# import pycocotools.mask as maskUtils\n\n\ndef maskUtils():\n pass\n\n\nimport torch.nn as nn\n\nfrom mmdet.core import auto_fp16, get_classes, tensor2imgs\n\n\nclass BaseDetector(nn.Module, metaclass=ABCMeta):\n \"\"\"Base class for detectors\"\"\"\n\n def __init__(self):\n super(BaseDetector, self).__init__()\n self.fp16_enabled = False\n\n @property\n def with_neck(self):\n return hasattr(self, 'neck') and self.neck is not None\n\n @property\n def with_shared_head(self):\n return hasattr(self, 'shared_head') and self.shared_head is not None\n\n @property\n def with_bbox(self):\n return hasattr(self, 'bbox_head') and self.bbox_head is not None\n\n @property\n def with_mask(self):\n return hasattr(self, 'mask_head') and self.mask_head is not None\n\n @abstractmethod\n def extract_feat(self, imgs):\n pass\n\n def extract_feats(self, imgs):\n assert isinstance(imgs, list)\n for img in imgs:\n yield self.extract_feat(img)\n\n @abstractmethod\n def forward_train(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n img (list[Tensor]): list of tensors of shape (1, C, H, W).\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has:\n 'img_shape', 'scale_factor', 'flip', and my also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n **kwargs: specific to concrete implementation\n \"\"\"\n pass\n\n async def async_simple_test(self, img, img_meta, **kwargs):\n raise NotImplementedError\n\n @abstractmethod\n def simple_test(self, img, img_meta, **kwargs):\n pass\n\n @abstractmethod\n def aug_test(self, imgs, img_metas, **kwargs):\n pass\n\n def init_weights(self, pretrained=None):\n if pretrained is not None:\n from mmdet.apis import get_root_logger\n logger = get_root_logger()\n logger.info('load model from: {}'.format(pretrained))\n\n async def aforward_test(self, *, img, img_meta, **kwargs):\n for var, name in [(img, 'img'), (img_meta, 'img_meta')]:\n if not isinstance(var, list):\n raise TypeError('{} must be a list, but got {}'.format(\n name, type(var)))\n\n num_augs = len(img)\n if num_augs != len(img_meta):\n raise ValueError(\n 'num of augmentations ({}) != num of image meta ({})'.format(\n len(img), len(img_meta)))\n # TODO: remove the restriction of imgs_per_gpu == 1 when prepared\n imgs_per_gpu = img[0].size(0)\n assert imgs_per_gpu == 1\n\n if num_augs == 1:\n return await self.async_simple_test(img[0], img_meta[0], **kwargs)\n else:\n raise NotImplementedError\n\n def forward_test(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n imgs (List[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains all images in the batch.\n img_meta (List[List[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) 
and the inner list indicates\n images in a batch\n \"\"\"\n for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError('{} must be a list, but got {}'.format(\n name, type(var)))\n\n num_augs = len(imgs)\n if num_augs != len(img_metas):\n raise ValueError(\n 'num of augmentations ({}) != num of image meta ({})'.format(\n len(imgs), len(img_metas)))\n # TODO: remove the restriction of imgs_per_gpu == 1 when prepared\n imgs_per_gpu = imgs[0].size(0)\n assert imgs_per_gpu == 1\n\n if num_augs == 1:\n return self.simple_test(imgs[0], img_metas[0], **kwargs)\n else:\n return self.aug_test(imgs, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('img',))\n def forward(self, img, img_meta, return_loss=True, **kwargs):\n \"\"\"\n Calls either forward_train or forward_test depending on whether\n return_loss=True. Note this setting will change the expected inputs.\n When `return_loss=True`, img and img_meta are single-nested (i.e.\n Tensor and List[dict]), and when `resturn_loss=False`, img and img_meta\n should be double nested (i.e. List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n \"\"\"\n if return_loss:\n return self.forward_train(img, img_meta, **kwargs)\n else:\n return self.forward_test(img, img_meta, **kwargs)\n\n def show_result(self, data, result, dataset=None, score_thr=0.3):\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n\n img_tensor = data['img'][0]\n img_metas = data['img_meta'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n if dataset is None:\n class_names = self.CLASSES\n elif isinstance(dataset, str):\n class_names = get_classes(dataset)\n elif isinstance(dataset, (list, tuple)):\n class_names = dataset\n else:\n raise TypeError(\n 'dataset must be a valid dataset name or a sequence'\n ' of class names, not {}'.format(type(dataset)))\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n bboxes = np.vstack(bbox_result)\n # draw segmentation masks\n if segm_result is not None:\n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > score_thr)[0]\n for i in inds:\n color_mask = np.random.randint(\n 0, 256, (1, 3), dtype=np.uint8)\n mask = maskUtils.decode(segms[i]).astype(np.bool)\n img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5\n # draw bounding boxes\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n mmcv.imshow_det_bboxes(\n img_show,\n bboxes,\n labels,\n class_names=class_names,\n score_thr=score_thr)\n"
] | [
[
"pandas.io.json.json_normalize",
"numpy.array"
],
[
"numpy.full",
"numpy.concatenate",
"numpy.where",
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
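Editorial note, not part of the dataset record above: the check_box() routine in the coco_check.py record flags annotations whose [x, y, w, h] boxes fall outside their image before opening its interactive OpenCV editor. A standalone version of just that bounds test is sketched below; the sample COCO dict is made up for illustration, while the test itself mirrors the condition used in the record.

def find_bad_boxes(coco):
    """Return ids of annotations whose bbox lies outside its image or has non-positive size."""
    images = {img['id']: img for img in coco['images']}
    bad = []
    for ann in coco['annotations']:
        x, y, w, h = ann['bbox']
        img = images[ann['image_id']]
        inside = (0 <= x <= img['width'] and 0 <= y <= img['height']
                  and w > 0 and h > 0
                  and x + w <= img['width'] and y + h <= img['height'])
        if not inside:
            bad.append(ann['id'])
    return bad

# Hypothetical example record
coco = {
    'images': [{'id': 1, 'width': 100, 'height': 80, 'file_name': 'a.jpg'}],
    'annotations': [
        {'id': 10, 'image_id': 1, 'bbox': [5, 5, 20, 20], 'category_id': 1},    # inside the image
        {'id': 11, 'image_id': 1, 'bbox': [90, 70, 20, 20], 'category_id': 1},  # runs past the edge
    ],
    'categories': [{'id': 1, 'name': 'defect'}],
}
print(find_bad_boxes(coco))   # -> [11]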
BaiduXLab/apollo | [
"2764e934b6d0da1342be781447348288ac84c5e9"
] | [
"modules/tools/create_map/create_map.py"
] | [
"#!/usr/bin/env python\n\n###############################################################################\n# Copyright 2017 The Apollo Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\"\"\"\nCreate base map from localization and mobileye lane detection\n\"\"\"\n\nimport argparse\nimport csv\nimport math\nimport numpy as np\nimport os\nimport rospy\nimport sys\n\nfrom modules.map.proto.map_pb2 import Map\nfrom modules.map.proto.map_lane_pb2 import LaneBoundaryType, Lane\nfrom modules.map.proto.map_road_pb2 import BoundaryEdge, Road\n\nfrom modules.routing.proto.routing_pb2 import LaneWaypoint\nfrom modules.routing.proto.poi_pb2 import POI, Landmark\n\nclass DataPoint:\n \"\"\"\n class of data sample (localization and mobileye lane detection)\n \"\"\"\n\n def __init__(self):\n self.pos_x = 0.0 # localization\n self.pos_y = 0.0\n self.pos_z = 0.0\n self.theta = 0.0 # heading\n self.dist_left = 0.0 # distance to left lane marking\n self.conf_left = 0 # confidence of left lane marking (0/1: low confidence, -1/-2: high confidence)\n self.dist_right = 0.0 # distance to right lane marking\n self.conf_right = 0 # confidence of right lane marking (0/1: low confidence, -1/-2: high confidence)\n self.width = 0.0 # lane width\n self.ratio = 0.0 # relative position within a lane (dist_left / width)\n self.center_x = 0.0 # point on the center line of current lane\n self.center_y = 0.0\n\ndef distance(x1, y1, x2, y2):\n \"\"\"\n l2 distance\n \"\"\"\n\n return math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))\n\ndef interpolate_width(data, default_width):\n \"\"\"\n fill 'width' field of all data samples by interpolation\n \"\"\"\n\n # Collect a set of consecutive entries with low confidence on left OR right lane detection \n intervals = []\n interval_begin = -1\n interval_end = -1\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 or entry.conf_right >= 0:\n if interval_begin < 0:\n interval_begin = index\n interval_end = index\n else:\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n interval_begin = -1\n interval_end = -1\n entry.width = entry.dist_left + entry.dist_right\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n\n # Iterate through intervals to interpolate width\n for interval in intervals:\n for index in range(interval[0], interval[1] + 1):\n if interval[0] == 0 and interval[1] == len(data) - 1:\n data[index].width = default_width\n else:\n if interval[0] == 0:\n data[index].width = data[interval[1] + 1].width\n elif interval[1] == len(data) - 1:\n data[index].width = data[interval[0] - 1].width\n else:\n alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)\n data[index].width = (1.0 - alpha) * data[interval[0] - 1].width + alpha * data[interval[1] + 1].width\n\n # Fill in dist_left/right and conf_left/right using interpolated width\n for (index, entry) in enumerate(data):\n if 
entry.conf_left >= 0 and entry.conf_right < 0:\n entry.dist_left = entry.width - entry.dist_right\n entry.conf_left = -1\n elif entry.conf_left < 0 and entry.conf_right >= 0:\n entry.dist_right = entry.width - entry.dist_left\n entry.conf_right = -1\n\ndef interpolate_ratio(data, default_ratio):\n \"\"\"\n fill 'ratio' field of all data samples by interpolation\n \"\"\"\n\n # Collect a set of consecutive entries with low confidence on left AND right lane detection \n intervals = []\n interval_begin = -1\n interval_end = -1\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 and entry.conf_right >= 0:\n if interval_begin < 0:\n interval_begin = index\n interval_end = index\n else:\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n interval_begin = -1\n interval_end = -1\n entry.ratio = float(entry.dist_left) / entry.width\n if interval_begin >= 0:\n intervals.append((interval_begin, interval_end))\n\n # Iterate through intervals to interpolate ratio\n for interval in intervals:\n for index in range(interval[0], interval[1] + 1):\n if interval[0] == 0 and interval[1] == len(data) - 1:\n data[index].ratio = default_ratio\n else:\n if interval[0] == 0:\n data[index].ratio = data[interval[1] + 1].ratio\n elif interval[1] == len(data) - 1:\n data[index].ratio = data[interval[0] - 1].ratio\n else:\n alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)\n data[index].ratio = (1.0 - alpha) * data[interval[0] - 1].ratio + alpha * data[interval[1] + 1].ratio\n\n # Fill in dist_left/right and conf_left/right using interpolated ratio\n for (index, entry) in enumerate(data):\n if entry.conf_left >= 0 and entry.conf_right >= 0:\n entry.dist_left = entry.width * entry.ratio\n entry.dist_right = entry.width - entry.dist_left\n entry.conf_left = -1\n entry.conf_right = -1\n\ndef compute_center(data):\n \"\"\"\n fill 'center_x' and 'center_y' fields of all data samples\n \"\"\"\n\n for entry in data:\n pos_x = entry.pos_x\n pos_y = entry.pos_y\n pos_z = entry.pos_z\n theta = entry.theta\n dist_left = entry.dist_left\n dist_right = entry.dist_right\n\n theta_left = theta + np.pi / 2.0\n pos_l_x = pos_x + dist_left * np.cos(theta_left)\n pos_l_y = pos_y + dist_left * np.sin(theta_left)\n\n theta_right = theta - np.pi / 2.0\n pos_r_x = pos_x + dist_right * np.cos(theta_right)\n pos_r_y = pos_y + dist_right * np.sin(theta_right)\n\n entry.center_x = (pos_l_x + pos_r_x) / 2.0\n entry.center_y = (pos_l_y + pos_r_y) / 2.0\n\ndef sample_data(data, sample_distance):\n \"\"\"\n sample 'data' at the interval of 'sample_distance'\n \"\"\"\n\n result = []\n\n if len(data) > 0:\n last_x = data[0].center_x\n last_y = data[0].center_y\n result.append(data[0])\n\n for entry in data[1:]:\n if distance(last_x, last_y, entry.center_x, entry.center_y) > sample_distance:\n result.append(entry)\n last_x = entry.center_x\n last_y = entry.center_y\n\n return result\n\ndef extract_data(data, dim):\n \"\"\"\n extract dimension 'dim' (center_x, center_y or width) of 'data' into a list\n \"\"\"\n\n result = []\n for entry in data:\n if dim == 'center_x':\n result.append(entry.center_x)\n elif dim == 'center_y':\n result.append(entry.center_y)\n elif dim == 'width':\n result.append(entry.width)\n return result\n\ndef laplacian_operator(data):\n \"\"\"\n apply laplacian operator on data\n \"\"\"\n\n lap = []\n lap.append(0.0)\n for index in range(1, len(data) - 1):\n lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])\n lap.append(0.0)\n return lap\n\ndef 
laplacian_smooth(data, alpha = 0.5, iterations = 3):\n \"\"\"\n apply laplacian smoothing on data\n \"\"\"\n\n for iteration in range(iterations):\n lap = laplacian_operator(data)\n for index in range(len(data)):\n data[index] += alpha * lap[index]\n\ndef update_data(data, dim, new_data):\n \"\"\"\n copy new_data to dimension 'dim' of 'data'\n \"\"\"\n\n for entry, new_entry in zip(data, new_data):\n if dim == 'center_x':\n entry.center_x = new_entry\n elif dim == 'center_y':\n entry.center_y = new_entry\n elif dim == 'width':\n entry.width = new_entry\n\ndef smooth_dimension(data, dim):\n \"\"\"\n smooth dimension 'dim' of 'data'\n \"\"\"\n\n extracted_data = extract_data(data, dim)\n if dim == 'width':\n laplacian_smooth(extracted_data, 1.0, 1000)\n else:\n laplacian_smooth(extracted_data, 1.0, 1000)\n update_data(data, dim, extracted_data)\n\ndef smooth_center_width(data):\n \"\"\"\n smooth centers and widths of data\n \"\"\"\n\n smooth_dimension(data, 'center_x')\n smooth_dimension(data, 'center_y')\n smooth_dimension(data, 'width')\n\ndef split_data(data, max_lane_length):\n \"\"\"\n split data into multiple lists, each of which is not longer than 'max_lane_length'\n \"\"\"\n\n result = []\n current = []\n total_length = 0.0\n\n if len(data) > 0:\n last_x = data[0].center_x\n last_y = data[0].center_y\n current.append(data[0])\n\n for entry in data[1:]:\n current.append(entry)\n\n d = distance(last_x, last_y, entry.center_x, entry.center_y)\n total_length += d\n\n if total_length > max_lane_length:\n result.append(current)\n\n current = []\n current.append(entry)\n total_length = 0.0\n\n last_x = entry.center_x\n last_y = entry.center_y\n\n if total_length > 0.0:\n result.append(current)\n\n return result\n\ndef create_lane(data, offset, lane_count, left_lanes, right_lanes):\n \"\"\"\n create a lane using 'data' whose lateral index is 'offset'\n offset = 0: center lane; offset < 0: left lanes; offset > 0: right lanes\n lane_count: longitutional index of lane (used for naming)\n left_lanes, right_lanes: number of left/right lanes (used for boundary types)\n \"\"\"\n\n total_length = 0.0\n total_left_length = 0.0\n total_right_length = 0.0\n\n lane = Lane()\n lane.id.id = \"lane_\" + str(lane_count) + \"_\" + str(offset)\n\n lane_central_curve_seg = lane.central_curve.segment.add()\n\n start_heading = data[0].theta\n\n lane_left_boundary_curve_seg = lane.left_boundary.curve.segment.add()\n lane_left_boundary_curve_seg.heading = float(start_heading)\n lane_left_boundary_curve_seg.s = 0.0\n\n lane_right_boundary_curve_seg = lane.right_boundary.curve.segment.add()\n lane_right_boundary_curve_seg.heading = float(start_heading)\n lane_right_boundary_curve_seg.s = 0.0\n\n last_l_x = 0.0\n last_l_y = 0.0\n\n last_c_x = 0.0\n last_c_y = 0.0\n\n last_r_x = 0.0\n last_r_y = 0.0\n\n for (index, entry) in enumerate(data):\n theta = entry.theta\n theta_left = theta + np.pi / 2.0\n theta_right = theta - np.pi / 2.0\n\n pos_c_x = entry.center_x\n pos_c_y = entry.center_y\n\n pos_l_x = pos_c_x + entry.width * (0.5 - offset) * np.cos(theta_left)\n pos_l_y = pos_c_y + entry.width * (0.5 - offset) * np.sin(theta_left)\n\n pos_r_x = pos_c_x + entry.width * (0.5 + offset) * np.cos(theta_right)\n pos_r_y = pos_c_y + entry.width * (0.5 + offset) * np.sin(theta_right)\n\n pos_c_x = (pos_l_x + pos_r_x) / 2.0\n pos_c_y = (pos_l_y + pos_r_y) / 2.0\n\n if index == 0:\n lane_central_curve_seg.start_position.x = pos_c_x\n lane_central_curve_seg.start_position.y = pos_c_y\n\n 
lane_left_boundary_curve_seg.start_position.x = pos_l_x\n lane_left_boundary_curve_seg.start_position.y = pos_l_y\n\n lane_right_boundary_curve_seg.start_position.x = pos_r_x\n lane_right_boundary_curve_seg.start_position.y = pos_r_y\n\n else:\n d = distance(last_c_x, last_c_y, pos_c_x, pos_c_y)\n total_length += d\n\n d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)\n total_left_length += d_left\n\n d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)\n total_right_length += d_right\n\n point = lane_central_curve_seg.line_segment.point.add()\n point.x = pos_c_x\n point.y = pos_c_y\n\n point = lane_left_boundary_curve_seg.line_segment.point.add()\n point.x = pos_l_x\n point.y = pos_l_y\n\n point = lane_right_boundary_curve_seg.line_segment.point.add() \n point.x = pos_r_x\n point.y = pos_r_y\n\n sample = lane.left_sample.add()\n sample.s = total_length\n sample.width = entry.width / 2.0\n\n sample = lane.right_sample.add()\n sample.s = total_length\n sample.width = entry.width / 2.0\n\n last_l_x = pos_l_x\n last_l_y = pos_l_y\n\n last_r_x = pos_r_x\n last_r_y = pos_r_y\n\n last_c_x = pos_c_x\n last_c_y = pos_c_y\n\n lane_central_curve_seg.length = total_length\n lane_left_boundary_curve_seg.length = total_left_length\n lane_right_boundary_curve_seg.length = total_right_length\n\n boundary_type = lane.left_boundary.boundary_type.add()\n boundary_type.s = 0.0\n if offset == -left_lanes:\n boundary_type.types.append(LaneBoundaryType.DOUBLE_YELLOW)\n else:\n boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)\n\n lane.left_boundary.length = total_left_length\n\n boundary_type = lane.right_boundary.boundary_type.add()\n boundary_type.s = 0.0\n if offset == right_lanes:\n boundary_type.types.append(LaneBoundaryType.CURB)\n else:\n boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)\n\n lane.right_boundary.length = total_right_length\n\n lane.length = total_length\n lane.speed_limit = 29.06\n lane.type = Lane.CITY_DRIVING\n lane.turn = Lane.NO_TURN\n\n return lane\n\ndef create_road(data, left_lanes, right_lanes):\n \"\"\"\n create a road using 'data'\n left_lanes, right_lanes: number of left/right lanes\n \"\"\"\n road = Road()\n road.id.id = \"road\"\n section = road.section.add()\n section.id.id = \"section\"\n\n left_edge = section.boundary.outer_polygon.edge.add()\n left_edge.type = BoundaryEdge.LEFT_BOUNDARY\n\n right_edge = section.boundary.outer_polygon.edge.add()\n right_edge.type = BoundaryEdge.RIGHT_BOUNDARY\n\n total_left_length = 0.0\n total_right_length = 0.0\n\n start_heading = data[0].theta\n\n left_edge_curve_seg = left_edge.curve.segment.add()\n left_edge_curve_seg.heading = float(start_heading)\n left_edge_curve_seg.s = 0.0\n\n right_edge_curve_seg = right_edge.curve.segment.add()\n right_edge_curve_seg.heading = float(start_heading)\n right_edge_curve_seg.s = 0.0\n\n last_l_x = 0.0\n last_l_y = 0.0\n\n last_r_x = 0.0\n last_r_y = 0.0\n\n for (index, entry) in enumerate(data):\n theta = entry.theta\n theta_left = theta + np.pi / 2.0\n theta_right = theta - np.pi / 2.0\n\n pos_l_x = entry.center_x + entry.width * (0.5 + left_lanes) * np.cos(theta_left)\n pos_l_y = entry.center_y + entry.width * (0.5 + left_lanes) * np.sin(theta_left)\n\n pos_r_x = entry.center_x + entry.width * (0.5 + right_lanes) * np.cos(theta_right)\n pos_r_y = entry.center_y + entry.width * (0.5 + right_lanes) * np.sin(theta_right)\n\n if index == 0:\n left_edge_curve_seg.start_position.x = pos_l_x\n left_edge_curve_seg.start_position.y = pos_l_y\n\n 
right_edge_curve_seg.start_position.x = pos_r_x\n right_edge_curve_seg.start_position.y = pos_r_y\n\n else:\n d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)\n total_left_length += d_left\n\n d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)\n total_right_length += d_right\n\n point = left_edge_curve_seg.line_segment.point.add()\n point.x = pos_l_x\n point.y = pos_l_y\n\n point = right_edge_curve_seg.line_segment.point.add() \n point.x = pos_r_x\n point.y = pos_r_y\n\n last_l_x = pos_l_x\n last_l_y = pos_l_y\n\n last_r_x = pos_r_x\n last_r_y = pos_r_y\n\n left_edge_curve_seg.length = total_left_length\n right_edge_curve_seg.length = total_right_length\n\n return road\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate Base Map from Recorded Localization and Mobileye Lane Detection')\n parser.add_argument(\n '-i',\n '--input_file',\n help='Recorded localization and mobileye lane detection in CSV format',\n type=str,\n default='/tmp/lane.csv')\n parser.add_argument(\n '--debug',\n help='Print debugging info in /tmp',\n action='store_true')\n parser.add_argument(\n '-o',\n '--output_file',\n help='Output file name of generated base map',\n type=str,\n default='modules/map/data/gen/base_map.txt')\n parser.add_argument(\n '-e',\n '--end_waypoint_file',\n help='Output file name of default end waypoint',\n type=str,\n default='modules/map/data/gen/default_end_way_point.txt')\n parser.add_argument(\n '--default_width',\n help='Default lane width in meters (only effective when mobileye lane detection fails for ALL frames)',\n type=float,\n default=3.5)\n parser.add_argument(\n '--sample_distance',\n help='minimum distance (in meters) of two adjacent samples of a lane',\n type=float,\n default=0.2)\n parser.add_argument(\n '--max_lane_length',\n help='maximum length (in meters) of a lane (longer lanes will be split)',\n type=float,\n default=100.0)\n parser.add_argument(\n '--left_lanes',\n help='Number of lanes on the left',\n type=int,\n default=0)\n parser.add_argument(\n '--right_lanes',\n help='Number of lanes on the right',\n type=int,\n default=0)\n args = vars(parser.parse_args())\n\n csv_file_name = args['input_file']\n map_file_name = args['output_file']\n waypoint_file_name = args['end_waypoint_file']\n default_width = args['default_width']\n debug_option = args['debug']\n sample_distance = args['sample_distance']\n max_lane_length = args['max_lane_length']\n left_lanes = args['left_lanes']\n right_lanes = args['right_lanes']\n\n default_ratio = 0.5\n temp_csv_file_name = '/tmp/lane_interpolation.csv'\n\n rows = []\n with open(csv_file_name, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n rows.append(row)\n\n # Extract data samples\n data = []\n for row in rows[1:]:\n entry = DataPoint()\n entry.pos_x = float(row[0])\n entry.pos_y = float(row[1])\n entry.pos_z = float(row[2])\n entry.theta = float(row[3])\n entry.dist_left = abs(float(row[4]))\n entry.conf_left = int(row[5])\n if entry.dist_left < 0.1:\n entry.conf_left = 0\n entry.dist_right = abs(float(row[6]))\n entry.conf_right = int(row[7])\n if entry.dist_right < 0.1:\n entry.conf_right = 0\n entry.width = default_width\n entry.ratio = default_ratio\n data.append(entry)\n\n # Fill in widths using interpolation\n interpolate_width(data, default_width)\n # Fill in ratios using interpolation\n interpolate_ratio(data, default_ratio)\n # Fill in centers\n compute_center(data)\n\n # Sample data at the interval of sample_distance\n data = sample_data(data, sample_distance)\n # 
Smooth center curves and widths\n smooth_center_width(data)\n\n # Output debug info if necessary\n if debug_option:\n with open(temp_csv_file_name, 'w') as csvfile:\n for row in data:\n csvfile.write(\n \"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n\" %\n (row.pos_x, row.pos_y, row.pos_z, row.theta, row.dist_left, row.conf_left, row.dist_right, row.conf_right, row.width, row.ratio, row.center_x, row.center_y))\n\n # Split data samples into lists with maximum length of max_lane_length\n list_data = split_data(data, max_lane_length)\n\n # Create individual lanes\n lane_sets = []\n for (lane_count, lane_data) in enumerate(list_data):\n lane_set = []\n for offset in range(-left_lanes, right_lanes + 1):\n lane_set.append(create_lane(lane_data, offset, lane_count, left_lanes, right_lanes))\n lane_sets.append(lane_set)\n\n # Create road\n road = create_road(data, left_lanes, right_lanes)\n\n # Create map\n mp = Map()\n mp.header.version = \"1.400000\"\n mp.header.date = \"20170919\"\n mp.header.district = \"101\"\n\n # Set up predecessors, successors, left/right neighbors\n for lane_count in range(len(lane_sets)):\n for lane_offset in range(len(lane_sets[lane_count])):\n if lane_count != 0:\n lane_sets[lane_count][lane_offset].predecessor_id.add().id = lane_sets[lane_count - 1][lane_offset].id.id\n if lane_count != len(lane_sets) - 1:\n lane_sets[lane_count][lane_offset].successor_id.add().id = lane_sets[lane_count + 1][lane_offset].id.id\n if lane_offset != 0:\n lane_sets[lane_count][lane_offset].left_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset - 1].id.id\n if lane_offset != len(lane_sets[lane_count]) - 1:\n lane_sets[lane_count][lane_offset].right_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset + 1].id.id\n\n # Add road/lanes to map and let road contain lanes\n mp.road.extend([road])\n for lane_set in lane_sets:\n for lane in lane_set:\n mp.road[0].section[0].lane_id.add().id = lane.id.id\n mp.lane.extend([lane])\n\n # Output map\n with open(map_file_name, \"w\") as f:\n f.write(mp.__str__())\n\n # Create default end_way_point using the farthest point of last central lane \n last_central_lane = lane_sets[-1][left_lanes]\n\n poi = POI()\n landmark = poi.landmark.add()\n landmark.name = \"default\"\n waypoint = landmark.waypoint.add()\n waypoint.id = last_central_lane.id.id\n waypoint.s = last_central_lane.length\n waypoint.pose.x = last_central_lane.central_curve.segment[0].line_segment.point[-1].x\n waypoint.pose.y = last_central_lane.central_curve.segment[0].line_segment.point[-1].y\n\n # Output default end_way_point\n with open(waypoint_file_name, \"w\") as f:\n f.write(poi.__str__())\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
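The lane-generation record above smooths lane centre lines and widths with a simple iterative Laplacian filter (`laplacian_operator` / `laplacian_smooth`, invoked by `smooth_dimension` with `alpha = 1.0` and 1000 iterations). Below is a minimal standalone restatement of that routine for illustration only; it is not part of the dataset record, and the sample values in the demo are made up.

```python
# Minimal sketch of the 1-D Laplacian smoothing used in the record above.
# Each interior sample is pulled toward the midpoint of its two neighbours;
# the endpoints keep a zero Laplacian term and therefore never move.
def laplacian_operator(data):
    lap = [0.0]
    for i in range(1, len(data) - 1):
        lap.append((data[i + 1] + data[i - 1]) / 2.0 - data[i])
    lap.append(0.0)
    return lap


def laplacian_smooth(data, alpha=0.5, iterations=3):
    for _ in range(iterations):
        lap = laplacian_operator(data)   # Jacobi-style: operator reads the old values
        for i in range(len(data)):
            data[i] += alpha * lap[i]    # in-place update, as in the record


if __name__ == "__main__":
    # Hypothetical sample: a noisy ramp. With alpha = 1.0 and many iterations
    # (the record uses 1000), interior values converge toward the straight
    # line joining the two fixed endpoints.
    samples = [0.0, 1.6, 1.9, 3.4, 3.8, 5.0]
    laplacian_smooth(samples, alpha=1.0, iterations=1000)
    print(samples)
```

Because the endpoints are pinned, the record can smooth each sampled lane segment without shifting where that segment starts or ends.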
beiyuouo/fedhf | [
"0caa873a5db7494b0f9197848c34243fcb8c49f6",
"0caa873a5db7494b0f9197848c34243fcb8c49f6"
] | [
"fedhf/api/dpm/laplace.py",
"tests/test_api/test_dpm.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : fedhf\\api\\dpm\\laplace_noise.py\n# @Time : 2022-05-02 22:39:42\n# @Author : Bingjie Yan\n# @Email : [email protected]\n# @License : Apache License 2.0\n\nimport numpy as np\nimport torch\n\n\ndef laplace_noise(sensitivity, size, epsilon, **kwargs):\n \"\"\"\n Generate Laplace noise with the given sensitivity.\n :param sensitivity: the sensitivity of the privacy mechanism\n :param size: the size of the noise\n :param epsilon: the privacy parameter\n :param kwargs: other parameters\n :return: the generated noise\n \"\"\"\n noise_scale = sensitivity / epsilon\n return np.random.laplace(0, noise_scale, size)\n\n\ndef laplace_clip(model: torch.nn.Module, clip: float):\n \"\"\"\n Clip the model parameters.\n :param model: the model\n :param clip: the clipping bound\n :return: None\n \"\"\"\n for k, v in model.named_parameters():\n v.grad /= max(1, v.grad.norm(1) / clip)",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : tests\\test_api\\test_dpm.py\n# @Time : 2022-05-02 23:36:03\n# @Author : Bingjie Yan\n# @Email : [email protected]\n# @License : Apache License 2.0\n\nfrom copy import copy, deepcopy\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom fedhf.api import opts, dpm\n\nfrom fedhf.model.nn import MLP\nfrom fedhf.dataset.random import RandomDataset\n\n\nclass TestDPM:\n args = opts().parse([])\n\n def test_calculate_sensitivity(self):\n lr = 0.1\n clip = 10\n data_size = 100\n sensitivity = dpm.calculate_sensitivity(lr, clip, data_size)\n assert sensitivity == 2 * lr * clip / data_size\n\n def test_none(self):\n dpm.build_mechanism('none', dpm.calculate_sensitivity(0.1, 10, 100), 100, 0.1)\n assert np.all(\n dpm.build_mechanism('none', dpm.calculate_sensitivity(0.1, 10, 100), 100, 0.1) == 0)\n\n def test_gaussian_noise(self):\n dpm.build_mechanism('gaussian',\n dpm.calculate_sensitivity(0.1, 10, 100),\n 100,\n 0.1,\n delta=0.1)\n\n def test_laplace_noise(self):\n dpm.build_mechanism('laplace', dpm.calculate_sensitivity(0.1, 10, 100), 100, 0.1)\n\n def test_none_clip(self):\n model = MLP(None, input_dim=10 * 10, output_dim=10)\n data = RandomDataset(None, 100, (1, 10, 10), 10)\n\n model.train()\n optim = torch.optim.SGD(model.parameters(), lr=1)\n crit = nn.CrossEntropyLoss()\n\n for epoch in range(1):\n loss = torch.tensor(0)\n for i, (x, y) in enumerate(data):\n x = x.view(1, 1, 10, 10)\n y = y.view(-1)\n output = model(x)\n loss = loss + crit(output, y)\n\n loss.backward()\n optim.step()\n # optim.zero_grad()\n\n grads = {k: v.grad.detach().numpy().copy() for k, v in model.named_parameters()}\n dpm.build_clip_grad('none', model, 0.1)\n\n # check grad is not changed\n\n for k, v in model.named_parameters():\n assert np.allclose(grads[k], v.grad.detach().numpy())\n\n def test_gaussian_clip(self):\n model = MLP(None, input_dim=10 * 10, output_dim=10)\n data = RandomDataset(None, 100, (1, 10, 10), 10)\n\n model.train()\n optim = torch.optim.SGD(model.parameters(), lr=1)\n crit = nn.CrossEntropyLoss()\n\n for epoch in range(1):\n loss = torch.tensor(0)\n for i, (x, y) in enumerate(data):\n x = x.view(1, 1, 10, 10)\n y = y.view(-1)\n output = model(x)\n loss = loss + crit(output, y)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n clip = 1e-8\n grads = {k: v.grad.detach().numpy().copy() for k, v in model.named_parameters()}\n print(grads)\n dpm.build_clip_grad('gaussian', model, clip)\n\n for k, v in model.named_parameters():\n assert np.all(np.abs(v.grad.detach().numpy()) <= clip)\n assert np.any(v.grad.detach().numpy() != grads[k])\n\n def test_laplace_clip(self):\n model = MLP(None, input_dim=10 * 10, output_dim=10)\n data = RandomDataset(None, 100, (1, 10, 10), 10)\n\n model.train()\n optim = torch.optim.SGD(model.parameters(), lr=1)\n crit = nn.CrossEntropyLoss()\n\n for epoch in range(1):\n loss = torch.tensor(0)\n for i, (x, y) in enumerate(data):\n x = x.view(1, 1, 10, 10)\n y = y.view(-1)\n output = model(x)\n loss = loss + crit(output, y)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n clip = 1e-8\n grads = {k: v.grad.detach().numpy().copy() for k, v in model.named_parameters()}\n print(grads)\n dpm.build_clip_grad('laplace', model, clip)\n\n for k, v in model.named_parameters():\n assert np.all(np.abs(v.grad.detach().numpy()) <= clip)\n assert np.any(v.grad.detach().numpy() != grads[k])"
] | [
[
"numpy.random.laplace"
],
[
"torch.nn.CrossEntropyLoss",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
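The fedhf record above calibrates Laplace-mechanism noise with scale `sensitivity / epsilon`, and its test asserts `calculate_sensitivity(lr, clip, data_size) == 2 * lr * clip / data_size`. The following is a minimal, self-contained sketch of that calibration for illustration only; the standalone helper and the demo numbers are assumptions and not part of the fedhf API.

```python
# Minimal sketch (independent of fedhf) of Laplace-mechanism noise calibration:
# scale b = sensitivity / epsilon, matching laplace_noise() in the record above.
import numpy as np


def laplace_noise(sensitivity, size, epsilon):
    # Zero-mean Laplace noise with scale b = sensitivity / epsilon.
    return np.random.laplace(0.0, sensitivity / epsilon, size)


# Sensitivity as asserted by the record's test for a clipped-SGD update:
# 2 * lr * clip / data_size (hypothetical demo values below).
lr, clip, data_size, epsilon = 0.1, 10.0, 100, 0.1
sensitivity = 2 * lr * clip / data_size          # = 0.02
noise = laplace_noise(sensitivity, size=(3, 3), epsilon=epsilon)
print(noise.shape, float(np.abs(noise).mean()))
```

A smaller epsilon (stronger privacy) gives a larger scale b, so the injected noise drowns out more of the clipped gradient signal.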
jiahfong/alr | [
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a",
"ee561c545bd98ec17c4f9c3040ef23b0222ef71a"
] | [
"docs/source/experiments/legacy/ssl_vs_bald_vs_ssal_basic/mnist/recycle/det_SSL/pseudo_label.py",
"docs/source/experiments/warm_start/mnist/restart/train.py",
"docs/source/experiments/custom/cifar10/temporal_batch_bald/train.py",
"docs/source/experiments/old/model_selection/cifar/train.py",
"experiments/thesis/ephemeral/mnist/legacy/dont_reset_weights_more_iters/train.py",
"tests/test_alr.py",
"experiments/thesis/ephemeral/mnist/legacy/dont_reset_weights/train.py"
] | [
"r\"\"\"\nvanilla pseudo-labeling implementation\n\"\"\"\nfrom collections import defaultdict\n\nfrom alr.utils import timeop, manual_seed\nfrom alr.data.datasets import Dataset\nfrom alr.data import UnlabelledDataset\nfrom alr.training import VanillaPLTrainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr import MCDropout\n\nimport pickle\nimport numpy as np\nimport torch\nimport torch.utils.data as torchdata\nfrom torch.nn import functional as F\nfrom pathlib import Path\n\n\nif __name__ == \"__main__\":\n manual_seed(42)\n kwargs = dict(num_workers=4, pin_memory=True)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n sizes = np.arange(20, 260, 10)\n N = len(sizes)\n # validation dataset size\n VAL_SIZE = 5_000\n # according to the paper:\n BATCH_SIZE = 32\n UNLABELLED_BATCH_SIZE = 256\n # at least prolong the epoch to have this many points (see RandomFixedLengthSampler)\n MIN_TRAIN_SIZE = 12_500\n # well, early stopping should kick-in before then.\n EPOCHS = 200\n REPEATS = 6\n\n # paths\n pl_metrics = Path(\"pl_metrics\")\n metrics = Path(\"metrics\")\n saved_models = Path(\"saved_models\")\n metrics.mkdir()\n saved_models.mkdir()\n log_every = 2\n\n accs = defaultdict(list)\n\n for r in range(1, REPEATS + 1):\n for i, n in enumerate(sizes, 1):\n train, test = Dataset.MNIST.get()\n train, pool = torchdata.random_split(train, (n, len(train) - n))\n pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))\n pool = UnlabelledDataset(pool, debug=True)\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n\n print(f\"=== Iteration {i} of {N} ({i/N:.2%}) ===\")\n print(f\"\\ttrain: {len(train)}; pool: {len(pool)}; test: {len(test)}\")\n\n if (i - 1) % log_every == 0 and r == 1:\n pl_log = str(pl_metrics / f\"dsize_{n}\")\n else:\n pl_log = None\n\n trainer = VanillaPLTrainer(\n model,\n labelled_loss=F.nll_loss,\n unlabelled_loss=F.nll_loss,\n optimiser=\"Adam\",\n patience=3,\n reload_best=True,\n track_pl_metrics=pl_log,\n device=device,\n )\n\n train_loader = torchdata.DataLoader(\n train,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n train, length=MIN_TRAIN_SIZE, shuffle=True\n ),\n **kwargs,\n )\n pool_loader = torchdata.DataLoader(\n pool,\n batch_size=UNLABELLED_BATCH_SIZE,\n shuffle=True,\n **kwargs,\n )\n val_loader = torchdata.DataLoader(\n val,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n\n with timeop() as t:\n history = trainer.fit(\n train_loader,\n pool_loader,\n val_loader,\n epochs=EPOCHS,\n )\n\n test_metrics = trainer.evaluate(test_loader)\n accs[n].append(test_metrics[\"acc\"])\n print(\n f\"\\t[train] loss, acc: ({history['stage2']['train_loss'][-1]}, {history['stage2']['train_acc'][-1]})\\n\"\n f\"\\t[test] loss, acc: ({test_metrics['loss']}, {test_metrics['acc']})\\n\"\n f\"\\ttime: {t}\"\n )\n\n if pl_log:\n torch.save(\n model.state_dict(),\n saved_models / f\"repeat_{r}_dsize_{n}_weights.pth\",\n )\n\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n }\n with open(metrics / f\"repeat_{r}_dsize_{n}_metrics.pkl\", \"wb\") as fp:\n pickle.dump(payload, fp)\n\n with open(\"accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n",
"r\"\"\"\nFrom previous experiments, we saw that ephemeral pseudo-labelling helped boost accuracy\ndespite starting with only 20 points. We could kick-start BALD with 85% accuracy with 24 iterations\nbut it seems like using 80% accuracy at 10 iterations is a good trade-off. It's harder to gain more\naccuracy as the number of iteration increases.\n\nThis experiment kick-starts BALD10 acquisition by warming the model to 80% accuracy (with 10 iterations\nof ephemeral pseudo-labelling). However, the acquisition loop will NOT run ephemeral P.L. as we've seen\na decrease in performance when doing so. There are two possibilities: (1) warm-starting the model\nhas caused it to lower its entropy on the pool dataset, hence causing it to actually perform worse.\n(2) warm-starting it actually helped! my bet is (unfortunately) on the former, given previous observations\n(i.e. ephemeral bald10 performs worse than bald10 -- but i'm hopeful, notwithstanding.).\n\"\"\"\nfrom collections import defaultdict\n\nfrom alr.utils import manual_seed, eval_fwd_exp, timeop\nfrom alr.acquisition import BALD\nfrom alr import MCDropout\nfrom alr.data.datasets import Dataset\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.data import UnlabelledDataset, DataManager\nfrom alr.training import Trainer\nfrom alr.training.repeated_acquisition_utils import (\n get_confident_indices,\n RelabelledDataset,\n)\n\nimport torch\nimport torch.utils.data as torchdata\nimport pickle\nfrom torch.nn import functional as F\nfrom pathlib import Path\n\n\ndef main(b, threshold, warm_start_iters, log_every):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n # --- constants ---\n BATCH_SIZE = 64\n EPOCHS = 200\n REPS = 6\n ITERS = 23\n # +1 because of the structure of our loop\n warm_start_iters += 1\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n\n # --- setup ---\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool, debug=True)\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=1024, **kwargs)\n dm = DataManager(train, pool, bald)\n val_loader = torchdata.DataLoader(\n val,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=1024,\n shuffle=False,\n **kwargs,\n )\n\n warm_start_accs = []\n accs = defaultdict(list)\n template = f\"wsi={warm_start_iters}_b={b}_thresh={threshold}\"\n pl_metrics = Path(\"pl_metrics\") / template\n metrics = Path(\"metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n\n for r in range(1, REPS + 1):\n print(f\"- Repeat {r} of {REPS} -\")\n dm.reset()\n ws_accs_r = {}\n # store temporarily labelled points (will be union-ed with the training dataset)\n pseudo_labelled_points = None\n for i in range(1, warm_start_iters + 1):\n if pseudo_labelled_points is not None:\n full_train_dataset = torchdata.ConcatDataset(\n (dm.labelled, pseudo_labelled_points)\n )\n else:\n full_train_dataset = dm.labelled\n train_length = len(full_train_dataset)\n print(\n f\"=== Warm start iteration {i} of {warm_start_iters} ({i / warm_start_iters:.2%}) ===\"\n )\n print(\n f\"\\ttrain: {train_length}; \"\n f\"pool: {dm.n_unlabelled}; \"\n f\"val: {len(val)}; \"\n f\"test: {len(test)}\"\n )\n 
model.reset_weights()\n\n # -- stage 1: train --\n trainer = Trainer(\n model, F.nll_loss, \"Adam\", patience=3, reload_best=True, device=device\n )\n train_loader = torchdata.DataLoader(\n full_train_dataset,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n full_train_dataset, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n\n test_metrics = trainer.evaluate(test_loader)\n ws_accs_r[train_length] = test_metrics[\"acc\"]\n\n print(\n f\"\\t[test] loss, acc: ({test_metrics['loss']:.4f}, {test_metrics['acc']:.4f}); time: {t}\"\n )\n\n with open(\n metrics / f\"repeat_{r}_dsize_{train_length}_metrics.pkl\", \"wb\"\n ) as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n }\n pickle.dump(payload, fp)\n\n if (i - 1) % log_every == 0:\n torch.save(\n model.state_dict(),\n saved_models / f\"repeat_{r}_dsize_{train_length}_weights.pth\",\n )\n\n # skip if this is the last iteration\n if i == warm_start_iters:\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n continue\n\n # -- stage 2: acquire more data into the training set --\n\n # -- acquire using pseudo-labels --\n dm.unlabelled.debug = True\n idxs, plabs = get_confident_indices(\n model=model,\n dataset=dm.unlabelled,\n threshold=threshold,\n root=((pl_metrics / f\"repeat_{r}\") if r == 1 else None),\n step=i,\n device=device,\n **kwargs,\n )\n\n if idxs.shape[0]:\n truth = torchdata.Subset(dm.unlabelled, idxs)\n\n # replace true labels with pseudo-labels\n pseudo_labelled_points = RelabelledDataset(truth, plabs)\n assert len(pseudo_labelled_points) == idxs.shape[0]\n else:\n print(\n f\"\\tSelf-labelling didn't happen because none of the pseudo-labels are confident enough.\"\n )\n warm_start_accs.append(ws_accs_r)\n\n dm.unlabelled.debug = False\n\n print(\n f\"Warm-started with {warm_start_iters} iterations. Beginning AL acquisitions\"\n )\n\n for i in range(1, ITERS + 1):\n dm.acquire(b=b)\n print(f\"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===\")\n print(\n f\"\\ttrain: {dm.n_labelled}; val: {len(val)}; \"\n f\"pool: {dm.n_unlabelled}; test: {len(test)}\"\n )\n # model.reset_weights() # leverage p.l. from before, DON'T reset!\n trainer = Trainer(\n model,\n F.nll_loss,\n optimiser=\"Adam\",\n patience=3,\n reload_best=True,\n device=device,\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n test_metric = trainer.evaluate(test_loader)\n print(f\"\\t[test] acc: {test_metric['acc']}, time: {t}\")\n accs[dm.n_labelled].append(test_metric[\"acc\"])\n\n with open(f\"{template}_warm_start_accs.pkl\", \"wb\") as fp:\n pickle.dump(warm_start_accs, fp)\n\n with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n main(b=10, threshold=0.9, warm_start_iters=10, log_every=2)\n",
"from alr.training.pl_mixup import PLMixupTrainer, temp_ds_transform\nfrom alr.utils import manual_seed, timeop\nfrom alr.data.datasets import Dataset\nfrom alr.data import DataManager, UnlabelledDataset\nfrom alr.training.utils import PLPredictionSaver\nfrom alr import ALRModel\nfrom alr.acquisition import AcquisitionFunction\nfrom batchbald_redux.batchbald import get_batchbald_batch\n\nimport torch\nimport pickle\nimport torch.utils.data as torchdata\nimport torchvision as tv\nimport numpy as np\nfrom collections import defaultdict\nfrom ignite.engine import create_supervised_evaluator\nfrom pathlib import Path\nfrom torch import nn\n\n\nclass TemporalBatchBALD(AcquisitionFunction):\n def __init__(self):\n super(TemporalBatchBALD, self).__init__()\n self.labels_E_N_C: torch.Tensor = None\n self.recent_score = None\n\n def __call__(self, X_pool: torchdata.Dataset, b: int):\n pool_size = len(X_pool)\n mc_preds = self._labels.permute((1, 0, 2)) # self.labels_E_N_C.double()\n candidate_batch = get_batchbald_batch(\n mc_preds,\n batch_size=b,\n num_samples=10_000,\n dtype=torch.double,\n device=\"cuda:0\",\n )\n scores = np.array(candidate_batch.scores)\n indices = candidate_batch.indices\n assert scores.shape == (b,)\n assert np.isfinite(scores).all()\n self.recent_score = scores\n return np.array(indices[:b])\n\n @property\n def _labels(self):\n E = self.labels_E_N_C.shape[0]\n # have at least 10 even if self._last percent of E is\n # less than 10. If E is less than 10, then take everything (E)\n e = max(min(10, E), int(E * 0.2))\n return self.labels_E_N_C[-e:].double()\n\n\nclass Net(ALRModel):\n def __init__(self, model):\n super(Net, self).__init__()\n self.model = model\n self.snap()\n\n def forward(self, x):\n return self.model(x)\n\n\ndef calc_calib_metrics(loader, model: nn.Module, log_dir, device):\n evaluator = create_supervised_evaluator(model, metrics=None, device=device)\n pds = PLPredictionSaver(log_dir)\n pds.attach(evaluator)\n evaluator.run(loader)\n\n\ndef main( # acq_name: str,\n alpha: float, b: int, augment: bool, iters: int, repeats: int\n):\n acq_name = \"tbbald\"\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n # ========= CONSTANTS ===========\n BATCH_SIZE = 100\n # at least have this much points in one epoch (see RandomFixedLengthSampler)\n MIN_TRAIN_LENGTH = 20_000\n VAL_SIZE = 5_000\n MIN_LABELLED = 16\n # stage 1 and stage 2 patience\n PATIENCE = (5, 25)\n # how many epochs before LR is reduced\n LR_PATIENCE = 10\n # stage 1 and stage 2 of PL mixup training\n EPOCHS = (100, 400)\n\n REPEATS = repeats\n ITERS = iters\n\n # ========= SETUP ===========\n train, pool, test = Dataset.CIFAR10.get_fixed(raw=True)\n pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))\n pool = UnlabelledDataset(pool)\n test_loader = torchdata.DataLoader(\n test,\n batch_size=512,\n shuffle=False,\n **kwargs,\n )\n train_transform = test_transform = tv.transforms.Compose(\n [\n tv.transforms.ToTensor(),\n tv.transforms.Normalize(*Dataset.CIFAR10.normalisation_params),\n ]\n )\n test_ds_transform = temp_ds_transform(test_transform)\n if augment:\n data_augmentation = Dataset.CIFAR10.get_augmentation\n else:\n data_augmentation = None\n accs = defaultdict(list)\n\n template = f\"{acq_name}_{b}_alpha_{alpha}\" + (\"_aug\" if augment else \"\")\n metrics = Path(\"metrics\") / template\n calib_metrics = Path(\"calib_metrics\") / template\n saved_models = 
Path(\"saved_models\") / template\n metrics.mkdir(parents=True)\n calib_metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n bald_scores = None\n\n # since we need to know which points were taken for val dataset\n with open(metrics / \"pool_idxs.pkl\", \"wb\") as fp:\n pickle.dump(pool._dataset.indices, fp)\n\n for r in range(1, REPEATS + 1):\n print(f\"- [{acq_name} (b={b})] repeat #{r} of {REPEATS}-\")\n model = Net(Dataset.CIFAR10.model).to(device)\n acq_fn = TemporalBatchBALD()\n dm = DataManager(train, pool, acq_fn)\n dm.reset() # this resets pool\n\n for i in range(1, ITERS + 1):\n model.reset_weights()\n trainer = PLMixupTrainer(\n model,\n \"SGD\",\n train_transform,\n test_transform,\n {\"lr\": 0.1, \"momentum\": 0.9, \"weight_decay\": 1e-4},\n kwargs,\n log_dir=None,\n rfls_length=MIN_TRAIN_LENGTH,\n alpha=alpha,\n min_labelled=MIN_LABELLED,\n data_augmentation=data_augmentation,\n batch_size=BATCH_SIZE,\n patience=PATIENCE,\n lr_patience=LR_PATIENCE,\n device=device,\n )\n with dm.unlabelled.tmp_debug():\n with timeop() as t:\n history = trainer.fit(\n dm.labelled, val, dm.unlabelled, epochs=EPOCHS\n )\n\n acq_fn.labels_E_N_C = trainer.soft_label_history\n\n # eval\n test_metrics = trainer.evaluate(test_loader)\n print(f\"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===\")\n print(\n f\"\\ttrain: {dm.n_labelled}; val: {len(val)}; \"\n f\"pool: {dm.n_unlabelled}; test: {len(test)}\"\n )\n print(f\"\\t[test] acc: {test_metrics['acc']:.4f}, time: {t}\")\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n\n # save stuff\n\n # pool calib\n with dm.unlabelled.tmp_debug():\n pool_loader = torchdata.DataLoader(\n temp_ds_transform(test_transform, with_targets=True)(dm.unlabelled),\n batch_size=512,\n shuffle=False,\n **kwargs,\n )\n calc_calib_metrics(\n pool_loader,\n model,\n calib_metrics / \"pool\" / f\"rep_{r}\" / f\"iter_{i}\",\n device=device,\n )\n calc_calib_metrics(\n test_loader,\n model,\n calib_metrics / \"test\" / f\"rep_{r}\" / f\"iter_{i}\",\n device=device,\n )\n\n with open(metrics / f\"rep_{r}_iter_{i}.pkl\", \"wb\") as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n \"labelled_classes\": dm.unlabelled.labelled_classes,\n \"labelled_indices\": dm.unlabelled.labelled_indices,\n \"bald_scores\": bald_scores,\n }\n pickle.dump(payload, fp)\n torch.save(model.state_dict(), saved_models / f\"rep_{r}_iter_{i}.pt\")\n # flush results frequently for the impatient\n with open(template + \"_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n # finally, acquire points\n # transform pool samples toTensor and normalise them (since we used raw above!)\n acquired_idxs, _ = dm.acquire(b=b, transform=test_ds_transform)\n # if bald, store ALL bald scores and the acquired idx so we can map the top b scores\n # to the b acquired_idxs\n # acquired_idxs has the top b scores from recent_score\n bald_scores = (acquired_idxs, acq_fn.recent_score)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n args = argparse.ArgumentParser()\n args.add_argument(\"--alpha\", type=float, default=0.4)\n args.add_argument(\"--b\", default=10, type=int, help=\"Batch acq size (default = 10)\")\n args.add_argument(\"--augment\", action=\"store_true\")\n args.add_argument(\"--iters\", default=199, type=int)\n args.add_argument(\"--reps\", default=1, type=int)\n args = args.parse_args()\n\n main(\n alpha=args.alpha,\n b=args.b,\n augment=args.augment,\n iters=args.iters,\n repeats=args.reps,\n )\n",
"from alr.data.datasets import Dataset\nfrom alr.data import DataManager, UnlabelledDataset\nfrom alr.acquisition import RandomAcquisition\nfrom alr.training import Trainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.training.utils import PLPredictionSaver\nfrom alr.utils import manual_seed, timeop, stratified_partition\nfrom alr import MCDropout\n\nfrom models.resnet import resnet18_v2\nfrom models.wide_resnet import WRN28_2_wn\nfrom models.vgg import vgg16_cinic10_bn\nfrom models.pre_resnet_18 import PreactResNet18_WNdrop\nfrom models.efficient import EfficientNet\n\nimport torch\nimport pickle\nimport torch.utils.data as torchdata\nfrom torch.nn import functional as F\nfrom collections import defaultdict\nfrom ignite.engine import create_supervised_evaluator\nfrom pathlib import Path\nfrom torch import nn\n\n\ndef calc_calib_metrics(loader, model: nn.Module, log_dir, device):\n evaluator = create_supervised_evaluator(model, metrics=None, device=device)\n pds = PLPredictionSaver(log_dir)\n pds.attach(evaluator)\n evaluator.run(loader)\n\n\ndef main(model_name, aug, dataset, iters, repeats):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n # ========= CONSTANTS ===========\n BATCH_SIZE = 64\n # with early stopping, this'll probably be lesser\n EPOCHS = 200\n # at least have this much points in one epoch (see RandomFixedLengthSampler)\n MIN_TRAIN_LENGTH = 20_000\n VAL_SIZE = 5_000\n\n REPEATS = repeats\n ITERS = iters\n\n # ========= SETUP ===========\n if dataset == \"cifar\":\n train, test = Dataset.CIFAR10.get(augmentation=aug)\n elif dataset == \"cinic\":\n train, test = Dataset.CINIC10.get(augmentation=aug)\n else:\n raise ValueError(\"dataset only accepts two arguments: cinic or cifar\")\n train, pool = stratified_partition(train, classes=10, size=20)\n pool_idxs = pool.indices[:]\n pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))\n pool = UnlabelledDataset(pool)\n val_loader = torchdata.DataLoader(\n val,\n batch_size=512,\n shuffle=False,\n **kwargs,\n )\n test_loader = torchdata.DataLoader(\n test,\n batch_size=512,\n shuffle=False,\n **kwargs,\n )\n accs = defaultdict(list)\n\n template = f\"{model_name}_{dataset}\" + (\"_aug\" if aug else \"\")\n metrics = Path(\"metrics\") / template\n calib_metrics = Path(\"calib_metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics.mkdir(parents=True)\n calib_metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n\n # since we need to know which points were taken for val dataset\n with open(metrics / \"pool_idxs.pkl\", \"wb\") as fp:\n pickle.dump((pool_idxs, pool._dataset.indices), fp)\n\n for r in range(1, REPEATS + 1):\n print(f\"- [{model_name}] repeat #{r} of {REPEATS}-\")\n if model_name == \"vgg\":\n # 1D (0.5 by default in FC)\n model = vgg16_cinic10_bn(num_classes=10)\n elif model_name == \"wres\":\n # 1d weights + fc\n model = WRN28_2_wn(num_classes=10, dropout=0.5)\n elif model_name == \"res\":\n # 2d\n model = resnet18_v2(num_classes=10, dropout_rate=0.3, fc_dropout_rate=0.3)\n elif model_name == \"pres\":\n # 2d\n model = PreactResNet18_WNdrop(drop_val=0.3, num_classes=10)\n elif model_name == \"13cnn\":\n model = Dataset.CIFAR10.model\n elif model_name == \"eff\":\n model = EfficientNet(version=3, dropout_rate=0.5, num_classes=10)\n else:\n raise ValueError(f\"Unknown model architecture {model_name}.\")\n\n model = MCDropout(model, forward=20, 
fast=False).to(device)\n dm = DataManager(train, pool, RandomAcquisition())\n dm.reset() # this resets pool\n\n for i in range(1, ITERS + 1):\n model.reset_weights()\n trainer = Trainer(\n model,\n F.nll_loss,\n optimiser=\"Adam\",\n patience=10,\n reload_best=True,\n device=device,\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LENGTH, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)\n\n # eval\n test_metrics = trainer.evaluate(test_loader)\n print(f\"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===\")\n print(\n f\"\\ttrain: {dm.n_labelled}; val: {len(val)}; \"\n f\"pool: {dm.n_unlabelled}; test: {len(test)}\"\n )\n print(f\"\\t[test] acc: {test_metrics['acc']:.4f}, time: {t}\")\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n\n # save stuff\n\n # pool calib\n with dm.unlabelled.tmp_debug():\n pool_loader = torchdata.DataLoader(\n dm.unlabelled,\n batch_size=512,\n shuffle=False,\n **kwargs,\n )\n calc_calib_metrics(\n pool_loader,\n model,\n calib_metrics / \"pool\" / f\"rep_{r}\" / f\"iter_{i}\",\n device=device,\n )\n calc_calib_metrics(\n test_loader,\n model,\n calib_metrics / \"test\" / f\"rep_{r}\" / f\"iter_{i}\",\n device=device,\n )\n\n with open(metrics / f\"rep_{r}_iter_{i}.pkl\", \"wb\") as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n \"labelled_classes\": dm.unlabelled.labelled_classes,\n \"labelled_indices\": dm.unlabelled.labelled_indices,\n }\n pickle.dump(payload, fp)\n torch.save(model.state_dict(), saved_models / f\"rep_{r}_iter_{i}.pt\")\n # flush results frequently for the impatient\n with open(template + \"_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n dm.acquire(b=200)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n args = argparse.ArgumentParser()\n args.add_argument(\"--model\", choices=[\"vgg\", \"wres\", \"res\", \"pres\", \"13cnn\", \"eff\"])\n args.add_argument(\"--data\", choices=[\"cinic\", \"cifar\"])\n args.add_argument(\"--iters\", default=11, type=int)\n args.add_argument(\"--reps\", default=1, type=int)\n args.add_argument(\"--aug\", action=\"store_true\")\n args = args.parse_args()\n main(args.model, args.aug, args.data, args.iters, args.reps)\n",
"import pickle\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Optional, Callable\n\nimport numpy as np\nimport torch\nimport torch.utils.data as torchdata\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import create_supervised_evaluator, Events, Engine\nfrom ignite.metrics import Accuracy, Loss\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom alr import ALRModel\nfrom alr import MCDropout\nfrom alr.acquisition import BALD\nfrom alr.data import DataManager\nfrom alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\nfrom alr.data.datasets import Dataset\nfrom alr.training import Trainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.training.utils import EarlyStopper, PLPredictionSaver\nfrom alr.utils import eval_fwd_exp, timeop, manual_seed\nfrom alr.utils._type_aliases import _DeviceType, _Loss_fn\n\n\nclass PseudoLabelManager:\n def __init__(\n self,\n pool: UnlabelledDataset,\n model: nn.Module,\n threshold: float,\n log_dir: Optional[str] = None,\n device: _DeviceType = None,\n **kwargs,\n ):\n bs = kwargs.pop(\"batch_size\", 1024)\n shuffle = kwargs.pop(\"shuffle\", False)\n assert not shuffle\n self._pool = pool\n self._loader = torchdata.DataLoader(\n pool, batch_size=bs, shuffle=shuffle, **kwargs\n )\n self._model = model\n self._log_dir = log_dir\n self._device = device\n self._threshold = threshold\n self.acquired_sizes = []\n\n def attach(self, engine: Engine):\n engine.add_event_handler(Events.STARTED, self._initialise)\n # could also be EPOCH_COMPLETED since there's only one iteration in each epoch\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)\n\n def _load_labels(self, engine: Engine):\n evaluator = create_supervised_evaluator(\n self._model, metrics=None, device=self._device\n )\n plc = PseudoLabelCollector(\n self._threshold,\n log_dir=self._log_dir,\n )\n plc.attach(evaluator, batch_size=self._loader.batch_size)\n plc.global_step_from_engine(engine)\n evaluator.run(self._loader)\n indices, pseudo_labels = (\n evaluator.state.pl_indices.cpu().numpy(),\n evaluator.state.pl_plabs.cpu().numpy(),\n )\n self.acquired_sizes.append(indices.shape[0])\n if indices.shape[0]:\n confident_points = torchdata.Subset(self._pool, indices)\n if self._pool.debug:\n # pool returns target labels too\n engine.state.pseudo_labelled_dataset = RelabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = PseudoLabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = None\n\n @staticmethod\n def _initialise(engine: Engine):\n engine.state.pseudo_labelled_dataset = None\n\n\nclass PseudoLabelCollector:\n def __init__(\n self,\n threshold: float,\n log_dir: Optional[str] = None,\n pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),\n ):\n self._indices = []\n self._plabs = []\n self._pred_transform = pred_transform\n self._output_transform = lambda x: x\n self._thresh = threshold\n self._targets = []\n self._preds = []\n if log_dir:\n self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)\n else:\n self._saver = None\n self._batch_size = None\n\n def _parse(self, engine: Engine):\n preds, targets = self._output_transform(engine.state.output)\n # state.iteration starts with 1\n iteration = engine.state.iteration - 1\n offset = iteration * self._batch_size\n with torch.no_grad():\n preds = self._pred_transform(preds)\n preds_max, plabs = 
torch.max(preds, dim=-1)\n mask = torch.nonzero(preds_max >= self._thresh).flatten()\n if mask.shape[0]:\n # plabs = [N,]\n self._plabs.append(plabs[mask])\n self._indices.append(mask + offset)\n\n def _flush(self, engine: Engine):\n if self._indices and self._plabs:\n engine.state.pl_indices = torch.cat(self._indices)\n engine.state.pl_plabs = torch.cat(self._plabs)\n else:\n engine.state.pl_indices = torch.Tensor([])\n engine.state.pl_plabs = torch.Tensor([])\n self._indices = []\n self._plabs = []\n\n def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):\n r\"\"\"\n\n Args:\n engine (Engine): ignite engine object\n batch_size (int): engine's batch size\n output_transform (Callable): if engine.state.output is not (preds, target),\n then output_transform should return aforementioned tuple.\n\n Returns:\n NoneType: None\n \"\"\"\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)\n engine.add_event_handler(Events.COMPLETED, self._flush)\n self._output_transform = output_transform\n self._batch_size = batch_size\n if self._saver:\n self._saver.attach(engine, output_transform=output_transform)\n\n def global_step_from_engine(self, engine: Engine):\n if self._saver:\n self._saver.global_step_from_engine(engine)\n\n\ndef _update_dataloader(\n loader: torchdata.DataLoader,\n dataset: torchdata.Dataset,\n sampler: Optional[torchdata.Sampler] = None,\n):\n # attributes that usually go in dataloader's constructor\n attrs = [k for k in loader.__dict__.keys() if not k.startswith(\"_\")]\n drop = [\"dataset\", \"sampler\", \"batch_sampler\", \"dataset_kind\"]\n kwargs = {k: getattr(loader, k) for k in attrs if k not in drop}\n if not isinstance(\n loader.sampler,\n (\n torchdata.SequentialSampler,\n torchdata.RandomSampler,\n RandomFixedLengthSampler,\n ),\n ):\n raise ValueError(\n f\"Only sequential, random, and random fixed length samplers \"\n f\"are supported in _update_dataloader\"\n )\n kwargs[\"dataset\"] = dataset\n # Sequential and Random will be automatically determined if sampler is None (depending on shuffle)\n kwargs[\"sampler\"] = sampler\n return torchdata.DataLoader(**kwargs)\n\n\ndef create_pseudo_label_trainer(\n model: ALRModel,\n loss: _Loss_fn,\n optimiser: str,\n train_loader: torchdata.DataLoader,\n val_loader: torchdata.DataLoader,\n pseudo_label_manager: PseudoLabelManager,\n rfls_len: Optional[int] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = None,\n epochs: Optional[int] = 1,\n device: _DeviceType = None,\n *args,\n **kwargs,\n):\n def _step(engine: Engine, _):\n # update loader accordingly: if pld is not none, concatenate them\n new_loader = train_loader\n pld = engine.state.pseudo_labelled_dataset\n if pld is not None:\n # only reset weights if engine.state.epoch != 1\n model.reset_weights()\n train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))\n # update dataloader's dataset attribute\n if rfls_len:\n new_loader = _update_dataloader(\n train_loader,\n train_ds,\n RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),\n )\n else:\n new_loader = _update_dataloader(train_loader, train_ds)\n else:\n assert engine.state.epoch == 1\n\n # begin supervised training\n trainer = Trainer(\n model,\n loss,\n optimiser,\n patience,\n reload_best,\n device=device,\n *args,\n **kwargs,\n )\n history = trainer.fit(\n new_loader,\n val_loader=val_loader,\n epochs=epochs,\n )\n\n # if early stopping was applied w/ patience, then the actual train acc and loss should be\n # -patience from the 
final loss/acc UNLESS we reached the maximum number of epochs.\n if patience and len(history[\"train_loss\"]) != epochs:\n return history[\"train_loss\"][-patience], history[\"train_acc\"][-patience]\n return history[\"train_loss\"][-1], history[\"train_acc\"][-1]\n\n e = Engine(_step)\n pseudo_label_manager.attach(e)\n return e\n\n\nclass EphemeralTrainer:\n def __init__(\n self,\n model: ALRModel,\n pool: UnlabelledDataset,\n loss: _Loss_fn,\n optimiser: str,\n threshold: float,\n random_fixed_length_sampler_length: Optional[int] = None,\n log_dir: Optional[str] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = False,\n device: _DeviceType = None,\n pool_loader_kwargs: Optional[dict] = {},\n *args,\n **kwargs,\n ):\n self._pool = pool\n self._model = model\n self._loss = loss\n self._optimiser = optimiser\n self._patience = patience\n self._reload_best = reload_best\n self._device = device\n self._args = args\n self._kwargs = kwargs\n self._threshold = threshold\n self._log_dir = log_dir\n self._pool_loader_kwargs = pool_loader_kwargs\n self._rfls_len = random_fixed_length_sampler_length\n\n def fit(\n self,\n train_loader: torchdata.DataLoader,\n val_loader: Optional[torchdata.DataLoader] = None,\n iterations: Optional[int] = 1,\n epochs: Optional[int] = 1,\n ):\n if self._patience and val_loader is None:\n raise ValueError(\n \"If patience is specified, then val_loader must be provided in .fit().\"\n )\n\n val_evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n\n history = defaultdict(list)\n pbar = ProgressBar()\n\n def _log_metrics(engine: Engine):\n # train_loss and train_acc are moving averages of the last epoch\n # in the supervised training loop\n train_loss, train_acc = engine.state.output\n history[f\"train_loss\"].append(train_loss)\n history[f\"train_acc\"].append(train_acc)\n pbar.log_message(\n f\"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\\n\"\n f\"\\ttrain acc = {train_acc}, train loss = {train_loss}\"\n )\n if val_loader is None:\n return # job done\n # val loader - save to history and print metrics. Also, add handlers to\n # evaluator (e.g. 
early stopping, model checkpointing that depend on val_acc)\n metrics = val_evaluator.run(val_loader).metrics\n\n history[f\"val_acc\"].append(metrics[\"acc\"])\n history[f\"val_loss\"].append(metrics[\"loss\"])\n pbar.log_message(\n f\"\\tval acc = {metrics['acc']}, val loss = {metrics['loss']}\"\n )\n\n pseudo_label_manager = PseudoLabelManager(\n pool=self._pool,\n model=self._model,\n threshold=self._threshold,\n log_dir=self._log_dir,\n device=self._device,\n **self._pool_loader_kwargs,\n )\n trainer = create_pseudo_label_trainer(\n model=self._model,\n loss=self._loss,\n optimiser=self._optimiser,\n train_loader=train_loader,\n val_loader=val_loader,\n pseudo_label_manager=pseudo_label_manager,\n rfls_len=self._rfls_len,\n patience=self._patience,\n reload_best=self._reload_best,\n epochs=epochs,\n device=self._device,\n *self._args,\n **self._kwargs,\n )\n # output of trainer are running averages of train_loss and train_acc (from the\n # last epoch of the supervised trainer)\n pbar.attach(trainer, output_transform=lambda x: {\"loss\": x[0], \"acc\": x[1]})\n if val_loader is not None and self._patience:\n es = EarlyStopper(\n self._model, self._patience, trainer, key=\"acc\", mode=\"max\"\n )\n es.attach(val_evaluator)\n trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)\n trainer.run(\n range(iterations),\n max_epochs=iterations,\n epoch_length=1,\n )\n if val_loader is not None and self._patience and self._reload_best:\n es.reload_best()\n\n history[\"train_size\"] = np.array(pseudo_label_manager.acquired_sizes) + len(\n train_loader.dataset\n )\n return history\n\n def evaluate(self, data_loader: torchdata.DataLoader) -> dict:\n evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n return evaluator.run(data_loader).metrics\n\n\ndef main(threshold: float, b: int):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n BATCH_SIZE = 64\n REPS = 6\n ITERS = 24\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n SSL_ITERATIONS = 200\n EPOCHS = 200\n\n accs = defaultdict(list)\n\n template = f\"thresh_{threshold}_b_{b}\"\n calib_metrics = Path(\"calib_metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics = Path(\"metrics\") / template\n calib_metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n metrics.mkdir(parents=True)\n\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool)\n test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)\n val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)\n\n for r in range(1, REPS + 1):\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)\n dm = DataManager(train, pool, bald)\n dm.reset() # to reset pool\n print(f\"=== repeat #{r} of {REPS} ===\")\n for i in range(1, ITERS + 1):\n # don't reset weights: let ephemeral trainer take care of it\n # since we're collecting calibration metrics,\n # make pool return targets too. (i.e. 
debug mode)\n with dm.unlabelled.tmp_debug():\n trainer = EphemeralTrainer(\n model,\n dm.unlabelled,\n F.nll_loss,\n \"Adam\",\n threshold=threshold,\n random_fixed_length_sampler_length=MIN_TRAIN_LEN,\n log_dir=(calib_metrics / f\"rep_{r}\" / f\"iter_{i}\"),\n patience=3,\n reload_best=True,\n device=device,\n pool_loader_kwargs=kwargs,\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(\n train_loader,\n val_loader,\n iterations=SSL_ITERATIONS,\n epochs=EPOCHS,\n )\n # eval on test set\n test_metrics = trainer.evaluate(test_loader)\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n print(f\"-- Iteration {i} of {ITERS} --\")\n print(\n f\"\\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\\n\"\n f\"\\t[test] acc: {test_metrics['acc']}; time: {t}\"\n )\n\n # save stuff\n with open(metrics / f\"rep_{r}_iter_{i}.pkl\", \"wb\") as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n \"labelled_classes\": dm.unlabelled.labelled_classes,\n \"labelled_indices\": dm.unlabelled.labelled_indices,\n }\n pickle.dump(payload, fp)\n torch.save(model.state_dict(), saved_models / f\"rep_{r}_iter_{i}.pth\")\n\n # finally, acquire points\n dm.acquire(b)\n\n with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n main(threshold=0.95, b=10)\n",
"import torch\nimport numpy as np\nimport copy\n\nfrom alr import MCDropout, ALRModel\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Net1(nn.Module):\n def __init__(self):\n super(Net1, self).__init__()\n self.fc = nn.Linear(10, 10)\n self.drop = nn.Dropout()\n\n def forward(self, x):\n return self.drop(self.fc(x))\n\n\nclass Net2(nn.Module):\n def __init__(self):\n super(Net2, self).__init__()\n self.fc = nn.Linear(10, 10)\n self.drop = nn.Dropout()\n\n def forward(self, x):\n return F.log_softmax(self.drop(self.fc(x)), dim=-1)\n\n\ndef test_mcd_logsoft_consistency():\n # apply_softmax should be consistent with actually using F.log_softmax\n # in the model definition itself.\n model1 = MCDropout(\n Net1(), output_transform=lambda x: F.log_softmax(x, dim=-1), forward=10\n )\n model2 = MCDropout(Net2(), forward=10)\n model2.load_state_dict(model1.state_dict(), strict=True)\n model1.train()\n model2.train()\n tensor = torch.randn(size=(5, 10))\n torch.manual_seed(42)\n logsoft1 = model1(tensor)\n torch.manual_seed(42)\n logsoft2 = model2(tensor)\n assert torch.allclose(logsoft1, logsoft2)\n\n\ndef test_mcd_with_logsoft():\n # model's forward pass should sum to one\n model = MCDropout(\n Net1(), output_transform=lambda x: F.log_softmax(x, dim=-1), forward=10\n )\n model.train()\n output = model(torch.randn(size=(5, 10))).exp_().sum(dim=-1)\n assert torch.allclose(output, torch.ones_like(output))\n\n\ndef test_mcd_stochastic_fwd():\n # stochastic_forward's individual forward passes should sum to one\n model = MCDropout(\n Net1(), output_transform=lambda x: F.log_softmax(x, dim=-1), forward=10\n )\n model.eval()\n size = (12301, 10)\n output = model.stochastic_forward(torch.randn(size=size)).exp_()\n assert output.shape == (10, size[0], 10)\n output = output.sum(dim=-1)\n assert torch.allclose(output, torch.ones_like(output))\n\n\ndef test_mcd_stochastic_fwd_wo_logsoft():\n # stochastic_forward's individual forward passes should sum to one\n model = MCDropout(Net2(), forward=10)\n model.eval()\n size = (12301, 10)\n output = model.stochastic_forward(torch.randn(size=size)).exp_()\n assert output.shape == (10, size[0], 10)\n output = output.sum(dim=-1)\n assert torch.allclose(output, torch.ones_like(output))\n\n\ndef test_mcd_eval_forward_logsumexp():\n # using log_softmax\n model = MCDropout(\n Net1(),\n reduce=\"logsumexp\",\n output_transform=lambda x: F.log_softmax(x, dim=-1),\n forward=10,\n )\n model.eval()\n output = model(torch.randn(size=(12309, 10))).exp_().sum(dim=-1)\n assert torch.allclose(output, torch.ones_like(output))\n\n\ndef test_mcd_eval_forward_mean():\n # using softmax\n model = MCDropout(\n Net1(),\n reduce=\"mean\",\n output_transform=lambda x: F.softmax(x, dim=-1),\n forward=10,\n )\n model.eval()\n output = model(torch.randn(size=(12309, 10))).sum(dim=-1)\n assert torch.allclose(output, torch.ones_like(output))\n\n\ndef test_mcd_eval_forward_consistent_with_predict():\n # when model's in eval, predict should have the same behaviour as forward\n model = MCDropout(\n Net1(), output_transform=lambda x: F.log_softmax(x, dim=-1), forward=10\n )\n model.eval()\n input = torch.randn(size=(12309, 10))\n torch.manual_seed(42)\n output = model(input).exp_()\n torch.manual_seed(42)\n # model.predict overrides model.train() with .eval()\n model.train()\n output2 = model.predict(input).exp_()\n assert torch.allclose(output, output2)\n\n\ndef test_mcd_fast_stochastic_fwd_flat_data():\n data = torch.from_numpy(np.random.normal(size=(1, 10))).float()\n net = 
MCDropout(Net2(), forward=50, fast=True)\n with torch.no_grad():\n preds = net.stochastic_forward(data)\n assert preds.size() == (50, 1, 10)\n # all n_forward instances of the data are identical,\n # but we assert that the output is stochastic, as required.\n # if the same dropout2d mask was used for each item in the batch, then\n # the variance wouldn't be 0\n assert (preds.var(dim=0) > 1e-3).all()\n\n\ndef test_mcd_fast_stochastic_fwd_img_data():\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, 5)\n # 32 24 24\n self.dropout1 = nn.Dropout2d()\n # maxpool --\n # 32 12 12\n self.conv2 = nn.Conv2d(32, 64, 5)\n # 64 8 8\n self.dropout2 = nn.Dropout2d()\n # maxpool --\n # 64 4 4\n self.fc1 = nn.Linear(64 * 4 * 4, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = F.max_pool2d(self.dropout1(F.relu(self.conv1(x))), 2)\n x = F.max_pool2d(self.dropout2(F.relu(self.conv2(x))), 2)\n x = x.view(-1, 64 * 4 * 4)\n x = self.fc2(F.relu(self.fc1(x)))\n return F.log_softmax(x, dim=1)\n\n img = torch.from_numpy(np.random.normal(size=(1, 3, 28, 28))).float()\n net = MCDropout(Net(), forward=20, fast=True)\n with torch.no_grad():\n preds = net.stochastic_forward(img)\n assert preds.size() == (20, 1, 10)\n # all n_forward instances of the img are identical,\n # but we assert that the output is stochastic, as required.\n # if the same dropout2d mask was used for each item in the batch, then\n # the variance wouldn't be 0\n assert (preds.var(dim=0) > 1e-3).all()\n\n\ndef test_ALRModel_reset_weights_param():\n class A(ALRModel):\n def __init__(self):\n super().__init__()\n self.w = nn.Linear(10, 2)\n b = torch.ones(size=(2,))\n # this should be tracked and reset properly too!\n self.b = nn.Parameter(b)\n self.snap()\n\n def forward(self, x):\n return F.log_softmax(self.w(x) + self.b, dim=-1)\n\n data = torch.from_numpy(np.random.normal(size=(32, 10))).float()\n targets = torch.from_numpy(np.random.randint(0, 2, size=(32,)))\n net = A()\n\n optim = torch.optim.Adam(net.parameters())\n store = copy.deepcopy(net.state_dict())\n\n # train model to change weights\n for _ in range(50):\n preds = net(data)\n loss = F.nll_loss(preds, targets)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # weights shouldn't be the same after they're trained\n with torch.no_grad():\n for k, v in net.state_dict().items():\n assert isinstance(v, torch.Tensor)\n assert (torch.abs(v - store[k]) > 1e-4).all()\n\n # reset weights\n net.reset_weights()\n\n # make sure weights are reset\n for k, v in net.state_dict().items():\n assert isinstance(v, torch.Tensor)\n assert torch.allclose(v, store[k])\n\n\ndef test_ALRModel_reset_weights():\n class A(nn.Module):\n def __init__(self):\n super().__init__()\n self.w = nn.Linear(10, 2)\n\n def forward(self, x):\n return F.log_softmax(self.w(x), dim=-1)\n\n data = torch.from_numpy(np.random.normal(size=(32, 10))).float()\n targets = torch.from_numpy(np.random.randint(0, 2, size=(32,)))\n net = MCDropout(A())\n\n optim = torch.optim.Adam(net.parameters())\n store = copy.deepcopy(net.state_dict())\n\n # train model to change weights\n for _ in range(50):\n preds = net(data)\n loss = F.nll_loss(preds, targets)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # weights shouldn't be the same after they're trained\n with torch.no_grad():\n for k, v in net.state_dict().items():\n assert isinstance(v, torch.Tensor)\n assert (torch.abs(v - store[k]) > 1e-4).all()\n\n # reset weights\n net.reset_weights()\n\n # make sure 
weights are reset\n for k, v in net.state_dict().items():\n assert isinstance(v, torch.Tensor)\n assert torch.allclose(v, store[k])\n\n\ndef test_mc_dropout_fast_img_data(benchmark):\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, 5)\n # 32 24 24\n self.dropout1 = nn.Dropout2d()\n # maxpool --\n # 32 12 12\n self.conv2 = nn.Conv2d(32, 64, 5)\n # 64 8 8\n self.dropout2 = nn.Dropout2d()\n # maxpool --\n # 64 4 4\n self.fc1 = nn.Linear(64 * 4 * 4, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = F.max_pool2d(self.dropout1(F.relu(self.conv1(x))), 2)\n x = F.max_pool2d(self.dropout2(F.relu(self.conv2(x))), 2)\n x = x.view(-1, 64 * 4 * 4)\n x = self.fc2(F.relu(self.fc1(x)))\n return F.log_softmax(x, dim=1)\n\n img = torch.from_numpy(np.random.normal(size=(32, 3, 28, 28))).float()\n\n def fast():\n net = MCDropout(Net(), forward=20, fast=True)\n with torch.no_grad():\n net.stochastic_forward(img)\n\n benchmark(fast)\n\n\ndef test_mc_dropout_regular_img_data(benchmark):\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, 5)\n # 32 24 24\n self.dropout1 = nn.Dropout2d()\n # maxpool --\n # 32 12 12\n self.conv2 = nn.Conv2d(32, 64, 5)\n # 64 8 8\n self.dropout2 = nn.Dropout2d()\n # maxpool --\n # 64 4 4\n self.fc1 = nn.Linear(64 * 4 * 4, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = F.max_pool2d(self.dropout1(F.relu(self.conv1(x))), 2)\n x = F.max_pool2d(self.dropout2(F.relu(self.conv2(x))), 2)\n x = x.view(-1, 64 * 4 * 4)\n x = self.fc2(F.relu(self.fc1(x)))\n return F.log_softmax(x, dim=1)\n\n img = torch.from_numpy(np.random.normal(size=(32, 3, 28, 28))).float()\n\n def regular():\n net = MCDropout(Net(), forward=20, fast=False)\n with torch.no_grad():\n net.stochastic_forward(img)\n\n benchmark(regular)\n\n\ndef test_mc_dropout_fast_flat_data(benchmark):\n data = torch.from_numpy(np.random.normal(size=(32, 10))).float()\n\n def fast():\n net = MCDropout(Net2(), forward=50, fast=True)\n with torch.no_grad():\n net.stochastic_forward(data)\n\n benchmark(fast)\n\n\ndef test_mc_dropout_regular_flat_data(benchmark):\n data = torch.from_numpy(np.random.normal(size=(32, 10))).float()\n\n def regular():\n net = MCDropout(Net2(), forward=50, fast=False)\n with torch.no_grad():\n net.stochastic_forward(data)\n\n benchmark(regular)\n",
"import pickle\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Optional, Callable\n\nimport numpy as np\nimport torch\nimport torch.utils.data as torchdata\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import create_supervised_evaluator, Events, Engine\nfrom ignite.metrics import Accuracy, Loss\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom alr import ALRModel\nfrom alr import MCDropout\nfrom alr.acquisition import BALD\nfrom alr.data import DataManager\nfrom alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\nfrom alr.data.datasets import Dataset\nfrom alr.training import Trainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.training.utils import EarlyStopper, PLPredictionSaver\nfrom alr.utils import eval_fwd_exp, timeop, manual_seed\nfrom alr.utils._type_aliases import _DeviceType, _Loss_fn\n\n\nclass PseudoLabelManager:\n def __init__(\n self,\n pool: UnlabelledDataset,\n model: nn.Module,\n threshold: float,\n log_dir: Optional[str] = None,\n device: _DeviceType = None,\n **kwargs,\n ):\n bs = kwargs.pop(\"batch_size\", 1024)\n shuffle = kwargs.pop(\"shuffle\", False)\n assert not shuffle\n self._pool = pool\n self._loader = torchdata.DataLoader(\n pool, batch_size=bs, shuffle=shuffle, **kwargs\n )\n self._model = model\n self._log_dir = log_dir\n self._device = device\n self._threshold = threshold\n self.acquired_sizes = []\n\n def attach(self, engine: Engine):\n engine.add_event_handler(Events.STARTED, self._initialise)\n # could also be EPOCH_COMPLETED since there's only one iteration in each epoch\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)\n\n def _load_labels(self, engine: Engine):\n evaluator = create_supervised_evaluator(\n self._model, metrics=None, device=self._device\n )\n plc = PseudoLabelCollector(\n self._threshold,\n log_dir=self._log_dir,\n )\n plc.attach(evaluator, batch_size=self._loader.batch_size)\n plc.global_step_from_engine(engine)\n evaluator.run(self._loader)\n indices, pseudo_labels = (\n evaluator.state.pl_indices.cpu().numpy(),\n evaluator.state.pl_plabs.cpu().numpy(),\n )\n self.acquired_sizes.append(indices.shape[0])\n if indices.shape[0]:\n confident_points = torchdata.Subset(self._pool, indices)\n if self._pool.debug:\n # pool returns target labels too\n engine.state.pseudo_labelled_dataset = RelabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = PseudoLabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = None\n\n @staticmethod\n def _initialise(engine: Engine):\n engine.state.pseudo_labelled_dataset = None\n\n\nclass PseudoLabelCollector:\n def __init__(\n self,\n threshold: float,\n log_dir: Optional[str] = None,\n pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),\n ):\n self._indices = []\n self._plabs = []\n self._pred_transform = pred_transform\n self._output_transform = lambda x: x\n self._thresh = threshold\n self._targets = []\n self._preds = []\n if log_dir:\n self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)\n else:\n self._saver = None\n self._batch_size = None\n\n def _parse(self, engine: Engine):\n preds, targets = self._output_transform(engine.state.output)\n # state.iteration starts with 1\n iteration = engine.state.iteration - 1\n offset = iteration * self._batch_size\n with torch.no_grad():\n preds = self._pred_transform(preds)\n preds_max, plabs = 
torch.max(preds, dim=-1)\n mask = torch.nonzero(preds_max >= self._thresh).flatten()\n if mask.shape[0]:\n # plabs = [N,]\n self._plabs.append(plabs[mask])\n self._indices.append(mask + offset)\n\n def _flush(self, engine: Engine):\n if self._indices and self._plabs:\n engine.state.pl_indices = torch.cat(self._indices)\n engine.state.pl_plabs = torch.cat(self._plabs)\n else:\n engine.state.pl_indices = torch.Tensor([])\n engine.state.pl_plabs = torch.Tensor([])\n self._indices = []\n self._plabs = []\n\n def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):\n r\"\"\"\n\n Args:\n engine (Engine): ignite engine object\n batch_size (int): engine's batch size\n output_transform (Callable): if engine.state.output is not (preds, target),\n then output_transform should return aforementioned tuple.\n\n Returns:\n NoneType: None\n \"\"\"\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)\n engine.add_event_handler(Events.COMPLETED, self._flush)\n self._output_transform = output_transform\n self._batch_size = batch_size\n if self._saver:\n self._saver.attach(engine, output_transform=output_transform)\n\n def global_step_from_engine(self, engine: Engine):\n if self._saver:\n self._saver.global_step_from_engine(engine)\n\n\ndef _update_dataloader(\n loader: torchdata.DataLoader,\n dataset: torchdata.Dataset,\n sampler: Optional[torchdata.Sampler] = None,\n):\n # attributes that usually go in dataloader's constructor\n attrs = [k for k in loader.__dict__.keys() if not k.startswith(\"_\")]\n drop = [\"dataset\", \"sampler\", \"batch_sampler\", \"dataset_kind\"]\n kwargs = {k: getattr(loader, k) for k in attrs if k not in drop}\n if not isinstance(\n loader.sampler,\n (\n torchdata.SequentialSampler,\n torchdata.RandomSampler,\n RandomFixedLengthSampler,\n ),\n ):\n raise ValueError(\n f\"Only sequential, random, and random fixed length samplers \"\n f\"are supported in _update_dataloader\"\n )\n kwargs[\"dataset\"] = dataset\n # Sequential and Random will be automatically determined if sampler is None (depending on shuffle)\n kwargs[\"sampler\"] = sampler\n return torchdata.DataLoader(**kwargs)\n\n\ndef create_pseudo_label_trainer(\n model: ALRModel,\n loss: _Loss_fn,\n optimiser: str,\n train_loader: torchdata.DataLoader,\n val_loader: torchdata.DataLoader,\n pseudo_label_manager: PseudoLabelManager,\n rfls_len: Optional[int] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = None,\n epochs: Optional[int] = 1,\n device: _DeviceType = None,\n *args,\n **kwargs,\n):\n def _step(engine: Engine, _):\n # update loader accordingly: if pld is not none, concatenate them\n new_loader = train_loader\n pld = engine.state.pseudo_labelled_dataset\n if pld is not None:\n # only reset weights if engine.state.epoch != 1\n model.reset_weights()\n train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))\n # update dataloader's dataset attribute\n if rfls_len:\n new_loader = _update_dataloader(\n train_loader,\n train_ds,\n RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),\n )\n else:\n new_loader = _update_dataloader(train_loader, train_ds)\n else:\n assert engine.state.epoch == 1\n\n # begin supervised training\n trainer = Trainer(\n model,\n loss,\n optimiser,\n patience,\n reload_best,\n device=device,\n *args,\n **kwargs,\n )\n history = trainer.fit(\n new_loader,\n val_loader=val_loader,\n epochs=epochs,\n )\n\n # if early stopping was applied w/ patience, then the actual train acc and loss should be\n # -patience from the 
final loss/acc UNLESS we reached the maximum number of epochs.\n if patience and len(history[\"train_loss\"]) != epochs:\n return history[\"train_loss\"][-patience], history[\"train_acc\"][-patience]\n return history[\"train_loss\"][-1], history[\"train_acc\"][-1]\n\n e = Engine(_step)\n pseudo_label_manager.attach(e)\n return e\n\n\nclass EphemeralTrainer:\n def __init__(\n self,\n model: ALRModel,\n pool: UnlabelledDataset,\n loss: _Loss_fn,\n optimiser: str,\n threshold: float,\n random_fixed_length_sampler_length: Optional[int] = None,\n log_dir: Optional[str] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = False,\n device: _DeviceType = None,\n pool_loader_kwargs: Optional[dict] = {},\n *args,\n **kwargs,\n ):\n self._pool = pool\n self._model = model\n self._loss = loss\n self._optimiser = optimiser\n self._patience = patience\n self._reload_best = reload_best\n self._device = device\n self._args = args\n self._kwargs = kwargs\n self._threshold = threshold\n self._log_dir = log_dir\n self._pool_loader_kwargs = pool_loader_kwargs\n self._rfls_len = random_fixed_length_sampler_length\n\n def fit(\n self,\n train_loader: torchdata.DataLoader,\n val_loader: Optional[torchdata.DataLoader] = None,\n iterations: Optional[int] = 1,\n epochs: Optional[int] = 1,\n ):\n if self._patience and val_loader is None:\n raise ValueError(\n \"If patience is specified, then val_loader must be provided in .fit().\"\n )\n\n val_evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n\n history = defaultdict(list)\n pbar = ProgressBar()\n\n def _log_metrics(engine: Engine):\n # train_loss and train_acc are moving averages of the last epoch\n # in the supervised training loop\n train_loss, train_acc = engine.state.output\n history[f\"train_loss\"].append(train_loss)\n history[f\"train_acc\"].append(train_acc)\n pbar.log_message(\n f\"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\\n\"\n f\"\\ttrain acc = {train_acc}, train loss = {train_loss}\"\n )\n if val_loader is None:\n return # job done\n # val loader - save to history and print metrics. Also, add handlers to\n # evaluator (e.g. 
early stopping, model checkpointing that depend on val_acc)\n metrics = val_evaluator.run(val_loader).metrics\n\n history[f\"val_acc\"].append(metrics[\"acc\"])\n history[f\"val_loss\"].append(metrics[\"loss\"])\n pbar.log_message(\n f\"\\tval acc = {metrics['acc']}, val loss = {metrics['loss']}\"\n )\n\n pseudo_label_manager = PseudoLabelManager(\n pool=self._pool,\n model=self._model,\n threshold=self._threshold,\n log_dir=self._log_dir,\n device=self._device,\n **self._pool_loader_kwargs,\n )\n trainer = create_pseudo_label_trainer(\n model=self._model,\n loss=self._loss,\n optimiser=self._optimiser,\n train_loader=train_loader,\n val_loader=val_loader,\n pseudo_label_manager=pseudo_label_manager,\n rfls_len=self._rfls_len,\n patience=self._patience,\n reload_best=self._reload_best,\n epochs=epochs,\n device=self._device,\n *self._args,\n **self._kwargs,\n )\n # output of trainer are running averages of train_loss and train_acc (from the\n # last epoch of the supervised trainer)\n pbar.attach(trainer, output_transform=lambda x: {\"loss\": x[0], \"acc\": x[1]})\n if val_loader is not None and self._patience:\n es = EarlyStopper(\n self._model, self._patience, trainer, key=\"acc\", mode=\"max\"\n )\n es.attach(val_evaluator)\n trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)\n trainer.run(\n range(iterations),\n max_epochs=iterations,\n epoch_length=1,\n )\n if val_loader is not None and self._patience and self._reload_best:\n es.reload_best()\n\n history[\"train_size\"] = np.array(pseudo_label_manager.acquired_sizes) + len(\n train_loader.dataset\n )\n return history\n\n def evaluate(self, data_loader: torchdata.DataLoader) -> dict:\n evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n return evaluator.run(data_loader).metrics\n\n\ndef main(threshold: float, b: int):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n BATCH_SIZE = 64\n REPS = 3\n ITERS = 14\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n SSL_ITERATIONS = 200\n EPOCHS = 200\n\n accs = defaultdict(list)\n\n template = f\"thresh_{threshold}_b_{b}\"\n calib_metrics = Path(\"calib_metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics = Path(\"metrics\") / template\n calib_metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n metrics.mkdir(parents=True)\n\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool)\n test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)\n val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)\n\n for r in range(1, REPS + 1):\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)\n dm = DataManager(train, pool, bald)\n dm.reset() # to reset pool\n print(f\"=== repeat #{r} of {REPS} ===\")\n for i in range(1, ITERS + 1):\n # don't reset weights: let ephemeral trainer take care of it\n # since we're collecting calibration metrics,\n # make pool return targets too. (i.e. 
debug mode)\n with dm.unlabelled.tmp_debug():\n trainer = EphemeralTrainer(\n model,\n dm.unlabelled,\n F.nll_loss,\n \"Adam\",\n threshold=threshold,\n random_fixed_length_sampler_length=MIN_TRAIN_LEN,\n log_dir=(calib_metrics / f\"rep_{r}\" / f\"iter_{i}\"),\n patience=3,\n reload_best=True,\n device=device,\n pool_loader_kwargs=kwargs,\n )\n train_loader = torchdata.DataLoader(\n dm.labelled,\n batch_size=BATCH_SIZE,\n sampler=RandomFixedLengthSampler(\n dm.labelled, MIN_TRAIN_LEN, shuffle=True\n ),\n **kwargs,\n )\n with timeop() as t:\n history = trainer.fit(\n train_loader,\n val_loader,\n iterations=SSL_ITERATIONS,\n epochs=EPOCHS,\n )\n # eval on test set\n test_metrics = trainer.evaluate(test_loader)\n accs[dm.n_labelled].append(test_metrics[\"acc\"])\n print(f\"-- Iteration {i} of {ITERS} --\")\n print(\n f\"\\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\\n\"\n f\"\\t[test] acc: {test_metrics['acc']}; time: {t}\"\n )\n\n # save stuff\n with open(metrics / f\"rep_{r}_iter_{i}.pkl\", \"wb\") as fp:\n payload = {\n \"history\": history,\n \"test_metrics\": test_metrics,\n \"labelled_classes\": dm.unlabelled.labelled_classes,\n \"labelled_indices\": dm.unlabelled.labelled_indices,\n }\n pickle.dump(payload, fp)\n torch.save(model.state_dict(), saved_models / f\"rep_{r}_iter_{i}.pth\")\n\n # finally, acquire points\n dm.acquire(b)\n\n with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n main(threshold=0.95, b=10)\n"
] | [
[
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
],
[
"torch.utils.data.Subset",
"torch.utils.data.ConcatDataset",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
],
[
"numpy.isfinite",
"numpy.array",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
],
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
],
[
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nonzero",
"torch.utils.data.Subset",
"numpy.array"
],
[
"torch.abs",
"torch.nn.Dropout",
"torch.nn.Parameter",
"torch.nn.Dropout2d",
"torch.ones",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.randn",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.Linear",
"numpy.random.normal",
"torch.no_grad",
"torch.allclose",
"torch.ones_like",
"numpy.random.randint"
],
[
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nonzero",
"torch.utils.data.Subset",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
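Illustrative aside (not a field of the row above): the code column of that row tests alr's MCDropout wrapper — repeated stochastic passes, log-softmax output transforms, and weight resetting. The sketch below shows only the generic Monte-Carlo-dropout idea in plain PyTorch; TinyNet, stochastic_forward and the pass count are hypothetical names for this sketch and are not the alr API.

    import torch
    from torch import nn
    from torch.nn import functional as F

    class TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(10, 10)
            self.drop = nn.Dropout()

        def forward(self, x):
            return F.log_softmax(self.drop(self.fc(x)), dim=-1)

    def stochastic_forward(model, x, n_forward=10):
        # Keep dropout sampling active so every pass draws a fresh mask.
        model.train()
        with torch.no_grad():
            return torch.stack([model(x) for _ in range(n_forward)])

    torch.manual_seed(42)
    preds = stochastic_forward(TinyNet(), torch.randn(5, 10))
    print(preds.shape)                                             # torch.Size([10, 5, 10])
    print(torch.allclose(preds.exp().sum(-1), torch.ones(10, 5)))  # True: each pass sums to 1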
texifter/trust-defender | [
"08747df28adc3d2431a73087e06cb0647e8397d2"
] | [
"test_nnet.py"
] | [
"import argparse\nimport numpy\nimport pandas as pd\nimport os\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_json\nfrom ngram_classifier import NGramClassifier\nfrom sklearn.metrics import precision_recall_fscore_support\n\nCLASS_WEIGHTS = [\n (\"num_days\", 0.997821848), \n (\"statuses_per_day\", 1.065570851),\n (\"followers_per_day\", 1.021055002),\n (\"following_per_day\", 1.122703153),\n (\"desc_len_terms\", 1.171072307),\n (\"num_list_items\", 1.017727903),\n (\"num_hashtags\", 0.889418197),\n (\"url_count\", 1.018365516)\n]\n\ndef get_input_vector(row, classifier):\n '''\n (classifier): p_good\n (classifier): p_bot\n num_days\n statuses_per_day\n followers_per_day\n following_per_day\n desc_len_terms\n num_list_items\n num_hashtags\n url_count\n '''\n class_probs = classifier.classify_text(str(row[\"user_profile_description\"]))\n ret = [ class_probs[\"good\"], class_probs[\"bot\"]]\n for label, weight in CLASS_WEIGHTS:\n ret.append(float(row[label]) * weight)\n return ret\n\n\ndef get_training_output(row):\n class_label = str(row[\"class_value\"])\n return 0.0 if class_label == \"good\" else 1.0\n\n\ndef recall_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\ndef precision_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef f1_m(y_true, y_pred):\n precision = precision_m(y_true, y_pred)\n recall = recall_m(y_true, y_pred)\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", help=\"test input csv file\")\n parser.add_argument(\"-m\", \"--model\", help=\"ngram model file\")\n parser.add_argument(\"-n\", \"--nnetmodel\", help=\"NNet model file\")\n args = parser.parse_args()\n\n if not args.input:\n raise \"missing input file\"\n if not args.model:\n raise \"missing ngram model file\"\n if not args.nnetmodel:\n raise \"missing nnet model file\"\n\n classifier = NGramClassifier(model_path=args.model)\n\n with open(args.nnetmodel, 'r') as json_file:\n loaded_model_json = json_file.read()\n nnet = model_from_json(loaded_model_json)\n nnet.load_weights(f'{args.nnetmodel}.h5')\n nnet.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc',f1_m,precision_m, recall_m])\n \n df_test = pd.read_csv(args.input, keep_default_na=False)\n targets_x = []\n targets_y = []\n predictions = []\n for index, row in df_test.iterrows():\n input_vector = get_input_vector(row, classifier)\n targets_x.append(input_vector)\n targets_y.append(get_training_output(row))\n loss, accuracy, f1_score, precision, recall = nnet.evaluate(numpy.array(targets_x), numpy.array(targets_y), verbose=0)\n\n print(f'loss: {loss}, acc: {accuracy}, prec: {precision}, recall: {recall}, f1: {f1_score}')\n"
] | [
[
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
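Illustrative aside (not a field of the row above): test_nnet.py in that row computes precision, recall and F1 from Keras backend tensors. The same three formulas can be checked with plain numpy, the only array API listed for this row; eps stands in for K.epsilon() and the toy labels are made up.

    import numpy as np

    def precision_recall_f1(y_true, y_pred, eps=1e-7):
        # y_true, y_pred: arrays of 0/1 labels.
        tp = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))
        precision = tp / (np.sum(np.round(np.clip(y_pred, 0, 1))) + eps)
        recall = tp / (np.sum(np.round(np.clip(y_true, 0, 1))) + eps)
        f1 = 2 * precision * recall / (precision + recall + eps)
        return precision, recall, f1

    y_true = np.array([1, 0, 1, 1, 0, 1])
    y_pred = np.array([1, 0, 0, 1, 1, 1])
    print(precision_recall_f1(y_true, y_pred))  # ~(0.75, 0.75, 0.75)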
liuzuxin/metadrive | [
"850c207536531bc85179084acd7c30ab14a66111",
"850c207536531bc85179084acd7c30ab14a66111",
"850c207536531bc85179084acd7c30ab14a66111",
"850c207536531bc85179084acd7c30ab14a66111"
] | [
"metadrive/examples/profile_metadrive.py",
"metadrive/policy/idm_policy.py",
"metadrive/tests/test_functionality/test_bicycle_model.py",
"metadrive/component/vehicle_module/lidar.py"
] | [
"import time\n\nimport numpy as np\n\nfrom metadrive import MetaDriveEnv\nfrom metadrive.utils import setup_logger\n\nif __name__ == '__main__':\n print(\"Start to profile the efficiency of MetaDrive with 1000 maps and ~8 vehicles!\")\n setup_logger(debug=False)\n env = MetaDriveEnv(dict(\n environment_num=1000,\n start_seed=1010,\n ))\n obs = env.reset()\n start = time.time()\n action = [0.0, 1.]\n total_steps = 10000\n vehicle_num = [len(env.engine.traffic_manager.vehicles)]\n for s in range(total_steps):\n o, r, d, i = env.step(action)\n if d:\n env.reset()\n vehicle_num.append(len(env.engine.traffic_manager.vehicles))\n if (s + 1) % 100 == 0:\n print(\n \"Finish {}/10000 simulation steps. Time elapse: {:.4f}. Average FPS: {:.4f}, Average number of \"\n \"vehicles: {:.4f}\".format(\n s + 1,\n time.time() - start, (s + 1) / (time.time() - start), np.mean(vehicle_num)\n )\n )\n print(\n \"Total Time Elapse: {:.3f}, average FPS: {:.3f}, average number of vehicles: {:.3f}.\".format(\n time.time() - start, total_steps / (time.time() - start), np.mean(vehicle_num)\n )\n )\n",
"import logging\n\nimport numpy as np\nfrom metadrive.component.vehicle_module.PID_controller import PIDController\nfrom metadrive.policy.base_policy import BasePolicy\nfrom metadrive.policy.manual_control_policy import ManualControlPolicy\nfrom metadrive.utils.math_utils import not_zero, wrap_to_pi\n\n\nclass FrontBackObjects:\n def __init__(self, front_ret, back_ret, front_dist, back_dist):\n self.front_objs = front_ret\n self.back_objs = back_ret\n self.front_dist = front_dist\n self.back_dist = back_dist\n\n def left_lane_exist(self):\n return True if self.front_dist[0] is not None else False\n\n def right_lane_exist(self):\n return True if self.front_dist[-1] is not None else False\n\n def has_front_object(self):\n return True if self.front_objs[1] is not None else False\n\n def has_back_object(self):\n return True if self.back_objs[1] is not None else False\n\n def has_left_front_object(self):\n return True if self.front_objs[0] is not None else False\n\n def has_left_back_object(self):\n return True if self.back_objs[0] is not None else False\n\n def has_right_front_object(self):\n return True if self.front_objs[-1] is not None else False\n\n def has_right_back_object(self):\n return True if self.back_objs[-1] is not None else False\n\n def front_object(self):\n return self.front_objs[1]\n\n def left_front_object(self):\n return self.front_objs[0]\n\n def right_front_object(self):\n return self.front_objs[-1]\n\n def back_object(self):\n return self.back_objs[1]\n\n def left_back_object(self):\n return self.back_objs[0]\n\n def right_back_object(self):\n return self.back_objs[-1]\n\n def left_front_min_distance(self):\n assert self.left_lane_exist(), \"left lane doesn't exist\"\n return self.front_dist[0]\n\n def right_front_min_distance(self):\n assert self.right_lane_exist(), \"right lane doesn't exist\"\n return self.front_dist[-1]\n\n def front_min_distance(self):\n return self.front_dist[1]\n\n def left_back_min_distance(self):\n assert self.left_lane_exist(), \"left lane doesn't exist\"\n return self.back_dist[0]\n\n def right_back_min_distance(self):\n assert self.right_lane_exist(), \"right lane doesn't exist\"\n return self.back_dist[-1]\n\n def back_min_distance(self):\n return self.back_dist[1]\n\n @classmethod\n def get_find_front_back_objs(cls, objs, lane, position, max_distance, ref_lanes=None):\n \"\"\"\n Find objects in front of/behind the lane and its left lanes/right lanes, return objs, dist.\n If ref_lanes is None, return filter results of this lane\n \"\"\"\n if ref_lanes is not None:\n assert lane in ref_lanes\n idx = lane.index[-1] if ref_lanes is not None else None\n left_lane = ref_lanes[idx - 1] if ref_lanes is not None and idx > 0 else None\n right_lane = ref_lanes[idx + 1] if ref_lanes is not None and idx + 1 < len(ref_lanes) else None\n lanes = [left_lane, lane, right_lane]\n\n min_front_long = [max_distance if lane is not None else None for lane in lanes]\n min_back_long = [max_distance if lane is not None else None for lane in lanes]\n\n front_ret = [None, None, None]\n back_ret = [None, None, None]\n\n find_front_in_current_lane = [False, False, False]\n find_back_in_current_lane = [False, False, False]\n\n current_long = [lane.local_coordinates(position)[0] if lane is not None else None for lane in lanes]\n left_long = [lane.length - current_long[idx] if lane is not None else None for idx, lane in enumerate(lanes)]\n\n for i, lane in enumerate(lanes):\n if lane is None:\n continue\n for obj in objs:\n if obj.lane is lane:\n long = 
lane.local_coordinates(obj.position)[0] - current_long[i]\n if min_front_long[i] > long > 0:\n min_front_long[i] = long\n front_ret[i] = obj\n find_front_in_current_lane[i] = True\n if long < 0 and abs(long) < min_back_long[i]:\n min_back_long[i] = abs(long)\n back_ret[i] = obj\n find_back_in_current_lane[i] = True\n\n elif not find_front_in_current_lane[i] and lane.is_previous_lane_of(obj.lane):\n long = obj.lane.local_coordinates(obj.position)[0] + left_long[i]\n if min_front_long[i] > long > 0:\n min_front_long[i] = long\n front_ret[i] = obj\n elif not find_back_in_current_lane[i] and obj.lane.is_previous_lane_of(lane):\n long = obj.lane.length - obj.lane.local_coordinates(obj.position)[0] + current_long[i]\n if min_back_long[i] > long:\n min_back_long[i] = long\n back_ret[i] = obj\n\n return cls(front_ret, back_ret, min_front_long, min_back_long)\n\n\nclass IDMPolicy(BasePolicy):\n \"\"\"\n We implement this policy based on the HighwayEnv code base.\n \"\"\"\n TAU_ACC = 0.6 # [s]\n TAU_HEADING = 0.3 # [s]\n TAU_LATERAL = 0.8 # [s]\n\n TAU_PURSUIT = 0.5 * TAU_HEADING # [s]\n KP_A = 1 / TAU_ACC\n KP_HEADING = 1 / TAU_HEADING\n KP_LATERAL = 1 / TAU_LATERAL # [1/s]\n MAX_STEERING_ANGLE = np.pi / 3 # [rad]\n DELTA_SPEED = 5 # [m/s]\n\n DISTANCE_WANTED = 10.0\n \"\"\"Desired jam distance to the front vehicle.\"\"\"\n\n TIME_WANTED = 1.5 # [s]\n \"\"\"Desired time gap to the front v\"\"\"\n\n DELTA = 10.0 # []\n \"\"\"Exponent of the velocity term.\"\"\"\n\n DELTA_RANGE = [3.5, 4.5]\n \"\"\"Range of delta when chosen randomly.\"\"\"\n\n # Lateral policy parameters\n LANE_CHANGE_FREQ = 50 # [step]\n LANE_CHANGE_SPEED_INCREASE = 10\n SAFE_LANE_CHANGE_DISTANCE = 15\n MAX_LONG_DIST = 30\n MAX_SPEED = 100\n\n # Normal speed\n NORMAL_SPEED = 30\n\n # Creep Speed\n CREEP_SPEED = 5\n\n # acc factor\n ACC_FACTOR = 1.0\n DEACC_FACTOR = -5\n\n def __init__(self, control_object, random_seed):\n super(IDMPolicy, self).__init__(control_object=control_object, random_seed=random_seed)\n self.target_speed = self.NORMAL_SPEED\n self.routing_target_lane = None\n self.available_routing_index_range = None\n self.overtake_timer = self.np_random.randint(0, self.LANE_CHANGE_FREQ)\n self.enable_lane_change = self.engine.global_config.get(\"enable_idm_lane_change\", True)\n self.heading_pid = PIDController(1.7, 0.01, 3.5)\n self.lateral_pid = PIDController(0.3, .002, 0.05)\n\n def act(self, *args, **kwargs):\n # concat lane\n sucess = self.move_to_next_road()\n all_objects = self.control_object.lidar.get_surrounding_objects(self.control_object)\n try:\n if sucess and self.enable_lane_change:\n # perform lane change due to routing\n acc_front_obj, acc_front_dist, steering_target_lane = self.lane_change_policy(all_objects)\n else:\n # can not find routing target lane\n surrounding_objects = FrontBackObjects.get_find_front_back_objs(\n all_objects,\n self.routing_target_lane,\n self.control_object.position,\n max_distance=self.MAX_LONG_DIST\n )\n acc_front_obj = surrounding_objects.front_object()\n acc_front_dist = surrounding_objects.front_min_distance()\n steering_target_lane = self.routing_target_lane\n except:\n # error fallback\n acc_front_obj = None\n acc_front_dist = 5\n steering_target_lane = self.routing_target_lane\n logging.warning(\"IDM bug! fall back\")\n print(\"IDM bug! 
fall back\")\n\n # control by PID and IDM\n steering = self.steering_control(steering_target_lane)\n acc = self.acceleration(acc_front_obj, acc_front_dist)\n return [steering, acc]\n\n def move_to_next_road(self):\n # routing target lane is in current ref lanes\n current_lanes = self.control_object.navigation.current_ref_lanes\n if self.routing_target_lane is None:\n self.routing_target_lane = self.control_object.lane\n return True if self.routing_target_lane in current_lanes else False\n if self.routing_target_lane not in current_lanes:\n for lane in current_lanes:\n if self.routing_target_lane.is_previous_lane_of(lane):\n # two lanes connect\n self.routing_target_lane = lane\n return True\n # lane change for lane num change\n return False\n elif self.control_object.lane in current_lanes and self.routing_target_lane is not self.control_object.lane:\n # lateral routing lane change\n self.routing_target_lane = self.control_object.lane\n self.overtake_timer = self.np_random.randint(0, int(self.LANE_CHANGE_FREQ / 2))\n return True\n else:\n return True\n\n def steering_control(self, target_lane) -> float:\n # heading control following a lateral distance control\n ego_vehicle = self.control_object\n long, lat = target_lane.local_coordinates(ego_vehicle.position)\n lane_heading = target_lane.heading_theta_at(long + 1)\n v_heading = ego_vehicle.heading_theta\n steering = self.heading_pid.get_result(wrap_to_pi(lane_heading - v_heading))\n steering += self.lateral_pid.get_result(-lat)\n return float(steering)\n\n def acceleration(self, front_obj, dist_to_front) -> float:\n ego_vehicle = self.control_object\n ego_target_speed = not_zero(self.target_speed, 0)\n acceleration = self.ACC_FACTOR * (1 - np.power(max(ego_vehicle.speed, 0) / ego_target_speed, self.DELTA))\n if front_obj:\n d = dist_to_front\n speed_diff = self.desired_gap(ego_vehicle, front_obj) / not_zero(d)\n acceleration -= self.ACC_FACTOR * (speed_diff**2)\n return acceleration\n\n def desired_gap(self, ego_vehicle, front_obj, projected: bool = True) -> float:\n d0 = self.DISTANCE_WANTED\n tau = self.TIME_WANTED\n ab = -self.ACC_FACTOR * self.DEACC_FACTOR\n dv = np.dot(ego_vehicle.velocity - front_obj.velocity, ego_vehicle.heading) if projected \\\n else ego_vehicle.speed - front_obj.speed\n d_star = d0 + ego_vehicle.speed * tau + ego_vehicle.speed * dv / (2 * np.sqrt(ab))\n return d_star\n\n def reset(self):\n self.heading_pid.reset()\n self.lateral_pid.reset()\n self.target_speed = self.NORMAL_SPEED\n self.routing_target_lane = None\n self.available_routing_index_range = None\n self.overtake_timer = self.np_random.randint(0, self.LANE_CHANGE_FREQ)\n\n def lane_change_policy(self, all_objects):\n current_lanes = self.control_object.navigation.current_ref_lanes\n surrounding_objects = FrontBackObjects.get_find_front_back_objs(\n all_objects, self.routing_target_lane, self.control_object.position, self.MAX_LONG_DIST, current_lanes\n )\n self.available_routing_index_range = [i for i in range(len(current_lanes))]\n next_lanes = self.control_object.navigation.next_ref_lanes\n lane_num_diff = len(current_lanes) - len(next_lanes) if next_lanes is not None else 0\n\n # We have to perform lane changing because the number of lanes in next road is less than current road\n if lane_num_diff > 0:\n # lane num decreasing happened in left road or right road\n if current_lanes[0].is_previous_lane_of(next_lanes[0]):\n index_range = [i for i in range(len(next_lanes))]\n else:\n index_range = [i for i in range(lane_num_diff, len(current_lanes))]\n 
self.available_routing_index_range = index_range\n if self.routing_target_lane.index[-1] not in index_range:\n # not on suitable lane do lane change !!!\n if self.routing_target_lane.index[-1] > index_range[-1]:\n # change to left\n if surrounding_objects.left_back_min_distance(\n ) < self.SAFE_LANE_CHANGE_DISTANCE or surrounding_objects.left_front_min_distance() < 5:\n # creep to wait\n self.target_speed = self.CREEP_SPEED\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(\n ), self.routing_target_lane\n else:\n # it is time to change lane!\n self.target_speed = self.NORMAL_SPEED\n return surrounding_objects.left_front_object(), surrounding_objects.left_front_min_distance(), \\\n current_lanes[self.routing_target_lane.index[-1] - 1]\n else:\n # change to right\n if surrounding_objects.right_back_min_distance(\n ) < self.SAFE_LANE_CHANGE_DISTANCE or surrounding_objects.right_front_min_distance() < 5:\n # unsafe, creep and wait\n self.target_speed = self.CREEP_SPEED\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(\n ), self.routing_target_lane,\n else:\n # change lane\n self.target_speed = self.NORMAL_SPEED\n return surrounding_objects.right_front_object(), surrounding_objects.right_front_min_distance(), \\\n current_lanes[self.routing_target_lane.index[-1] + 1]\n\n # lane follow or active change lane/overtake for high driving speed\n if abs(self.control_object.speed - self.NORMAL_SPEED) > 3 and surrounding_objects.has_front_object(\n ) and abs(surrounding_objects.front_object().speed -\n self.NORMAL_SPEED) > 3 and self.overtake_timer > self.LANE_CHANGE_FREQ:\n # may lane change\n right_front_speed = surrounding_objects.right_front_object().speed if surrounding_objects.has_right_front_object() else self.MAX_SPEED \\\n if surrounding_objects.right_lane_exist() and surrounding_objects.right_front_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE and surrounding_objects.right_back_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE else None\n front_speed = surrounding_objects.front_object().speed if surrounding_objects.has_front_object(\n ) else self.MAX_SPEED\n left_front_speed = surrounding_objects.left_front_object().speed if surrounding_objects.has_left_front_object() else self.MAX_SPEED \\\n if surrounding_objects.left_lane_exist() and surrounding_objects.left_front_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE and surrounding_objects.left_back_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE else None\n if left_front_speed is not None and left_front_speed - front_speed > self.LANE_CHANGE_SPEED_INCREASE:\n # left overtake has a high priority\n expect_lane_idx = current_lanes.index(self.routing_target_lane) - 1\n if expect_lane_idx in self.available_routing_index_range:\n return surrounding_objects.left_front_object(), surrounding_objects.left_front_min_distance(), \\\n current_lanes[expect_lane_idx]\n if right_front_speed is not None and right_front_speed - front_speed > self.LANE_CHANGE_SPEED_INCREASE:\n expect_lane_idx = current_lanes.index(self.routing_target_lane) + 1\n if expect_lane_idx in self.available_routing_index_range:\n return surrounding_objects.right_front_object(), surrounding_objects.right_front_min_distance(), \\\n current_lanes[expect_lane_idx]\n\n # fall back to lane follow\n self.target_speed = self.NORMAL_SPEED\n self.overtake_timer += 1\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(), self.routing_target_lane\n\n\nclass ManualControllableIDMPolicy(IDMPolicy):\n 
def __init__(self, *args, **kwargs):\n super(ManualControllableIDMPolicy, self).__init__(*args, **kwargs)\n self.manual_control_policy = ManualControlPolicy(*args, **kwargs)\n\n def act(self, agent_id):\n if self.control_object is self.engine.current_track_vehicle and self.engine.global_config[\"manual_control\"] \\\n and not self.engine.current_track_vehicle.expert_takeover:\n return self.manual_control_policy.act(agent_id)\n else:\n return super(ManualControllableIDMPolicy, self).act(agent_id)\n\n\nclass WaymoIDMPolicy(IDMPolicy):\n NORMAL_SPEED = 20\n\n def __init__(self, control_object, random_seed):\n super(IDMPolicy, self).__init__(control_object=control_object, random_seed=random_seed)\n self.target_speed = self.NORMAL_SPEED\n self.routing_target_lane = None\n self.available_routing_index_range = None\n self.overtake_timer = self.np_random.randint(0, self.LANE_CHANGE_FREQ)\n self.enable_lane_change = False\n\n self.heading_pid = PIDController(1.7, 0.01, 3.5)\n self.lateral_pid = PIDController(0.3, .0, 0.0)\n\n def steering_control(self, target_lane) -> float:\n # heading control following a lateral distance control\n ego_vehicle = self.control_object\n long, lat = target_lane.local_coordinates(ego_vehicle.position)\n lane_heading = target_lane.heading_theta_at(long + 1)\n v_heading = ego_vehicle.heading_theta\n steering = self.heading_pid.get_result(wrap_to_pi(lane_heading - v_heading))\n # steering += self.lateral_pid.get_result(-lat)\n return float(steering)\n\n def move_to_next_road(self):\n # routing target lane is in current ref lanes\n current_lanes = self.control_object.navigation.current_ref_lanes\n if self.routing_target_lane is None:\n self.routing_target_lane = self.control_object.lane\n return True if self.routing_target_lane in current_lanes else False\n if self.routing_target_lane not in current_lanes:\n for lane in current_lanes:\n self.routing_target_lane = lane\n return True\n # lane change for lane num change\n self.routing_target_lane = self.control_object.navigation.map.road_network.get_lane(\n self.control_object.navigation.next_checkpoint_lane_index\n )\n return True\n return False\n",
"import numpy as np\n\nfrom metadrive.component.vehicle_model.bicycle_model import BicycleModel\nfrom metadrive.envs.metadrive_env import MetaDriveEnv\nfrom metadrive.utils import setup_logger\nfrom metadrive.utils.math_utils import norm\n\n\ndef predict(current_state, actions, model):\n model.reset(*current_state)\n for action in actions:\n model.predict(0.1, action)\n return model.state\n\n\ndef _test_bicycle_model():\n horizon = 10\n setup_logger(True)\n env = MetaDriveEnv(\n {\n \"environment_num\": 1,\n \"traffic_density\": .0,\n \"use_render\": True,\n # \"manual_control\": True,\n \"map\": \"CCCC\",\n \"vehicle_config\": {\n \"enable_reverse\": False,\n }\n }\n )\n bicycle_model = BicycleModel()\n o = env.reset()\n vehicle = env.current_track_vehicle\n v_dir = vehicle.velocity_direction\n bicycle_model.reset(*vehicle.position, vehicle.speed, vehicle.heading_theta, np.arctan2(v_dir[1], v_dir[0]))\n actions = []\n for steering in [1.0, 0.8, 0.6, 0.4, 0.2, 0]:\n for dir in [-1, 1]:\n s = dir * steering\n for throttle in [1.0, 0.8, 0.6, 0.4, 0.2, 0, -0.5]:\n actions += [[s, throttle]] * 20\n predict_states = []\n for s in range(len(actions)):\n vehicle = env.current_track_vehicle\n v_dir = vehicle.velocity_direction\n predict_states.append(\n predict(\n current_state=(\n *env.current_track_vehicle.position, env.current_track_vehicle.speed,\n env.current_track_vehicle.heading_theta, np.arctan2(v_dir[1], v_dir[0])\n ),\n actions=[actions[i] for i in range(s, s + horizon)],\n model=bicycle_model\n )\n )\n o, r, d, info = env.step(actions[s])\n index = s - horizon\n if index >= 0:\n state = predict_states[index]\n print(norm(state[\"x\"] - vehicle.position[0], state[\"y\"] - vehicle.position[1]))\n\n\nif __name__ == \"__main__\":\n _test_bicycle_model()\n",
"import math\nfrom typing import Set\n\nimport numpy as np\nfrom panda3d.bullet import BulletGhostNode, BulletCylinderShape\nfrom panda3d.core import NodePath\n\nfrom metadrive.component.lane.abs_lane import AbstractLane\nfrom metadrive.component.vehicle_module.distance_detector import DistanceDetector\nfrom metadrive.constants import CamMask, CollisionGroup\nfrom metadrive.engine.engine_utils import get_engine\nfrom metadrive.utils.coordinates_shift import panda_position\nfrom metadrive.utils.utils import get_object_from_node\n\n\nclass Lidar(DistanceDetector):\n ANGLE_FACTOR = True\n Lidar_point_cloud_obs_dim = 240\n DEFAULT_HEIGHT = 1.2\n\n BROAD_PHASE_EXTRA_DIST = 0\n\n def __init__(self, num_lasers: int = 240, distance: float = 50, enable_show=False):\n super(Lidar, self).__init__(num_lasers, distance, enable_show)\n self.origin.hide(CamMask.RgbCam | CamMask.Shadow | CamMask.Shadow | CamMask.DepthCam)\n self.mask = CollisionGroup.Vehicle | CollisionGroup.InvisibleWall | CollisionGroup.TrafficObject\n\n # lidar can calculate the detector mask by itself\n self.angle_delta = 360 / num_lasers if num_lasers > 0 else None\n self.broad_detector = NodePath(BulletGhostNode(\"detector_mask\"))\n self.broad_detector.node().addShape(BulletCylinderShape(self.BROAD_PHASE_EXTRA_DIST + distance, 5))\n self.broad_detector.node().setIntoCollideMask(CollisionGroup.LidarBroadDetector)\n self.broad_detector.node().setStatic(True)\n engine = get_engine()\n engine.physics_world.static_world.attach(self.broad_detector.node())\n self.enable_mask = True if not engine.global_config[\"_disable_detector_mask\"] else False\n\n def perceive(self, base_vehicle, detector_mask=True):\n res = self._get_lidar_mask(base_vehicle)\n lidar_mask = res[0] if detector_mask and self.enable_mask else None\n detected_objects = res[1]\n return super(Lidar, self).perceive(base_vehicle, base_vehicle.engine.physics_world.dynamic_world,\n lidar_mask)[0], detected_objects\n\n @staticmethod\n def get_surrounding_vehicles(detected_objects) -> Set:\n from metadrive.component.vehicle.base_vehicle import BaseVehicle\n vehicles = set()\n objs = detected_objects\n for ret in objs:\n if isinstance(ret, BaseVehicle):\n vehicles.add(ret)\n return vehicles\n\n def get_surrounding_vehicles_info(self, ego_vehicle, detected_objects, num_others: int = 4):\n from metadrive.utils.math_utils import norm, clip\n surrounding_vehicles = list(self.get_surrounding_vehicles(detected_objects))\n surrounding_vehicles.sort(\n key=lambda v: norm(ego_vehicle.position[0] - v.position[0], ego_vehicle.position[1] - v.position[1])\n )\n surrounding_vehicles += [None] * num_others\n res = []\n for vehicle in surrounding_vehicles[:num_others]:\n if vehicle is not None:\n # assert isinstance(vehicle, IDMVehicle or Base), \"Now MetaDrive Doesn't support other vehicle type\"\n relative_position = ego_vehicle.projection(vehicle.position - ego_vehicle.position)\n # It is possible that the centroid of other vehicle is too far away from ego but lidar shed on it.\n # So the distance may greater than perceive distance.\n res.append(clip((relative_position[0] / self.perceive_distance + 1) / 2, 0.0, 1.0))\n res.append(clip((relative_position[1] / self.perceive_distance + 1) / 2, 0.0, 1.0))\n\n relative_velocity = ego_vehicle.projection(vehicle.velocity - ego_vehicle.velocity)\n res.append(clip((relative_velocity[0] / ego_vehicle.max_speed + 1) / 2, 0.0, 1.0))\n res.append(clip((relative_velocity[1] / ego_vehicle.max_speed + 1) / 2, 0.0, 1.0))\n else:\n res += [0.0] * 4\n return 
res\n\n def _get_lidar_mask(self, vehicle):\n pos1 = vehicle.position\n head1 = vehicle.heading_theta\n\n mask = np.zeros((self.num_lasers, ), dtype=np.bool)\n mask.fill(False)\n objs = self.get_surrounding_objects(vehicle)\n for obj in objs:\n pos2 = obj.position\n length = obj.LENGTH if hasattr(obj, \"LENGTH\") else vehicle.LENGTH\n width = obj.WIDTH if hasattr(obj, \"WIDTH\") else vehicle.WIDTH\n half_max_span_square = ((length + width) / 2)**2\n diff = (pos2[0] - pos1[0], pos2[1] - pos1[1])\n dist_square = diff[0]**2 + diff[1]**2\n if dist_square < half_max_span_square:\n mask.fill(True)\n continue\n\n span = math.asin(math.sqrt(half_max_span_square / dist_square))\n # relative heading of v2's center when compared to v1's center\n relative_head = math.atan2(diff[1], diff[0])\n head_in_1 = relative_head - head1\n head_in_1_max = head_in_1 + span\n head_in_1_min = head_in_1 - span\n head_1_max = np.rad2deg(head_in_1_max)\n head_1_min = np.rad2deg(head_in_1_min)\n mask = self._mark_this_range(head_1_min, head_1_max, mask)\n\n return mask, objs\n\n def get_surrounding_objects(self, vehicle):\n self.broad_detector.setPos(panda_position(vehicle.position))\n physics_world = vehicle.engine.physics_world.dynamic_world\n contact_results = physics_world.contactTest(self.broad_detector.node(), True).getContacts()\n objs = set()\n for contact in contact_results:\n node0 = contact.getNode0()\n node1 = contact.getNode1()\n nodes = [node0, node1]\n nodes.remove(self.broad_detector.node())\n obj = get_object_from_node(nodes[0])\n if not isinstance(obj, AbstractLane) and obj is not None:\n objs.add(obj)\n if vehicle in objs:\n objs.remove(vehicle)\n return objs\n\n def _mark_this_range(self, small_angle, large_angle, mask):\n # We use clockwise to determine small and large angle.\n # For example, if you wish to fill 355 deg to 5 deg, then small_angle is 355, large_angle is 5.\n small_angle = small_angle % 360\n large_angle = large_angle % 360\n\n assert 0 <= small_angle <= 360\n assert 0 <= large_angle <= 360\n\n small_index = math.floor(small_angle / self.angle_delta)\n large_index = math.ceil(large_angle / self.angle_delta)\n if large_angle < small_angle: # We are in the case like small=355, large=5\n mask[small_index:] = True\n mask[:large_index + 1] = True\n else:\n mask[small_index:large_index + 1] = True\n return mask\n\n def destroy(self):\n get_engine().physics_world.static_world.remove(self.broad_detector.node())\n self.broad_detector.removeNode()\n super(Lidar, self).destroy()\n"
] | [
[
"numpy.mean"
],
[
"numpy.dot",
"numpy.sqrt"
],
[
"numpy.arctan2"
],
[
"numpy.zeros",
"numpy.rad2deg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
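Illustrative aside (not a field of the row above): idm_policy.py in that row drives vehicles with the Intelligent Driver Model — an acceleration term shaped by (speed / target_speed)**DELTA minus a desired-gap term. The numpy sketch below restates just those two formulas using the constants from that file; the speeds and distances are made-up inputs and the projected-velocity variant is omitted.

    import numpy as np

    ACC_FACTOR, DEACC_FACTOR = 1.0, -5
    DISTANCE_WANTED, TIME_WANTED, DELTA = 10.0, 1.5, 10.0

    def desired_gap(ego_speed, front_speed):
        ab = -ACC_FACTOR * DEACC_FACTOR
        dv = ego_speed - front_speed  # closing speed
        return DISTANCE_WANTED + ego_speed * TIME_WANTED + ego_speed * dv / (2 * np.sqrt(ab))

    def acceleration(ego_speed, target_speed, front_speed=None, dist_to_front=None):
        acc = ACC_FACTOR * (1 - np.power(max(ego_speed, 0) / target_speed, DELTA))
        if front_speed is not None:
            acc -= ACC_FACTOR * (desired_gap(ego_speed, front_speed) / dist_to_front) ** 2
        return acc

    print(acceleration(25.0, 30.0))                                        # free road: ~0.84
    print(acceleration(25.0, 30.0, front_speed=20.0, dist_to_front=30.0))  # car ahead: braking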
archiviral/machine-learning-assignments | [
"198d5a713344ac33fe479eed01c534a3ab12d78c"
] | [
"assignment_4/dtd.py"
] | [
"import argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom copy import deepcopy\n\nimport numpy as np\n\nCONTINOUS_COLUMNS = [0, 2, 3, 9, 10, 11]\nTTL = 30\n\n\nclass Node:\n def __init__(self, prediction, continuous=None, unqs=None, column=None, median=None):\n self.children = []\n self.column = column\n self.continuous = continuous\n self.unqs = unqs\n self.median = median\n self.prediction = prediction\n\n\ndef entropy(xd, y, continuous, label=None):\n indicesl = []\n median = None\n unqs = None\n\n if not continuous:\n unqs, counts = np.unique(xd, return_counts=True)\n entropy = 0\n\n for unq, count in zip(unqs, counts):\n indices = np.argwhere(xd == unq)\n indicesl.append(indices)\n ys = y[indices]\n cnts = np.unique(ys, return_counts=True)[1]\n probs = cnts / ys.shape[0]\n ent = np.sum(-1 * probs * np.log2(probs))\n entropy = entropy + ((count / xd.shape[0]) * ent) \n else:\n xd = xd.astype(int)\n median = np.median(xd)\n \n entropy = 0\n conds = [xd < median, xd >= median]\n for cond in conds:\n indices = np.argwhere(cond)\n indicesl.append(indices)\n ys = y[indices]\n cnts = np.unique(ys, return_counts=True)[1]\n probs = cnts / ys.shape[0]\n ent = np.sum(-1 * probs * np.log2(probs))\n entropy = entropy + ((ys.shape[0] / xd.shape[0]) * ent) \n \n # if label: print(label, entropy)\n return entropy, indicesl, median, unqs\n\n\ndef create_tree(x, y, labels):\n # print(x.shape[0], 'rows.')\n ents = []\n indicesll = []\n medians = []\n unqsl = []\n\n for i in range(x.shape[1]):\n ent, indicesl, median, unqs = entropy(x[:, i], y, continuous=i in CONTINOUS_COLUMNS, label=labels[i])\n ents.append(ent)\n indicesll.append(indicesl)\n medians.append(median)\n unqsl.append(unqs)\n\n minent = min(ents)\n vals, cnts = np.unique(y, return_counts=True)\n prediction = vals[np.argmax(cnts)]\n \n if not minent or len(list(filter(lambda x: x.shape[0] > 0, indicesl))) < 2:\n # print('Leaf node.')\n node = Node(prediction=prediction)\n return node\n\n column = ents.index(minent)\n indicesl = indicesll[column]\n median = medians[column]\n unqs = unqsl[column]\n\n # print('[*] Splitting by column', column, ':', labels[column])\n # print('[*] Number of branches :', len(indicesl))\n\n node = Node(prediction=prediction, column=column, continuous=column in CONTINOUS_COLUMNS, median=median, unqs=unqs)\n for indices in indicesl:\n indices = indices.flatten()\n child = create_tree(x[indices, :], y[indices, :], labels)\n node.children.append(child)\n \n if len(node.children) < 2:\n node.children = []\n node.column = None\n node.median = None\n \n return node\n\n\ndef height(tree):\n return 1 + max([height(child) for child in tree.children]) if tree.children else 1\n\n\ndef cnodes(tree):\n return 1 + sum([cnodes(child) for child in tree.children])\n\n\ndef __predict(tree, xr):\n if not tree.children: return tree.prediction\n\n if tree.continuous:\n if int(xr[tree.column]) < tree.median:\n return __predict(tree.children[0], xr)\n else:\n return __predict(tree.children[1], xr)\n else:\n try:\n return __predict(tree.children[list(tree.unqs).index(xr[tree.column])], xr)\n except ValueError:\n return tree.prediction\n\n\ndef predict(tree, x, y=None):\n preds = []\n accuracy = None\n\n for i in range(x.shape[0]):\n preds.append(__predict(tree, x[i, :]))\n preds = np.array(preds)\n\n if isinstance(y, np.ndarray):\n y = y.flatten().astype(np.uint8)\n accuracy = np.sum(preds == y) / y.shape[0]\n return preds, accuracy\n\n\n\ndef prune(tree, nb):\n copied = deepcopy(tree)\n count = 0\n stack = [copied]\n\n 
while True:\n node = stack.pop()\n if count == nb:\n # print('Node nb', nb, ', Removing', len(node.children), 'children.')\n node.children = []\n return copied\n for child in node.children:\n stack.append(child)\n count = count + 1\n\n\ndef optimize(tree, x, y, begin):\n global_best_tree = tree\n global_best_accr = predict(tree, x, y)[1]\n\n while True:\n start = time.time()\n best_tree = global_best_tree\n best_accr = global_best_accr\n print(height(global_best_tree), cnodes(global_best_tree), global_best_accr)\n\n for i in range(cnodes(global_best_tree)):\n if time.time() - begin > TTL:\n return best_tree\n\n pruned = prune(global_best_tree, i)\n # print(f'[*] Pruned node {i}. Height: {height(pruned)}. Nodes: {cnodes(pruned)}.')\n accr = predict(pruned, x, y)[1]\n if accr > best_accr:\n best_accr = accr\n best_tree = pruned\n print('[*] Iteration time:', time.time() - start)\n if best_accr > global_best_accr:\n global_best_accr = best_accr\n global_best_tree = best_tree\n else:\n return global_best_tree\n\n\n\ndef dt(args):\n begin = time.time()\n with open(args.trainfile) as f:\n train = np.loadtxt(f, delimiter=',', dtype=object)\n train = np.delete(train, 3, 1)\n x = train[1:, :-1]\n y = train[1:, -1:]\n y = y.astype(np.uint8)\n labels = train[0, :]\n\n tree = create_tree(x, y, labels)\n print(f'[*] Tree created. Height: {height(tree)}. Nodes: {cnodes(tree)}.')\n\n \n with open(args.validfile) as f:\n valid = np.loadtxt(f, delimiter=',', dtype=object)\n valid = np.delete(valid, 3, 1)\n x = valid[1:, :-1]\n y = valid[1:, -1:]\n\n optimized = optimize(tree, x, y, begin)\n print(f'[*] Optimized tree. Height: {height(optimized)}. Nodes: {cnodes(optimized)}.')\n\n preds, accuracy = predict(optimized, x, y)\n np.savetxt(args.validpred, preds, fmt='%i')\n print('[*] Accuracy on validation data:', accuracy)\n\n\n with open(args.testfile) as f:\n test = np.loadtxt(f, delimiter=',', dtype=object)\n test = np.delete(test, 3, 1)\n x = test[1:, :-1]\n if args.testlabels:\n with open(args.testlabels) as f:\n y = np.loadtxt(f, delimiter=',', dtype=int)\n preds, accuracy = predict(optimized, x, y)\n print('[*] Accuracy on test data:', accuracy)\n else:\n preds, accuracy = predict(optimized, x)\n np.savetxt(args.testpred, preds, fmt='%i')\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('trainfile', type=str)\n parser.add_argument('validfile', type=str)\n parser.add_argument('testfile', type=str)\n parser.add_argument('validpred', type=str)\n parser.add_argument('testpred', type=str)\n parser.add_argument('testlabels', nargs='?', default='', type=str)\n parser.set_defaults(func=dt)\n\n if len(sys.argv) < 2:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args()\n args.func(args)\n\n\n\nif __name__=='__main__':\n main()\n"
] | [
[
"numpy.log2",
"numpy.unique",
"numpy.median",
"numpy.argwhere",
"numpy.delete",
"numpy.argmax",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
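Illustrative aside (not a field of the row above): dtd.py in that row grows a decision tree by picking the column whose split minimises weighted entropy. The helper below re-derives that quantity for one categorical column with the same numpy calls the apis column lists (np.unique, np.argwhere, np.log2, np.sum); the toy column and labels are invented.

    import numpy as np

    def weighted_entropy(column, labels):
        total = 0.0
        for value, count in zip(*np.unique(column, return_counts=True)):
            ys = labels[np.argwhere(column == value).flatten()]
            probs = np.unique(ys, return_counts=True)[1] / ys.shape[0]
            total += (count / column.shape[0]) * np.sum(-probs * np.log2(probs))
        return total

    col = np.array(["a", "a", "b", "b", "b"])
    y = np.array([1, 1, 0, 0, 1])
    print(weighted_entropy(col, y))  # 0.6 * H(1/3, 2/3) ~= 0.551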
pentagram-lang/pentagram | [
"5c4dc2fc516ec2844dc71ddb778ddadec036ce55"
] | [
"bootstrap/pentagram/interpret/block_test.py"
] | [
"from __future__ import annotations\n\nfrom numpy import int32\nfrom pentagram.interpret.block import interpret_block\nfrom pentagram.interpret.test import init_test_frame_stack\nfrom pentagram.machine import MachineExpressionStack\nfrom pentagram.machine import MachineFrameStack\nfrom pentagram.machine import MachineNumber\nfrom pentagram.syntax import SyntaxBlock\nfrom pentagram.syntax import SyntaxExpression\nfrom pentagram.syntax import SyntaxNumber\n\n\ndef test_interpret_block_enter() -> None:\n block = SyntaxBlock(\n [SyntaxExpression([SyntaxNumber(int32(4))])]\n )\n frame_stack = init_test_frame_stack(\n block, MachineExpressionStack([])\n )\n interpret_block(frame_stack)\n assert frame_stack == init_test_frame_stack(\n block,\n MachineExpressionStack([MachineNumber(int32(4))]),\n term_index=1,\n )\n\n\ndef test_interpret_block_exit() -> None:\n block = SyntaxBlock(\n [SyntaxExpression([SyntaxNumber(int32(4))])]\n )\n frame_stack = init_test_frame_stack(\n block, MachineExpressionStack([]), statement_index=1\n )\n interpret_block(frame_stack)\n assert frame_stack == MachineFrameStack([])\n"
] | [
[
"numpy.int32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cnll0075/Merlion | [
"37fb75ccb204d128fde8ad4230f7893da724cf7c"
] | [
"ts_datasets/ts_datasets/anomaly/smd.py"
] | [
"#\n# Copyright (c) 2021 salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n#\nimport os\nimport sys\nimport logging\nimport requests\nimport tarfile\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom ts_datasets.anomaly.base import TSADBaseDataset\n\n_logger = logging.getLogger(__name__)\n_logger.setLevel(logging.DEBUG)\n_handler = logging.StreamHandler(sys.stdout)\n_handler.setLevel(logging.DEBUG)\n_logger.addHandler(_handler)\n\n\nclass SMD(TSADBaseDataset):\n \"\"\"\n The Server Machine Dataset (SMD) is a new 5-week-long dataset from\n a large Internet company collected and made publicly available.\n It contains data from 28 server machines and each machine is monitored by 33 metrics.\n SMD is divided into training set and testing set of equal size.\n\n - source: https://github.com/NetManAIOps/OmniAnomaly\n \"\"\"\n\n filename = \"ServerMachineDataset\"\n url = \"https://www.dropbox.com/s/x53ph5cru62kv0f/ServerMachineDataset.tar.gz?dl=1\"\n valid_subsets = (\n [f\"machine-1-{i}\" for i in range(1, 9)]\n + [f\"machine-2-{i}\" for i in range(1, 10)]\n + [f\"machine-3-{i}\" for i in range(1, 12)]\n )\n\n def __init__(self, subset=\"all\", rootdir=None):\n super().__init__()\n if subset == \"all\":\n subset = self.valid_subsets\n elif type(subset) == str:\n assert subset in self.valid_subsets, f\"subset should be in {self.valid_subsets}, but got {subset}\"\n subset = [subset]\n\n if rootdir is None:\n fdir = os.path.dirname(os.path.abspath(__file__))\n merlion_root = os.path.abspath(os.path.join(fdir, \"..\", \"..\", \"..\"))\n rootdir = os.path.join(merlion_root, \"data\", \"smd\")\n\n # Download the SMD dataset if it doesn't exist\n download(_logger, rootdir, SMD.url, SMD.filename)\n for s in subset:\n # Load training/test datasets\n df, metadata = combine_train_test_datasets(\n *SMD._load_data(directory=os.path.join(rootdir, SMD.filename), sequence_name=s)\n )\n self.time_series.append(df)\n self.metadata.append(metadata)\n\n @staticmethod\n def _load_data(directory, sequence_name):\n with open(os.path.join(directory, \"test\", f\"{sequence_name}.txt\"), \"r\") as f:\n test_data = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n with open(os.path.join(directory, \"test_label\", f\"{sequence_name}.txt\"), \"r\") as f:\n test_labels = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n with open(os.path.join(directory, \"train\", f\"{sequence_name}.txt\"), \"r\") as f:\n train_data = np.genfromtxt(f, dtype=np.float32, delimiter=\",\")\n return (pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int))\n\n\ndef combine_train_test_datasets(train_df, test_df, test_labels):\n train_df.columns = [str(c) for c in train_df.columns]\n test_df.columns = [str(c) for c in test_df.columns]\n df = pd.concat([train_df, test_df]).reset_index()\n if \"index\" in df:\n df.drop(columns=[\"index\"], inplace=True)\n df.index = pd.to_datetime(df.index * 60, unit=\"s\")\n df.index.rename(\"timestamp\", inplace=True)\n # There are no labels for training examples, so the training labels are set to 0 by default\n # The dataset is only for unsupervised time series anomaly detection\n metadata = pd.DataFrame(\n {\n \"trainval\": df.index < df.index[train_df.shape[0]],\n \"anomaly\": np.concatenate([np.zeros(train_df.shape[0], dtype=int), test_labels]),\n },\n index=df.index,\n )\n return df, metadata\n\n\ndef 
download(logger, datapath, url, filename):\n os.makedirs(datapath, exist_ok=True)\n compressed_file = os.path.join(datapath, f\"{filename}.tar.gz\")\n\n # Download the compressed dataset\n if not os.path.exists(compressed_file):\n logger.info(\"Downloading \" + url)\n with requests.get(url, stream=True) as r:\n with open(compressed_file, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.flush()\n\n # Uncompress the downloaded tar file\n if not os.path.exists(os.path.join(datapath, \"_SUCCESS\")):\n logger.info(f\"Uncompressing {compressed_file}\")\n tar = tarfile.open(compressed_file, \"r:gz\")\n tar.extractall(path=datapath)\n tar.close()\n Path(os.path.join(datapath, \"_SUCCESS\")).touch()\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.DataFrame",
"numpy.genfromtxt",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
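A minimal, self-contained sketch of the metadata construction performed by `combine_train_test_datasets` in the `ts_datasets/anomaly/smd.py` entry above, run on small synthetic train/test frames instead of the downloaded SMD files (the real class reads the `train/`, `test/` and `test_label/` text files); the frame sizes and labels here are illustrative only.

```python
# Sketch of combine_train_test_datasets above, on synthetic data.
import numpy as np
import pandas as pd

train_df = pd.DataFrame(np.random.rand(5, 3))   # stand-in for the SMD training split
test_df = pd.DataFrame(np.random.rand(4, 3))    # stand-in for the SMD test split
test_labels = np.array([0, 1, 1, 0])            # stand-in anomaly labels for the test split

# Concatenate the splits and index them as one evenly spaced (1-minute) time series.
df = pd.concat([train_df, test_df]).reset_index(drop=True)
df.columns = [str(c) for c in df.columns]
df.index = pd.to_datetime(df.index * 60, unit="s")
df.index.rename("timestamp", inplace=True)

# Training rows carry no labels, so their anomaly flag defaults to 0;
# "trainval" marks which rows belong to the training portion.
metadata = pd.DataFrame(
    {
        "trainval": df.index < df.index[train_df.shape[0]],
        "anomaly": np.concatenate([np.zeros(train_df.shape[0], dtype=int), test_labels]),
    },
    index=df.index,
)
print(metadata.head())
```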
asa008/nhyai | [
"33be2078cf2835d85fedc901d343568e79a5941f"
] | [
"backend/api/ocr/text/keras_detect.py"
] | [
"\"\"\"\nYOLO_v3 Model Defined in Keras.\nReference: https://github.com/qqwweee/keras-yolo3.git\n\"\"\"\nfrom config import kerasTextModel,IMGSIZE,keras_anchors,class_names,GPU,GPUID\nfrom .keras_yolo3 import yolo_text,box_layer,K\n\nfrom apphelper.image import resize_im,letterbox_image\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\ngraph = tf.get_default_graph()##解决web.py 相关报错问题\n\nanchors = [float(x) for x in keras_anchors.split(',')]\nanchors = np.array(anchors).reshape(-1, 2)\nnum_anchors = len(anchors)\n\nnum_classes = len(class_names)\ntextModel = yolo_text(num_classes,anchors)\ntextModel.load_weights(kerasTextModel)\n\n\nsess = K.get_session()\nimage_shape = K.placeholder(shape=(2, ))##图像原尺寸:h,w\ninput_shape = K.placeholder(shape=(2, ))##图像resize尺寸:h,w\nbox_score = box_layer([*textModel.output,image_shape,input_shape],anchors, num_classes)\n\n\n\ndef text_detect(img,prob = 0.05):\n im = Image.fromarray(img)\n scale = IMGSIZE[0]\n w,h = im.size\n w_,h_ = resize_im(w,h, scale=scale, max_scale=2048)##短边固定为608,长边max_scale<4000\n #boxed_image,f = letterbox_image(im, (w_,h_))\n boxed_image = im.resize((w_,h_), Image.BICUBIC)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n imgShape = np.array([[h,w]])\n inputShape = np.array([[h_,w_]])\n \n \n global graph\n with graph.as_default():\n ##定义 graph变量 解决web.py 相关报错问题\n \"\"\"\n pred = textModel.predict_on_batch([image_data,imgShape,inputShape])\n box,scores = pred[:,:4],pred[:,-1]\n \n \"\"\"\n box,scores = sess.run(\n [box_score],\n feed_dict={\n textModel.input: image_data,\n input_shape: [h_, w_],\n image_shape: [h, w],\n K.learning_phase(): 0\n })[0]\n \n\n keep = np.where(scores>prob)\n box[:, 0:4][box[:, 0:4]<0] = 0\n box[:, 0][box[:, 0]>=w] = w-1\n box[:, 1][box[:, 1]>=h] = h-1\n box[:, 2][box[:, 2]>=w] = w-1\n box[:, 3][box[:, 3]>=h] = h-1\n box = box[keep[0]]\n scores = scores[keep[0]]\n return box,scores\n\n"
] | [
[
"tensorflow.get_default_graph",
"numpy.array",
"numpy.expand_dims",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
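The score-threshold-and-clip step at the end of `text_detect` in the `keras_detect.py` entry above can be illustrated with plain NumPy on synthetic boxes; the image size, box values, and threshold below are illustrative and not part of the original pipeline.

```python
# Sketch of the post-processing in text_detect above: keep boxes whose score
# exceeds a threshold and clip box coordinates to the image bounds.
import numpy as np

h, w = 480, 640                     # illustrative image size
box = np.array([[ -5., 10., 700., 200.],
                [ 50., 60., 120., 480.],
                [100., 20., 150.,  90.]], dtype=np.float32)   # x1, y1, x2, y2
scores = np.array([0.80, 0.02, 0.60], dtype=np.float32)
prob = 0.05                         # score threshold, as in the original default

keep = np.where(scores > prob)
box[:, 0:4][box[:, 0:4] < 0] = 0    # clamp negative coordinates to zero
box[:, 0][box[:, 0] >= w] = w - 1   # clamp x coordinates to the right edge
box[:, 2][box[:, 2] >= w] = w - 1
box[:, 1][box[:, 1] >= h] = h - 1   # clamp y coordinates to the bottom edge
box[:, 3][box[:, 3] >= h] = h - 1

box, scores = box[keep[0]], scores[keep[0]]
print(box, scores)
```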
eyalnaor/DeepTemporalSR | [
"7d8c821431dec3a4c480550c61a6033fcac5e640"
] | [
"torch_resizer.py"
] | [
"'''\nCode courtesy of Ben Feinstein & Assaf Shocher\nPlease see their work:\nhttps://github.com/assafshocher/PyTorch-Resizer\nhttps://github.com/feinsteinben\n'''\nimport numpy as np\nimport torch\nfrom math import pi\nfrom torch import nn\n\n\nclass Resizer(nn.Module):\n def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True, device=None, dtype=None):\n super(Resizer, self).__init__()\n # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa\n scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)\n self.device = device\n # Choose interpolation method, each method has the matching kernel size\n method, kernel_width = {\n \"cubic\": (cubic, 4.0),\n \"lanczos2\": (lanczos2, 4.0),\n \"lanczos3\": (lanczos3, 6.0),\n \"box\": (box, 1.0),\n \"linear\": (linear, 2.0),\n None: (cubic, 6.0) # set default interpolation method as cubic\n }.get(kernel)\n\n # Antialiasing is only used when downscaling\n antialiasing *= (np.any(np.array(scale_factor) < 1))\n\n # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient\n sorted_dims = np.argsort(np.array(scale_factor))\n self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]\n\n # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction\n field_of_view_list = []\n weights_list = []\n for dim in self.sorted_dims:\n # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the\n # weights that multiply the values there to get its result.\n weights, field_of_view = self.contributions(in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing)\n\n # convert to torch tensor\n if dtype is not None:\n weights = torch.tensor(weights.T, dtype=dtype, device=device)\n else:\n weights = torch.tensor(weights.T, dtype=torch.float32, device=device)\n # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for\n # tmp_im[field_of_view.T], (bsxfun style)\n weights_list.append(nn.Parameter(torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]), requires_grad=False))\n field_of_view_list.append(nn.Parameter(torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long, device=device), requires_grad=False))\n\n self.field_of_view = nn.ParameterList(field_of_view_list)\n self.weights = nn.ParameterList(weights_list)\n self.in_shape = in_shape\n\n def forward(self, in_tensor):\n x = in_tensor\n\n # make sure input is in the correct size\n assert list(self.in_shape[1:]) == list(x.shape[1:]), 'wrong input shape: %s, expected %s' % (str(x.shape), str(self.in_shape))\n\n # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim\n for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):\n # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize\n x = torch.transpose(x, dim, 0)\n\n # This is a bit of a complicated multiplication: x[field_of_view] is a tensor of order image_dims+1.\n # for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim\n # only, this is why it only adds 1 dim to 5the shape). We then multiply, for each pixel, its set of positions with\n # the matching set of weights. 
we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:\n # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the\n # same number\n x = torch.sum(x[fov] * w, dim=0).to(self.device)\n\n # Finally we swap back the axes to the original order\n x = torch.transpose(x, dim, 0)\n\n return x\n\n def fix_scale_and_size(self, input_shape, output_shape, scale_factor):\n # First fixing the scale-factor (if given) to be standardized the function expects (a list of scale factors in the\n # same size as the number of input dimensions)\n if scale_factor is not None:\n # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.\n if np.isscalar(scale_factor) and len(input_shape) > 1:\n scale_factor = [scale_factor, scale_factor]\n\n # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales\n scale_factor = list(scale_factor)\n scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor\n\n # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size\n # to all the unspecified dimensions\n if output_shape is not None:\n output_shape = list(input_shape[len(output_shape):]) + list(np.uint(np.array(output_shape)))\n\n # Dealing with the case of non-give scale-factor, calculating according to output-shape. note that this is\n # sub-optimal, because there can be different scales to the same output-shape.\n if scale_factor is None:\n scale_factor = np.array(output_shape) / np.array(input_shape)\n\n # Dealing with missing output-shape. calculating according to scale-factor\n if output_shape is None:\n output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))\n\n return scale_factor, output_shape\n\n def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):\n # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied\n # such that each position from the field_of_view will be multiplied with a matching filter from the\n # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers\n # around it. This is only done for one dimension of the image.\n\n # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of\n # 1/sf. this means filtering is more 'low-pass filter'.\n fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing and scale < 1.0 else kernel\n kernel_width *= 1.0 / scale if antialiasing and scale < 1.0 else 1.0\n\n # These are the coordinates of the output image\n out_coordinates = np.arange(1, out_length+1)\n\n # since both scale-factor and output size can be provided simulatneously, perserving the center of the image requires shifting\n # the output coordinates. the deviation is because out_length doesn't necesary equal in_length*scale.\n # to keep the center we need to subtract half of this deivation so that we get equal margins for boths sides and center is preserved.\n shifted_out_coordinates = out_coordinates - (out_length - in_length*scale)/2\n\n # These are the matching positions of the output-coordinates on the input image coordinates.\n # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:\n # [1,2,3,4] -> [1,2]. 
Remember each pixel number is the middle of the pixel.\n # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to\n # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big\n # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).\n # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is\n # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:\n # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)\n match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)\n\n # This is the left boundary to start multiplying the filter from, it depends on the size of the filter\n left_boundary = np.floor(match_coordinates - kernel_width / 2)\n\n # Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers\n # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)\n expanded_kernel_width = np.ceil(kernel_width) + 2\n\n # Determine a set of field_of_view for each each output position, these are the pixels in the input image\n # that the pixel in the output image 'sees'. We get a matrix whos horizontal dim is the output pixels (big) and the\n # vertical dim is the pixels it 'sees' (kernel_size + 2)\n field_of_view = np.squeeze(np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1))\n\n # Assign weight to each pixel in the field of view. A matrix whos horizontal dim is the output pixels and the\n # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in\n # 'field_of_view')\n weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)\n\n # Normalize weights to sum up to 1. be careful from dividing by 0\n sum_weights = np.sum(weights, axis=1)\n sum_weights[sum_weights == 0] = 1.0\n weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)\n\n # We use this mirror structure as a trick for reflection padding at the boundaries\n mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))\n field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]\n\n # Get rid of weights and pixel positions that are of zero weight\n non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))\n weights = np.squeeze(weights[:, non_zero_out_pixels])\n field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])\n\n # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size\n return weights, field_of_view\n\n\n# These next functions are all interpolation methods. 
x is the distance from the left pixel center\n\n\ndef cubic(x):\n absx = np.abs(x)\n absx2 = absx ** 2\n absx3 = absx ** 3\n return ((1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +\n (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) & (absx <= 2)))\n\n\ndef lanczos2(x):\n return (((np.sin(pi*x) * np.sin(pi*x/2) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps))\n * (abs(x) < 2))\n\n\ndef box(x):\n return ((-0.5 <= x) & (x < 0.5)) * 1.0\n\n\ndef lanczos3(x):\n return (((np.sin(pi*x) * np.sin(pi*x/3) + np.finfo(np.float32).eps) /\n ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps))\n * (abs(x) < 3))\n\n\ndef linear(x):\n return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))\n"
] | [
[
"torch.transpose",
"numpy.expand_dims",
"numpy.abs",
"numpy.mod",
"numpy.arange",
"numpy.squeeze",
"torch.sum",
"torch.tensor",
"numpy.sin",
"numpy.ceil",
"numpy.finfo",
"torch.nn.ParameterList",
"numpy.floor",
"numpy.any",
"numpy.isscalar",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
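The coordinate mapping that `Resizer.contributions` in the `torch_resizer.py` entry above relies on (`p_new = p_old / sf + 0.5 * (1 - 1/sf)`) can be checked in isolation. The sketch below reproduces that mapping plus the left-boundary/field-of-view bookkeeping for a cubic kernel with antialiasing, using small illustrative sizes; it is a NumPy-only demonstration, not the full resizer.

```python
# Sketch of the coordinate mapping used by Resizer.contributions above:
# output coordinates are mapped back onto input coordinates so that pixel
# *boundaries* (not pixel numbers) line up when the image is rescaled.
import numpy as np

in_length, scale = 8, 0.5                       # downscale 8 -> 4 (illustrative)
out_length = int(np.ceil(in_length * scale))
kernel_width = 4.0 / scale                      # cubic kernel, stretched for antialiasing

out_coordinates = np.arange(1, out_length + 1)
shifted = out_coordinates - (out_length - in_length * scale) / 2
match_coordinates = shifted / scale + 0.5 * (1 - 1 / scale)

# Left boundary of the filter support and the (enlarged) window of input pixels
# each output pixel "sees".
left_boundary = np.floor(match_coordinates - kernel_width / 2)
expanded_kernel_width = np.ceil(kernel_width) + 2
field_of_view = np.int16(left_boundary[:, None] + np.arange(expanded_kernel_width) - 1)

print(match_coordinates)   # [1.5 3.5 5.5 7.5] for these sizes
print(field_of_view.shape) # (out_length, expanded_kernel_width)
```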
robfalck/AoC2017 | [
"fa19f3fb42d979b60888a1954bea571c9d4ee735"
] | [
"day24/day24.py"
] | [
"from __future__ import print_function, division, absolute_import\n\nimport copy\nimport time\nimport numpy as np\nimport sys\n\n\nclass Bridge(object):\n\n def __init__(self, initial_components, available_components):\n self.components = list(initial_components)\n self.score = sum([sum(tup) for tup in self.components])\n self.available_components = available_components\n\n def next_required_number(self):\n if len(self.components) == 1:\n c = self.components[0]\n nrn = c[0] if c.index(0) == 1 else c[1]\n else:\n c1 = self.components[-1]\n c2 = self.components[-2]\n nrn = c1[0] if c1[1] in c2 else c1[1]\n return nrn\n\n def add_component(self, c):\n nrn = self.next_required_number()\n if nrn not in c:\n raise ValueError('Invalid connection, wrong port. Needed: {0} Got: {1}'.format(nrn, str(c)))\n if c not in self.available_components:\n raise ValueError('Component unavailable:', c)\n self.components.append(c)\n self.score += sum(c)\n self.available_components.remove(c)\n\n # def score(self):\n # return sum([sum(tup) for tup in self.components])\n\n def length(self):\n return len(self.components)\n\n def assemble_next(self):\n \"\"\"\n Find the next required number in the bridge. Return\n a *new* list of bridges each with a different valid\n component on the end, depending on the available components.\n\n Returns\n -------\n\n \"\"\"\n nrn = self.next_required_number()\n next_components = [c for c in self.available_components if nrn in c]\n new_bridges = []\n\n for nx in next_components:\n b = Bridge(initial_components=tuple(self.components),\n available_components=self.available_components.copy())\n b.add_component(nx)\n new_bridges.append(b)\n return new_bridges\n\n def __str__(self):\n s = '--'.join(['{0}/{1}'.format(*c) for c in self.components])\n return s\n\n\ndef solve(inp):\n\n components = [(int(line.split('/')[0]), int(line.split('/')[1])) for line in inp]\n\n starting_comps = [c for c in components if 0 in c]\n\n bridges = []\n\n for sc in starting_comps:\n bridges.append(Bridge((sc,), set(components)-set((sc,))))\n\n complete_bridges = []\n complete_bridges.extend(bridges)\n\n for i in range(1000):\n print('.', end='')\n sys.stdout.flush()\n\n new_bridges = []\n for b in bridges:\n new_bridges.extend(b.assemble_next())\n\n if not new_bridges:\n # Terminate once no new bridges can be built\n break\n\n bridges = new_bridges\n complete_bridges.extend(new_bridges)\n strongest_bridge = complete_bridges[np.argmax([b.score for b in complete_bridges])]\n\n print()\n print('Strongest bridge:')\n print(' ', str(strongest_bridge))\n print(' strength = ', strongest_bridge.score, 'length =', strongest_bridge.length())\n\n longest_length = np.max([b.length() for b in complete_bridges])\n\n longest_bridges = [b for b in bridges if b.length() == longest_length]\n\n strongest_longest_bridge = longest_bridges[np.argmax([b.score for b in longest_bridges])]\n\n print('Strongest longest bridge:')\n print(' ', str(strongest_longest_bridge))\n print(' strength = ', strongest_longest_bridge.score, 'length =', strongest_longest_bridge.length())\n\n\n\n\n\nif __name__ == '__main__':\n\n\n with open('test_input.txt', 'r') as f:\n puzzle_input = [line.strip() for line in f.readlines() if line]\n\n t0 = time.time()\n solve(puzzle_input)\n print('Time to solve test:', time.time()-t0, 'sec')\n\n with open('input.txt', 'r') as f:\n puzzle_input = [line.strip() for line in f.readlines() if line]\n\n t0 = time.time()\n solve(puzzle_input)\n print('Time to solve:', time.time()-t0, 'sec')\n"
] | [
[
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
verypluming/transitivity | [
"46808ff20a2aed55a54be58c35427b630711d014"
] | [
"scripts/format_veridicality.py"
] | [
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport pandas as pd\nimport re\nimport copy\nimport os\nimport sys\nimport random\nimport argparse\n\n\ndef trans_label(label):\n if label == \"yes\":\n return \"entailment\"\n elif label == \"unk\":\n return \"neutral\"\n\ndef check_pn(sentence):\n prop_n = {\"ann\":\"Ann\", \"bob\":\"Bob\", \"chris\":\"Chris\", \"daniel\":\"Daniel\", \"elliot\":\"Elliot\", \"fred\":\"Fred\", \"greg\":\"Greg\", \"henry\":\"Henry\", \"tom\":\"Tom\", \"john\":\"John\"}\n for pn, npn in prop_n.items():\n if pn in sentence:\n tmp1 = re.compile(pn)\n tmp2 = re.compile(npn)\n sentence = re.sub(pn, npn, sentence)\n return sentence\n\ndef check_factive(pred):\n positive_clause_preds = ['realized', 'acknowledged', 'remembered', 'noted', 'found', 'noticed', 'learned', 'saw', 'revealed', 'discovered', 'understood', 'knew', 'admitted', 'recognized', 'observed']\n neutral_clause_preds = ['felt', 'claimed', 'doubted', 'hoped', 'predicted', 'implied', 'suspected', 'wished', 'thought', 'believed', 'heard', 'expected', 'estimated', 'assumed', 'argued']\n #positive_clause_preds = ['realized', 'knew', 'remembered']\n #neutral_clause_preds = ['hoped', 'felt', 'mentioned']\n\n if pred in positive_clause_preds:\n return \"f\"\n elif pred in neutral_clause_preds:\n return \"nf\"\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\nparser.add_argument(\"--input\", nargs='?', type=str, help=\"input file\")\nARGS = parser.parse_args()\nsentences = []\nfiles = glob.glob(ARGS.input+\"/data*.tsv\")\nfor file in files:\n with open(file, \"r\") as f:\n for line in f:\n if re.search(\"data_t_h\", file):\n s1, s2, depth, connect, label = line.split(\"\\t\")\n genre = \"ph.depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+label.strip()\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n elif re.search(\"data_ft_t\", file):\n s1, s2, depth, connect, th_label, label = line.split(\"\\t\")\n genre = \"fpp.depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+label.strip()\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n\ndf = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])\ntrain =pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])\ntrain['index'] = 
df.index\ntrain['promptID'] = df.index\ntrain['pairID'] = df.index\ntrain['gold_label'] = df[\"gold_label\"]\ntrain['genre'] = df[\"genre\"]\ntrain['sentence1'] = df[\"sentence1\"]\ntrain['sentence2'] = df[\"sentence2\"]\nfinal_train = train.sample(frac=1)\nfinal_train.to_csv(ARGS.input+\"/train.tsv\", sep=\"\\t\", index=False)\n\nsentences = []\nwith open(ARGS.input+\"/data_ft_h.tsv\", \"r\") as f:\n for line in f:\n s1, s2, depth, connect, th_label, label = line.split(\"\\t\")\n pred_label = check_factive(s1.split(\" \")[1])\n genre = \"depth\"+str(depth)+\".boolean\"+str(connect)+\".\"+pred_label+\".\"+th_label\n s1 = check_pn(s1)\n s2 = check_pn(s2)\n if re.search(\"punct\", s1):\n s1 = re.sub(\"\\spunct\", \",\", s1)\n if re.search(\"punct\", s2):\n s1 = re.sub(\"\\spunct\", \",\", s2)\n s1 = s1[0].upper() + s1[1:]\n s1 = s1.strip()+\".\"\n s2 = s2[0].upper() + s2[1:]\n s2 = s2.strip()+\".\"\n sentences.append([genre, s1, s2, trans_label(label.strip())])\n\ndf2 = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])\ntest = pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])\ntest['index'] = df2.index\ntest['promptID'] = df2.index\ntest['pairID'] = df2.index\ntest['gold_label'] = df2[\"gold_label\"]\ntest['genre'] = df2[\"genre\"]\ntest['sentence1'] = df2[\"sentence1\"]\ntest['sentence2'] = df2[\"sentence2\"]\nfinal_test = test.sample(frac=1)\nfinal_test.to_csv(ARGS.input+\"/dev_matched.tsv\", sep=\"\\t\", index=False)"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jvparidon/sub2vec | [
"adb9e72b64dc6dbde3c2060ee0d3964ab623a149"
] | [
"subs2vec/norms.py"
] | [
"\"\"\"Predict lexical norms, either to evaluate word vectors, or to get norms for unnormed words.\"\"\"\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model\nimport sklearn.model_selection\nimport sklearn.preprocessing\nimport sklearn.utils\nimport argparse\nimport os\nfrom .vecs import Vectors\nfrom .utensils import log_timer\nimport logging\nlogging.basicConfig(format='[{levelname}] {message}', style='{', level=logging.INFO)\npath = os.path.dirname(__file__)\n\n\n@log_timer\ndef evaluate_norms(lang, vecs_fname, alpha=1.0):\n \"\"\"Predict lexical norms to evaluate a set of word vectors in a given language.\n \n Writes scores to tab-separated text file but also returns them.\n\n :param lang: language to evaluate word vectors in (uses two-letter ISO codes)\n :param vecs_fname: word vectors to evaluate\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n :return: pandas DataFrame containing the norms results\n \"\"\"\n norms_path = os.path.join(path, 'datasets', 'norms')\n if not os.path.exists('results'):\n os.mkdir('results')\n results_path = os.path.join('results', 'norms')\n if not os.path.exists(results_path):\n os.mkdir(results_path)\n logging.info(f'evaluating lexical norm prediction with {vecs_fname}')\n vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)\n scores = []\n for norms_fname in os.listdir(norms_path):\n if norms_fname.startswith(lang):\n logging.info(f'predicting norms from {norms_fname}')\n norms = pd.read_csv(os.path.join(norms_path, norms_fname), sep='\\t', comment='#')\n norms = norms.set_index('word')\n score = predict_norms(vectors, norms, alpha)['scores']\n score['source'] = norms_fname\n scores.append(score)\n scores_fname = os.path.split(vecs_fname)[1].replace('.vec', '.tsv')\n if len(scores) > 0:\n scores = pd.concat(scores)\n scores.to_csv(os.path.join(results_path, scores_fname), sep='\\t', index=False)\n return scores\n\n\n@log_timer\ndef predict_norms(vectors, norms, alpha=1.0):\n \"\"\"Predict lexical norms and return score.\n\n :param vectors: Vectors object containing word vectors\n :param norms: pandas DataFrame of lexical norms\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n :return: dict containing scores and predictions in separate pandas DataFrames\n \"\"\"\n vecs_df = vectors.as_df()\n cols = norms.columns.values\n df = norms.join(vecs_df, how='inner')\n # compensate for missing ys somehow\n total = len(norms)\n missing = len(norms) - len(df)\n penalty = (total - missing) / total\n logging.info(f'missing vectors for {missing} out of {total} words')\n df = sklearn.utils.shuffle(df) # shuffle is important for unbiased results on ordered datasets!\n\n model = sklearn.linear_model.Ridge(alpha=alpha) # use ridge regression models\n cv = sklearn.model_selection.RepeatedKFold(n_splits=5, n_repeats=10)\n\n # compute crossvalidated prediction scores\n scores = []\n for col in cols:\n # set dependent variable and calculate 10-fold mean fit/predict scores\n df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is issued\n df_subset[col] = df[col]\n df_subset = df_subset.dropna() # drop NaNs for this specific y\n x = df_subset[vecs_df.columns.values]\n y = df_subset[col]\n cv_scores = sklearn.model_selection.cross_val_score(model, x, y, cv=cv)\n median_score = np.median(cv_scores)\n penalized_score = median_score * penalty\n scores.append({\n 'norm': col,\n 'adjusted r': np.sqrt(penalized_score), # take 
square root of explained variance to get Pearson r\n 'adjusted r-squared': penalized_score,\n 'r-squared': median_score,\n 'r': np.sqrt(median_score),\n })\n\n # predict (extend norms)\n x_full = df[vecs_df.columns.values]\n predictions = df.loc[:, cols] # use .loc[] so copy is created and no setting with copy warning is raised by pandas\n for col in cols:\n # set dependent variable and fit, but predict for whole x (so including unobserved y)\n df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is raised\n df_subset[col] = df[col]\n df_subset = df_subset.dropna() # drop NaNs for this specific y\n x = df_subset[vecs_df.columns.values]\n y = df_subset[col]\n model.fit(x, y)\n predictions[f'{col} predicted'] = model.predict(x_full)\n\n return {'scores': pd.DataFrame(scores), 'predictions': predictions}\n\n\ndef extend_norms(vecs_fname, norms_fname, alpha=1.0):\n \"\"\"Extend lexical norms to unobserved words, using word vectors.\n\n Writes predictions to tab-separated text file.\n\n :param vecs_fname: file containing word vectors to use for prediction.\n :param norms_fname: file containing norms in tab-separated columns, first column should contain words,\n first line should contain column names, unobserved cells should be left empty\n :param alpha: regularization strength, default 1.0, set higher for small datasets\n \"\"\"\n logging.info(f'extending lexical norms with {vecs_fname}')\n vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)\n norms = pd.read_csv(norms_fname, sep='\\t', comment='#')\n norms = norms.set_index('word')\n results = predict_norms(vectors, norms, alpha)\n base_fname = '.'.join(norms_fname.split('.')[:-1])\n results['scores'].to_csv(f'{base_fname}.scores.tsv', sep='\\t', index=False)\n results['predictions'].to_csv(f'{base_fname}.predictions.tsv', sep='\\t', index=True)\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='perform crossvalidated penalized regression of lexical norms using word vectors as predictors')\n argparser.add_argument('lang', help='language to predict norms for (uses two-letter ISO language codes)')\n argparser.add_argument('vecs_fname', help='vectors to evaluate (or use for lexical norm extension')\n argparser.add_argument('--extend_norms', help='file containing lexical norms to extend')\n argparser.add_argument('--alpha', type=float, default=1.0, help='regularization strength, default 1.0, set higher for small datasets')\n args = argparser.parse_args()\n\n if args.extend_norms:\n extend_norms(vecs_fname=args.vecs_fname, norms_fname=args.extend_norms, alpha=args.alpha)\n else:\n print(evaluate_norms(lang=args.lang, vecs_fname=args.vecs_fname, alpha=args.alpha))\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.sqrt",
"numpy.median",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
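The cross-validated ridge-regression scoring inside `predict_norms` in the `subs2vec/norms.py` entry above can be reproduced on synthetic data; only the model/CV setup mirrors the original (Ridge, RepeatedKFold, median of `cross_val_score`), while the feature matrix and the "norm" column below are made up.

```python
# Sketch of the scoring loop in predict_norms above, on synthetic data:
# ridge regression, repeated k-fold CV, and the median CV score as r-squared.
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.model_selection
import sklearn.utils

rng = np.random.RandomState(0)
X = pd.DataFrame(rng.normal(size=(200, 10)))                    # stand-in word vectors
y = X @ rng.normal(size=10) + rng.normal(scale=0.1, size=200)   # stand-in lexical norm

df = pd.concat([X, y.rename("norm")], axis=1)
df = sklearn.utils.shuffle(df)        # shuffle for unbiased results on ordered data

model = sklearn.linear_model.Ridge(alpha=1.0)
cv = sklearn.model_selection.RepeatedKFold(n_splits=5, n_repeats=10)
cv_scores = sklearn.model_selection.cross_val_score(model, df[X.columns], df["norm"], cv=cv)

median_score = np.median(cv_scores)   # r-squared
print({"r-squared": median_score, "r": np.sqrt(median_score)})
```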
IntelligentSensor/PHMRepository | [
"8684c7851970293d607d18c580cec7edbf72ad17"
] | [
"Prognostics/dl-models.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport random\nimport numpy as np\nimport seaborn as sns\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom preprocess import preprocess\n\n\nimport keras as K\nimport tensorflow as tf\nfrom keras.regularizers import l2\nfrom keras.utils import plot_model\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.callbacks import TensorBoard\nfrom keras.models import model_from_json\nfrom keras.layers import Input, Dense, Flatten, Activation\nfrom keras.layers import Conv1D, Dropout, BatchNormalization, MaxPooling1D\n\nfrom keras.layers import LSTM, Bidirectional\nfrom keras.layers.core import Flatten, Dense, Dropout\n\npy_ver = sys.version\nk_ver = K.__version__\ntf_ver = tf.__version__\n\nK.backend.clear_session()\n\nprint(\"Using Python version \" + str(py_ver))\nprint(\"Using Keras version \" + str(k_ver))\nprint(\"Using TensorFlow version \" + str(tf_ver))\n\nClass_dict={0:'正常', 1:'溶液地未连接', 2:'流通池接地', 3:'电缆线未连接', 4:'球泡破裂', 5:'支架损坏',\n 6:'电极污染', 7:'电解液缺失', 8:'水样波动'}\n\n# 训练参数\nbatch_size = 10\nepochs = 30\nnum_classes = 9\nlength = 2048\nBatchNorm = True # 是否批量归一化\nnumber = 200 # 每类样本的数量\nnormal = True # 是否标准化\nrate = [0.7,0.2,0.1] # 测试集验证集划分比例\n\npath = '/Users/tung/Python/WorkProject/PHMresearch/WDCNN&LR_FaultDiagnosis/'\npreprocess = preprocess()\n\nx_train, y_train, x_valid, y_valid, x_test, y_test = preprocess.prepro(d_path=path+'data/0HP',length=length,\n number=number,\n normal=normal,\n rate=rate,\n enc=True, enc_step=340)\n\n# 输入卷积的时候还需要修改一下,增加通道数目\nx_train, x_valid, x_test = x_train[:,:,np.newaxis], x_valid[:,:,np.newaxis], x_test[:,:,np.newaxis]\n\n# 输入数据的维度\ninput_shape =x_train.shape[1:]\n\nprint('训练样本维度:', x_train.shape)\nprint(x_train.shape[0], '训练样本个数')\nprint('验证样本的维度', x_valid.shape)\nprint(x_valid.shape[0], '验证样本个数')\nprint('测试样本的维度', x_test.shape)\nprint(x_test.shape[0], '测试样本个数')\n\ndef get_label(row):\n for c in range(len(row)):\n if row[c]==1:\n return c\n\ndef decode(arr):\n temp = np.zeros(len(arr))\n for i in range(len(arr)):\n temp[i] = get_label(arr[i])\n return temp\ny_test_decode = decode(y_test)\ny_train_decode = decode(y_train)\n\ndef auc(y_true, y_pred):\n auc = tf.metrics.auc(y_true, y_pred)[1]\n K.get_session().run(tf.local_variables_initializer())\n return auc\n\n'WDCNN'\n# 自定义卷积层wdcnn\ndef wdcnn(filters, kernerl_size, strides, conv_padding, pool_padding, pool_size, BatchNormal):\n \"\"\"wdcnn层神经元\n \n :param filters: 卷积核的数目,整数\n :param kernerl_size: 卷积核的尺寸,整数\n :param strides: 步长,整数\n :param conv_padding: 'same','valid'\n :param pool_padding: 'same','valid'\n :param pool_size: 池化层核尺寸,整数\n :param BatchNormal: 是否Batchnormal,布尔值\n :return: model\n \"\"\"\n model.add(Conv1D(filters=filters, kernel_size=kernerl_size, strides=strides,\n padding=conv_padding, kernel_regularizer=l2(1e-4)))\n if BatchNormal:\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))\n return model\n\n# 实例化序贯模型\nmodel = Sequential()\n# 搭建输入层,第一层卷积。因为要指定input_shape,所以单独放出来\nmodel.add(Conv1D(filters=16, kernel_size=64, strides=16, padding='same',kernel_regularizer=l2(1e-4), input_shape=input_shape))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling1D(pool_size=2))\n\n#0-1之间均匀分布的Dropout\n# model.add(Dropout( np.round(random.uniform(0,1), 2) ))\n\n# 第二层卷积\nmodel = wdcnn(filters=32, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, 
BatchNormal=BatchNorm)\n# 第三层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 第四层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='same',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 第五层卷积\nmodel = wdcnn(filters=64, kernerl_size=3, strides=1, conv_padding='valid',\n pool_padding='valid', pool_size=2, BatchNormal=BatchNorm)\n# 从卷积到全连接需要展平\nmodel.add(Flatten())\n\n# 添加全连接层\nmodel.add(Dense(units=90, activation='relu', kernel_regularizer=l2(1e-4)))\n# 增加输出层\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\nmodel.summary()\n\n# 编译模型 评价函数和损失函数相似,不过评价函数的结果不会用于训练过程中\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy',\n metrics=['accuracy']) #metrics=[auc]\n\nstart = datetime.now()\n\n# TensorBoard调用查看一下训练情况\ntb_cb = TensorBoard(log_dir='logs')\n\n# 开始模型训练\nhistory = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_valid, y_valid), shuffle=True,\n callbacks=[tb_cb])\n\nprint(\"This took \", datetime.now() - start)\n\n#变dropout率\n#BN与训练速度和识别率\n#样本量与识别率及标准差的关系\n#对输入数据添加高斯白噪声\ndef wgn(x, snr):\n snr = 10**(snr/10.0)\n xpower = np.sum(x**2)/len(x)\n npower = xpower / snr\n return np.random.randn(len(x)) * np.sqrt(npower)\n\ntest = x_train[0]\ntestwgn = wgn(test, 10).reshape((2048, 1)) #-4dB~10dB\ntemp = test + testwgn\n\n#第一层卷积核大小与抗噪\n#feature map特征可分性\n#保存模型\nmodel_path = path + 'models/wdcnn.h5'\nmodel.save(model_path)\ndel model\n\n# 模型包含一个自定义 wdcnn 类的实例\nmodel = load_model(path+'models/wdcnn.h5', custom_objects={'wdcnn': wdcnn})\nmodel.summary()\n#fine-tune\n\n#evaluation\nscore = model.evaluate(x=x_test, y=y_test, verbose=0)\nprint(\"测试集上的损失:\", score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/wdcnn.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 2048, 1))\npredicted = model.predict(unknown)\nprint(\"Using model to predict fault for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n\n'LSTM'\nx_train = x_train.reshape((x_train.shape[0], 16, 128)) #time_step、input_dim\nx_valid = x_valid.reshape((x_valid.shape[0], 16, 128))\nx_test = x_test.reshape((x_test.shape[0], 16, 128))\n\nmodel = Sequential()\n\n#隐藏层设置为10, input_shape(time_step、input_dim) stateful=True使用状态RNN\nmodel.add(LSTM(units=9, input_shape=(x_train.shape[1], x_train.shape[2])))\nmodel.add(BatchNormalization())\n\n#全连接层,输出单个类,units为num_classes\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\n\n#告诉模型输入的格式\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\n\n# #重置网络中所有层的状态\n# model.reset_states()\n\n# #重置指定层的状态\n# model.layers[0].reset_states()\nmodel.summary()\n\n#损失函数为交叉熵,优化器为Adam,学习率为0.001\nmodel.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['acc'])\n\nstart = datetime.now()\nhistory =model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_valid, y_valid))#训练模型并进行测试\n\nprint(\"This took \", datetime.now() - start)\n\n#保存模型\nmodel_path = path+'models/LSTM.h5'\nmodel.save(model_path)\ndel model\n\nmodel = load_model(path+'models/LSTM.h5')\nmodel.summary()\n\n#evaluation\nscore = history.model.evaluate(x=x_test, y=y_test, 
verbose=0)\nprint(\"测试集上的损失:\", score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/LSTM.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 16, 128))\npredicted = model.predict(unknown)\nprint(\"Using model to predict species for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n\n'biLSTM'\nmodel = Sequential()\n#隐藏层设置为10, input_shape元组第二个参数指\nmodel.add(Bidirectional(LSTM(units=9, input_shape=(x_train.shape[1], x_train.shape[2])))) # activation='tanh'\nmodel.add(BatchNormalization())\n\n#全连接层,输出单个类,units为num_classes\nmodel.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\n\n#告诉模型输入的格式\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\nmodel.summary()\n\n#损失函数为交叉熵,优化器为Adam,学习率为0.001\nmodel.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['acc'])\n\nstart = datetime.now()\nhistory =model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_valid, y_valid))#训练模型并进行测试\n\nprint(\"This took \", datetime.now() - start)\n\n#嵌套网络保存\nmodel.save_weights(path+'models/biLSTM.h5')\nmodel.load_weights(path+'models/biLSTM.h5',by_name=True)\njson_string = model.to_json()\nmodel=model_from_json(json_string)\nmodel.build((None, x_train.shape[1], x_train.shape[2])) #time_step、input_dim\nmodel.summary()\n\n#evaluation\nscore = history.model.evaluate(x=x_test, y=y_test, verbose=0)\nprint(\"测试集上的损失:\",score[0])\nprint(\"测试集上的损失:\",score[1])\nplot_model(model=model, to_file=path+'models/biLSTM.png', show_shapes=True)\n\n#prediction\nstart = datetime.now()\n\nunknown = x_test[0].reshape((1, 16, 128))\npredicted = model.predict(unknown)\nprint(\"Using model to predict species for features: \")\nprint(unknown)\nprint(\"\\nPredicted softmax vector is: \")\nprint(predicted)\nprint(\"\\nPredicted fault is: \")\nprint(Class_dict[np.argmax(predicted)])\n\nprint(\"This took \", datetime.now() - start)\n"
] | [
[
"tensorflow.local_variables_initializer",
"numpy.sqrt",
"numpy.argmax",
"tensorflow.metrics.auc",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
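A compact sketch of the wide-first-kernel Conv1D stack that the WDCNN model in the `Prognostics/dl-models.py` entry above builds; it is written against `tensorflow.keras` rather than the standalone Keras the original imports, uses fewer follow-up blocks, and treats the input length and class count as illustrative.

```python
# Sketch of the WDCNN-style stack above: a wide first convolution (kernel 64,
# stride 16) followed by narrow kernel-3 conv blocks, then a small dense head.
from tensorflow.keras import layers, models, regularizers

num_classes, length = 9, 2048      # as in the original script (illustrative here)

def conv_block(model, filters, kernel_size=3):
    """One wdcnn-style block: Conv1D -> BatchNorm -> ReLU -> MaxPool."""
    model.add(layers.Conv1D(filters, kernel_size, strides=1, padding="same",
                            kernel_regularizer=regularizers.l2(1e-4)))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation("relu"))
    model.add(layers.MaxPooling1D(pool_size=2))
    return model

model = models.Sequential()
model.add(layers.Conv1D(16, 64, strides=16, padding="same",
                        kernel_regularizer=regularizers.l2(1e-4),
                        input_shape=(length, 1)))          # wide first kernel
model.add(layers.BatchNormalization())
model.add(layers.Activation("relu"))
model.add(layers.MaxPooling1D(pool_size=2))

for filters in (32, 64, 64):                               # narrow follow-up blocks
    model = conv_block(model, filters)

model.add(layers.Flatten())
model.add(layers.Dense(90, activation="relu", kernel_regularizer=regularizers.l2(1e-4)))
model.add(layers.Dense(num_classes, activation="softmax"))

model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
```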
xccheng/mars | [
"8146d1b7d3f3bc2a652c414a336a2f884a06a108",
"8146d1b7d3f3bc2a652c414a336a2f884a06a108",
"8146d1b7d3f3bc2a652c414a336a2f884a06a108"
] | [
"mars/dataframe/groupby/transform.py",
"mars/learn/metrics/tests/integrated/test_ranking.py",
"mars/learn/cluster/tests/integrated/test_distributed_kmeans.py"
] | [
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nfrom ... import opcodes\nfrom ...core import OutputType\nfrom ...custom_log import redirect_custom_log\nfrom ...serialize import BoolField, TupleField, DictField, AnyField, StringField\nfrom ...utils import enter_current_session\nfrom ..operands import DataFrameOperandMixin, DataFrameOperand\nfrom ..utils import build_empty_df, build_empty_series, parse_index\n\n\nclass GroupByTransform(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.TRANSFORM\n _op_module_ = 'dataframe.groupby'\n\n _func = AnyField('func')\n _args = TupleField('args')\n _kwds = DictField('kwds')\n\n _call_agg = BoolField('call_agg')\n\n # for chunk\n _tileable_op_key = StringField('tileable_op_key')\n\n def __init__(self, func=None, args=None, kwds=None, call_agg=None, output_types=None,\n tileable_op_key=None, **kw):\n super().__init__(_func=func, _args=args, _kwds=kwds, _call_agg=call_agg,\n _output_types=output_types, _tileable_op_key=tileable_op_key, **kw)\n\n @property\n def func(self):\n return self._func\n\n @property\n def args(self):\n return getattr(self, '_args', None) or ()\n\n @property\n def kwds(self):\n return getattr(self, '_kwds', None) or dict()\n\n @property\n def call_agg(self):\n return self._call_agg\n\n @property\n def tileable_op_key(self):\n return self._tileable_op_key\n\n def _infer_df_func_returns(self, in_groupby, dtypes, index):\n index_value, output_types, new_dtypes = None, None, None\n\n output_types = [OutputType.dataframe] \\\n if in_groupby.op.output_types[0] == OutputType.dataframe_groupby else [OutputType.series]\n\n try:\n empty_groupby = in_groupby.op.build_mock_groupby()\n with np.errstate(all='ignore'):\n if self.call_agg:\n infer_df = empty_groupby.agg(self.func, *self.args, **self.kwds)\n else:\n infer_df = empty_groupby.transform(self.func, *self.args, **self.kwds)\n\n # todo return proper index when sort=True is implemented\n index_value = parse_index(None, in_groupby.key, self.func)\n\n if isinstance(infer_df, pd.DataFrame):\n output_types = [OutputType.dataframe]\n new_dtypes = new_dtypes or infer_df.dtypes\n else:\n output_types = [OutputType.series]\n new_dtypes = new_dtypes or (infer_df.name, infer_df.dtype)\n except: # noqa: E722 # nosec\n pass\n\n self.output_types = output_types if not self.output_types else self.output_types\n dtypes = new_dtypes if dtypes is None else dtypes\n index_value = index_value if index is None else parse_index(index)\n return dtypes, index_value\n\n def __call__(self, groupby, dtypes=None, index=None):\n in_df = groupby.inputs[0]\n\n dtypes, index_value = self._infer_df_func_returns(groupby, dtypes, index)\n for arg, desc in zip((self.output_types, dtypes, index_value),\n ('output_types', 'dtypes', 'index')):\n if arg is None:\n raise TypeError(f'Cannot determine {desc} by calculating with enumerate data, '\n 'please specify it as arguments')\n\n if self.output_types[0] == 
OutputType.dataframe:\n new_shape = (np.nan if self.call_agg else in_df.shape[0], len(dtypes))\n return self.new_dataframe([groupby], shape=new_shape, dtypes=dtypes, index_value=index_value,\n columns_value=parse_index(dtypes.index, store_data=True))\n else:\n name, dtype = dtypes\n new_shape = (np.nan,) if self.call_agg else groupby.shape\n return self.new_series([groupby], name=name, shape=new_shape, dtype=dtype,\n index_value=index_value)\n\n @classmethod\n def tile(cls, op):\n in_groupby = op.inputs[0]\n out_df = op.outputs[0]\n\n chunks = []\n for c in in_groupby.chunks:\n inp_chunks = [c]\n\n new_op = op.copy().reset_key()\n new_op._tileable_op_key = op.key\n if op.output_types[0] == OutputType.dataframe:\n new_index = c.index if c.ndim == 2 else c.index + (0,)\n chunks.append(new_op.new_chunk(\n inp_chunks, index=new_index, shape=(np.nan, len(out_df.dtypes)), dtypes=out_df.dtypes,\n columns_value=out_df.columns_value, index_value=out_df.index_value))\n else:\n chunks.append(new_op.new_chunk(\n inp_chunks, name=out_df.name, index=(c.index[0],), shape=(np.nan,), dtype=out_df.dtype,\n index_value=out_df.index_value))\n\n new_op = op.copy()\n kw = out_df.params.copy()\n kw['chunks'] = chunks\n if op.output_types[0] == OutputType.dataframe:\n kw['nsplits'] = ((np.nan,) * len(chunks), (len(out_df.dtypes),))\n else:\n kw['nsplits'] = ((np.nan,) * len(chunks),)\n return new_op.new_tileables([in_groupby], **kw)\n\n @classmethod\n @redirect_custom_log\n @enter_current_session\n def execute(cls, ctx, op):\n in_data = ctx[op.inputs[0].key]\n out_chunk = op.outputs[0]\n\n if not in_data:\n if op.output_types[0] == OutputType.dataframe:\n ctx[op.outputs[0].key] = build_empty_df(out_chunk.dtypes)\n else:\n ctx[op.outputs[0].key] = build_empty_series(out_chunk.dtype)\n return\n\n if op.call_agg:\n result = in_data.agg(op.func, *op.args, **op.kwds)\n else:\n result = in_data.transform(op.func, *op.args, **op.kwds)\n\n if result.ndim == 2:\n result = result.astype(op.outputs[0].dtypes, copy=False)\n else:\n result = result.astype(op.outputs[0].dtype, copy=False)\n ctx[op.outputs[0].key] = result\n\n\ndef groupby_transform(groupby, func, *args, dtypes=None, index=None, output_types=None, **kwargs):\n # todo this can be done with sort_index implemented\n if not groupby.op.groupby_params.get('as_index', True):\n raise NotImplementedError('transform when set_index == False is not supported')\n\n call_agg = kwargs.pop('_call_agg', False)\n if not call_agg and isinstance(func, (dict, list)):\n raise TypeError(f'Does not support transform with {type(func)}')\n\n op = GroupByTransform(func=func, args=args, kwds=kwargs, output_types=output_types,\n call_agg=call_agg)\n return op(groupby, dtypes=dtypes, index=index)\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport unittest\n\nimport numpy as np\nimport pandas as pd\ntry:\n import sklearn\n from sklearn.metrics import roc_curve as sklearn_roc_curve, auc as sklearn_auc\nexcept ImportError:\n sklearn = None\n\nfrom mars import dataframe as md\nfrom mars.learn.metrics import roc_curve, auc\nfrom mars.learn.tests.integrated.base import LearnIntegrationTestBase\nfrom mars.session import new_session\n\n\[email protected](sklearn is None, 'sklearn not installed')\[email protected](sys.platform == 'win32', \"plasma don't support windows\")\nclass Test(LearnIntegrationTestBase):\n def testRocCurveAuc(self):\n service_ep = 'http://127.0.0.1:' + self.web_port\n timeout = 120 if 'CI' in os.environ else -1\n with new_session(service_ep) as sess:\n run_kwargs = {'timeout': timeout}\n\n rs = np.random.RandomState(0)\n raw = pd.DataFrame({'a': rs.randint(0, 10, (10,)),\n 'b': rs.rand(10)})\n\n df = md.DataFrame(raw)\n y = df['a'].to_tensor().astype('int')\n pred = df['b'].to_tensor().astype('float')\n fpr, tpr, thresholds = roc_curve(y, pred, pos_label=2,\n session=sess, run_kwargs=run_kwargs)\n m = auc(fpr, tpr, session=sess, run_kwargs=run_kwargs)\n\n sk_fpr, sk_tpr, sk_threshod = sklearn_roc_curve(raw['a'].to_numpy().astype('int'),\n raw['b'].to_numpy().astype('float'),\n pos_label=2)\n expect_m = sklearn_auc(sk_fpr, sk_tpr)\n self.assertAlmostEqual(m.fetch(session=sess), expect_m)\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport unittest\n\nimport numpy as np\ntry:\n from sklearn.cluster import KMeans as SK_KMEANS\n from sklearn.datasets import make_blobs\nexcept ImportError:\n pass\n\nfrom mars import tensor as mt\nfrom mars.learn.cluster import KMeans\nfrom mars.learn.tests.integrated.base import LearnIntegrationTestBase\nfrom mars.session import new_session\n\n\[email protected](KMeans is None, 'scikit-learn not installed')\[email protected](sys.platform == 'win32', 'does not run in windows')\nclass Test(LearnIntegrationTestBase):\n def testDistributedKMeans(self):\n service_ep = 'http://127.0.0.1:' + self.web_port\n timeout = 120 if 'CI' in os.environ else -1\n\n with new_session(service_ep) as sess:\n run_kwargs = {'timeout': timeout}\n\n rnd = np.random.RandomState(0)\n X, _ = make_blobs(random_state=rnd)\n raw = X\n X = mt.tensor(X, chunk_size=50)\n\n km_elkan = KMeans(algorithm='elkan', n_clusters=5,\n random_state=0, n_init=1, tol=1e-4,\n init='k-means++')\n sk_km_elkan = SK_KMEANS(algorithm='elkan', n_clusters=5,\n random_state=0, n_init=1, tol=1e-4,\n init='k-means++')\n\n km_elkan.fit(X, session=sess, run_kwargs=run_kwargs)\n sk_km_elkan.fit(raw)\n\n np.testing.assert_allclose(km_elkan.cluster_centers_, sk_km_elkan.cluster_centers_)\n np.testing.assert_array_equal(km_elkan.labels_, sk_km_elkan.labels_)\n\n self.assertEqual(km_elkan.n_iter_, sk_km_elkan.n_iter_)\n\n with new_session(service_ep) as sess2:\n run_kwargs = {'timeout': timeout}\n\n rnd = np.random.RandomState(0)\n X, _ = make_blobs(random_state=rnd)\n X = mt.tensor(X, chunk_size=50)\n\n kmeans = KMeans(n_clusters=5, random_state=0, n_init=1,\n tol=1e-4, init='k-means||')\n kmeans.fit(X, session=sess2, run_kwargs=run_kwargs)\n"
] | [
[
"numpy.errstate"
],
[
"sklearn.metrics.auc",
"numpy.random.RandomState"
],
[
"sklearn.cluster.KMeans",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose",
"numpy.random.RandomState",
"sklearn.datasets.make_blobs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
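The distinction the `GroupByTransform` operand in the `mars/dataframe/groupby/transform.py` entry above draws between `transform` and `agg` (its `call_agg` flag) maps directly onto plain pandas semantics; the tiny frame and the aggregation function below are illustrative.

```python
# Sketch of the pandas behaviour GroupByTransform above wraps: transform keeps
# the original row count, while agg collapses each group to a single row.
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b", "b"],
                   "val": [1, 2, 3, 4, 5]})
g = df.groupby("key")

transformed = g.transform("mean")   # same shape as the input (call_agg=False path)
aggregated = g.agg("mean")          # one row per group (call_agg=True path)

print(transformed)
print(aggregated)
```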
AyrtonB/ElexonDataPortal | [
"939c811f85dff15d0f7eb164fd1982ba0307192e"
] | [
"ElexonDataPortal/dev/orchestrator.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).\n\n__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',\n 'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',\n 'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',\n 'query_orchestrator']\n\n# Cell\nimport pandas as pd\nfrom tqdm import tqdm\nfrom warnings import warn\nfrom requests.models import Response\n\nfrom . import utils, raw\n\n# Cell\ndef retry_request(raw, method, kwargs, n_attempts=3):\n attempts = 0\n success = False\n\n while (attempts < n_attempts) and (success == False):\n try:\n r = getattr(raw, method)(**kwargs)\n utils.check_status(r)\n success = True\n except Exception as e:\n attempts += 1\n if attempts == n_attempts:\n raise e\n\n return r\n\ndef if_possible_parse_local_datetime(df):\n dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']\n\n dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]\n sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]\n\n if len(dt_cols)==1 and len(sp_cols)==1:\n df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])\n\n return df\n\ndef SP_and_date_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)\n date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]\n\n for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):\n kwargs.update({\n kwargs_map['date']: datetime.strftime('%Y-%m-%d'),\n kwargs_map['SP']: SP,\n })\n\n missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_SP = utils.parse_xml_response(r)\n df = pd.concat([df, df_SP])\n\n df = utils.expand_cols(df)\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef handle_capping(\n r: Response,\n df: pd.DataFrame,\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n end_date: str,\n request_type: str,\n **kwargs\n):\n capping_applied = utils.check_capping(r)\n assert capping_applied != None, 'No information on whether or not capping limits had been breached could be found in the response metadata'\n\n if capping_applied == True: # only subset of date range returned\n dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']\n dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]\n\n if len(dt_cols) == 1:\n start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')\n if 'start_time' in kwargs.keys():\n kwargs['start_time'] = '00:00'\n\n if pd.to_datetime(start_date) >= 
pd.to_datetime(end_date):\n warnings.warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\\nThe `start_date` will be set one day earlier than the `end_date`.')\n start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')\n\n warn(f'Response was capped, request is rerunning for missing data from {start_date}')\n df_rerun = date_range_request(\n method=method,\n kwargs_map=kwargs_map,\n func_params=func_params,\n api_key=api_key,\n start_date=start_date,\n end_date=end_date,\n request_type=request_type,\n **kwargs\n )\n\n df = pd.concat([df, df_rerun])\n df = df.drop_duplicates()\n\n else:\n warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')\n\n return df\n\ndef date_range_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n request_type: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n for kwarg in ['start_time', 'end_time']:\n if kwarg not in kwargs_map.keys():\n kwargs_map[kwarg] = kwarg\n\n kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')\n kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')\n\n if 'SP' in kwargs_map.keys():\n kwargs[kwargs_map['SP']] = '*'\n func_params.remove('SP')\n func_params += [kwargs_map['SP']]\n\n missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n if request_type == 'date_range':\n kwargs.pop(kwargs_map['start_time'])\n kwargs.pop(kwargs_map['end_time'])\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df = utils.parse_xml_response(r)\n df = if_possible_parse_local_datetime(df)\n\n # Handling capping\n df = handle_capping(\n r,\n df,\n method=method,\n kwargs_map=kwargs_map,\n func_params=func_params,\n api_key=api_key,\n end_date=end_date,\n request_type=request_type,\n **kwargs\n )\n\n return df\n\n# Cell\ndef year_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n start_year = int(pd.to_datetime(start_date).strftime('%Y'))\n end_year = int(pd.to_datetime(end_date).strftime('%Y'))\n\n for year in tqdm(range(start_year, end_year+1), desc=stream):\n kwargs.update({kwargs_map['year']: year})\n\n missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef construct_year_month_pairs(start_date, end_date):\n dt_rng 
= pd.date_range(start_date, end_date, freq='M')\n\n if len(dt_rng) == 0:\n year_month_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %b').split(' '))]\n else:\n year_month_pairs = [tuple(dt.strftime('%Y %b').split(' ')) for dt in dt_rng]\n\n year_month_pairs = [(int(year), week.upper()) for year, week in year_month_pairs]\n\n return year_month_pairs\n\ndef year_and_month_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n year_month_pairs = construct_year_month_pairs(start_date, end_date)\n\n for year, month in tqdm(year_month_pairs, desc=stream):\n kwargs.update({\n kwargs_map['year']: year,\n kwargs_map['month']: month\n })\n\n missing_kwargs = list(set(func_params) - set(['year', 'month'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef clean_year_week(year, week):\n year = int(year)\n\n if week == '00':\n year = int(year) - 1\n week = 52\n\n else:\n year = int(year)\n week = int(week.strip('0'))\n\n return year, week\n\ndef construct_year_week_pairs(start_date, end_date):\n dt_rng = pd.date_range(start_date, end_date, freq='W')\n\n if len(dt_rng) == 0:\n year_week_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %W').split(' '))]\n else:\n year_week_pairs = [tuple(dt.strftime('%Y %W').split(' ')) for dt in dt_rng]\n\n year_week_pairs = [clean_year_week(year, week) for year, week in year_week_pairs]\n\n return year_week_pairs\n\ndef year_and_week_request(\n method: str,\n kwargs_map: dict,\n func_params: list,\n api_key: str,\n start_date: str,\n end_date: str,\n n_attempts: int=3,\n **kwargs\n):\n assert start_date is not None, '`start_date` must be specified'\n assert end_date is not None, '`end_date` must be specified'\n\n df = pd.DataFrame()\n stream = '_'.join(method.split('_')[1:])\n\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n year_week_pairs = construct_year_week_pairs(start_date, end_date)\n\n for year, week in tqdm(year_week_pairs, desc=stream):\n kwargs.update({\n kwargs_map['year']: year,\n kwargs_map['week']: week\n })\n\n missing_kwargs = list(set(func_params) - set(['year', 'week'] + list(kwargs.keys())))\n assert len(missing_kwargs) == 0, f\"The following kwargs are missing: {', '.join(missing_kwargs)}\"\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df_year = utils.parse_xml_response(r)\n df = pd.concat([df, df_year])\n\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef non_temporal_request(\n method: str,\n api_key: str,\n n_attempts: int=3,\n **kwargs\n):\n kwargs.update({\n 'APIKey': api_key,\n 'ServiceType': 'xml'\n })\n\n r = retry_request(raw, method, kwargs, n_attempts=n_attempts)\n\n df = utils.parse_xml_response(r)\n df = if_possible_parse_local_datetime(df)\n\n return df\n\n# Cell\ndef query_orchestrator(\n method: str,\n api_key: str,\n request_type: str,\n kwargs_map: dict=None,\n func_params: list=None,\n start_date: 
str=None,\n end_date: str=None,\n n_attempts: int=3,\n **kwargs\n):\n if request_type not in ['non_temporal']:\n kwargs.update({\n 'kwargs_map': kwargs_map,\n 'func_params': func_params,\n 'start_date': start_date,\n 'end_date': end_date,\n })\n\n if request_type in ['date_range', 'date_time_range']:\n kwargs.update({\n 'request_type': request_type,\n })\n\n request_type_to_func = {\n 'SP_and_date': SP_and_date_request,\n 'date_range': date_range_request,\n 'date_time_range': date_range_request,\n 'year': year_request,\n 'year_and_month': year_and_month_request,\n 'year_and_week': year_and_week_request,\n 'non_temporal': non_temporal_request\n }\n\n assert request_type in request_type_to_func.keys(), f\"{request_type} must be one of: {', '.join(request_type_to_func.keys())}\"\n request_func = request_type_to_func[request_type]\n\n df = request_func(\n method=method,\n api_key=api_key,\n n_attempts=n_attempts,\n **kwargs\n )\n\n df = df.reset_index(drop=True)\n\n return df"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
sx14/hierarchical-relationship | [
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a",
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a",
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a",
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a"
] | [
"open_relation/infer/tree_infer2.py",
"lib/roi_data_layer/roidb.py",
"open_relation/eval/proc_ext_fc7.py",
"open_relation/eval/show_det.py"
] | [
"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\n\n\n\n\n\ndef cal_rank_scores(label_num):\n # rank scores [1 - 10]\n # s = a(x - b)^2 + c\n # if rank is 0, score is 10\n # b = num-1\n s_min = 1.0\n s_max = 10.0\n b = label_num - 1\n c = s_min\n a = (s_max - c) / b ** 2\n rank_scores = [0] * label_num\n for r in range(label_num):\n rank_scores[r] = a*(r-b)**2 + c\n return rank_scores\n\n\ndef cal_rank_scores1(n_item):\n s_max = 10\n ranks = np.arange(1, n_item+1).astype(np.float)\n\n s = (np.cos(ranks / n_item * np.pi) + 1) * (s_max * 1.0 / 2)\n return s\n\n\nclass TreeNode:\n def __init__(self, name, index):\n self._rank = -1\n self._name = name\n self._index = index\n self._parents = []\n self._children = []\n\n def __str__(self):\n return '%s[%d]' % (self._name, self._rank)\n\n def add_children(self, child):\n self._children.append(child)\n\n def children(self):\n return self._children\n\n def append_parent(self, parent):\n self._parents.append(parent)\n\n def set_rank(self, rank):\n self._rank = rank\n\n def rank(self):\n return self._rank\n\n def index(self):\n return self._index\n\n def name(self):\n return self._name\n\n\ndef construct_tree(label_hier, ranked_inds):\n ind2node = dict()\n for label in label_hier.get_all_labels():\n hnode = label_hier.get_node_by_name(label)\n tnode = TreeNode(label, hnode.index())\n ind2node[hnode.index()] = tnode\n\n for label in label_hier.get_all_labels():\n hnode = label_hier.get_node_by_name(label)\n tnode = ind2node[hnode.index()]\n hypers = hnode.hypers()\n for hyper in hypers:\n pnode = ind2node[hyper.index()]\n pnode.add_children(tnode)\n tnode.append_parent(pnode)\n\n for r, ind in enumerate(ranked_inds):\n rank = r + 1 # 1 based\n tnode = ind2node[ind]\n tnode.set_rank(rank)\n\n return ind2node\n\n\ndef top_down(tree, label_hier):\n def choose_child(children, parent_rank):\n choice = None\n if len(children) == 1:\n choice = children[0]\n elif len(children) > 1:\n ranked_children = sorted(children, key=lambda c: c.rank())\n r1 = ranked_children[0].rank()\n r2 = ranked_children[1].rank()\n if (r2 - r1) > r1:\n # r1 is confident, and doesn't confuse with r2\n choice = ranked_children[0]\n return choice\n\n # root as default\n root_ind = label_hier.root().index()\n tnode = tree[root_ind]\n while tnode:\n choice = tnode\n tnode = choose_child(tnode.children(), 0)\n return [choice.index(), choice.rank()]\n\n\ndef bottom_up(tree, label_hier, top2_raw, thr):\n node1 = label_hier.get_node_by_index(top2_raw[0][0])\n node2 = label_hier.get_node_by_index(top2_raw[1][0])\n n1_path = node1.trans_hyper_inds()\n n2_path = node2.trans_hyper_inds()\n min_plength = min(len(n1_path), len(n2_path))\n common_path = set(n1_path) & set(n2_path)\n if len(common_path) * 1.0 / min_plength >= thr:\n pred_ind = max(common_path)\n return [pred_ind, tree[pred_ind].rank()]\n else:\n return top2_raw[0]\n\n\ndef my_infer(label_hier, scores, target):\n obj_thr = {'b_u': 0.75,\n 't_d': 0.5,\n 'min_dis': label_hier.label_sum() / 7,\n 'half': label_hier.label_sum() / 3}\n pre_thr = {'b_u': 0.6,\n 't_d': 0.4,\n 'min_dis': 3,\n 'half': 10}\n thr = {'obj': obj_thr,\n 'pre': pre_thr}\n\n threshold = thr[target]\n\n # label_ind 2 rank\n ranked_inds = np.argsort(scores).tolist()\n ranked_inds.reverse() # descending\n\n # top2 raw label as default predictions\n raw_top2 = []\n for r, ind in enumerate(ranked_inds):\n node = label_hier.get_node_by_index(ind)\n if node.is_raw() and len(raw_top2) < 2:\n raw_top2.append([ind, r+1])\n\n # confident part\n half_rank = 
threshold['half']\n if raw_top2[0][1] < half_rank and (raw_top2[1][1] - raw_top2[0][1]) > threshold['min_dis']:\n cands = raw_top2\n elif raw_top2[0][1] < half_rank and (raw_top2[1][1] - raw_top2[0][1]) <= threshold['min_dis']:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [bottom_up(ind2node, label_hier, raw_top2, threshold['b_u']), raw_top2[0]]\n elif raw_top2[0][1] >= half_rank and (raw_top2[1][1] - raw_top2[0][1]) <= threshold['min_dis']:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [bottom_up(ind2node, label_hier, raw_top2, threshold['t_d']), raw_top2[0]]\n if cands[0][0] == raw_top2[0][0]:\n cands = [top_down(ind2node, label_hier), raw_top2[0]]\n else:\n ind2node = construct_tree(label_hier, ranked_inds)\n cands = [top_down(ind2node, label_hier), raw_top2[0]]\n return cands\n\n",
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Transform a roidb into a trainable roidb by adding a bunch of metadata.\"\"\"\n\nimport numpy as np\nfrom lib.fast_rcnn.config import cfg\nimport lib.utils.cython_bbox\n\ndef prepare_roidb(imdb):\n \"\"\"Enrich the imdb's roidb by adding some derived quantities that\n are useful for training. This function precomputes the maximum\n overlap, taken over ground-truth boxes, between each ROI and\n each ground-truth box. The class with maximum overlap is also\n recorded.\n \"\"\"\n roidb = imdb.roidb\n for i in xrange(len(imdb.image_index)):\n roidb[i]['image'] = imdb.image_path_at(i)\n # need gt_overlaps as a dense array for argmax\n gt_overlaps = roidb[i]['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb[i]['max_classes'] = max_classes\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\ndef add_bbox_regression_targets(roidb):\n \"\"\"Add information needed to train bounding-box regressors.\"\"\"\n assert len(roidb) > 0\n assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?'\n\n num_images = len(roidb)\n # Infer number of classes from the number of columns in gt_overlaps\n num_classes = roidb[0]['gt_overlaps'].shape[1]\n for im_i in xrange(num_images):\n rois = roidb[im_i]['boxes']\n max_overlaps = roidb[im_i]['max_overlaps']\n max_classes = roidb[im_i]['max_classes']\n roidb[im_i]['bbox_targets'] = \\\n _compute_targets(rois, max_overlaps, max_classes)\n\n # Compute values needed for means and stds\n # var(x) = E(x^2) - E(x)^2\n class_counts = np.zeros((num_classes, 1)) + cfg.EPS\n sums = np.zeros((num_classes, 4))\n squared_sums = np.zeros((num_classes, 4))\n for im_i in xrange(num_images):\n targets = roidb[im_i]['bbox_targets']\n for cls in xrange(1, num_classes):\n cls_inds = np.where(targets[:, 0] == cls)[0]\n if cls_inds.size > 0:\n class_counts[cls] += cls_inds.size\n sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)\n squared_sums[cls, :] += (targets[cls_inds, 1:] ** 2).sum(axis=0)\n\n means = sums / class_counts\n stds = np.sqrt(squared_sums / class_counts - means ** 2)\n\n # Normalize targets\n for im_i in xrange(num_images):\n targets = roidb[im_i]['bbox_targets']\n for cls in xrange(1, num_classes):\n cls_inds = np.where(targets[:, 0] == cls)[0]\n roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :]\n roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]\n\n # These values will be needed for making predictions\n # (the predicts will need to be unnormalized and uncentered)\n return means.ravel(), stds.ravel()\n\ndef _compute_targets(rois, overlaps, labels):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n # Ensure ROIs are floats\n rois = rois.astype(np.float, copy=False)\n\n # Indices of ground-truth ROIs\n gt_inds = np.where(overlaps == 1)[0]\n # Indices of examples for which we try to make predictions\n ex_inds = 
np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]\n\n    # Get IoU overlap between each ex ROI and gt ROI\n    # (use the fully qualified module name matching the `import lib.utils.cython_bbox` above,\n    # since bare `utils` is not bound by that import)\n    ex_gt_overlaps = lib.utils.cython_bbox.bbox_overlaps(rois[ex_inds, :],\n                                                         rois[gt_inds, :])\n\n    # Find which gt ROI each ex ROI has max overlap with:\n    # this will be the ex ROI's gt target\n    gt_assignment = ex_gt_overlaps.argmax(axis=1)\n    gt_rois = rois[gt_inds[gt_assignment], :]\n    ex_rois = rois[ex_inds, :]\n\n    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + cfg.EPS\n    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + cfg.EPS\n    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths\n    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights\n\n    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + cfg.EPS\n    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + cfg.EPS\n    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths\n    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights\n\n    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths\n    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights\n    targets_dw = np.log(gt_widths / ex_widths)\n    targets_dh = np.log(gt_heights / ex_heights)\n\n    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)\n    targets[ex_inds, 0] = labels[ex_inds]\n    targets[ex_inds, 1] = targets_dx\n    targets[ex_inds, 2] = targets_dy\n    targets[ex_inds, 3] = targets_dw\n    targets[ex_inds, 4] = targets_dh\n    return targets\n",
"import os\nimport sys\nimport cv2\nimport numpy as np\nos.environ['GLOG_minloglevel'] = '3'\nimport caffe\nfrom lib.fast_rcnn.test import im_detect\nfrom open_relation import global_config\n\n\n# load cnn\nos.environ['GLOG_minloglevel'] = '3'\nprototxt = global_config.fast_prototxt_path\ncaffemodel = global_config.fast_caffemodel_path\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\ncnn = caffe.Net(prototxt, caffemodel, caffe.TEST)\n\n\ndef ext_cnn_feat(im, boxes):\n im_detect(cnn, im, boxes)\n fc7s = np.array(cnn.blobs['fc7'].data)\n return fc7s\n\n\n\nargs = sys.argv\nimg_path = args[1]\ntemp_box_name = args[2]\ntemp_fc7_name = args[3]\n\nboxes = np.load(temp_box_name+'.npy')\nim = cv2.imread(img_path)\nfc7 = ext_cnn_feat(im, boxes)\nnp.save(temp_fc7_name, fc7)\n",
"import os\nimport pickle\nimport cv2\nimport numpy as np\nfrom open_relation.dataset.dataset_config import DatasetConfig\nfrom open_relation.dataset.show_box import show_boxes\nfrom open_relation.train.train_config import hyper_params\n\n\ndataset = 'vrd'\ndataset_config = DatasetConfig(dataset)\npre_config = hyper_params[dataset]['predicate']\nobj_config = hyper_params[dataset]['object']\ndet_roidb_path = dataset_config.extra_config['object'].det_box_path\ndet_roidb = pickle.load(open(det_roidb_path))\n\nimg_root = dataset_config.data_config['img_root']\nfor img_id in det_roidb:\n img_path = os.path.join(img_root, img_id+'.jpg')\n im = cv2.imread(img_path)\n dets = det_roidb[img_id]\n dets_temp = np.copy(dets)\n dets_temp[:, 2] = dets[:, 2] - dets[:, 0] # width\n dets_temp[:, 3] = dets[:, 3] - dets[:, 1] # height\n confs = dets[:, 4]\n show_boxes(im, dets_temp[:, :4], confs)"
] | [
[
"numpy.argsort",
"numpy.arange",
"numpy.cos"
],
[
"numpy.log",
"numpy.where",
"numpy.zeros",
"numpy.sqrt"
],
[
"numpy.load",
"numpy.array",
"numpy.save"
],
[
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
honpui/RFCN | [
"c3e24ea9a143e6ba31698dc6031f6681517eaaff"
] | [
"main.py"
] | [
"\"\"\"\nRFCN\n\"\"\"\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision\nimport torch.nn.functional as functional\n\nfrom dataset import SBDClassSeg, MyTestData\nfrom transform import Colorize\nfrom criterion import CrossEntropyLoss2d\nfrom model import RFCN, FCN8s\nfrom myfunc import imsave, tensor2image\nimport MR\n\nimport visdom\nimport numpy as np\nimport argparse\nimport os\nimport gc\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--phase', type=str, default='train', help='train or test')\nparser.add_argument('--param', type=str, default=None, help='path to pre-trained parameters')\nparser.add_argument('--data', type=str, default='./train', help='path to input data')\nparser.add_argument('--out', type=str, default='./out', help='path to output data')\nopt = parser.parse_args()\n\nopt.phase = 'train'\nopt.data = '/media/xyz/Files/data/datasets'\nopt.out = '/media/xyz/Files/data/models/torch/RFCN_pretrain'\nopt.param = '/media/xyz/Files/data/models/torch/RFCN_pretrain/RFCN-epoch-4-step-11354.pth'\n\nprint(opt)\n\nvis = visdom.Visdom()\nwin0 = vis.image(torch.zeros(3, 100, 100))\nwin1 = vis.image(torch.zeros(3, 100, 100))\nwin2 = vis.image(torch.zeros(3, 100, 100))\nwin22 = vis.image(torch.zeros(3, 100, 100))\nwin3 = vis.image(torch.zeros(3, 100, 100))\ncolor_transform = Colorize()\n\"\"\"parameters\"\"\"\niterNum = 30\n\n\"\"\"data loader\"\"\"\n# dataRoot = '/media/xyz/Files/data/datasets'\n# checkRoot = '/media/xyz/Files/fcn8s-deconv'\ndataRoot = opt.data\nif not os.path.exists(opt.out):\n os.mkdir(opt.out)\nif opt.phase == 'train':\n checkRoot = opt.out\n loader = torch.utils.data.DataLoader(\n SBDClassSeg(dataRoot, split='train', transform=True),\n batch_size=1, shuffle=True, num_workers=4, pin_memory=True)\nelse:\n outputRoot = opt.out\n loader = torch.utils.data.DataLoader(\n MyTestData(dataRoot, transform=True),\n batch_size=1, shuffle=True, num_workers=4, pin_memory=True)\n\n\"\"\"nets\"\"\"\nmodel = RFCN()\nif opt.param is None:\n vgg16 = torchvision.models.vgg16(pretrained=True)\n model.copy_params_from_vgg16(vgg16, copy_fc8=False, init_upscore=True)\nelse:\n model.load_state_dict(torch.load(opt.param))\n\ncriterion = CrossEntropyLoss2d()\noptimizer = torch.optim.Adam(model.parameters(), 0.0001, betas=(0.5, 0.999))\n\nmodel = model.cuda()\n\nmr_sal = MR.MR_saliency()\nif opt.phase == 'train':\n \"\"\"train\"\"\"\n for it in range(iterNum):\n epoch_loss = []\n for ib, data in enumerate(loader):\n # prior map\n _img = tensor2image(data[0][0])\n pmap = mr_sal.saliency(_img).astype(float) / 255.0\n pmap = 1.0 - pmap\n pmap = torch.unsqueeze(torch.FloatTensor(pmap), 0)\n pmap = torch.unsqueeze(pmap, 0)\n pmap = Variable(pmap).cuda()\n img = Variable(data[0]).cuda()\n\n # segmentation gt and bg&fg gt\n targets_S = Variable(data[1]).cuda()\n targets_G = torch.LongTensor(1, targets_S.size()[-2], targets_S.size()[-1]).fill_(0)\n targets_G[0][data[1] == 0] == 1\n targets_G = Variable(targets_G).cuda()\n\n model.zero_grad()\n loss = 0\n for ir in range(3):\n outputs = model(torch.cat((img, pmap.detach()), 1)) # detach or not?\n loss_S = criterion(outputs[:, :21, :, :], targets_S)\n loss_G = criterion(outputs[:, -2:, :, :], targets_G)\n _loss = loss_G + loss_S\n _loss.backward()\n loss += _loss.data[0]\n\n # update prior map\n del pmap\n gc.collect()\n pmap = functional.sigmoid(outputs[:, -1, :, :])\n pmap = torch.unsqueeze(pmap, 0)\n\n # visulize\n image = img[0].data.cpu()\n image[0] = image[0] + 122.67891434\n 
image[1] = image[1] + 116.66876762\n image[2] = image[2] + 104.00698793\n title = 'input (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(image, win=win1, env='fcn', opts=dict(title=title))\n title = 'output_c (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(color_transform(outputs[0, :21].cpu().max(0)[1].data),\n win=win2, env='fcn', opts=dict(title=title))\n title = 'output_l (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n bb = functional.sigmoid(outputs[0, -1:].cpu().data)\n vis.image(bb.repeat(3, 1, 1),\n win=win22, env='fcn', opts=dict(title=title))\n title = 'target (epoch: %d, step: %d, recurrent: %d)' % (it, ib, ir)\n vis.image(color_transform(targets_S.cpu().data),\n win=win3, env='fcn', opts=dict(title=title))\n\n del outputs\n gc.collect()\n\n # update the net\n optimizer.step()\n\n # show loss plot in this batch\n epoch_loss.append(loss)\n average = sum(epoch_loss) / len(epoch_loss)\n print('loss: %.4f (epoch: %d, step: %d)' % (loss, it, ib))\n epoch_loss.append(average)\n x = np.arange(1, len(epoch_loss) + 1, 1)\n title = 'loss'\n vis.line(np.array(epoch_loss), x, env='fcn', win=win0,\n opts=dict(title=title))\n\n del img, targets_S, targets_G\n gc.collect()\n\n # save parameters in each iteration\n filename = ('%s/RFCN-epoch-%d-step-%d.pth' \\\n % (checkRoot, it, ib))\n torch.save(model.state_dict(), filename)\n print('save: (epoch: %d, step: %d)' % (it, ib))\nelse:\n for ib, data in enumerate(loader):\n print('testing batch %d' % ib)\n inputs = Variable(data[0]).cuda()\n outputs = model(inputs)\n hhh = color_transform(outputs[0].cpu().max(0)[1].data)\n imsave(os.path.join(outputRoot, data[1][0] + '.png'), hhh)\n"
] | [
[
"torch.load",
"torch.zeros",
"torch.unsqueeze",
"torch.nn.functional.sigmoid",
"torch.FloatTensor",
"numpy.array",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ricklupton/cued_datalogger | [
"dde38d04819782922e757f1eed8e5eb44cbe4f84"
] | [
"cued_datalogger/analysis/sonogram.py"
] | [
"import sys,traceback\n\nfrom cued_datalogger.api.numpy_extensions import to_dB\nfrom cued_datalogger.api.pyqt_extensions import BaseNControl, MatplotlibCanvas\nfrom cued_datalogger.api.pyqtgraph_extensions import ColorMapPlotWidget\nfrom cued_datalogger.api.toolbox import Toolbox\n\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QSlider, QPushButton, QLabel, QSpinBox, QHBoxLayout, QGridLayout\n\nimport numpy as np\n\nimport scipy.signal\n\n\nclass MatplotlibSonogramContourWidget(MatplotlibCanvas):\n \"\"\"A MatplotlibCanvas widget displaying the Sonogram contour plot.\"\"\"\n\n def __init__(self, sonogram_toolbox=None,\n channel=None,\n contour_spacing_dB=None,\n num_contours=None):\n self.sonogram_toolbox = sonogram_toolbox\n self.channel = channel\n self.contour_spacing_dB = contour_spacing_dB\n self.num_contours = num_contours\n\n #self.sonogram_toolbox.num_contours_slider.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.num_contours_spinbox.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.contour_spacing_slider.valueChanged.connect(self.update_plot)\n #self.sonogram_toolbox.contour_spacing_spinbox.valueChanged.connect(self.update_plot)\n\n MatplotlibCanvas.__init__(self, \"Sonogram: Contour Plot\")\n\n self.update_plot()\n\n def update_plot(self):\n \"\"\"Redraw the sonogram on the canvas.\"\"\"\n if self.channel is not None:\n self.F_bins, self.T_bins = np.meshgrid(self.channel.data(\"sonogram_frequency\"),\n self.channel.data(\"sonogram_time\"))\n\n self.axes.clear()\n\n self.update_contour_sequence()\n\n self.axes.contour(self.F_bins, self.T_bins,\n to_dB(np.abs(self.channel.data(\"sonogram\"))),\n self.contour_sequence)\n\n self.axes.set_xlabel('Freq (Hz)')\n self.axes.set_ylabel('Time (s)')\n\n self.axes.set_xlim(self.channel.data(\"sonogram_frequency\").min(),\n self.channel.data(\"sonogram_frequency\").max())\n self.axes.set_ylim(self.channel.data(\"sonogram_time\").min(),\n self.channel.data(\"sonogram_time\").max())\n\n self.draw()\n\n def update_contour_sequence(self):\n \"\"\"Update the array which says where to plot contours, how many etc.\"\"\"\n if self.channel is not None:\n # Create a vector with the right spacing from min to max value\n self.contour_sequence = np.arange(to_dB(np.abs(self.channel.data(\"sonogram\"))).min(),\n to_dB(np.abs(self.channel.data(\"sonogram\"))).max(),\n self.contour_spacing_dB)\n # Take the appropriate number of contours\n self.contour_sequence = self.contour_sequence[-self.num_contours:]\n\n def update_contour_spacing(self, value):\n \"\"\"Slot for updating the plot when the contour spacing is changed.\"\"\"\n self.contour_spacing_dB = value\n self.update_plot()\n\n def update_num_contours(self, value):\n \"\"\"Slot for updating the plot when the number of contours is changed.\"\"\"\n self.num_contours = value\n self.update_plot()\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted.\"\"\"\n # If no channel list is given\n if not selected_channels:\n self.channel = None\n else:\n self.channel = selected_channels[0]\n self.update_plot()\n\n\nclass SonogramDisplayWidget(ColorMapPlotWidget):\n \"\"\"\n The SonogramDisplayWidget is the main display widget for everything in\n the sonogram domain.\n \"\"\"\n def __init__(self, parent=None,\n window_width=256,\n window_overlap_fraction=8,\n contour_spacing_dB=5,\n num_contours=5):\n\n super().__init__(parent)\n self.parent = parent\n\n self.channels = []\n\n 
self.window_width = window_width\n self.window_overlap_fraction = window_overlap_fraction\n self.contour_spacing_dB = contour_spacing_dB\n self.num_contours = num_contours\n\n self.PlotWidget.setLabel('bottom', \"Frequency\", \"Hz\")\n self.PlotWidget.setLabel('left', \"Time\", \"s\")\n\n self.show()\n\n def update_window_width(self, value):\n \"\"\"Slot for updating the plot when the window width is changed.\"\"\"\n self.window_width = value\n self.update_plot()\n\n def update_window_overlap_fraction(self, value):\n \"\"\"Slot for updating the plot when the window overlap fraction is changed.\"\"\"\n self.window_overlap_fraction = value\n self.update_plot()\n\n def update_contour_spacing(self, value):\n \"\"\"Slot for updating the plot when the contour spacing is changed.\"\"\"\n self.contour_spacing_dB = value\n self.update_plot()\n\n def update_num_contours(self, value):\n \"\"\"Slot for updating the plot when the number of contours is changed.\"\"\"\n self.num_contours = value\n self.update_plot()\n\n def calculate_sonogram(self):\n \"\"\"Calculate the sonogram, and store the values in the channel\n (including autogenerated datasets). Sonogram data is in complex form.\"\"\"\n for channel in self.channels:\n if channel.is_dataset(\"time_series\"):\n (frequencies,\n times,\n spectrum) = scipy.signal.spectrogram(channel.data(\"time_series\"),\n channel.metadata(\"sample_rate\"),\n window=scipy.signal.get_window('hann', self.window_width),\n nperseg=self.window_width,\n noverlap=self.window_width // self.window_overlap_fraction,\n return_onesided=False,\n mode = 'complex')\n # SciPy's spectrogram gives the FT transposed, so we need to transpose it back\n spectrum = spectrum.transpose()\n # Scipy calculates all the conjugate spectra/frequencies as well -\n # we only want the positive ones\n frequencies = np.abs(frequencies[:frequencies.size // 2 + 1])\n spectrum = spectrum[:, :spectrum.shape[1] // 2 + 1]\n\n channel.add_dataset(\"sonogram_frequency\", data=frequencies, units=\"Hz\")\n channel.add_dataset(\"sonogram_omega\", data=frequencies*2*np.pi, units=\"rad\")\n channel.add_dataset(\"sonogram_time\", data=times, units=\"s\")\n\n channel.add_dataset(\"sonogram\", data=spectrum, units=None)\n channel.add_dataset(\"sonogram_phase\", data=np.angle(spectrum), units='rad')\n channel.add_dataset(\"sonogram_step\", data=self.window_width // self.window_overlap_fraction, units=None)\n\n def update_plot(self):\n \"\"\"Clear the canvas and replot.\"\"\"\n self.clear()\n if self.channels is not None:\n for channel in self.channels:\n if not channel.is_dataset(\"sonogram\"):\n self.calculate_sonogram()\n self.plot_colormap(channel.data(\"sonogram_frequency\"),\n channel.data(\"sonogram_time\"),\n to_dB(np.abs(channel.data(\"sonogram\"))),\n num_contours=self.num_contours,\n contour_spacing_dB=self.contour_spacing_dB)\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted.\"\"\"\n self.channels = []\n\n if selected_channels:\n self.channels = selected_channels\n\n self.update_plot()\n\n\nclass SonogramToolbox(Toolbox):\n \"\"\"Toolbox containing Sonogram controls.\"\"\"\n\n sig_window_width_changed = pyqtSignal(int)\n sig_window_overlap_fraction_changed = pyqtSignal(int)\n sig_num_contours_changed = pyqtSignal(int)\n sig_contour_spacing_changed = pyqtSignal(int)\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.parent = parent\n\n self.window_width = 256\n self.window_overlap_fraction = 8\n self.num_contours = 5\n 
self.contour_spacing_dB = 5\n\n self.init_ui()\n\n\n def init_ui(self):\n #------------Window width controls------------\n self.window_width_label = QLabel(self)\n self.window_width_label.setText(\"Window width\")\n # Create control\n self.window_width_control = BaseNControl(Qt.Vertical, self)\n self.window_width_control.set_power_range(0, 10)\n self.window_width_control.set_value(self.window_width)\n self.window_width_control.valueChanged.connect(self.sig_window_width_changed)\n\n #------------Window increment controls------------\n self.window_overlap_fraction_label = QLabel(self)\n self.window_overlap_fraction_label.setText(\"Window overlap fraction\")\n # Create control\n self.window_overlap_fraction_control = BaseNControl(Qt.Vertical, self)\n self.window_overlap_fraction_control.set_power_range(0, 6)\n self.window_overlap_fraction_control.set_value(self.window_overlap_fraction)\n self.window_overlap_fraction_control.valueChanged.connect(self.sig_window_overlap_fraction_changed.emit)\n\n #------------Contour spacing controls------------\n self.contour_spacing_label = QLabel(self)\n self.contour_spacing_label.setText(\"Contour spacing\")\n # Create spinbox\n self.contour_spacing_spinbox = QSpinBox(self)\n self.contour_spacing_spinbox.setObjectName(\"contour_spacing_spinbox\")\n self.contour_spacing_spinbox.setRange(1, 12)\n # Create slider\n self.contour_spacing_slider = QSlider(Qt.Vertical, self)\n self.contour_spacing_slider.setObjectName(\"contour_spacing_slider\")\n self.contour_spacing_slider.setRange(1, 12)\n # Connect spinbox and slider together\n self.contour_spacing_spinbox.valueChanged.connect(self.contour_spacing_slider.setValue)\n self.contour_spacing_slider.valueChanged.connect(self.contour_spacing_spinbox.setValue)\n # Set values\n self.contour_spacing_spinbox.setValue(self.contour_spacing_dB)\n self.contour_spacing_slider.setValue(self.contour_spacing_dB)\n # Update screen on change\n self.contour_spacing_slider.valueChanged.connect(self.sig_contour_spacing_changed.emit)\n self.contour_spacing_spinbox.valueChanged.connect(self.sig_contour_spacing_changed.emit)\n\n #------------Num contours controls------------\n self.num_contours_label = QLabel(self)\n self.num_contours_label.setText(\"Num contours\")\n # Create spinbox\n self.num_contours_spinbox = QSpinBox(self)\n self.num_contours_spinbox.setObjectName(\"num_contours_spinbox\")\n self.num_contours_spinbox.setRange(1, 12)\n # Create slider\n self.num_contours_slider = QSlider(Qt.Vertical, self)\n self.num_contours_slider.setObjectName(\"num_contours_slider\")\n self.num_contours_slider.setRange(1, 12)\n # Connect spinbox and slider together\n self.num_contours_spinbox.valueChanged.connect(self.num_contours_slider.setValue)\n self.num_contours_slider.valueChanged.connect(self.num_contours_spinbox.setValue)\n # Set values\n self.num_contours_spinbox.setValue(self.num_contours)\n self.num_contours_slider.setValue(self.num_contours)\n # Update screen on change\n self.num_contours_slider.valueChanged.connect(self.sig_num_contours_changed.emit)\n self.num_contours_spinbox.valueChanged.connect(self.sig_num_contours_changed.emit)\n\n #------------Matplotlib window controls---------\n # Create button\n self.convert_to_contour_btn = QPushButton(\"Show as contour plot\", self)\n self.convert_to_contour_btn.resize(self.convert_to_contour_btn.sizeHint())\n self.convert_to_contour_btn.clicked.connect(self.open_contour_plot)\n\n #------------Layout------------\n # Sonogram controls:\n self.sonogram_controls_tab = QWidget(self)\n\n 
sonogram_controls_layout = QGridLayout()\n sonogram_controls_layout.addWidget(self.window_width_label, 0, 0)\n sonogram_controls_layout.addWidget(self.window_width_control, 1, 0)\n sonogram_controls_layout.addWidget(self.window_overlap_fraction_label, 0, 1)\n sonogram_controls_layout.addWidget(self.window_overlap_fraction_control, 1, 1)\n\n self.sonogram_controls_tab.setLayout(sonogram_controls_layout)\n\n # Plot controls:\n self.plot_controls_tab = QWidget(self)\n\n plot_controls_layout = QGridLayout()\n plot_controls_layout.addWidget(self.contour_spacing_label, 1, 0)\n plot_controls_layout.addWidget(self.contour_spacing_spinbox, 2, 0)\n plot_controls_layout.addWidget(self.contour_spacing_slider, 3, 0)\n plot_controls_layout.addWidget(self.num_contours_label, 1, 1)\n plot_controls_layout.addWidget(self.num_contours_spinbox, 2, 1)\n plot_controls_layout.addWidget(self.num_contours_slider, 3, 1)\n\n self.plot_controls_tab.setLayout(plot_controls_layout)\n\n # Export:\n self.export_tab = QWidget(self)\n\n export_layout = QGridLayout()\n export_layout.addWidget(self.convert_to_contour_btn, 0, 0)\n export_layout.setRowStretch(1,1)\n\n self.export_tab.setLayout(export_layout)\n\n #-------------Add tabs-----------------\n self.addTab(self.plot_controls_tab, \"Plot Controls\")\n self.addTab(self.sonogram_controls_tab, \"Sonogram Controls\")\n self.addTab(self.export_tab, \"Export\")\n\n def open_contour_plot(self):\n if hasattr(self, 'contour_plot'):\n self.contour_plot.close()\n delattr(self, 'contour_plot')\n else:\n self.contour_plot = MatplotlibSonogramContourWidget(channel=self.channel,\n contour_spacing_dB=self.contour_spacing_dB,\n num_contours=self.num_contours)\n self.sig_contour_spacing_changed.connect(self.contour_plot.update_contour_spacing)\n self.sig_num_contours_changed.connect(self.contour_plot.update_num_contours)\n self.contour_plot.show()\n\n def set_selected_channels(self, selected_channels):\n \"\"\"Update which channel is being plotted\"\"\"\n # If no channel list is given\n if not selected_channels:\n self.channel = None\n else:\n self.channel = selected_channels[0]\n print(\"Sonogram channel:\" + self.channel.name)\n\n\n if hasattr(self, 'contour_plot'):\n self.contour_plot.set_selected_channels(selected_channels)\n\ndef func_1(t, w, x, A=4e3):\n \"\"\"A simple decaying sine wave function.\"\"\"\n return A * np.exp((1j*w - x)*t)\n\n\ndef function_generator(t):\n \"\"\"A simple function generator with echoes.\"\"\"\n f1 = func_1(t, 2000*2*np.pi, 2)\n f2 = func_1(t, 500*2*np.pi, 1)\n # Create an echo of one of the functions\n f1[f1.size//2:] += f1[:f1.size//2]\n result = f1 + f2\n return result\n\n\nif __name__ == '__main__':\n duration = 10.0\n t = np.arange(0.0, duration, 1/4096)\n sig = function_generator(t)\n\n app = 0\n\n app = QApplication(sys.argv)\n\n w = QWidget()\n\n hbox = QHBoxLayout()\n w.setLayout(hbox)\n\n toolbox = SonogramToolbox(w)\n displaywidget = SonogramDisplayWidget()\n\n from cued_datalogger.api.channel import Channel\n displaywidget.channel = Channel()\n displaywidget.channel.add_dataset(\"time_series\", data=sig)\n displaywidget.channel.add_dataset(\"time\", data=t)\n\n displaywidget.update_plot()\n\n hbox.addWidget(toolbox)\n hbox.addWidget(displaywidget)\n\n toolbox.contour_spacing_slider.valueChanged.connect(displaywidget.update_contour_spacing)\n toolbox.contour_spacing_spinbox.valueChanged.connect(displaywidget.update_contour_spacing)\n\n toolbox.num_contours_slider.valueChanged.connect(displaywidget.update_num_contours)\n 
toolbox.num_contours_spinbox.valueChanged.connect(displaywidget.update_num_contours)\n\n toolbox.window_overlap_fraction_control.valueChanged.connect(displaywidget.update_window_overlap_fraction)\n\n toolbox.window_width_control.valueChanged.connect(displaywidget.update_window_width)\n\n w.show()\n\n sys.exit(app.exec_())\n\n"
] | [
[
"numpy.arange",
"numpy.angle",
"numpy.exp",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhaojing1995/One-shot_ReID | [
"a109a1aee5ad1036b20ba0779af565c09506469a"
] | [
"tools.py"
] | [
"import numpy as np\n\n\n\nif __name__==\"__main__\":\n b = np.load(\"logs/l_feas/test1.npy\")\n print(b)"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RyanJDick/halite_rl | [
"e6309a24d3d613171ceb6522ddf07fece3815e62"
] | [
"halite_rl/ppo/sample.py"
] | [
"import numpy as np\n\nimport torch\n\nfrom halite_rl.utils import SubProcessWrapper\n\n\nclass EpisodeData():\n def __init__(self):\n self.observations = [] # Observations (states).\n self.actions = [] # Selected actions.\n self.act_log_probs = [] # Log probability of selected action.\n self.value_preds = [] # Value predictions given observation (from critic network).\n self.rewards = [] # Rewards obtained in each step.\n self.step_info = [] # Additional details about the step for logging purposes.\n\ndef sample_batch(models, env_constructor, device, config):\n \"\"\"Sample a batch of environment rollouts.\n\n Parameters:\n -----------\n models : dict[str: nn.Module]\n Dict mapping player_ids to actor-critic NN models.\n config : dict\n Config settings.\n\n Returns:\n --------\n TODO\n\n \"\"\"\n\n # Initialize envs.\n envs = [SubProcessWrapper(env_constructor) for _ in range(config[\"SAMPLE_PARALLEL_ENVS\"])]\n\n player_ids = list(models.keys())\n\n # EpisodeData for in-progress episodes.\n # ep_datas[i][p_id] references the EpisodeData for player p_id in the i'th env.\n ep_datas = [{p_id: None for p_id in player_ids} for _ in envs]\n\n # actions[i][p_id] references the action for player p_id in the i'th env.\n actions = [{p_id: None for p_id in player_ids} for _ in envs]\n\n num_steps = {p_id: 0 for p_id in player_ids}\n\n # final_ep_datas[p_id][i] references the EpisodeData for the i'th episode collected for player p_id.\n final_ep_datas = {p_id: [] for p_id in player_ids}\n\n # While at least one player is below SAMPLE_MIN_NUM_STEPS.\n while np.any(np.array([n for n in num_steps.values()]) < config[\"SAMPLE_MIN_NUM_STEPS\"]):\n # 1. Step all envs asynchronously.\n\n # Keep a record of which envs were 'reset' and which were 'stepped' so that we\n # know what return values to expect when we receive the results asynchronously.\n env_was_reset = []\n for i_env, env in enumerate(envs):\n if not env.call_sync(\"is_in_progress\"):\n env_was_reset.append(True)\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n # If this is not the very first iteration, then save the episode.\n if ep_data is not None:\n # Drop the last observation, as we never acted on it.\n ep_data.observations = ep_data.observations[:len(ep_data.rewards)]\n final_ep_datas[p_id].append(ep_data)\n num_steps[p_id] += len(ep_data.rewards)\n ep_datas[i_env] = {p_id: EpisodeData() for p_id in player_ids}\n env.call_async(\"reset\")\n else:\n env_was_reset.append(False)\n actions = {p_id: ep_datas[i_env][p_id].actions[-1] for p_id in player_ids}\n env.call_async(\"step\", actions)\n\n # 2. Receive results from async env steps.\n\n for i_env, env in enumerate(envs):\n if env_was_reset[i_env]:\n obs = env.get_result()\n for p_id in player_ids:\n ep_datas[i_env][p_id].observations.append(obs[p_id])\n else:\n obs, rewards, dones, step_infos = env.get_result()\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n ep_data.observations.append(obs[p_id])\n ep_data.rewards.append(rewards[p_id])\n # step_infos entry should already exist for this step.\n ep_data.step_info[-1].update(step_infos[p_id])\n\n # 3. 
Sample actions.\n\n player_id_to_state_batch = {p_id: [] for p_id in player_ids}\n for i_env, env in enumerate(envs):\n for p_id in player_ids:\n player_id_to_state_batch[p_id].append(ep_datas[i_env][p_id].observations[-1])\n\n for p_id in player_ids:\n model = models[p_id]\n with torch.no_grad():\n state_batch = np.array(player_id_to_state_batch[p_id])\n state_batch = torch.Tensor(state_batch)\n state_batch = state_batch.to(device)\n ship_act_logits, shipyard_act_logits, value_preds = model(state_batch)\n\n ship_action_dist, shipyard_action_dist = model.get_action_distribution(\n ship_act_logits, shipyard_act_logits, state_batch)\n\n ship_action = ship_action_dist.sample()\n shipyard_action = shipyard_action_dist.sample()\n ship_act_entropy = ship_action_dist.entropy()\n shipyard_act_entropy = shipyard_action_dist.entropy()\n\n action_log_prob = model.action_log_prob(\n ship_action_dist,\n shipyard_action_dist,\n ship_action,\n shipyard_action,\n )\n\n ship_action = ship_action.cpu().detach().numpy()\n shipyard_action = shipyard_action.cpu().detach().numpy()\n action_log_prob = action_log_prob.cpu().detach().numpy()\n value_preds = value_preds.cpu().detach().numpy()\n ship_act_entropy = ship_act_entropy.cpu().detach().numpy()\n shipyard_act_entropy = shipyard_act_entropy.cpu().detach().numpy()\n\n for i_env, env in enumerate(envs):\n if env.call_sync(\"is_in_progress\"):\n ep_data = ep_datas[i_env][p_id]\n ep_data.actions.append((\n ship_action[i_env, ...],\n shipyard_action[i_env, ...],\n ))\n ep_data.act_log_probs.append(action_log_prob[i_env])\n ep_data.value_preds.append(value_preds[i_env])\n # Create step_info entry with info for step that hasn't happend (in env) yet.\n ep_data.step_info.append(\n {\n \"ship_action_dist_entropy\": ship_act_entropy[i_env],\n \"shipyard_action_dist_entropy\": shipyard_act_entropy[i_env],\n }\n )\n\n # Close all envs\n for e in envs:\n e.close()\n\n return final_ep_datas\n"
] | [
[
"numpy.array",
"torch.no_grad",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sethaxen/arviz | [
"422c00b3cc24f3983bea283396bff0195374dcc3",
"422c00b3cc24f3983bea283396bff0195374dcc3"
] | [
"arviz/plots/compareplot.py",
"arviz/plots/khatplot.py"
] | [
"\"\"\"Summary plot for model comparison.\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom .plot_utils import _scale_fig_size\r\n\r\n\r\ndef plot_compare(\r\n comp_df,\r\n insample_dev=True,\r\n plot_standard_error=True,\r\n plot_ic_diff=True,\r\n order_by_rank=True,\r\n figsize=None,\r\n textsize=None,\r\n plot_kwargs=None,\r\n ax=None,\r\n):\r\n \"\"\"\r\n Summary plot for model comparison.\r\n\r\n This plot is in the style of the one used in the book Statistical Rethinking (Chapter 6)\r\n by Richard McElreath.\r\n\r\n Notes\r\n -----\r\n Defaults to comparing Widely Accepted Information Criterion (WAIC) if present in comp_df column,\r\n otherwise compares Leave-one-out (loo)\r\n\r\n\r\n Parameters\r\n ----------\r\n comp_df : pd.DataFrame\r\n Result of the `az.compare()` method\r\n insample_dev : bool, optional\r\n Plot in-sample deviance, that is the value of the information criteria without the\r\n penalization given by the effective number of parameters (pIC). Defaults to True\r\n plot_standard_error : bool, optional\r\n Plot the standard error of the information criteria estimate. Defaults to True\r\n plot_ic_diff : bool, optional\r\n Plot standard error of the difference in information criteria between each model\r\n and the top-ranked model. Defaults to True\r\n order_by_rank : bool\r\n If True (default) ensure the best model is used as reference.\r\n figsize : tuple, optional\r\n If None, size is (6, num of models) inches\r\n textsize: float\r\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\r\n on figsize.\r\n plot_kwargs : dict, optional\r\n Optional arguments for plot elements. Currently accepts 'color_ic',\r\n 'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',\r\n 'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'\r\n ax : axes, optional\r\n Matplotlib axes\r\n\r\n Returns\r\n -------\r\n ax : matplotlib axes\r\n\r\n\r\n Examples\r\n --------\r\n Show default compare plot\r\n\r\n .. plot::\r\n :context: close-figs\r\n\r\n >>> import arviz as az\r\n >>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'),\r\n >>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')})\r\n >>> az.plot_compare(model_compare)\r\n\r\n Plot standard error and information criteria difference only\r\n\r\n .. 
plot::\r\n :context: close-figs\r\n\r\n >>> az.plot_compare(model_compare, insample_dev=False)\r\n\r\n \"\"\"\r\n if figsize is None:\r\n figsize = (6, len(comp_df))\r\n\r\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\r\n\r\n if ax is None:\r\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\r\n\r\n if plot_kwargs is None:\r\n plot_kwargs = {}\r\n\r\n yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1, retstep=True)\r\n yticks_pos[1::2] = yticks_pos[1::2] + step / 2\r\n\r\n yticks_labels = [\"\"] * len(yticks_pos)\r\n\r\n _information_criterion = [\"waic\", \"loo\"]\r\n column_index = [c.lower() for c in comp_df.columns]\r\n for information_criterion in _information_criterion:\r\n if information_criterion in column_index:\r\n break\r\n else:\r\n raise ValueError(\r\n \"comp_df must contain one of the following\"\r\n \" information criterion: {}\".format(_information_criterion)\r\n )\r\n\r\n if order_by_rank:\r\n comp_df.sort_values(by=\"rank\", inplace=True)\r\n\r\n if plot_ic_diff:\r\n yticks_labels[0] = comp_df.index[0]\r\n yticks_labels[2::2] = comp_df.index[1:]\r\n ax.set_yticks(yticks_pos)\r\n ax.errorbar(\r\n x=comp_df[information_criterion].iloc[1:],\r\n y=yticks_pos[1::2],\r\n xerr=comp_df.dse[1:],\r\n color=plot_kwargs.get(\"color_dse\", \"grey\"),\r\n fmt=plot_kwargs.get(\"marker_dse\", \"^\"),\r\n mew=linewidth,\r\n elinewidth=linewidth,\r\n )\r\n\r\n else:\r\n yticks_labels = comp_df.index\r\n ax.set_yticks(yticks_pos[::2])\r\n\r\n if plot_standard_error:\r\n ax.errorbar(\r\n x=comp_df[information_criterion],\r\n y=yticks_pos[::2],\r\n xerr=comp_df.se,\r\n color=plot_kwargs.get(\"color_ic\", \"k\"),\r\n fmt=plot_kwargs.get(\"marker_ic\", \"o\"),\r\n mfc=\"None\",\r\n mew=linewidth,\r\n lw=linewidth,\r\n )\r\n else:\r\n ax.plot(\r\n comp_df[information_criterion],\r\n yticks_pos[::2],\r\n color=plot_kwargs.get(\"color_ic\", \"k\"),\r\n marker=plot_kwargs.get(\"marker_ic\", \"o\"),\r\n mfc=\"None\",\r\n mew=linewidth,\r\n lw=0,\r\n )\r\n\r\n if insample_dev:\r\n ax.plot(\r\n comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\r\n yticks_pos[::2],\r\n color=plot_kwargs.get(\"color_insample_dev\", \"k\"),\r\n marker=plot_kwargs.get(\"marker_insample_dev\", \"o\"),\r\n mew=linewidth,\r\n lw=0,\r\n )\r\n\r\n ax.axvline(\r\n comp_df[information_criterion].iloc[0],\r\n ls=plot_kwargs.get(\"ls_min_ic\", \"--\"),\r\n color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\r\n lw=linewidth,\r\n )\r\n\r\n scale_col = information_criterion + \"_scale\"\r\n if scale_col in comp_df:\r\n scale = comp_df[scale_col].iloc[0].capitalize()\r\n else:\r\n scale = \"Deviance\"\r\n ax.set_xlabel(scale, fontsize=ax_labelsize)\r\n ax.set_yticklabels(yticks_labels)\r\n ax.set_ylim(-1 + step, 0 - step)\r\n ax.tick_params(labelsize=xt_labelsize)\r\n\r\n return ax\r\n",
"\"\"\"Pareto tail indices plot.\"\"\"\nimport warnings\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import to_rgba_array\nimport matplotlib.cm as cm\nimport numpy as np\nfrom xarray import DataArray\n\nfrom .plot_utils import (\n _scale_fig_size,\n get_coords,\n color_from_dim,\n format_coords_as_labels,\n set_xticklabels,\n)\nfrom ..stats import ELPDData\nfrom ..stats.stats_utils import histogram\n\n\ndef plot_khat(\n khats,\n color=None,\n xlabels=False,\n show_bins=False,\n bin_format=\"{1:.1f}%\",\n annotate=False,\n hover_label=False,\n hover_format=\"{1}\",\n figsize=None,\n textsize=None,\n coords=None,\n legend=False,\n markersize=None,\n ax=None,\n hlines_kwargs=None,\n **kwargs\n):\n \"\"\"\n Plot Pareto tail indices.\n\n Parameters\n ----------\n khats : ELPDData cointaining pareto shapes information or array\n Pareto tail indices.\n color : str or array_like, optional\n Colors of the scatter plot, if color is a str all dots will have the same color,\n if it is the size of the observations, each dot will have the specified color,\n otherwise, it will be interpreted as a list of the dims to be used for the color code\n xlabels : bool, optional\n Use coords as xticklabels\n show_bins : bool, optional\n Show the number of khats which fall in each bin.\n bin_format : str, optional\n The string is used as formatting guide calling ``bin_format.format(count, pct)``.\n annotate : bool, optional\n Show the labels of k values larger than 1.\n hover_label : bool, optional\n Show the datapoint label when hovering over it with the mouse. Requires an interactive\n backend.\n hover_format : str, optional\n String used to format the hover label via ``hover_format.format(idx, coord_label)``\n figsize : tuple, optional\n Figure size. If None it will be defined automatically.\n textsize: float, optional\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n coords : mapping, optional\n Coordinates of points to plot. **All** values are used for computation, but only a\n a subset can be plotted for convenience.\n legend : bool, optional\n Include a legend to the plot. Only taken into account when color argument is a dim name.\n markersize: int, optional\n markersize for scatter plot. Defaults to `None` in which case it will\n be chosen based on autoscaling for figsize.\n ax: axes, optional\n Matplotlib axes\n hlines_kwargs: dictionary, optional\n Additional keywords passed to ax.hlines\n kwargs :\n Additional keywords passed to ax.scatter\n\n Returns\n -------\n ax : axes\n Matplotlib axes.\n\n Examples\n --------\n Plot estimated pareto shape parameters showing how many fall in each category.\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> radon = az.load_arviz_data(\"radon\")\n >>> loo_radon = az.loo(radon, pointwise=True)\n >>> az.plot_khat(loo_radon, show_bins=True)\n\n Show xlabels\n\n .. plot::\n :context: close-figs\n\n >>> centered_eight = az.load_arviz_data(\"centered_eight\")\n >>> khats = az.loo(centered_eight, pointwise=True).pareto_k\n >>> az.plot_khat(khats, xlabels=True, annotate=True)\n\n Use coord values to create color mapping\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_khat(loo_radon, color=\"observed_county\", cmap=\"tab20\")\n\n Use custom color scheme\n\n .. plot::\n :context: close-figs\n\n >>> counties = radon.posterior.observed_county.values\n >>> colors = [\n ... \"blue\" if county[-1] in (\"A\", \"N\") else \"green\" for county in counties\n ... 
]\n >>> az.plot_khat(loo_radon, color=colors)\n\n \"\"\"\n if hover_label and mpl.get_backend() not in mpl.rcsetup.interactive_bk:\n hover_label = False\n warnings.warn(\n \"hover labels are only available with interactive backends. To switch to an \"\n \"interactive backend from ipython or jupyter, use `%matplotlib` there should be \"\n \"no need to restart the kernel. For other cases, see \"\n \"https://matplotlib.org/3.1.0/tutorials/introductory/usage.html#backends\",\n UserWarning,\n )\n\n if hlines_kwargs is None:\n hlines_kwargs = {}\n hlines_kwargs.setdefault(\"linestyle\", [\":\", \"-.\", \"--\", \"-\"])\n hlines_kwargs.setdefault(\"alpha\", 0.7)\n hlines_kwargs.setdefault(\"zorder\", -1)\n hlines_kwargs.setdefault(\"color\", \"C1\")\n\n if coords is None:\n coords = {}\n\n if color is None:\n color = \"C0\"\n\n if isinstance(khats, np.ndarray):\n khats = khats.flatten()\n xlabels = False\n legend = False\n dims = []\n else:\n if isinstance(khats, ELPDData):\n khats = khats.pareto_k\n if not isinstance(khats, DataArray):\n raise ValueError(\"Incorrect khat data input. Check the documentation\")\n\n khats = get_coords(khats, coords)\n dims = khats.dims\n\n n_data_points = khats.size\n xdata = np.arange(n_data_points)\n if isinstance(khats, DataArray):\n coord_labels = format_coords_as_labels(khats)\n else:\n coord_labels = xdata.astype(str)\n\n (figsize, ax_labelsize, _, xt_labelsize, linewidth, scaled_markersize) = _scale_fig_size(\n figsize, textsize\n )\n\n if markersize is None:\n markersize = scaled_markersize ** 2 # s in scatter plot mus be markersize square\n # for dots to have the same size\n kwargs.setdefault(\"s\", markersize)\n kwargs.setdefault(\"marker\", \"+\")\n\n if isinstance(color, str):\n if color in dims:\n colors, color_mapping = color_from_dim(khats, color)\n cmap_name = kwargs.get(\"cmap\", plt.rcParams[\"image.cmap\"])\n cmap = getattr(cm, cmap_name)\n rgba_c = cmap(colors)\n else:\n legend = False\n rgba_c = to_rgba_array(np.full(n_data_points, color))\n else:\n legend = False\n try:\n rgba_c = to_rgba_array(color)\n except ValueError:\n cmap_name = kwargs.get(\"cmap\", plt.rcParams[\"image.cmap\"])\n cmap = getattr(cm, cmap_name)\n rgba_c = cmap(color)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=not xlabels)\n else:\n fig = ax.get_figure()\n\n khats = khats if isinstance(khats, np.ndarray) else khats.values.flatten()\n alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)\n rgba_c[:, 3] = alphas\n sc_plot = ax.scatter(xdata, khats, c=rgba_c, **kwargs)\n if annotate:\n idxs = xdata[khats > 1]\n for idx in idxs:\n ax.text(\n idx,\n khats[idx],\n coord_labels[idx],\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n fontsize=0.8 * xt_labelsize,\n )\n\n xmin, xmax = ax.get_xlim()\n if show_bins:\n xmax += n_data_points / 12\n ylims1 = ax.get_ylim()\n ax.hlines([0, 0.5, 0.7, 1], xmin=xmin, xmax=xmax, linewidth=linewidth, **hlines_kwargs)\n ylims2 = ax.get_ylim()\n ymin = min(ylims1[0], ylims2[0])\n ymax = min(ylims1[1], ylims2[1])\n if show_bins:\n bin_edges = np.array([ymin, 0.5, 0.7, 1, ymax])\n bin_edges = bin_edges[(bin_edges >= ymin) & (bin_edges <= ymax)]\n hist, _ = histogram(khats, bin_edges)\n for idx, count in enumerate(hist):\n ax.text(\n (n_data_points - 1 + xmax) / 2,\n np.mean(bin_edges[idx : idx + 2]),\n bin_format.format(count, count / n_data_points * 100),\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n ax.set_ylim(ymin, ymax)\n ax.set_xlim(xmin, xmax)\n\n 
ax.set_xlabel(\"Data Point\", fontsize=ax_labelsize)\n ax.set_ylabel(r\"Shape parameter k\", fontsize=ax_labelsize)\n ax.tick_params(labelsize=xt_labelsize)\n if xlabels:\n set_xticklabels(ax, coord_labels)\n fig.autofmt_xdate()\n fig.tight_layout()\n if legend:\n ncols = len(color_mapping) // 6 + 1\n for label, float_color in color_mapping.items():\n ax.scatter([], [], c=[cmap(float_color)], label=label, **kwargs)\n ax.legend(ncol=ncols, title=color)\n\n if hover_label and mpl.get_backend() in mpl.rcsetup.interactive_bk:\n _make_hover_annotation(fig, ax, sc_plot, coord_labels, rgba_c, hover_format)\n\n return ax\n\n\ndef _make_hover_annotation(fig, ax, sc_plot, coord_labels, rgba_c, hover_format):\n \"\"\"Show data point label when hovering over it with mouse.\"\"\"\n annot = ax.annotate(\n \"\",\n xy=(0, 0),\n xytext=(0, 0),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\", alpha=0.4),\n arrowprops=dict(arrowstyle=\"->\"),\n )\n annot.set_visible(False)\n xmid = np.mean(ax.get_xlim())\n ymid = np.mean(ax.get_ylim())\n offset = 10\n\n def update_annot(ind):\n\n idx = ind[\"ind\"][0]\n pos = sc_plot.get_offsets()[idx]\n annot_text = hover_format.format(idx, coord_labels[idx])\n annot.xy = pos\n annot.set_position(\n (-offset if pos[0] > xmid else offset, -offset if pos[1] > ymid else offset)\n )\n annot.set_text(annot_text)\n annot.get_bbox_patch().set_facecolor(rgba_c[idx])\n annot.set_ha(\"right\" if pos[0] > xmid else \"left\")\n annot.set_va(\"top\" if pos[1] > ymid else \"bottom\")\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc_plot.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.linspace"
],
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.full",
"matplotlib.get_backend",
"numpy.mean",
"numpy.array",
"matplotlib.colors.to_rgba_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
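The arviz record above ends with `_make_hover_annotation`, which wires a scatter plot to a single reusable annotation through the "motion_notify_event" callback. A minimal, self-contained sketch of that hover pattern follows; the data, labels, and callback name are hypothetical, and it only does anything on an interactive matplotlib backend:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x, y = np.arange(10), np.random.rand(10)
labels = [f"point {i}" for i in range(10)]
sc = ax.scatter(x, y)

# One annotation artist is created up front, then moved and toggled on hover.
annot = ax.annotate("", xy=(0, 0), xytext=(10, 10), textcoords="offset points",
                    bbox=dict(boxstyle="round", fc="w", alpha=0.4))
annot.set_visible(False)

def on_move(event):
    if event.inaxes != ax:
        return
    contains, info = sc.contains(event)   # does the scatter contain the cursor?
    if contains:
        idx = info["ind"][0]
        annot.xy = (x[idx], y[idx])
        annot.set_text(labels[idx])
        annot.set_visible(True)
    else:
        annot.set_visible(False)
    fig.canvas.draw_idle()

fig.canvas.mpl_connect("motion_notify_event", on_move)
plt.show()

As in the original helper, reusing a single annotation instead of creating one per point keeps redraws cheap.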
rox38431/EyeJaundice | [
"ee5939d203013cd522fbacdfcb75970bd696c962"
] | [
"interpretability/guided_back_propagation.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 2019/8/4 上午9:45\r\n\r\n@author: mick.yi\r\n\r\n\"\"\"\r\nimport torch\r\nfrom torch import nn\r\nimport numpy as np\r\n\r\n\r\nclass GuidedBackPropagation(object):\r\n\r\n def __init__(self, net):\r\n self.net = net\r\n for (name, module) in self.net.named_modules():\r\n if isinstance(module, nn.ReLU):\r\n module.register_backward_hook(self.backward_hook)\r\n self.net.eval()\r\n\r\n @classmethod\r\n def backward_hook(cls, module, grad_in, grad_out):\r\n \"\"\"\r\n\r\n :param module:\r\n :param grad_in: tuple,长度为1\r\n :param grad_out: tuple,长度为1\r\n :return: tuple(new_grad_in,)\r\n \"\"\"\r\n return torch.clamp(grad_in[0], min=0.0),\r\n\r\n def __call__(self, inputs, index=None):\r\n \"\"\"\r\n\r\n :param inputs: [1,3,H,W]\r\n :param index: class_id\r\n :return:\r\n \"\"\"\r\n self.net.zero_grad()\r\n output = self.net(inputs) # [1,num_classes]\r\n if index is None:\r\n index = np.argmax(output.cpu().data.numpy())\r\n target = output[0][index]\r\n\r\n target.backward()\r\n\r\n return inputs.grad[0] # [3,H,W]\r\n"
] | [
[
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
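The `GuidedBackPropagation` class above clamps the gradient flowing back through every `nn.ReLU` to be non-negative via `register_backward_hook`, so the returned input gradient keeps only positive evidence for the chosen class. A minimal usage sketch, assuming the class is importable from `interpretability.guided_back_propagation` as in the repo layout; the toy network and input shapes are hypothetical:

import torch
from torch import nn
from interpretability.guided_back_propagation import GuidedBackPropagation  # import path assumed

# Tiny stand-in classifier; any model containing nn.ReLU modules works the same way.
net = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(),
    nn.Linear(8, 2),
)

x = torch.rand(1, 3, 64, 64, requires_grad=True)  # input must be a leaf tensor requiring grad
gbp = GuidedBackPropagation(net)
grad = gbp(x)                          # [3, H, W] guided gradients for the argmax class
saliency = grad.abs().max(dim=0)[0]    # collapse channels into a 2-D saliency map
print(saliency.shape)                  # torch.Size([64, 64])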
angiemsu/netharn | [
"728cb40aad299baf62c689430d07b29c67d8cf21",
"728cb40aad299baf62c689430d07b29c67d8cf21"
] | [
"netharn/util/nms/torch_nms.py",
"netharn/util/nms/nms_core.py"
] | [
"import torch\nimport numpy as np\n\n\ndef torch_nms(tlbr, scores, classes=None, thresh=.5, bias=0, fast=False):\n \"\"\"\n Non maximum suppression implemented with pytorch tensors\n\n CURRENTLY NOT WORKING\n\n Args:\n tlbr (Tensor): Bounding boxes of one image in the format (tlbr)\n scores (Tensor): Scores of each box\n classes (Tensor, optional): the classes of each box. If specified nms is applied to each class separately.\n thresh (float): iou threshold\n\n Returns:\n ByteTensor: keep: boolean array indicating which boxes were not pruned.\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import torch\n >>> import numpy as np\n >>> tlbr = torch.FloatTensor(np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> [100, 100, 130, 130],\n >>> [100, 100, 130, 130],\n >>> [100, 100, 130, 130],\n >>> ], dtype=np.float32))\n >>> scores = torch.FloatTensor(np.array([.1, .5, .9, .1, .3, .5, .4]))\n >>> classes = torch.FloatTensor(np.array([0, 0, 0, 0, 0, 0]))\n >>> thresh = .5\n >>> keep = torch_nms(tlbr, scores, classes, thresh)\n >>> bboxes[keep]\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import torch\n >>> import numpy as np\n >>> # Test to check that conflicts are correctly resolved\n >>> tlbr = torch.FloatTensor(np.array([\n >>> [100, 100, 150, 101],\n >>> [120, 100, 180, 101],\n >>> [150, 100, 200, 101],\n >>> ], dtype=np.float32))\n >>> scores = torch.FloatTensor(np.linspace(.8, .9, len(tlbr)))\n >>> classes = None\n >>> thresh = .3\n >>> keep = torch_nms(tlbr, scores, classes, thresh, fast=False)\n >>> bboxes[keep]\n \"\"\"\n if tlbr.numel() == 0:\n return []\n\n # Sort coordinates by descending score\n ordered_scores, order = scores.sort(0, descending=True)\n\n from netharn import util\n boxes = util.Boxes(tlbr[order], 'tlbr')\n ious = boxes.ious(boxes, bias=bias)\n\n # if False:\n # x1, y1, x2, y2 = tlbr[order].split(1, 1)\n\n # # Compute dx and dy between each pair of boxes (these mat contain every pair twice...)\n # dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp_(min=0)\n # dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp_(min=0)\n\n # # Compute iou\n # intersections = dx * dy\n # areas = (x2 - x1) * (y2 - y1)\n # unions = (areas + areas.t()) - intersections\n # ious = intersections / unions\n\n # Filter based on iou (and class)\n conflicting = (ious > thresh).triu(1)\n\n if classes is not None:\n ordered_classes = classes[order]\n same_class = (ordered_classes.unsqueeze(0) == ordered_classes.unsqueeze(1))\n conflicting = (conflicting & same_class)\n # Now we have a 2D matrix where conflicting[i, j] indicates if box[i]\n # conflicts with box[j]. For each box[i] we want to only keep the first\n # one that does not conflict with any other box[j].\n\n # Find out how many conflicts each ordered box has with other boxes that\n # have higher scores than it does. In other words...\n # n_conflicts[i] is the number of conflicts box[i] has with other boxes\n # that have a **higher score** than box[i] does. We will definately\n # keep any box where n_conflicts is 0, but we need to postprocess because\n # we might actually keep some boxes currently marked as conflicted.\n n_conflicts = conflicting.sum(0).byte()\n\n if not fast:\n # It is not enought to simply use all places where there are no\n # conflicts. Say we have boxes A, B, and C, where A conflicts with B,\n # B conflicts with C but A does not conflict with C. The fact that we\n # use A should mean that C is not longer conflicted.\n\n if True:\n # Marginally faster. 
best=618.2 us\n ordered_keep = np.zeros(len(conflicting), dtype=np.uint8)\n supress = np.zeros(len(conflicting), dtype=np.bool)\n for i, row in enumerate(conflicting.cpu().numpy() > 0):\n if not supress[i]:\n ordered_keep[i] = 1\n supress[row] = 1\n ordered_keep = torch.ByteTensor(ordered_keep).to(tlbr.device)\n else:\n # Marginally slower: best=1.382 ms,\n n_conflicts_post = n_conflicts.cpu()\n conflicting = conflicting.cpu()\n\n keep_len = len(n_conflicts_post) - 1\n for i in range(1, keep_len):\n if n_conflicts_post[i] > 0:\n n_conflicts_post -= conflicting[i]\n\n n_conflicts = n_conflicts_post.to(n_conflicts.device)\n ordered_keep = (n_conflicts == 0)\n else:\n # Now we can simply keep any box that has no conflicts.\n ordered_keep = (n_conflicts == 0)\n\n # Unsort, so keep is aligned with input boxes\n keep = ordered_keep.new(*ordered_keep.size())\n keep.scatter_(0, order, ordered_keep)\n return keep\n\n\ndef test_class_torch():\n import numpy as np\n import torch\n import netharn as nh\n import ubelt as ub\n # from netharn.util.nms.torch_nms import torch_nms\n # from netharn.util import non_max_supression\n\n thresh = .5\n\n num = 500\n rng = nh.util.ensure_rng(0)\n cpu_boxes = nh.util.Boxes.random(num, scale=400.0, rng=rng, format='tlbr', tensor=True)\n cpu_tlbr = cpu_boxes.to_tlbr().data\n # cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))\n # make all scores unique to ensure comparability\n cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))\n cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))\n\n tlbr = cpu_boxes.to_tlbr().data.to('cuda')\n scores = cpu_scores.to('cuda')\n classes = cpu_cls.to('cuda')\n\n keep1 = []\n for idxs in ub.group_items(range(len(classes)), classes.cpu().numpy()).values():\n # cls_tlbr = tlbr.take(idxs, axis=0)\n # cls_scores = scores.take(idxs, axis=0)\n cls_tlbr = tlbr[idxs]\n cls_scores = scores[idxs]\n cls_keep = torch_nms(cls_tlbr, cls_scores, thresh=thresh, bias=0)\n keep1.extend(list(ub.compress(idxs, cls_keep.cpu().numpy())))\n keep1 = sorted(keep1)\n\n keep_ = torch_nms(tlbr, scores, classes=classes, thresh=thresh, bias=0)\n keep2 = np.where(keep_.cpu().numpy())[0].tolist()\n\n keep3 = nh.util.non_max_supression(tlbr.cpu().numpy(),\n scores.cpu().numpy(),\n classes=classes.cpu().numpy(),\n thresh=thresh, bias=0, impl='gpu')\n\n print(len(keep1))\n print(len(keep2))\n print(len(keep3))\n\n print(set(keep1) - set(keep2))\n print(set(keep2) - set(keep1))\n\n\ndef _benchmark():\n \"\"\"\n python -m netharn.util.nms.torch_nms _benchmark --show\n\n SeeAlso:\n PJR Darknet NonMax supression\n https://github.com/pjreddie/darknet/blob/master/src/box.c\n\n Lightnet NMS\n https://gitlab.com/EAVISE/lightnet/blob/master/lightnet/data/transform/_postprocess.py#L116\n\n \"\"\"\n import torch\n import numpy as np\n import netharn as nh\n from netharn.util.nms.torch_nms import torch_nms\n from netharn.util import non_max_supression\n import ubelt as ub\n import itertools as it\n\n N = 100\n bestof = 10\n\n ydata = ub.ddict(list)\n # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, 2000]\n\n # max number of boxes yolo will spit out at a time\n max_boxes = 19 * 19 * 5\n\n xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, max_boxes]\n # xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500]\n xdata = [10, 100, 500]\n\n rng = nh.util.ensure_rng(0)\n\n thresh = 0.5\n\n for num in xdata:\n print('\\n\\n---- number of boxes = {} ----\\n'.format(num))\n\n outputs = {}\n\n # Build random test boxes and scores\n 
cpu_boxes = nh.util.Boxes.random(num, scale=10.0, rng=rng, format='tlbr', tensor=True)\n cpu_tlbr = cpu_boxes.to_tlbr().data\n # cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))\n # make all scores unique to ensure comparability\n cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))\n cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))\n\n # Format boxes in lightnet format\n cpu_ln_boxes = torch.cat([cpu_boxes.to_cxywh().data, cpu_scores[:, None], cpu_cls.float()[:, None]], dim=-1)\n\n # Move boxes to numpy\n np_tlbr = cpu_tlbr.numpy()\n np_scores = cpu_scores.numpy()\n np_cls = cpu_cls.numpy() # NOQA\n\n gpu = torch.device('cuda', 0)\n\n measure_gpu = torch.cuda.is_available()\n measure_cpu = False or not torch.cuda.is_available()\n\n def _ln_output_to_keep(ln_output, ln_boxes):\n keep = []\n for row in ln_output:\n # Find the index that we kept\n idxs = np.where(np.all(np.isclose(ln_boxes, row), axis=1))[0]\n assert len(idxs) == 1\n keep.append(idxs[0])\n assert np.all(np.isclose(ln_boxes[keep], ln_output))\n return keep\n\n if measure_gpu:\n # Move boxes to the GPU\n gpu_tlbr = cpu_tlbr.to(gpu)\n gpu_scores = cpu_scores.to(gpu)\n gpu_cls = cpu_cls.to(gpu) # NOQA\n gpu_ln_boxes = cpu_ln_boxes.to(gpu)\n\n t1 = ub.Timerit(N, bestof=bestof, label='torch(gpu)')\n for timer in t1:\n with timer:\n keep = torch_nms(gpu_tlbr, gpu_scores, thresh=thresh)\n torch.cuda.synchronize()\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = np.where(keep.cpu().numpy())[0]\n\n t1 = ub.Timerit(N, bestof=bestof, label='cython(gpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='gpu')\n torch.cuda.synchronize()\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n from lightnet.data.transform._postprocess import NonMaxSupression\n t1 = ub.Timerit(N, bestof=bestof, label='lightnet-slow(gpu)')\n for timer in t1:\n with timer:\n ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=False)\n torch.cuda.synchronize()\n # convert lightnet NMS output to keep for consistency\n keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n if False:\n t1 = ub.Timerit(N, bestof=bestof, label='lightnet-fast(gpu)')\n for timer in t1:\n with timer:\n ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=True)\n torch.cuda.synchronize()\n # convert lightnet NMS output to keep for consistency\n keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n if measure_cpu:\n t1 = ub.Timerit(N, bestof=bestof, label='torch(cpu)')\n for timer in t1:\n with timer:\n keep = torch_nms(cpu_tlbr, cpu_scores, thresh=thresh)\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = np.where(keep.cpu().numpy())[0]\n\n if True:\n t1 = ub.Timerit(N, bestof=bestof, label='cython(cpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='cpu')\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n t1 = ub.Timerit(N, bestof=bestof, label='numpy(cpu)')\n for timer in t1:\n with timer:\n keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='py')\n ydata[t1.label].append(t1.min())\n outputs[t1.label] = sorted(keep)\n\n # Check that all kept boxes do not have more than `threshold` ious\n for key, idxs in outputs.items():\n ious = nh.util.box_ious(np_tlbr[idxs], np_tlbr[idxs])\n 
max_iou = (np.tril(ious) - np.eye(len(ious))).max()\n if max_iou > thresh:\n print('{} produced a bad result with max_iou={}'.format(key, max_iou))\n\n # Check result consistency:\n print('\\nResult stats:')\n for key in sorted(outputs.keys()):\n print(' * {:<20}: num={}'.format(key, len(outputs[key])))\n\n print('\\nResult overlaps (method1, method2: jaccard):')\n datas = []\n for k1, k2 in it.combinations(sorted(outputs.keys()), 2):\n idxs1 = set(outputs[k1])\n idxs2 = set(outputs[k2])\n jaccard = len(idxs1 & idxs2) / len(idxs1 | idxs2)\n datas.append((k1, k2, jaccard))\n datas = sorted(datas, key=lambda x: -x[2])\n for k1, k2, jaccard in datas:\n print(' * {:<20}, {:<20}: {:0.4f}'.format(k1, k2, jaccard))\n\n nh.util.mplutil.autompl()\n nh.util.mplutil.multi_plot(xdata, ydata, xlabel='num boxes', ylabel='seconds')\n nh.util.show_if_requested()\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m netharn.util.nms.torch_nms all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n",
"import torch\nimport numpy as np\nimport ubelt as ub\nfrom netharn.util.nms import py_nms\nfrom netharn.util import profiler\nfrom netharn.util.nms import torch_nms\nimport warnings\n\n_impls = {}\n_impls['py'] = py_nms.py_nms\n_impls['torch'] = torch_nms.torch_nms\n_automode = 'py'\ntry:\n from netharn.util.nms import cpu_nms\n _impls['cpu'] = cpu_nms.cpu_nms\n _automode = 'cpu'\nexcept Exception:\n warnings.warn('cpu_nms is not available')\ntry:\n if torch.cuda.is_available():\n from netharn.util.nms import gpu_nms\n _impls['gpu'] = gpu_nms.gpu_nms\n _automode = 'gpu'\nexcept Exception:\n warnings.warn('gpu_nms is not available')\n\n\[email protected]\ndef non_max_supression(tlbr, scores, thresh, bias=0.0, classes=None,\n impl='auto'):\n \"\"\"\n Non-Maximum Suppression\n\n Args:\n tlbr (ndarray): Nx4 boxes in tlbr format\n scores (ndarray): score for each bbox\n thresh (float): iou threshold\n bias (float): bias for iou computation either 0 or 1\n (hint: choosing 1 is wrong computer vision community)\n classes (ndarray or None): integer classes. If specified NMS is done\n on a perclass basis.\n impl (str): implementation can be auto, python, cpu, or gpu\n\n\n CommandLine:\n python ~/code/netharn/netharn/util/nms/nms_core.py nms\n python ~/code/netharn/netharn/util/nms/nms_core.py nms:0\n python ~/code/netharn/netharn/util/nms/nms_core.py nms:1\n\n References:\n https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx\n https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n https://github.com/bharatsingh430/soft-nms/blob/master/lib/nms/cpu_nms.pyx <- TODO\n\n Example:\n >>> dets = np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> ], dtype=np.float32)\n >>> scores = np.array([.1, .5, .9, .1])\n >>> thresh = .5\n >>> keep = non_max_supression(dets, scores, thresh, impl='py')\n >>> print('keep = {!r}'.format(keep))\n keep = [2, 1, 3]\n\n Example:\n >>> import ubelt as ub\n >>> dets = np.array([\n >>> [0, 0, 100, 100],\n >>> [100, 100, 10, 10],\n >>> [10, 10, 100, 100],\n >>> [50, 50, 100, 100],\n >>> [100, 100, 150, 101],\n >>> [120, 100, 180, 101],\n >>> [150, 100, 200, 101],\n >>> ], dtype=np.float32)\n >>> scores = np.linspace(0, 1, len(dets))\n >>> thresh = .2\n >>> solutions = {}\n >>> for impl in _impls:\n >>> solutions[impl] = sorted(non_max_supression(dets, scores, thresh, impl=impl))\n >>> print('solutions = {}'.format(ub.repr2(solutions, nl=1)))\n >>> assert ub.allsame(solutions.values())\n \"\"\"\n if tlbr.shape[0] == 0:\n return []\n\n if impl == 'auto':\n impl = _automode\n\n if classes is not None:\n keep = []\n for idxs in ub.group_items(range(len(classes)), classes).values():\n # cls_tlbr = tlbr.take(idxs, axis=0)\n # cls_scores = scores.take(idxs, axis=0)\n cls_tlbr = tlbr[idxs]\n cls_scores = scores[idxs]\n cls_keep = non_max_supression(cls_tlbr, cls_scores, thresh=thresh,\n bias=bias, impl=impl)\n keep.extend(list(ub.take(idxs, cls_keep)))\n return keep\n else:\n if impl == 'py':\n keep = py_nms.py_nms(tlbr, scores, thresh, bias=float(bias))\n elif impl == 'torch':\n was_tensor = torch.is_tensor(tlbr)\n if not was_tensor:\n tlbr = torch.Tensor(tlbr)\n scores = torch.Tensor(scores)\n flags = torch_nms.torch_nms(tlbr, scores, thresh=thresh,\n bias=float(bias))\n keep = np.where(flags.cpu().numpy())[0]\n else:\n # TODO: it would be nice to be able to pass torch tensors here\n nms = _impls[impl]\n tlbr = tlbr.astype(np.float32)\n scores = 
scores.astype(np.float32)\n # dets = np.hstack((tlbr, scores[:, None])).astype(np.float32)\n if impl == 'gpu':\n # HACK: we should parameterize which device is used\n device = torch.cuda.current_device()\n keep = nms(tlbr, scores, thresh, bias=float(bias), device_id=device)\n else:\n keep = nms(tlbr, scores, thresh, bias=float(bias))\n return keep\n\n\n# TODO: soft nms\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m netharn.util.nms.nms_core all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n"
] | [
[
"torch.ByteTensor",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.device",
"numpy.tril",
"numpy.isclose"
],
[
"torch.Tensor",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.cuda.current_device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
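The netharn record above implements NMS both with pytorch tensor ops (`torch_nms`) and through the dispatching `non_max_supression` front end. For reference, a minimal greedy NMS in plain NumPy (bias=0 area convention; a sketch, not the netharn implementation) against which the `impl='py'` doctest behaviour can be sanity-checked:

import numpy as np

def nms_reference(tlbr, scores, thresh):
    """Greedy NMS over boxes in (x1, y1, x2, y2) format, bias=0 area convention."""
    x1, y1, x2, y2 = tlbr[:, 0], tlbr[:, 1], tlbr[:, 2], tlbr[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]          # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # intersection of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]    # drop boxes that overlap the kept box too much
    return keep

boxes = np.array([[0, 0, 100, 100],
                  [10, 10, 100, 100],
                  [50, 50, 100, 100]], dtype=np.float32)
scores = np.array([0.1, 0.9, 0.1], dtype=np.float32)
print(nms_reference(boxes, scores, thresh=0.5))  # [1, 2]: box 0 is suppressed by box 1

The result lists indices in descending score order with heavily overlapping lower-scored boxes removed, which is the contract the cython/gpu/torch backends in the record are all expected to satisfy.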
eino/pyvista | [
"b9c4e67d43491958f70b04cd2664965b938910ba",
"b9c4e67d43491958f70b04cd2664965b938910ba",
"b9c4e67d43491958f70b04cd2664965b938910ba"
] | [
"examples/00-load/create-explicit-structured-grid.py",
"examples/00-load/create-tri-surface.py",
"examples/02-plot/gif.py"
] | [
"\"\"\"\n.. _ref_create_explicit_structured_grid:\n\nCreating an Explicit Structured Grid\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCreate an explicit structured grid from NumPy arrays.\n\nNote this feature is only available for ``vtk>=9``.\n\n\"\"\"\n\nimport numpy as np\n\nimport pyvista as pv\n\nni, nj, nk = 4, 5, 6\nsi, sj, sk = 20, 10, 1\n\nxcorn = np.arange(0, (ni + 1) * si, si)\nxcorn = np.repeat(xcorn, 2)\nxcorn = xcorn[1:-1]\nxcorn = np.tile(xcorn, 4 * nj * nk)\n\nycorn = np.arange(0, (nj + 1) * sj, sj)\nycorn = np.repeat(ycorn, 2)\nycorn = ycorn[1:-1]\nycorn = np.tile(ycorn, (2 * ni, 2 * nk))\nycorn = np.transpose(ycorn)\nycorn = ycorn.flatten()\n\nzcorn = np.arange(0, (nk + 1) * sk, sk)\nzcorn = np.repeat(zcorn, 2)\nzcorn = zcorn[1:-1]\nzcorn = np.repeat(zcorn, (4 * ni * nj))\n\ncorners = np.stack((xcorn, ycorn, zcorn))\ncorners = corners.transpose()\n\nif pv._vtk.VTK9:\n dims = np.asarray((ni, nj, nk)) + 1\n grid = pv.ExplicitStructuredGrid(dims, corners)\n grid = grid.compute_connectivity()\n grid.plot(show_edges=True)\n",
"\"\"\"\n.. _triangulated_surface:\n\nCreate Triangulated Surface\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCreate a surface from a set of points through a Delaunay triangulation.\n\"\"\"\n\nimport numpy as np\n\n# sphinx_gallery_thumbnail_number = 2\nimport pyvista as pv\n\n###############################################################################\n# Simple Triangulations\n# +++++++++++++++++++++\n#\n# First, create some points for the surface.\n\n# Define a simple Gaussian surface\nn = 20\nx = np.linspace(-200, 200, num=n) + np.random.uniform(-5, 5, size=n)\ny = np.linspace(-200, 200, num=n) + np.random.uniform(-5, 5, size=n)\nxx, yy = np.meshgrid(x, y)\nA, b = 100, 100\nzz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))\n\n# Get the points as a 2D NumPy array (N by 3)\npoints = np.c_[xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)]\npoints[0:5, :]\n\n###############################################################################\n# Now use those points to create a point cloud PyVista data object. This will\n# be encompassed in a :class:`pyvista.PolyData` object.\n\n# simply pass the numpy points to the PolyData constructor\ncloud = pv.PolyData(points)\ncloud.plot(point_size=15)\n\n###############################################################################\n# Now that we have a PyVista data structure of the points, we can perform a\n# triangulation to turn those boring discrete points into a connected surface.\n\nsurf = cloud.delaunay_2d()\nsurf.plot(show_edges=True)\n\n\n###############################################################################\n# Masked Triangulations\n# +++++++++++++++++++++\n#\n\nx = np.arange(10, dtype=float)\nxx, yy, zz = np.meshgrid(x, x, [0])\npoints = np.column_stack((xx.ravel(order=\"F\"), yy.ravel(order=\"F\"), zz.ravel(order=\"F\")))\n# Perturb the points\npoints[:, 0] += np.random.rand(len(points)) * 0.3\npoints[:, 1] += np.random.rand(len(points)) * 0.3\n# Create the point cloud mesh to triangulate from the coordinates\ncloud = pv.PolyData(points)\ncloud\n\n###############################################################################\n# Run the triangulation on these points\nsurf = cloud.delaunay_2d()\nsurf.plot(cpos=\"xy\", show_edges=True)\n\n\n###############################################################################\n# Note that some of the outer edges are unconstrained and the triangulation\n# added unwanted triangles. We can mitigate that with the ``alpha`` parameter.\nsurf = cloud.delaunay_2d(alpha=1.0)\nsurf.plot(cpos=\"xy\", show_edges=True)\n\n\n###############################################################################\n# We could also add a polygon to ignore during the triangulation via the\n# ``edge_source`` parameter.\n\n# Define a polygonal hole with a clockwise polygon\nids = [22, 23, 24, 25, 35, 45, 44, 43, 42, 32]\n\n# Create a polydata to store the boundary\npolygon = pv.PolyData()\n# Make sure it has the same points as the mesh being triangulated\npolygon.points = points\n# But only has faces in regions to ignore\npolygon.faces = np.insert(ids, 0, len(ids))\n\nsurf = cloud.delaunay_2d(alpha=1.0, edge_source=polygon)\n\np = pv.Plotter()\np.add_mesh(surf, show_edges=True)\np.add_mesh(polygon, color=\"red\", opacity=0.5)\np.show(cpos=\"xy\")\n",
"\"\"\"\n.. _gif_movie_example:\n\nCreate a GIF Movie\n~~~~~~~~~~~~~~~~~~\nGenerate a moving gif from an active plotter.\n\n.. note::\n Use ``lighting=False`` to reduce the size of the color space to avoid\n \"jittery\" GIFs, especially for the scalar bar.\n\n\"\"\"\n\nimport numpy as np\n\nimport pyvista as pv\n\nx = np.arange(-10, 10, 0.5)\ny = np.arange(-10, 10, 0.5)\nx, y = np.meshgrid(x, y)\nr = np.sqrt(x**2 + y**2)\nz = np.sin(r)\n\n# Create and structured surface\ngrid = pv.StructuredGrid(x, y, z)\n\n# Create a plotter object and set the scalars to the Z height\nplotter = pv.Plotter(notebook=False, off_screen=True)\nplotter.add_mesh(\n grid,\n scalars=z.ravel(),\n lighting=False,\n show_edges=True,\n scalar_bar_args={\"title\": \"Height\"},\n clim=[-1, 1],\n)\n\n# Open a gif\nplotter.open_gif(\"wave.gif\")\n\npts = grid.points.copy()\n\n# Update Z and write a frame for each updated position\nnframe = 15\nfor phase in np.linspace(0, 2 * np.pi, nframe + 1)[:nframe]:\n z = np.sin(r + phase)\n pts[:, -1] = z.ravel()\n plotter.update_coordinates(pts, render=False)\n plotter.update_scalars(z.ravel(), render=False)\n\n # Write a frame. This triggers a render.\n plotter.write_frame()\n\n# Closes and finalizes movie\nplotter.close()\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.tile",
"numpy.stack",
"numpy.transpose",
"numpy.repeat"
],
[
"numpy.meshgrid",
"numpy.linspace",
"numpy.arange",
"numpy.random.uniform",
"numpy.exp"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.arange",
"numpy.sin",
"numpy.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TiKeil/Trust-region-TSRBLOD-code | [
"70fb396aa07b57028771e3e6e424ab3d1ace10f0"
] | [
"scripts/plot_mu_d.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n#\n# ~~~\n# This file is part of the paper:\n#\n# \"A relaxed localized trust-region reduced basis approach for\n# optimization of multiscale problems\"\n#\n# by: Tim Keil and Mario Ohlberger\n#\n# https://github.com/TiKeil/Trust-region-TSRBLOD-code\n#\n# Copyright 2019-2022 all developers. All rights reserved.\n# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n# Authors:\n# Tim Keil (2022)\n# ~~~\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom pymor.core.logger import set_log_levels\nfrom pymor.core.defaults import set_defaults\nfrom pymor.core.cache import disable_caching\nfrom pdeopt.tools import print_iterations_and_walltime\nset_log_levels({'pymor': 'ERROR',\n 'notebook': 'INFO'})\n\ndef prepare_kernels():\n set_log_levels({'pymor': 'WARN'})\n set_defaults({\"pymor.algorithms.gram_schmidt.gram_schmidt.rtol\": 1e-4})# <-- very important for the estimator\n set_defaults({\"pymor.algorithms.gram_schmidt.gram_schmidt.check\": False})\n disable_caching()\n\nuse_pool = True\nif use_pool:\n from pymor.parallel.mpi import MPIPool\n pool = MPIPool()\n # store_in_tmp = '/scratch/tmp/t_keil02/lrblod/tmp'\n store_in_tmp = 'tmp'\nelse:\n from pymor.parallel.dummy import DummyPool\n pool = DummyPool()\n store_in_tmp = False\npool.apply(prepare_kernels)\nprint_on_ranks = True\n\n'''\n Variables for the experiment and discretization\n'''\n\ncoarse_elements = 20\nn = 1200\ndiameter = np.sqrt(2)/n\n\ntwo_scale_estimator_for_RBLOD = False\nsave_correctors = False\n\nuse_FEM = True\n#use_FEM = False\nuse_fine_mesh = True\n#use_fine_mesh = False\n\n# skip_estimator = False\nskip_estimator = True\n\nadd_error_residual = True\n# add_error_residual = False\n\nfrom pdeopt.problems import large_thermal_block\nfrom pdeopt.discretizer import discretize_quadratic_NCD_pdeopt_stationary_cg\nfrom pdeopt.discretize_gridlod import (discretize_gridlod, discretize_quadratic_pdeopt_with_gridlod)\n\nhigh_conductivity, low_conductivity, min_diffusivity, rhs_value = 4., 1.2, 1., 10.\nfirst_factor, second_factor = 4, 8\n\nprint(f'\\nVARIABLES: \\n'\n f'Coarse elements: {coarse_elements} x {coarse_elements}\\n'\n f'Fine elements: {n} x {n}\\n'\n f'high_c/low_c/min_c: {high_conductivity}/{low_conductivity}/{min_diffusivity}\\n'\n f'rhs/f_1/f_2: {rhs_value}/{first_factor}/{second_factor}\\n')\n\nglobal_problem, world, local_problem_constructer, f, aFines, f_fine = \\\n large_thermal_block(diameter, coarse_elements, blocks=(4, 4), plot=False, return_fine=use_FEM,\n high_conductivity=high_conductivity, low_conductivity=low_conductivity, rhs_value=rhs_value,\n first_factor=first_factor, second_factor=second_factor, min_diffusivity=min_diffusivity)\ndomain_of_interest = None\n\nproblem = global_problem\n\nmu_d = global_problem.parameter_space.sample_randomly(1, seed=23)[0]\nmu_d_array = mu_d.to_numpy()\n\nfor i in [3,4,6,7,8,9,11,14]:\n mu_d_array[i] = high_conductivity\nfor i in [3,4,5,6]:\n mu_d_array[i+25] = low_conductivity\n\nmu_d = mu_d.parameters.parse(mu_d_array)\nnorm_mu_d = np.linalg.norm(mu_d_array)\n# mu_d = None\n\n'''\n Some plotting\n'''\n\n#### plotting\nfrom pdeopt.gridlod_model import construct_aFine_from_mu\nfrom perturbations_for_2d_data import visualize\n\nvis_mu_block_1_array = mu_d_array.copy()\nvis_mu_block_2_array = mu_d_array.copy()\nfor i in range(0,len(mu_d_array),2):\n vis_mu_block_1_array[i] = 0\n vis_mu_block_2_array[i+1] = 0\nvis_mu_block_1 = 
mu_d.parameters.parse(vis_mu_block_1_array)\nvis_mu_block_2 = mu_d.parameters.parse(vis_mu_block_2_array)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, mu_d)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, vis_mu_block_1)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.figure()\naFine = construct_aFine_from_mu(aFines, global_problem.diffusion.coefficients, vis_mu_block_2)\nvisualize.drawCoefficient_origin(np.array([n, n]), aFine, colorbar_font_size=10, logNorm=False)\n\nplt.show()\n\n"
] | [
[
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liang324/wrs | [
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa",
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa",
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa",
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa",
"46eadec355c61a9c7bac1fa0f3cf419b2aac19aa"
] | [
"basis/trimesh_new/resources/helpers/id_helper.py",
"motion/trajectory/polynomial_wrsold.py",
"robot_sim/_kinematics/jlchain_mesh.py",
"0000_students_work/2021tro/gaussian_surface_bug/video_utils.py",
"modeling/_ode_cdhelper.py"
] | [
"\"\"\"\nfeatures.py\n---------------\n\nIn trimesh.comparison, we arbitrarily threshold identifier values\nat a certain number of significant figures.\n\nThis file permutates meshes around and observes how their identifier,\nwhich is supposed to be pretty invariant to translation and tessellation\nchanges. We use this to generate the arbitrary sigfig thresholds.\n\"\"\"\n\nimport numpy as np\nimport trimesh\n\nimport time\nimport json\nimport os\n\nimport collections\nimport logging\n\nTOL_ZERO = 1e-12\n\n\ndef permutations(mesh,\n function=lambda x: x.identifier,\n displacement_max=1e-8,\n count=1000,\n subdivisions=2,\n cutoff=3600):\n \"\"\"\n Permutate a mesh, record the maximum it deviates from the original mesh\n and the resulting value of an identifier function.\n\n Parameters\n ----------\n mesh: Trimesh object\n function: function which takes a single mesh as an argument\n and returns an (n,) float vector\n subdivisions: the maximum number of times to subdivide the mesh\n count: int, number of times to permutate each subdivision step\n\n Returns\n -----------\n identifiers: numpy array of identifiers\n \"\"\"\n\n identifiers = []\n start = time.time()\n\n # do subdivisions\n divided = [mesh.copy()]\n for j in range(subdivisions - 1):\n divided.append(divided[-1].copy().subdivide())\n\n for i, displacement in enumerate(np.linspace(0.0,\n displacement_max / mesh.scale,\n count)):\n # get one of the subdivided meshes\n current = np.random.choice(divided).copy()\n\n if i > (count / 10):\n # run first bunch without tessellation permutation\n current = current.permutate.tessellation()\n # after the first few displace it a lot\n\n transformed = trimesh.permutate.transform(current)\n # noisy = trimesh.permutate.noise(transformed, displacement)\n\n identifier = function(transformed)\n identifiers.append(identifier)\n\n if (time.time() - start) > cutoff:\n print('bailing for time:{} count:{}'.format(time.time() - start,\n i))\n return np.array(identifiers)\n\n return np.array(identifiers)\n\n\ndef get_meshes(path='../../../models', cutoff=None):\n \"\"\"\n Get a list of single- body meshes to test identifiers on.\n\n Parameters\n ------------\n path: str, location of models\n cutoff: int, number of meshes to stop loading at\n\n Returns\n ------------\n meshes: (n,) list of Trimesh objects\n \"\"\"\n\n bodies = collections.deque()\n for file_name in os.listdir(path):\n try:\n mesh = trimesh.load(os.path.join(path, file_name))\n split = mesh.split()\n bodies.extend(split)\n if len(split) > 1:\n bodies.append(mesh)\n except BaseException:\n continue\n\n if cutoff is not None and len(bodies) > cutoff:\n return np.array(bodies)\n\n for i in range(100):\n cylinder = trimesh.creation.cylinder(\n radius=np.random.random() * 100,\n height=np.random.random() * 1000,\n sections=int(np.clip(np.random.random() * 720,\n 20,\n 720)))\n\n capsule = trimesh.creation.capsule(\n radius=np.random.random() * 100,\n height=np.random.random() * 1000,\n count=np.clip(np.random.random(2) * 720,\n 20,\n 720).astype(int))\n bodies.append(cylinder)\n bodies.append(capsule)\n for i in range(10):\n bodies.append(trimesh.creation.random_soup(\n int(np.clip(np.random.random() * 1000,\n 20,\n 1000))))\n bodies.append(trimesh.creation.icosphere())\n bodies.append(trimesh.creation.uv_sphere())\n bodies.append(trimesh.creation.icosahedron())\n\n return np.array(bodies)\n\n\ndef data_stats(data):\n data = np.asanyarray(data, dtype=np.float64)\n\n # mean identifier\n mean = data.mean(axis=0)\n # thresholdable percentile\n 
percent = np.abs(mean - np.abs(np.percentile(data, 99.999, axis=0)))\n\n return mean, percent\n\n\nif __name__ == '__main__':\n trimesh.util.attach_to_log(level=logging.INFO)\n\n meshes = get_meshes()\n\n print('loaded meshes!')\n\n # we want the whole thing to last less than\n hours = 5\n cutoff = (hours * 3600) / len(meshes)\n cutoff = 30\n result = []\n running = []\n\n for i, m in enumerate(meshes):\n\n # calculate permutations\n identifier = permutations(m,\n count=1000,\n cutoff=cutoff)\n # get data\n mean, percent = data_stats(identifier)\n\n nz = np.logical_and(np.abs(mean) > TOL_ZERO,\n np.abs(percent) > TOL_ZERO)\n\n r = np.ones_like(mean) * 10\n r[nz] = np.round(np.log10(np.abs(mean[nz] / percent[nz]))) - 1\n\n running.append(r)\n result.append({'mean': mean.tolist(),\n 'percent': percent.tolist()})\n\n print('\\n\\n{}/{}'.format(i, len(meshes) - 1))\n print('mean', mean)\n print('percent', percent)\n print('oom', mean / percent)\n print('curun', running[-1])\n print('minrun', np.min(running, axis=0))\n print('meanrun', np.mean(running, axis=0))\n\n # every loop dump everything\n # thrash- ey for sure but intermediate results are great\n name_out = 'res.json'\n with open(name_out, 'w') as file_obj:\n json.dump(result,\n file_obj,\n indent=4)\n",
"import math\nimport numpy as np\n\n\nclass TrajPoly(object):\n\n def __init__(self, method=\"cubic\"):\n if method == \"cubic\":\n self.fit = self._cubic_coeffs\n self.predict = self._predict_cubic\n elif method == \"quintic\":\n self.fit = self._quintic_coeffs\n self.predict = self._predict_quintic\n self.cubicmat = np.array([[2, 1, -2, 1],\n [-3, -2, 3, -1],\n [0, 1, 0, 0],\n [1, 0, 0, 0]])\n self.quinticmat = np.array([[0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 0],\n [5, 4, 3, 2, 1, 0],\n [0, 0, 0, 2, 0, 0],\n [20, 12, 6, 2, 0, 0]])\n self.coeffs_array = None\n\n def _cubic_coeffs(self, conf0, spd0, conf1, spd1):\n self.coeffs_array = np.dot(self.cubicmat, np.vstack((conf0, spd0, conf1, spd1)))\n\n def _quintic_coeffs(self, conf0, spd0, conf1, spd1, acc0=None, acc1=None):\n if acc0 is None:\n acc0 = np.zeros_like(spd0)\n if acc1 is None:\n acc1 = np.zeros_like(spd1)\n self.coeffs_array = np.linalg.solve(self.quinticmat, np.vstack((conf0, conf1, spd0, spd1, acc0, acc1)))\n\n def _predict_cubic(self, step):\n \"\"\"\n step = currenttime/timeinterval\n :return:\n author: weiwei\n date: 20200327\n \"\"\"\n step_array = np.vstack([step ** 3,\n step ** 2,\n step,\n np.ones_like(step)])\n spd_step_array = np.vstack([3 * step ** 2,\n 2 * step,\n np.ones_like(step),\n np.zeros_like(step)])\n acc_step_array = np.vstack([6 * step,\n 2*np.ones_like(step),\n np.zeros_like(step),\n np.zeros_like(step)])\n if isinstance(step, np.ndarray):\n return np.dot(self.coeffs_array.T, step_array).T, \\\n np.dot(self.coeffs_array.T, spd_step_array).T, \\\n np.dot(self.coeffs_array.T, acc_step_array).T\n else:\n return np.dot(self.coeffs_array.T, step_array).T[0][0], \\\n np.dot(self.coeffs_array.T, spd_step_array).T[0][0], \\\n np.dot(self.coeffs_array.T, acc_step_array).T[0][0]\n\n def _predict_quintic(self, step):\n \"\"\"\n step = currenttime/timeinterval\n :return:\n author: weiwei\n date: 20200327\n \"\"\"\n conf_step_array = np.vstack([step ** 5, step ** 4, step ** 3, step ** 2, step, np.ones_like(step)])\n spd_step_array = np.vstack([5 * step ** 4,\n 4 * step ** 3,\n 3 * step ** 2, 2 * step,\n np.ones_like(step),\n np.zeros_like(step)])\n acc_step_array = np.vstack([20 * step ** 3,\n 12 * step ** 2,\n 6 * step,\n 2 * np.ones_like(step),\n np.zeros_like(step),\n np.zeros_like(step)])\n if isinstance(step, np.ndarray):\n return np.dot(self.coeffs_array.T, conf_step_array).T, \\\n np.dot(self.coeffs_array.T, spd_step_array).T, \\\n np.dot(self.coeffs_array.T, acc_step_array).T\n else:\n return np.dot(self.coeffs_array.T, conf_step_array).T[0][0], \\\n np.dot(self.coeffs_array.T, spd_step_array).T[0][0], \\\n np.dot(self.coeffs_array.T, acc_step_array).T[0][0]\n\n def set_interpolation_method(self, method):\n \"\"\"\n change interpolation method\n :param name: 'cubic' or 'quintic'\n :return:\n author: weiwei\n date: 20210331\n \"\"\"\n if method == \"cubic\":\n self.fit = self._cubic_coeffs\n self.predict = self._predict_cubic\n elif method == \"quintic\":\n self.fit = self._quintic_coeffs\n self.predict = self._predict_quintic\n else:\n pass\n\n def piecewise_interpolation(self, path, control_frequency=.005, time_interval=1.0):\n \"\"\"\n :param path: a 1d array of configurations\n :param control_frequency: the program will sample time_interval/control_frequency confs\n :param time_interval: time to move between adjacent joints\n :return:\n author: weiwei\n date: 20200328\n \"\"\"\n path = np.array(path)\n passing_conf_list = []\n passing_spd_list = []\n for id, mid_jnt_values in 
enumerate(path[:-1]):\n passing_conf_list.append(mid_jnt_values)\n if id == 0:\n passing_spd_list.append(np.zeros_like(mid_jnt_values))\n else:\n pre_jnt_values = path[id - 1]\n next_jnt_values = path[id + 1]\n pre_avg_spd = (mid_jnt_values - pre_jnt_values) / time_interval\n nxt_avg_spd = (next_jnt_values - mid_jnt_values) / time_interval\n pass_spd = (pre_avg_spd + nxt_avg_spd) / 2.0\n # set to 0 if signs are different -> reduces overshoot\n zero_id = np.where((np.sign(pre_avg_spd) + np.sign(nxt_avg_spd)) == 0.0)\n pass_spd[zero_id] = 0.0\n passing_spd_list.append(pass_spd)\n print(\"prev spd \", pre_avg_spd)\n print(\"next spd \", nxt_avg_spd)\n print(\"avg_spd \", pass_spd)\n passing_conf_list.append(path[-1]) # last pos\n passing_spd_list.append(np.zeros_like(path[-1])) # last spd\n interpolated_confs = []\n interpolated_spds = []\n interpolated_accs = []\n for id, passing_conf in enumerate(passing_conf_list):\n if id == 0:\n continue\n pre_passing_conf = passing_conf_list[id - 1]\n pre_passing_spd = passing_spd_list[id - 1]\n passing_spd = passing_spd_list[id]\n self.fit(pre_passing_conf, pre_passing_spd, passing_conf, passing_spd)\n samples = np.linspace(0,\n time_interval,\n math.floor(time_interval / control_frequency),\n endpoint=True) / time_interval\n print(\"samples \", samples)\n local_interpolated_confs, local_interplated_spds, local_interplated_accs = self.predict(samples)\n if id == len(passing_conf_list)-1:\n interpolated_confs += local_interpolated_confs.tolist()\n interpolated_spds += local_interplated_spds.tolist()\n interpolated_accs += local_interplated_accs.tolist()\n else:\n interpolated_confs += local_interpolated_confs.tolist()[:-1]\n interpolated_spds += local_interplated_spds.tolist()[:-1]\n interpolated_accs += local_interplated_accs.tolist()[:-1]\n return interpolated_confs, interpolated_spds, interpolated_accs\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n # y = [[0], [3], [0], [9], [0]]\n y = [[math.pi / 6], [math.pi/2]]\n y=[[-0.31294743], [0.85310819], [1.56021504], [0.83826746]]\n control_frequency = .005\n interval_time = 1\n traj = TrajPoly(method=\"quintic\")\n interpolated_confs, interpolated_spds, interpolated_accs = \\\n traj.piecewise_interpolation(y, control_frequency=control_frequency, time_interval=interval_time)\n # print(interpolated_spds)\n # interpolated_spds=np.array(interpolated_spds)\n # print(interpolated_confs)\n fig, axs = plt.subplots(3, figsize=(3.5,4.75))\n fig.tight_layout(pad=.7)\n x = np.linspace(0, interval_time*(len(y) - 1), (len(y) - 1) * math.floor(interval_time / control_frequency))\n axs[0].plot(x, interpolated_confs)\n axs[0].plot(range(0, interval_time * (len(y)), interval_time), y, '--o', color='tab:blue')\n axs[1].plot(x, interpolated_spds)\n axs[2].plot(x, interpolated_accs)\n # plt.quiver(x, interpolated_confs, x, interpolated_spds, width=.001)\n # plt.plot(y)\n plt.show()\n",
"import numpy as np\nimport modeling.geometric_model as gm\nimport modeling.collision_model as cm\nimport modeling.model_collection as mc\nimport basis.robot_math as rm\n\n\nclass JLChainMesh(object):\n \"\"\"\n The mesh generator class for JntLnks\n NOTE: it is unnecessary to attach a nodepath to render repeatedly\n once attached, it is always there. update the joint angles\n will change the attached model directly\n \"\"\"\n\n def __init__(self, jlobject, cdprimitive_type='box', cdmesh_type='triangles'):\n \"\"\"\n author: weiwei\n date: 20200331\n \"\"\"\n self.jlobject = jlobject\n for id in range(self.jlobject.ndof + 1):\n if self.jlobject.lnks[id]['meshfile'] is not None and self.jlobject.lnks[id]['collisionmodel'] is None:\n # in case the collision model is directly set, it allows manually specifying cd primitives\n # instead of auto initialization. Steps: 1. keep meshmodel to None; 2. directly set cm\n self.jlobject.lnks[id]['collisionmodel'] = cm.CollisionModel(self.jlobject.lnks[id]['meshfile'],\n cdprimit_type=cdprimitive_type,\n cdmesh_type=cdmesh_type)\n self.jlobject.lnks[id]['collisionmodel'].set_scale(self.jlobject.lnks[id]['scale'])\n\n def gen_meshmodel(self,\n tcp_jntid=None,\n tcp_loc_pos=None,\n tcp_loc_rotmat=None,\n toggle_tcpcs=True,\n toggle_jntscs=False,\n name='robot_mesh',\n rgba=None):\n mm_collection = mc.ModelCollection(name=name)\n for id in range(self.jlobject.ndof + 1):\n if self.jlobject.lnks[id]['collisionmodel'] is not None:\n this_collisionmodel = self.jlobject.lnks[id]['collisionmodel'].copy()\n pos = self.jlobject.lnks[id]['gl_pos']\n rotmat = self.jlobject.lnks[id]['gl_rotmat']\n this_collisionmodel.set_homomat(rm.homomat_from_posrot(pos, rotmat))\n this_rgba = self.jlobject.lnks[id]['rgba'] if rgba is None else rgba\n this_collisionmodel.set_rgba(this_rgba)\n this_collisionmodel.attach_to(mm_collection)\n # tool center coord\n if toggle_tcpcs:\n self._toggle_tcpcs(mm_collection,\n tcp_jntid,\n tcp_loc_pos,\n tcp_loc_rotmat,\n tcpic_rgba=np.array([.5, 0, 1, 1]), tcpic_thickness=.0062)\n # toggle all coord\n if toggle_jntscs:\n alpha = 1 if rgba == None else rgba[3]\n self._toggle_jntcs(mm_collection,\n jntcs_thickness=.0062,\n alpha=alpha)\n return mm_collection\n\n def gen_stickmodel(self,\n rgba=np.array([.5, 0, 0, 1]),\n thickness=.01,\n joint_ratio=1.62,\n link_ratio=.62,\n tcp_jntid=None,\n tcp_loc_pos=None,\n tcp_loc_rotmat=None,\n toggle_tcpcs=True,\n toggle_jntscs=False,\n toggle_connjnt=False,\n name='robotstick'):\n \"\"\"\n generate the stick model for a jntlnk object\n snp means stick nodepath\n :param rgba:\n :param tcp_jntid:\n :param tcp_loc_pos:\n :param tcp_loc_rotmat:\n :param toggle_tcpcs:\n :param toggle_jntscs:\n :param toggle_connjnt: draw the connecting joint explicitly or not\n :param name:\n :return:\n\n author: weiwei\n date: 20200331, 20201006\n \"\"\"\n stickmodel = mc.ModelCollection(name=name)\n id = 0\n loopdof = self.jlobject.ndof + 1\n if toggle_connjnt:\n loopdof = self.jlobject.ndof + 2\n while id < loopdof:\n cjid = self.jlobject.jnts[id]['child']\n jgpos = self.jlobject.jnts[id]['gl_posq'] # joint global pos\n cjgpos = self.jlobject.jnts[cjid]['gl_pos0'] # child joint global pos\n jgmtnax = self.jlobject.jnts[id][\"gl_motionax\"] # joint global rot ax\n gm.gen_stick(spos=jgpos, epos=cjgpos, thickness=thickness, type=\"rect\", rgba=rgba).attach_to(stickmodel)\n if id > 0:\n if self.jlobject.jnts[id]['type'] == \"revolute\":\n gm.gen_stick(spos=jgpos - jgmtnax * thickness, epos=jgpos + jgmtnax * thickness, 
type=\"rect\",\n thickness=thickness * joint_ratio, rgba=np.array([.3, .3, .2, rgba[3]])).attach_to(stickmodel)\n if self.jlobject.jnts[id]['type'] == \"prismatic\":\n jgpos0 = self.jlobject.jnts[id]['gl_pos0']\n gm.gen_stick(spos=jgpos0, epos=jgpos, type=\"round\", thickness=thickness * joint_ratio,\n rgba=np.array([.2, .3, .3, rgba[3]])).attach_to(stickmodel)\n id = cjid\n # tool center coord\n if toggle_tcpcs:\n self._toggle_tcpcs(stickmodel, tcp_jntid, tcp_loc_pos, tcp_loc_rotmat,\n tcpic_rgba=rgba + np.array([0, 0, 1, 0]), tcpic_thickness=thickness * link_ratio)\n # toggle all coord\n if toggle_jntscs:\n self._toggle_jntcs(stickmodel, jntcs_thickness=thickness * link_ratio, alpha=rgba[3])\n return stickmodel\n\n def gen_endsphere(self, rgba=None, name=''):\n \"\"\"\n generate an end sphere (es) to show the trajectory of the end effector\n\n :param jlobject: a JntLnk object\n :param rbga: color of the arm\n :return: null\n\n author: weiwei\n date: 20181003madrid, 20200331\n \"\"\"\n pass\n # eesphere = gm.StaticGeometricModel(name=name)\n # if rgba is not None:\n # gm.gen_sphere(pos=self.jlobject.jnts[-1]['linkend'], radius=.025, rgba=rgba).attach_to(eesphere)\n # return gm.StaticGeometricModel(eesphere)\n\n def _toggle_tcpcs(self,\n parent_model,\n tcp_jntid,\n tcp_loc_pos,\n tcp_loc_rotmat,\n tcpic_rgba,\n tcpic_thickness,\n tcpcs_thickness=None,\n tcpcs_length=None):\n \"\"\"\n :param parent_model: where to draw the frames to\n :param tcp_jntid: single id or a list of ids\n :param tcp_loc_pos:\n :param tcp_loc_rotmat:\n :param tcpic_rgba: color that used to render the tcp indicator\n :param tcpic_thickness: thickness the tcp indicator\n :param tcpcs_thickness: thickness the tcp coordinate frame\n :return:\n\n author: weiwei\n date: 20201125\n \"\"\"\n if tcp_jntid is None:\n tcp_jntid = self.jlobject.tcp_jntid\n if tcp_loc_pos is None:\n tcp_loc_pos = self.jlobject.tcp_loc_pos\n if tcp_loc_rotmat is None:\n tcp_loc_rotmat = self.jlobject.tcp_loc_rotmat\n if tcpcs_thickness is None:\n tcpcs_thickness = tcpic_thickness\n if tcpcs_length is None:\n tcpcs_length = tcpcs_thickness * 15\n tcp_gl_pos, tcp_gl_rotmat = self.jlobject.get_gl_tcp(tcp_jntid,\n tcp_loc_pos,\n tcp_loc_rotmat)\n if isinstance(tcp_gl_pos, list):\n for i, jid in enumerate(tcp_jntid):\n jgpos = self.jlobject.jnts[jid]['gl_posq']\n gm.gen_dashstick(spos=jgpos,\n epos=tcp_gl_pos[i],\n thickness=tcpic_thickness,\n rgba=tcpic_rgba,\n type=\"round\").attach_to(parent_model)\n gm.gen_mycframe(pos=tcp_gl_pos[i],\n rotmat=tcp_gl_rotmat[i],\n length=tcpcs_length,\n thickness=tcpcs_thickness,\n alpha=tcpic_rgba[3]).attach_to(parent_model)\n else:\n jgpos = self.jlobject.jnts[tcp_jntid]['gl_posq']\n gm.gen_dashstick(spos=jgpos,\n epos=tcp_gl_pos,\n thickness=tcpic_thickness,\n rgba=tcpic_rgba,\n type=\"round\").attach_to(parent_model)\n gm.gen_mycframe(pos=tcp_gl_pos,\n rotmat=tcp_gl_rotmat,\n length=tcpcs_length,\n thickness=tcpcs_thickness,\n alpha=tcpic_rgba[3]).attach_to(parent_model)\n\n def _toggle_jntcs(self, parentmodel, jntcs_thickness, jntcs_length=None, alpha=1):\n \"\"\"\n :param parentmodel: where to draw the frames to\n :return:\n\n author: weiwei\n date: 20201125\n \"\"\"\n if jntcs_length is None:\n jntcs_length = jntcs_thickness * 15\n for id in self.jlobject.tgtjnts:\n gm.gen_dashframe(pos=self.jlobject.jnts[id]['gl_pos0'],\n rotmat=self.jlobject.jnts[id]['gl_rotmat0'],\n length=jntcs_length,\n thickness=jntcs_thickness,\n alpha=alpha).attach_to(parentmodel)\n 
gm.gen_frame(pos=self.jlobject.jnts[id]['gl_posq'],\n rotmat=self.jlobject.jnts[id]['gl_rotmatq'],\n length=jntcs_length,\n thickness=jntcs_thickness,\n alpha=alpha).attach_to(parentmodel)\n",
"import os\n\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\n\nimport config\n\ncameraMatrix = np.array([[1.42068235e+03, 0.00000000e+00, 9.49208512e+02],\n [0.00000000e+00, 1.37416685e+03, 5.39622051e+02],\n [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\ndistCoeffs = np.array([1.69926613e-01, -7.40003491e-01, -7.45655262e-03, -1.79442353e-03, 2.46650225e+00])\n\n\ndef video2img(video_f_name):\n vc = cv2.VideoCapture(os.path.join(config.ROOT, \"video\", video_f_name))\n c = 1\n\n output_path = os.path.join(config.ROOT, \"img/videocapture/\")\n if vc.isOpened():\n rval, frame = vc.read()\n else:\n print(\"open error!\")\n rval = False\n\n time_interval = 5\n while rval:\n rval, frame = vc.read()\n if c % time_interval == 0:\n if frame is None:\n continue\n corners, ids = detect_aruco(frame)\n if corners is None:\n continue\n print(ids)\n for i, corner in enumerate(corners):\n points = corner[0].astype(np.int32)\n cv2.polylines(frame, [points], True, (0, 255, 255))\n cv2.putText(frame, str(ids[i][0]), tuple(points[0]), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)\n\n rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.3, cameraMatrix, distCoeffs)\n\n for i in range(ids.size):\n # print( 'rvec {}, tvec {}'.format( rvecs[i], tvecs[i] ))\n # print( 'rvecs[{}] {}'.format( i, rvecs[i] ))\n # print( 'tvecs[{}] {}'.format( i, tvecs[i] ))\n aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1)\n cv2.imshow(\"img\", frame)\n cv2.waitKey(0)\n # cv2.imwrite(output_path+video_f_name.split(\".mp4\")[0] + str(int(c / time_interval)) + '.jpg', frame)\n c += 1\n vc.release()\n\n\ndef detect_aruco(img, tgtids=[1, 3, 9, 619]):\n parameters = aruco.DetectorParameters_create()\n aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_1000)\n\n corners, ids, _ = aruco.detectMarkers(img, aruco_dict, parameters=parameters)\n rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.3, cameraMatrix, distCoeffs)\n\n if len(corners) < len(tgtids):\n return None, None\n if len(ids) != len(tgtids):\n return None, None\n if ids[0] not in tgtids or ids[1] not in tgtids:\n return None, None\n return corners, ids\n\n\nif __name__ == '__main__':\n video_f_name = 'cat.mp4'\n video2img(video_f_name)\n",
"import numpy as np\nimport basis.robot_math as rm\nimport basis.data_adapter as da\nfrom panda3d.ode import OdeTriMeshData, OdeTriMeshGeom, OdeUtil, OdeRayGeom\n\n\n# util functions\ndef gen_cdmesh_vvnf(vertices, vertex_normals, faces):\n \"\"\"\n generate cdmesh given vertices, vertex_normals, and faces\n :return: panda3d.ode.OdeTriMeshGeomm\n author: weiwei\n date: 20210118\n \"\"\"\n objpdnp = da.nodepath_from_vvnf(vertices, vertex_normals, faces)\n obj_ot_geom = OdeTriMeshGeom(OdeTriMeshData(objpdnp, True))\n return obj_ot_geom\n\n\n# def gen_plane_cdmesh(updirection=np.array([0, 0, 1]), offset=0, name='autogen'):\n# \"\"\"\n# generate a plane bulletrigidbody node\n# :param updirection: the normal parameter of bulletplaneshape at panda3d\n# :param offset: the d parameter of bulletplaneshape at panda3d\n# :param name:\n# :return: bulletrigidbody\n# author: weiwei\n# date: 20170202, tsukuba\n# \"\"\"\n# bulletplnode = BulletRigidBodyNode(name)\n# bulletplshape = BulletPlaneShape(Vec3(updirection[0], updirection[1], updirection[2]), offset)\n# bulletplshape.setMargin(0)\n# bulletplnode.addShape(bulletplshape)\n# return bulletplnode\n\ndef is_collided(objcm0, objcm1):\n \"\"\"\n check if two objcm are collided after converting the specified cdmesh_type\n :param objcm0: an instance of CollisionModel or CollisionModelCollection\n :param objcm1: an instance of CollisionModel or CollisionModelCollection\n :return:\n author: weiwei\n date: 20210118\n \"\"\"\n obj0 = gen_cdmesh_vvnf(*objcm0.extract_rotated_vvnf())\n obj1 = gen_cdmesh_vvnf(*objcm1.extract_rotated_vvnf())\n contact_entry = OdeUtil.collide(obj0, obj1, max_contacts=10)\n contact_points = [da.pdv3_to_npv3(point) for point in contact_entry.getContactPoints()]\n return (True, contact_points) if len(contact_points) > 0 else (False, contact_points)\n\ndef rayhit_closet(pfrom, pto, objcm):\n \"\"\"\n :param pfrom:\n :param pto:\n :param objcm:\n :return:\n author: weiwei\n date: 20190805\n \"\"\"\n tgt_cdmesh = gen_cdmesh_vvnf(*objcm.extract_rotated_vvnf())\n ray = OdeRayGeom(length=1)\n length, dir = rm.unit_vector(pto - pfrom, toggle_length=True)\n ray.set(pfrom[0], pfrom[1], pfrom[2], dir[0], dir[1], dir[2])\n ray.setLength(length)\n contact_entry = OdeUtil.collide(ray, tgt_cdmesh, max_contacts=10)\n contact_points = [da.pdv3_to_npv3(point) for point in contact_entry.getContactPoints()]\n min_id = np.argmin(np.linalg.norm(pfrom-np.array(contact_points), axis=1))\n contact_normals = [da.pdv3_to_npv3(contact_entry.getContactGeom(i).getNormal()) for i in range(contact_entry.getNumContacts())]\n return contact_points[min_id], contact_normals[min_id]\n\ndef rayhit_all(pfrom, pto, objcm):\n \"\"\"\n :param pfrom:\n :param pto:\n :param objcm:\n :return:\n author: weiwei\n date: 20190805\n \"\"\"\n tgt_cdmesh = gen_cdmesh_vvnf(*objcm.extract_rotated_vvnf())\n ray = OdeRayGeom(length=1)\n length, dir = rm.unit_vector(pto-pfrom, toggle_length=True)\n ray.set(pfrom[0], pfrom[1], pfrom[2], dir[0], dir[1], dir[2])\n ray.setLength(length)\n hit_entry = OdeUtil.collide(ray, tgt_cdmesh)\n hit_points = [da.pdv3_to_npv3(point) for point in hit_entry.getContactPoints()]\n hit_normals = [da.pdv3_to_npv3(hit_entry.getContactGeom(i).getNormal()) for i in range(hit_entry.getNumContacts())]\n return hit_points, hit_normals\n\n\nif __name__ == '__main__':\n import os, math, basis\n import numpy as np\n import visualization.panda.world as wd\n import modeling.geometric_model as gm\n import modeling.collision_model as cm\n import basis.robot_math as rm\n\n 
wd.World(cam_pos=[1.0, 1, .0, 1.0], lookat_pos=[0, 0, 0])\n gm.gen_frame().attach_to(base)\n objpath = os.path.join(basis.__path__[0], 'objects', 'bunnysim.stl')\n objcm1 = cm.CollisionModel(objpath)\n homomat = np.eye(4)\n homomat[:3, :3] = rm.rotmat_from_axangle([0, 0, 1], math.pi / 2)\n homomat[:3, 3] = np.array([0.02, 0.02, 0])\n objcm1.set_homomat(homomat)\n objcm1.set_rgba([1, 1, .3, .2])\n objcm2 = objcm1.copy()\n objcm2.set_pos(objcm1.get_pos() + np.array([.05, .02, .0]))\n objcm1.change_cdmesh_type('convex_hull')\n objcm2.change_cdmesh_type('obb')\n iscollided, contact_points = is_collided(objcm1, objcm2)\n objcm1.show_cdmesh()\n objcm2.show_cdmesh()\n objcm1.attach_to(base)\n objcm2.attach_to(base)\n print(iscollided)\n for ctpt in contact_points:\n gm.gen_sphere(ctpt, radius=.001).attach_to(base)\n pfrom = np.array([0, 0, 0]) + np.array([1.0, 1.0, 1.0])\n # pto = np.array([0, 0, 0]) + np.array([-1.0, -1.0, -1.0])\n pto = np.array([0, 0, 0]) + np.array([0.02, 0.02, 0.02])\n # pfrom = np.array([0, 0, 0]) + np.array([0.0, 0.0, 1.0])\n # pto = np.array([0, 0, 0]) + np.array([0.0, 0.0, -1.0])\n # hit_point, hit_normal = rayhit_closet(pfrom=pfrom, pto=pto, objcm=objcm1)\n hit_points, hit_normals = rayhit_all(pfrom=pfrom, pto=pto, objcm=objcm1)\n # objcm.attach_to(base)\n # objcm.show_cdmesh(type='box')\n # objcm.show_cdmesh(type='convex_hull')\n # for hitpos, hitnormal in zip([hit_point], [hit_normal]):\n for hitpos, hitnormal in zip(hit_points, hit_normals):\n gm.gen_sphere(hitpos, radius=.003, rgba=np.array([0, 1, 1, 1])).attach_to(base)\n gm.gen_arrow(hitpos, epos=hitpos+hitnormal*.03, thickness=.002, rgba=np.array([0, 1, 1, 1])).attach_to(base)\n gm.gen_stick(spos=pfrom, epos=pto, thickness=.002).attach_to(base)\n # gm.gen_arrow(spos=hitpos, epos=hitpos + hitnrml * .07, thickness=.002, rgba=np.array([0, 1, 0, 1])).attach_to(base)\n base.run()\n"
] | [
[
"numpy.ones_like",
"numpy.abs",
"numpy.linspace",
"numpy.min",
"numpy.random.choice",
"numpy.random.random",
"numpy.percentile",
"numpy.asanyarray",
"numpy.mean",
"numpy.array"
],
[
"numpy.dot",
"numpy.ones_like",
"numpy.vstack",
"matplotlib.pyplot.subplots",
"numpy.sign",
"numpy.zeros_like",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
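`TrajPoly._cubic_coeffs` in the record above builds cubic Hermite coefficients from boundary configurations and speeds through a fixed 4x4 basis matrix, and `_predict_cubic` evaluates them at normalized time steps. A minimal NumPy sketch of the same Hermite construction on a hypothetical 2-DOF path, without using the wrs classes:

import numpy as np

# Same basis matrix as TrajPoly.cubicmat: rows are the s^3, s^2, s, 1 weights
# of conf0, spd0, conf1, spd1 in the cubic Hermite polynomial.
hermite = np.array([[ 2,  1, -2,  1],
                    [-3, -2,  3, -1],
                    [ 0,  1,  0,  0],
                    [ 1,  0,  0,  0]], dtype=float)

conf0 = np.array([0.0, 0.5])   # start joint values (hypothetical 2-DOF arm)
spd0  = np.zeros(2)            # start joint speeds
conf1 = np.array([1.0, -0.5])  # goal joint values
spd1  = np.zeros(2)            # goal joint speeds

coeffs = hermite @ np.vstack([conf0, spd0, conf1, spd1])   # (4, ndof)
s = np.linspace(0.0, 1.0, 5)                               # normalized time samples
basis = np.vstack([s**3, s**2, s, np.ones_like(s)])        # (4, nsamples)
confs = (coeffs.T @ basis).T                               # (nsamples, ndof) interpolated path
print(confs[0], confs[-1])                                 # endpoints reproduce conf0 and conf1

At s=0 and s=1 the basis weights collapse to conf0 and conf1 respectively, which is why the piecewise interpolation in the record passes exactly through every waypoint while blending speeds at the interior ones.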
mingcv/Bread | [
"20dedfe2105b08ce8499b216c3c2bfd3699af17f"
] | [
"train_NFM.py"
] | [
"import argparse\nimport datetime\nimport os\nimport traceback\n\nimport kornia\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom tqdm.autonotebook import tqdm\n\nimport models\nfrom datasets import LowLightDataset, LowLightFDataset\nfrom models import PSNR, SSIM, CosineLR\nfrom tools import SingleSummaryWriter\nfrom tools import saver, mutils\n\n\ndef get_args():\n parser = argparse.ArgumentParser('Breaking Downing the Darkness')\n parser.add_argument('--num_gpus', type=int, default=1, help='number of gpus being used')\n parser.add_argument('--num_workers', type=int, default=12, help='num_workers of dataloader')\n parser.add_argument('--batch_size', type=int, default=1, help='The number of images per batch among all devices')\n parser.add_argument('-m1', '--model1', type=str, default='INet',\n help='Model1 Name')\n parser.add_argument('-m2', '--model2', type=str, default='NSNet',\n help='Model2 Name')\n parser.add_argument('-m3', '--model3', type=str, default='NSNet',\n help='Model3 Name')\n\n parser.add_argument('-m1w', '--model1_weight', type=str, default=None,\n help='Model Name')\n parser.add_argument('-m2w', '--model2_weight', type=str, default=None,\n help='Model Name')\n\n parser.add_argument('--comment', type=str, default='default',\n help='Project comment')\n parser.add_argument('--graph', action='store_true')\n parser.add_argument('--no_sche', action='store_true')\n parser.add_argument('--sampling', action='store_true')\n\n parser.add_argument('--slope', type=float, default=2.)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--optim', type=str, default='adam', help='select optimizer for training, '\n 'suggest using \\'admaw\\' until the'\n ' very final stage then switch to \\'sgd\\'')\n parser.add_argument('--num_epochs', type=int, default=500)\n parser.add_argument('--val_interval', type=int, default=1, help='Number of epoches between valing phases')\n parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')\n parser.add_argument('--data_path', type=str, default='./data/LOL',\n help='the root folder of dataset')\n parser.add_argument('--log_path', type=str, default='logs/')\n parser.add_argument('--saved_path', type=str, default='logs/')\n args = parser.parse_args()\n return args\n\n\nclass ModelNSNet(nn.Module):\n def __init__(self, model1, model2, model3):\n super().__init__()\n self.texture_loss = models.SSIML1Loss(channels=1)\n self.model_ianet = model1(in_channels=1, out_channels=1)\n self.model_nsnet = model2(in_channels=2, out_channels=1)\n self.model_fusenet = model3(in_channels=3, out_channels=1)\n\n assert opt.model1_weight is not None\n self.load_weight(self.model_ianet, opt.model1_weight)\n self.load_weight(self.model_nsnet, opt.model2_weight)\n self.model_ianet.eval()\n self.model_nsnet.eval()\n self.eps = 1e-2\n\n def load_weight(self, model, weight_pth):\n state_dict = torch.load(weight_pth)\n ret = model.load_state_dict(state_dict, strict=True)\n print(ret)\n\n def noise_syn(self, illumi, strength):\n return torch.exp(-illumi) * strength\n\n def forward(self, image, image_gt, training=True):\n texture_nss = []\n with torch.no_grad():\n if training:\n image = image.squeeze(0)\n image_gt = image_gt.repeat(8, 1, 1, 1)\n\n texture_in, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image), 1, dim=1)\n texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image_gt), 1, dim=1)\n\n texture_in_down = 
F.interpolate(texture_in, scale_factor=0.5, mode='bicubic', align_corners=True)\n illumi = self.model_ianet(texture_in_down)\n illumi = F.interpolate(illumi, scale_factor=2, mode='bicubic', align_corners=True)\n noisy_gt = texture_in / torch.clamp_min(illumi, self.eps)\n\n for strength in [0, 0.05, 0.1]:\n illumi = torch.clamp(illumi, 0., 1.)\n attention = self.noise_syn(illumi, strength=strength)\n texture_res = self.model_nsnet(torch.cat([noisy_gt, attention], dim=1))\n texture_ns = noisy_gt + texture_res\n texture_nss.append(texture_ns)\n\n texture_nss = torch.cat(texture_nss, dim=1).detach()\n\n texture_fuse = self.model_fusenet(texture_nss)\n restor_loss = self.texture_loss(texture_fuse, texture_gt)\n psnr = PSNR(texture_fuse, texture_gt)\n ssim = SSIM(texture_fuse, texture_gt).item()\n return noisy_gt, texture_nss, texture_fuse, texture_res, illumi, restor_loss, psnr, ssim\n\n\ndef train(opt):\n if torch.cuda.is_available():\n torch.cuda.manual_seed(42)\n else:\n torch.manual_seed(42)\n\n timestamp = mutils.get_formatted_time()\n opt.saved_path = opt.saved_path + f'/{opt.comment}/{timestamp}'\n opt.log_path = opt.log_path + f'/{opt.comment}/{timestamp}/tensorboard/'\n os.makedirs(opt.log_path, exist_ok=True)\n os.makedirs(opt.saved_path, exist_ok=True)\n\n training_params = {'batch_size': opt.batch_size,\n 'shuffle': True,\n 'drop_last': True,\n 'num_workers': opt.num_workers}\n\n val_params = {'batch_size': 1,\n 'shuffle': False,\n 'drop_last': True,\n 'num_workers': opt.num_workers}\n\n training_set = LowLightFDataset(os.path.join(opt.data_path, 'train'), image_split='images_aug')\n training_generator = DataLoader(training_set, **training_params)\n\n val_set = LowLightDataset(os.path.join(opt.data_path, 'eval'))\n val_generator = DataLoader(val_set, **val_params)\n\n model1 = getattr(models, opt.model1)\n model2 = getattr(models, opt.model2)\n model3 = getattr(models, opt.model3)\n writer = SingleSummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")}/')\n\n model = ModelNSNet(model1, model2, model3)\n print(model)\n\n if opt.num_gpus > 0:\n model = model.cuda()\n if opt.num_gpus > 1:\n model = nn.DataParallel(model)\n\n if opt.optim == 'adam':\n optimizer = torch.optim.Adam(model.model_fusenet.parameters(), opt.lr)\n else:\n optimizer = torch.optim.SGD(model.model_fusenet.parameters(), opt.lr, momentum=0.9, nesterov=True)\n\n scheduler = CosineLR(optimizer, opt.lr, opt.num_epochs)\n epoch = 0\n step = 0\n model.model_fusenet.train()\n\n num_iter_per_epoch = len(training_generator)\n\n try:\n for epoch in range(opt.num_epochs):\n last_epoch = step // num_iter_per_epoch\n if epoch < last_epoch:\n continue\n\n epoch_loss = []\n progress_bar = tqdm(training_generator)\n\n saver.base_url = os.path.join(opt.saved_path, 'results', '%03d' % epoch)\n if not opt.sampling:\n for iter, (data, target, name) in enumerate(progress_bar):\n if iter < step - last_epoch * num_iter_per_epoch:\n progress_bar.update()\n continue\n try:\n if opt.num_gpus == 1:\n data = data.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n\n noisy_gt, texture_nss, texture_fuse, texture_res, \\\n illumi, restor_loss, psnr, ssim = model(data, target, training=True)\n\n loss = restor_loss\n loss.backward()\n optimizer.step()\n\n epoch_loss.append(float(loss))\n\n progress_bar.set_description(\n 'Step: {}. Epoch: {}/{}. Iteration: {}/{}. 
restor_loss: {:.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(\n step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, restor_loss.item(), psnr,\n ssim))\n writer.add_scalar('Loss/train', loss, step)\n writer.add_scalar('PSNR/train', psnr, step)\n writer.add_scalar('SSIM/train', ssim, step)\n\n # log learning_rate\n current_lr = optimizer.param_groups[0]['lr']\n writer.add_scalar('learning_rate', current_lr, step)\n\n step += 1\n\n except Exception as e:\n print('[Error]', traceback.format_exc())\n print(e)\n continue\n\n if not opt.no_sche:\n scheduler.step()\n\n if epoch % opt.val_interval == 0:\n model.model_fusenet.eval()\n loss_ls = []\n psnrs = []\n ssims = []\n\n for iter, (data, target, name) in enumerate(val_generator):\n with torch.no_grad():\n if opt.num_gpus == 1:\n data = data.cuda()\n target = target.cuda()\n\n noisy_gt, texture_nss, texture_fuse, texture_res, \\\n illumi, restor_loss, psnr, ssim = model(data, target, training=False)\n texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(target), 1, dim=1)\n\n saver.save_image(noisy_gt, name=os.path.splitext(name[0])[0] + '_in')\n saver.save_image(texture_nss.transpose(0, 1), name=os.path.splitext(name[0])[0] + '_ns')\n saver.save_image(texture_fuse, name=os.path.splitext(name[0])[0] + '_fuse')\n saver.save_image(texture_res, name=os.path.splitext(name[0])[0] + '_res')\n saver.save_image(illumi, name=os.path.splitext(name[0])[0] + '_ill')\n saver.save_image(target, name=os.path.splitext(name[0])[0] + '_gt')\n\n loss = restor_loss\n loss_ls.append(loss.item())\n psnrs.append(psnr)\n ssims.append(ssim)\n\n loss = np.mean(np.array(loss_ls))\n psnr = np.mean(np.array(psnrs))\n ssim = np.mean(np.array(ssims))\n\n print(\n 'Val. Epoch: {}/{}. Loss: {:1.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(\n epoch, opt.num_epochs, loss, psnr, ssim))\n writer.add_scalar('Loss/val', loss, step)\n writer.add_scalar('PSNR/val', psnr, step)\n writer.add_scalar('SSIM/val', ssim, step)\n\n save_checkpoint(model, f'{opt.model3}_{\"%03d\" % epoch}_{psnr}_{ssim}_{step}.pth')\n\n model.model_fusenet.train()\n if opt.sampling:\n exit(0)\n except KeyboardInterrupt:\n save_checkpoint(model, f'{opt.model3}_{epoch}_{step}_keyboardInterrupt.pth')\n writer.close()\n writer.close()\n\n\ndef save_checkpoint(model, name):\n if isinstance(model, nn.DataParallel):\n torch.save(model.module.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))\n else:\n torch.save(model.model_fdnet.state_dict(), os.path.join(opt.saved_path, name))\n\n\nif __name__ == '__main__':\n opt = get_args()\n train(opt)\n"
] | [
[
"torch.clamp",
"torch.cuda.manual_seed",
"torch.load",
"torch.cat",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.exp",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"torch.clamp_min",
"torch.nn.DataParallel",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
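The train_NFM.py entry above builds its fusion input by converting a noise-attention map from the predicted illumination (noise_syn returns exp(-illumination) * strength) and stacking denoising candidates produced at several strengths. The sketch below is a minimal, dependency-light illustration of that multi-strength stacking step only; the random luminance and illumination tensors are placeholders for the real INet/NSNet outputs, and no kornia colour conversion or network call is performed.

import torch

def noise_attention(illumination: torch.Tensor, strength: float) -> torch.Tensor:
    # Darker regions (small illumination values) receive larger attention,
    # mirroring the exp(-illumination) * strength term in ModelNSNet.noise_syn.
    return torch.exp(-illumination) * strength

# Toy tensors standing in for the luminance channel and the predicted illumination.
luminance = torch.rand(1, 1, 64, 64)
illumination = torch.rand(1, 1, 64, 64).clamp(0.0, 1.0)
eps = 1e-2

# Brighten the dark input by the clamped illumination, as done before denoising.
noisy_estimate = luminance / torch.clamp_min(illumination, eps)

candidates = []
for strength in (0.0, 0.05, 0.1):
    attention = noise_attention(illumination, strength)
    # The real pipeline feeds [noisy_estimate, attention] to a denoising network;
    # here the conditioning pair is only stacked to show the tensor layout.
    candidates.append(torch.cat([noisy_estimate, attention], dim=1))

multi_strength_stack = torch.cat(candidates, dim=1)
print(multi_strength_stack.shape)  # torch.Size([1, 6, 64, 64])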
NunoEdgarGFlowHub/agents-1 | [
"c62215debda5bf5d89723f4112f1e3e2f063cd52"
] | [
"tf_agents/bandits/policies/policy_utilities.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for bandit policies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.utils import common\n\n\nclass InfoFields(object):\n \"\"\"Strings which can be used in the policy info fields.\"\"\"\n # Mean of predicted rewards (per arm).\n PREDICTED_REWARDS_MEAN = 'predicted_rewards_mean'\n # Samples of predicted rewards (per arm).\n PREDICTED_REWARDS_SAMPLED = 'predicted_rewards_sampled'\n # Type of bandit policy (see enumerations in `BanditPolicyType`).\n BANDIT_POLICY_TYPE = 'bandit_policy_type'\n # Used to store the chosen action for a per-arm model.\n CHOSEN_ARM_FEATURES = 'chosen_arm_features'\n\n\nPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name\n 'PolicyInfo',\n (policy_step.CommonFields.LOG_PROBABILITY,\n InfoFields.PREDICTED_REWARDS_MEAN,\n InfoFields.PREDICTED_REWARDS_SAMPLED,\n InfoFields.BANDIT_POLICY_TYPE))\n# Set default empty tuple for all fields.\nPolicyInfo.__new__.__defaults__ = ((),) * len(PolicyInfo._fields)\n\n\nPerArmPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name\n 'PerArmPolicyInfo',\n (policy_step.CommonFields.LOG_PROBABILITY,\n InfoFields.PREDICTED_REWARDS_MEAN,\n InfoFields.PREDICTED_REWARDS_SAMPLED,\n InfoFields.BANDIT_POLICY_TYPE,\n InfoFields.CHOSEN_ARM_FEATURES))\n# Set default empty tuple for all fields.\nPerArmPolicyInfo.__new__.__defaults__ = ((),) * len(PerArmPolicyInfo._fields)\n\n\nclass BanditPolicyType(object):\n \"\"\"Enumeration of bandit policy types.\"\"\"\n # No bandit policy type specified.\n UNKNOWN = 0\n # Greedy decision made by bandit agent.\n GREEDY = 1\n # Random decision for exploration made by epsilon-greedy agent sampled from\n # uniform distribution over actions.\n UNIFORM = 2\n\n\ndef create_bandit_policy_type_tensor_spec(shape):\n \"\"\"Create tensor spec for bandit policy type.\"\"\"\n return tensor_spec.BoundedTensorSpec(\n shape=shape, dtype=tf.int32,\n minimum=BanditPolicyType.UNKNOWN, maximum=BanditPolicyType.UNIFORM)\n\n\[email protected]\ndef masked_argmax(input_tensor, mask, output_type=tf.int32):\n \"\"\"Computes the argmax where the allowed elements are given by a mask.\n\n Args:\n input_tensor: Rank-2 Tensor of floats.\n mask: 0-1 valued Tensor of the same shape as input.\n output_type: Integer type of the output.\n\n Returns:\n A Tensor of rank 1 and type `output_type`, with the masked argmax of every\n row of `input_tensor`.\n \"\"\"\n input_tensor.shape.assert_is_compatible_with(mask.shape)\n neg_inf = tf.constant(-float('Inf'), input_tensor.dtype)\n tf.compat.v1.assert_equal(\n tf.reduce_max(mask, axis=1), tf.constant(1, dtype=mask.dtype))\n modified_input = tf.compat.v2.where(\n 
tf.cast(mask, tf.bool), input_tensor, neg_inf)\n return tf.argmax(modified_input, axis=-1, output_type=output_type)\n\n\ndef has_bandit_policy_type(info, check_for_tensor=False):\n \"\"\"Check if policy info has `bandit_policy_type` field/tensor.\"\"\"\n if info in ((), None):\n return False\n fields = getattr(info, '_fields', None)\n has_field = fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields\n if has_field and check_for_tensor:\n return isinstance(info.bandit_policy_type, tf.Tensor)\n else:\n return has_field\n\n\ndef set_bandit_policy_type(info, bandit_policy_type):\n \"\"\"Sets the InfoFields.BANDIT_POLICY_TYPE on info to bandit_policy_type.\n\n If policy `info` does not support InfoFields.BANDIT_POLICY_TYPE, this method\n returns `info` as-is (without any modification).\n\n Args:\n info: Policy info on which to set bandit policy type.\n bandit_policy_type: Tensor containing BanditPolicyType enums or TensorSpec\n from `create_bandit_policy_type_tensor_spec()`.\n\n Returns:\n Policy info with modified field (if possible).\n \"\"\"\n if info in ((), None):\n return PolicyInfo(bandit_policy_type=bandit_policy_type)\n fields = getattr(info, '_fields', None)\n if fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields:\n return info._replace(bandit_policy_type=bandit_policy_type)\n try:\n info[InfoFields.BANDIT_POLICY_TYPE] = bandit_policy_type\n except TypeError:\n pass\n return info\n\n\[email protected]\ndef bandit_policy_uniform_mask(values, mask):\n \"\"\"Set bandit policy type tensor to BanditPolicyType.UNIFORM based on mask.\n\n Set bandit policy type `values` to BanditPolicyType.UNIFORM; returns tensor\n where output[i] is BanditPolicyType.UNIFORM if mask[i] is True, otherwise it\n is left as values[i].\n\n Args:\n values: Tensor containing `BanditPolicyType` enumerations.\n mask: Tensor of the same shape as `values` with boolean flags indicating\n values to set to `BanditPolicyType.UNIFORM`.\n\n Returns:\n Tensor containing `BanditPolicyType` enumerations with masked values.\n \"\"\"\n tf.compat.v1.assert_equal(tf.shape(mask), tf.shape(values))\n return tf.where(\n mask, tf.fill(tf.shape(values), BanditPolicyType.UNIFORM), values)\n"
] | [
[
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.cast",
"tensorflow.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
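The policy_utilities.py entry above documents masked_argmax, which restricts a row-wise argmax to the actions allowed by a 0-1 mask. Below is a small standalone TF2 sketch of the same idea, written without importing tf_agents so it runs on its own; the reward values and mask are invented for illustration.

import tensorflow as tf

def masked_argmax(values: tf.Tensor, mask: tf.Tensor, output_type=tf.int32) -> tf.Tensor:
    # Disallowed entries (mask == 0) are replaced with -inf before the argmax,
    # so only masked-in actions can be selected.
    neg_inf = tf.constant(-float('inf'), dtype=values.dtype)
    masked = tf.where(tf.cast(mask, tf.bool), values, neg_inf)
    return tf.argmax(masked, axis=-1, output_type=output_type)

rewards = tf.constant([[0.3, 0.9, 0.1],
                       [0.8, 0.2, 0.5]])
allowed = tf.constant([[1, 0, 1],
                       [0, 1, 1]])

print(masked_argmax(rewards, allowed).numpy())  # -> [0 2]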
hchyun6086/auto-editor | [
"beef008763bcaad00b83d5b506f436e6edc8963e"
] | [
"auto_editor/audiotsm2/base/analysis_synthesis.py"
] | [
"'''audiotsm2/base/analysis_synthesis.py'''\n\nimport numpy as np\n\nfrom auto_editor.audiotsm2.utils import (windows, CBuffer, NormalizeBuffer)\nfrom .tsm import TSM\n\nEPSILON = 0.0001\n\n\nclass AnalysisSynthesisTSM(TSM):\n def __init__(self, converter, channels, frame_length, analysis_hop, synthesis_hop,\n analysis_window, synthesis_window, delta_before=0, delta_after=0):\n self._converter = converter\n\n self._channels = channels\n self._frame_length = frame_length\n self._analysis_hop = analysis_hop\n self._synthesis_hop = synthesis_hop\n\n self._analysis_window = analysis_window\n self._synthesis_window = synthesis_window\n\n self._delta_before = delta_before\n self._delta_after = delta_after\n\n # When the analysis hop is larger than the frame length, some samples\n # from the input need to be skipped. self._skip_input_samples tracks\n # how many samples should be skipped before reading the analysis frame.\n self._skip_input_samples = 0\n\n # This attribute is used to start the output signal in the middle of a\n # frame, which should be the peek of the window function\n self._skip_output_samples = 0\n\n # Compute the normalize window\n self._normalize_window = windows.product(self._analysis_window,\n self._synthesis_window)\n\n if(self._normalize_window is None):\n self._normalize_window = np.ones(self._frame_length)\n\n # Initialize the buffers\n delta = self._delta_before + self._delta_after\n self._in_buffer = CBuffer(self._channels, self._frame_length + delta)\n self._analysis_frame = np.empty(\n (self._channels, self._frame_length + delta))\n self._out_buffer = CBuffer(self._channels, self._frame_length)\n self._normalize_buffer = NormalizeBuffer(self._frame_length)\n\n self.clear()\n\n def clear(self):\n # Clear the buffers\n self._in_buffer.remove(self._in_buffer.length)\n self._out_buffer.remove(self._out_buffer.length)\n self._out_buffer.right_pad(self._frame_length)\n self._normalize_buffer.remove(self._normalize_buffer.length)\n\n # Left pad the input with half a frame of zeros, and ignore that half\n # frame in the output. 
This makes the output signal start in the middle\n # of a frame, which should be the peak of the window function.\n self._in_buffer.write(np.zeros(\n (self._channels, self._delta_before + self._frame_length // 2)))\n self._skip_output_samples = self._frame_length // 2\n\n self._converter.clear()\n\n def flush_to(self, writer):\n if(self._in_buffer.remaining_length == 0):\n raise RuntimeError(\n \"There is still data to process in the input buffer, flush_to method \"\n \"should only be called when write_to returns True.\"\n )\n\n n = self._out_buffer.write_to(writer)\n if(self._out_buffer.ready == 0):\n # The output buffer is empty\n self.clear()\n return n, True\n\n return n, False\n\n def get_max_output_length(self, input_length):\n input_length -= self._skip_input_samples\n if(input_length <= 0):\n return 0\n\n n_frames = input_length // self._analysis_hop + 1\n return n_frames * self._synthesis_hop\n\n def _process_frame(self):\n \"\"\"Read an analysis frame from the input buffer, process it, and write\n the result to the output buffer.\"\"\"\n # Generate the analysis frame and discard the input samples that will\n # not be needed anymore\n self._in_buffer.peek(self._analysis_frame)\n self._in_buffer.remove(self._analysis_hop)\n\n # Apply the analysis window\n windows.apply(self._analysis_frame, self._analysis_window)\n\n # Convert the analysis frame into a synthesis frame\n synthesis_frame = self._converter.convert_frame(self._analysis_frame)\n\n # Apply the synthesis window\n windows.apply(synthesis_frame, self._synthesis_window)\n\n # Overlap and add the synthesis frame in the output buffer\n self._out_buffer.add(synthesis_frame)\n\n # The overlap and add step changes the volume of the signal. The\n # normalize_buffer is used to keep track of \"how much of the input\n # signal was added\" to each part of the output buffer, allowing to\n # normalize it.\n self._normalize_buffer.add(self._normalize_window)\n\n # Normalize the samples that are ready to be written to the output\n normalize = self._normalize_buffer.to_array(end=self._synthesis_hop)\n normalize[normalize < EPSILON] = 1\n self._out_buffer.divide(normalize)\n self._out_buffer.set_ready(self._synthesis_hop)\n self._normalize_buffer.remove(self._synthesis_hop)\n\n def read_from(self, reader):\n n = reader.skip(self._skip_input_samples)\n self._skip_input_samples -= n\n if(self._skip_input_samples > 0):\n return n\n\n n += self._in_buffer.read_from(reader)\n\n if(self._in_buffer.remaining_length == 0 and\n self._out_buffer.remaining_length >= self._synthesis_hop):\n # The input buffer has enough data to process, and there is enough\n # space in the output buffer to store the output\n self._process_frame()\n\n # Skip output samples if necessary\n skipped = self._out_buffer.remove(self._skip_output_samples)\n self._out_buffer.right_pad(skipped)\n self._skip_output_samples -= skipped\n\n # Set the number of input samples to be skipped\n self._skip_input_samples = self._analysis_hop - self._frame_length\n if self._skip_input_samples < 0:\n self._skip_input_samples = 0\n\n return n\n\n def set_speed(self, speed):\n self._analysis_hop = int(self._synthesis_hop * speed)\n self._converter.set_analysis_hop(self._analysis_hop)\n\n def write_to(self, writer):\n n = self._out_buffer.write_to(writer)\n self._out_buffer.right_pad(n)\n\n if(self._in_buffer.remaining_length > 0 and self._out_buffer.ready == 0):\n # There is not enough data to process in the input buffer, and the\n # output buffer is empty\n return n, True\n\n return n, False\n"
] | [
[
"numpy.zeros",
"numpy.empty",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
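The analysis_synthesis.py entry above overlap-adds windowed synthesis frames and then divides by a normalize buffer that tracks how much window energy was added to each output sample. The NumPy sketch below reproduces that normalization idea in isolation; the frame length, hop, and the identity "converter" are simplifications chosen for brevity, not the module's real defaults.

import numpy as np

# Toy parameters standing in for the TSM object's frame length and hops.
frame_length, synthesis_hop = 8, 4
analysis_window = np.hanning(frame_length)
synthesis_window = np.hanning(frame_length)
EPSILON = 0.0001

signal = np.random.randn(64)
n_frames = (len(signal) - frame_length) // synthesis_hop + 1

output = np.zeros(len(signal))
normalize = np.zeros(len(signal))

for i in range(n_frames):
    start = i * synthesis_hop
    frame = signal[start:start + frame_length] * analysis_window
    # A converter would transform the frame here; identity keeps the sketch short.
    frame = frame * synthesis_window
    output[start:start + frame_length] += frame                       # overlap-add
    normalize[start:start + frame_length] += analysis_window * synthesis_window

# Samples that received almost no window energy are left untouched,
# like the EPSILON guard in AnalysisSynthesisTSM._process_frame.
normalize[normalize < EPSILON] = 1
output = output / normalize
print(output.shape)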
mastratton3/great_expectations | [
"151970d776c942bfc23cdd90c7ed00b57a34559d"
] | [
"great_expectations/dataset/pandas_dataset.py"
] | [
"from __future__ import division\n\nimport inspect\nimport json\nimport re\nfrom datetime import datetime\nfrom functools import wraps\nimport jsonschema\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom dateutil.parser import parse\nfrom scipy import stats\nfrom six import PY3, integer_types, string_types\nfrom numbers import Number\n\nfrom .dataset import Dataset\nfrom great_expectations.data_asset.util import DocInherit, parse_result_format\nfrom great_expectations.dataset.util import \\\n is_valid_partition_object, is_valid_categorical_partition_object, is_valid_continuous_partition_object, \\\n _scipy_distribution_positional_args_from_dict, validate_distribution_parameters\n\n\nclass MetaPandasDataset(Dataset):\n \"\"\"MetaPandasDataset is a thin layer between Dataset and PandasDataset.\n\n This two-layer inheritance is required to make @classmethod decorators work.\n\n Practically speaking, that means that MetaPandasDataset implements \\\n expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \\\n and PandasDataset implements the expectation methods themselves.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MetaPandasDataset, self).__init__(*args, **kwargs)\n\n @classmethod\n def column_map_expectation(cls, func):\n \"\"\"Constructs an expectation using column-map semantics.\n\n\n The MetaPandasDataset implementation replaces the \"column\" parameter supplied by the user with a pandas Series\n object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation\n logic while preserving the standard Dataset signature and expected behavior.\n\n See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \\\n for full documentation of this function.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n result_format = parse_result_format(result_format)\n\n # FIXME temporary fix for missing/ignored value\n ignore_values = [None, np.nan]\n if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:\n ignore_values = []\n # Counting the number of unexpected values can be expensive when there is a large\n # number of np.nan values.\n # This only happens on expect_column_values_to_not_be_null expectations.\n # Since there is no reason to look for most common unexpected values in this case,\n # we will instruct the result formatting method to skip this step.\n result_format['partial_unexpected_count'] = 0 \n\n series = self[column]\n\n # FIXME rename to mapped_ignore_values?\n if len(ignore_values) == 0:\n boolean_mapped_null_values = np.array(\n [False for value in series])\n else:\n boolean_mapped_null_values = np.array([True if (value in ignore_values) or (pd.isnull(value)) else False\n for value in series])\n\n element_count = int(len(series))\n\n # FIXME rename nonnull to non_ignored?\n nonnull_values = series[boolean_mapped_null_values == False]\n nonnull_count = int((boolean_mapped_null_values == False).sum())\n\n boolean_mapped_success_values = func(\n self, nonnull_values, *args, **kwargs)\n success_count = np.count_nonzero(boolean_mapped_success_values)\n\n unexpected_list = list(\n 
nonnull_values[boolean_mapped_success_values == False])\n unexpected_index_list = list(\n nonnull_values[boolean_mapped_success_values == False].index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list, unexpected_index_list\n )\n\n # FIXME Temp fix for result format\n if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:\n del return_obj['result']['unexpected_percent_nonmissing']\n try:\n del return_obj['result']['partial_unexpected_counts']\n del return_obj['result']['partial_unexpected_list']\n except KeyError:\n pass\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n\n return inner_wrapper\n\n @classmethod\n def column_pair_map_expectation(cls, func):\n \"\"\"\n The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating\n truthiness of some condition on a per row basis across a pair of columns.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column_A, column_B, mostly=None, ignore_row_if=\"both_values_are_missing\", result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n series_A = self[column_A]\n series_B = self[column_B]\n\n if ignore_row_if == \"both_values_are_missing\":\n boolean_mapped_null_values = series_A.isnull() & series_B.isnull()\n elif ignore_row_if == \"either_value_is_missing\":\n boolean_mapped_null_values = series_A.isnull() | series_B.isnull()\n elif ignore_row_if == \"never\":\n boolean_mapped_null_values = series_A.map(lambda x: False)\n else:\n raise ValueError(\n \"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n assert len(series_A) == len(\n series_B), \"Series A and B must be the same length\"\n\n # This next bit only works if series_A and _B are the same length\n element_count = int(len(series_A))\n nonnull_count = (boolean_mapped_null_values == False).sum()\n\n nonnull_values_A = series_A[boolean_mapped_null_values == False]\n nonnull_values_B = series_B[boolean_mapped_null_values == False]\n nonnull_values = [value_pair for value_pair in zip(\n list(nonnull_values_A),\n list(nonnull_values_B)\n )]\n\n boolean_mapped_success_values = func(\n self, nonnull_values_A, nonnull_values_B, *args, **kwargs)\n success_count = boolean_mapped_success_values.sum()\n\n unexpected_list = [value_pair for value_pair in zip(\n list(series_A[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)]),\n list(series_B[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)])\n )]\n unexpected_index_list = list(series_A[(boolean_mapped_success_values == False) & (\n boolean_mapped_null_values == False)].index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list, unexpected_index_list\n )\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n return inner_wrapper\n\n @classmethod\n def 
multicolumn_map_expectation(cls, func):\n \"\"\"\n The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of\n evaluating truthiness of some condition on a per row basis across a set of columns.\n \"\"\"\n if PY3:\n argspec = inspect.getfullargspec(func)[0][1:]\n else:\n argspec = inspect.getargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(self, column_list, mostly=None, ignore_row_if=\"all_values_are_missing\",\n result_format=None, *args, **kwargs):\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n test_df = self[column_list]\n\n if ignore_row_if == \"all_values_are_missing\":\n boolean_mapped_skip_values = test_df.isnull().all(axis=1)\n elif ignore_row_if == \"any_value_is_missing\":\n boolean_mapped_skip_values = test_df.isnull().any(axis=1)\n elif ignore_row_if == \"never\":\n boolean_mapped_skip_values = pd.Series([False] * len(test_df))\n else:\n raise ValueError(\n \"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n boolean_mapped_success_values = func(\n self, test_df[boolean_mapped_skip_values == False], *args, **kwargs)\n success_count = boolean_mapped_success_values.sum()\n nonnull_count = (~boolean_mapped_skip_values).sum()\n element_count = len(test_df)\n\n unexpected_list = test_df[(boolean_mapped_skip_values == False) & (boolean_mapped_success_values == False)]\n unexpected_index_list = list(unexpected_list.index)\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly)\n\n return_obj = self._format_map_output(\n result_format, success,\n element_count, nonnull_count,\n len(unexpected_list),\n unexpected_list.to_dict(orient='records'), unexpected_index_list\n )\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n return inner_wrapper\n\n\nclass PandasDataset(MetaPandasDataset, pd.DataFrame):\n \"\"\"\n PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.\n\n For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`\n\n Notes:\n 1. Samples and Subsets of PandaDataSet have ALL the expectations of the original \\\n data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \\\n property on the original data frame.\n 2. Concatenations, joins, and merges of PandaDataSets contain NO expectations (since no autoinspection\n is performed by default).\n \"\"\"\n\n # this is necessary to subclass pandas in a proper way.\n # NOTE: specifying added properties in this way means that they will NOT be carried over when\n # the dataframe is manipulated, which we might want. To specify properties that are carried over\n # to manipulation results, we would just use `_metadata = ['row_count', ...]` here. 
The most likely\n # case is that we want the former, but also want to re-initialize these values to None so we don't\n # get an attribute error when trying to access them (I think this could be done in __finalize__?)\n _internal_names = pd.DataFrame._internal_names + [\n 'caching',\n ]\n _internal_names_set = set(_internal_names)\n\n # We may want to expand or alter support for subclassing dataframes in the future:\n # See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas\n\n @property\n def _constructor(self):\n return self.__class__\n\n def __finalize__(self, other, method=None, **kwargs):\n if isinstance(other, PandasDataset):\n self._initialize_expectations(other.get_expectations_config(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_include_configs_kwargs=False,\n discard_catch_exceptions_kwargs=False))\n # If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)\n # then it may not have discard_subset_failing_expectations set. Default to self value\n self.discard_subset_failing_expectations = getattr(other, \"discard_subset_failing_expectations\",\n self.discard_subset_failing_expectations)\n if self.discard_subset_failing_expectations:\n self.discard_failing_expectations()\n super(PandasDataset, self).__finalize__(other, method, **kwargs)\n return self\n\n def __init__(self, *args, **kwargs):\n super(PandasDataset, self).__init__(*args, **kwargs)\n self.discard_subset_failing_expectations = kwargs.get(\n 'discard_subset_failing_expectations', False)\n\n def get_row_count(self):\n return self.shape[0]\n\n def get_table_columns(self):\n return list(self.columns)\n\n def get_column_sum(self, column):\n return self[column].sum()\n\n def get_column_max(self, column, parse_strings_as_datetimes=False):\n temp_column = self[column].dropna()\n if parse_strings_as_datetimes:\n temp_column = temp_column.map(parse)\n return temp_column.max()\n\n def get_column_min(self, column, parse_strings_as_datetimes=False):\n temp_column = self[column].dropna()\n if parse_strings_as_datetimes:\n temp_column = temp_column.map(parse)\n return temp_column.min()\n\n def get_column_mean(self, column):\n return self[column].mean()\n\n def get_column_nonnull_count(self, column):\n series = self[column]\n null_indexes = series.isnull()\n nonnull_values = series[null_indexes == False]\n return len(nonnull_values)\n\n def get_column_value_counts(self, column):\n return self[column].value_counts()\n\n def get_column_unique_count(self, column):\n return self.get_column_value_counts(column).shape[0]\n\n def get_column_modes(self, column):\n return list(self[column].mode().values)\n\n def get_column_median(self, column):\n return self[column].median()\n\n def get_column_stdev(self, column):\n return self[column].std()\n\n def get_column_hist(self, column, bins):\n hist, bin_edges = np.histogram(self[column], bins, density=False)\n return list(hist)\n\n def get_column_count_in_range(self, column, min_val=None, max_val=None, min_strictly=False, max_strictly=True):\n # TODO this logic could probably go in the non-underscore version if we want to cache\n if min_val is None and max_val is None:\n raise ValueError('Must specify either min or max value')\n if min_val is not None and max_val is not None and min_val > max_val:\n raise ValueError('Min value must be <= to max value')\n\n result = self[column]\n if min_val is not None:\n if min_strictly:\n result = result[result > min_val]\n else:\n result = 
result[result >= min_val]\n if max_val is not None:\n if max_strictly:\n result = result[result < max_val]\n else:\n result = result[result <= max_val]\n return len(result)\n\n\n ### Expectation methods ###\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_unique(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n return ~column.duplicated(keep=False)\n\n # @Dataset.expectation(['column', 'mostly', 'result_format'])\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_be_null(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None, include_nulls=True):\n\n return ~column.isnull()\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_null(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n return column.isnull()\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_of_type(self, column, type_,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n # Target Datasource {numpy, python} was removed in favor of a simpler type mapping\n type_map = {\n \"null\": [type(None), np.nan],\n \"boolean\": [bool, np.bool_],\n \"int\": [int, np.int64] + list(integer_types),\n \"long\": [int, np.longdouble] + list(integer_types),\n \"float\": [float, np.float_],\n \"double\": [float, np.longdouble],\n \"bytes\": [bytes, np.bytes_],\n \"string\": [string_types, np.string_]\n }\n\n target_type = type_map[type_]\n\n return column.map(lambda x: isinstance(x, tuple(target_type)))\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_in_type_list(self, column, type_list,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n # Target Datasource {numpy, python} was removed in favor of a simpler type mapping\n type_map = {\n \"null\": [type(None), np.nan],\n \"boolean\": [bool, np.bool_],\n \"int\": [int, np.int64] + list(integer_types),\n \"long\": [int, np.longdouble] + list(integer_types),\n \"float\": [float, np.float_],\n \"double\": [float, np.longdouble],\n \"bytes\": [bytes, np.bytes_],\n \"string\": [string_types, np.string_]\n }\n\n # Build one type list with each specified type list from type_map\n target_type_list = list()\n for type_ in type_list:\n target_type_list += type_map[type_]\n\n return column.map(lambda x: isinstance(x, tuple(target_type_list)))\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_in_set(self, column, value_set,\n mostly=None,\n parse_strings_as_datetimes=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n parsed_value_set = self._parse_value_set(value_set)\n else:\n parsed_value_set = value_set\n\n return column.isin(parsed_value_set)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_be_in_set(self, column, value_set,\n mostly=None,\n parse_strings_as_datetimes=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n parsed_value_set = self._parse_value_set(value_set)\n else:\n parsed_value_set = value_set\n\n return ~column.isin(parsed_value_set)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def 
expect_column_values_to_be_between(self,\n column,\n min_value=None, max_value=None,\n parse_strings_as_datetimes=None,\n output_strftime_format=None,\n allow_cross_type_comparisons=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n if min_value is None and max_value is None:\n raise ValueError(\"min_value and max_value cannot both be None\")\n\n if parse_strings_as_datetimes:\n if min_value:\n min_value = parse(min_value)\n\n if max_value:\n max_value = parse(max_value)\n\n temp_column = column.map(parse)\n\n else:\n temp_column = column\n\n if min_value is not None and max_value is not None and min_value > max_value:\n raise ValueError(\"min_value cannot be greater than max_value\")\n\n def is_between(val):\n # TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints).\n # Ensure types can be compared since some types in Python 3 cannot be logically compared.\n # print type(val), type(min_value), type(max_value), val, min_value, max_value\n\n if type(val) == None:\n return False\n else:\n if min_value is not None and max_value is not None:\n if allow_cross_type_comparisons:\n try:\n return (min_value <= val) and (val <= max_value)\n except TypeError:\n return False\n\n else:\n if (isinstance(val, string_types) != isinstance(min_value, string_types)) or (isinstance(val, string_types) != isinstance(max_value, string_types)):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return (min_value <= val) and (val <= max_value)\n\n elif min_value is None and max_value is not None:\n if allow_cross_type_comparisons:\n try:\n return val <= max_value\n except TypeError:\n return False\n\n else:\n if isinstance(val, string_types) != isinstance(max_value, string_types):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return val <= max_value\n\n elif min_value is not None and max_value is None:\n if allow_cross_type_comparisons:\n try:\n return min_value <= val\n except TypeError:\n return False\n\n else:\n if isinstance(val, string_types) != isinstance(min_value, string_types):\n raise TypeError(\n \"Column values, min_value, and max_value must either be None or of the same type.\")\n\n return min_value <= val\n\n else:\n return False\n\n return temp_column.map(is_between)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_increasing(self, column, strictly=None, parse_strings_as_datetimes=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n temp_column = column.map(parse)\n\n col_diff = temp_column.diff()\n\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[0] = pd.Timedelta(1)\n\n if strictly:\n return col_diff > pd.Timedelta(0)\n else:\n return col_diff >= pd.Timedelta(0)\n\n else:\n col_diff = column.diff()\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[col_diff.isnull()] = 1\n\n if strictly:\n return col_diff > 0\n else:\n return col_diff >= 0\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_decreasing(self, column, strictly=None, parse_strings_as_datetimes=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n if parse_strings_as_datetimes:\n temp_column = column.map(parse)\n\n 
col_diff = temp_column.diff()\n\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[0] = pd.Timedelta(-1)\n\n if strictly:\n return col_diff < pd.Timedelta(0)\n else:\n return col_diff <= pd.Timedelta(0)\n\n else:\n col_diff = column.diff()\n # The first element is null, so it gets a bye and is always treated as True\n col_diff[col_diff.isnull()] = -1\n\n if strictly:\n return col_diff < 0\n else:\n return col_diff <= 0\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_value_lengths_to_be_between(self, column, min_value=None, max_value=None,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n if min_value is None and max_value is None:\n raise ValueError(\"min_value and max_value cannot both be None\")\n\n # Assert that min_value and max_value are integers\n try:\n if min_value is not None and not float(min_value).is_integer():\n raise ValueError(\"min_value and max_value must be integers\")\n\n if max_value is not None and not float(max_value).is_integer():\n raise ValueError(\"min_value and max_value must be integers\")\n\n except ValueError:\n raise ValueError(\"min_value and max_value must be integers\")\n\n column_lengths = column.astype(str).str.len()\n\n if min_value is not None and max_value is not None:\n return column_lengths.between(min_value, max_value)\n\n elif min_value is None and max_value is not None:\n return column_lengths <= max_value\n\n elif min_value is not None and max_value is None:\n return column_lengths >= min_value\n\n else:\n return False\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_value_lengths_to_equal(self, column, value,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return column.str.len() == value\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_regex(self, column, regex,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return column.astype(str).str.contains(regex)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_match_regex(self, column, regex,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n return ~column.astype(str).str.contains(regex)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_regex_list(self, column, regex_list, match_on=\"any\",\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n\n regex_matches = []\n for regex in regex_list:\n regex_matches.append(column.astype(str).str.contains(regex))\n regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)\n\n if match_on == \"any\":\n return regex_match_df.any(axis='columns')\n elif match_on == \"all\":\n return regex_match_df.all(axis='columns')\n else:\n raise ValueError(\"match_on must be either 'any' or 'all'\")\n\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_not_match_regex_list(self, column, regex_list,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n regex_matches = []\n for regex in regex_list:\n regex_matches.append(column.astype(str).str.contains(regex))\n regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)\n\n return ~regex_match_df.any(axis='columns')\n\n @DocInherit\n 
@MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_strftime_format(self, column, strftime_format,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None,\n meta=None):\n # Below is a simple validation that the provided format can both format and parse a datetime object.\n # %D is an example of a format that can format but not parse, e.g.\n try:\n datetime.strptime(datetime.strftime(\n datetime.now(), strftime_format), strftime_format)\n except ValueError as e:\n raise ValueError(\n \"Unable to use provided strftime_format. \" + e.message)\n\n def is_parseable_by_format(val):\n try:\n datetime.strptime(val, strftime_format)\n return True\n except TypeError as e:\n raise TypeError(\"Values passed to expect_column_values_to_match_strftime_format must be of type string.\\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.\")\n\n except ValueError as e:\n return False\n\n return column.map(is_parseable_by_format)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_dateutil_parseable(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def is_parseable(val):\n try:\n if type(val) != str:\n raise TypeError(\n \"Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.\")\n\n parse(val)\n return True\n\n except (ValueError, OverflowError):\n return False\n\n return column.map(is_parseable)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_be_json_parseable(self, column,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def is_json(val):\n try:\n json.loads(val)\n return True\n except:\n return False\n\n return column.map(is_json)\n\n @DocInherit\n @MetaPandasDataset.column_map_expectation\n def expect_column_values_to_match_json_schema(self, column, json_schema,\n mostly=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n def matches_json_schema(val):\n try:\n val_json = json.loads(val)\n jsonschema.validate(val_json, json_schema)\n # jsonschema.validate raises an error if validation fails.\n # So if we make it this far, we know that the validation succeeded.\n return True\n except jsonschema.ValidationError:\n return False\n except jsonschema.SchemaError:\n raise\n except:\n raise\n\n return column.map(matches_json_schema)\n\n @DocInherit\n @MetaPandasDataset.column_aggregate_expectation\n def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(self, column, distribution,\n p_value=0.05, params=None,\n result_format=None,\n include_config=False,\n catch_exceptions=None, meta=None):\n column = self[column]\n\n if p_value <= 0 or p_value >= 1:\n raise ValueError(\"p_value must be between 0 and 1 exclusive\")\n\n # Validate params\n try:\n validate_distribution_parameters(\n distribution=distribution, params=params)\n except ValueError as e:\n raise e\n\n # Format arguments for scipy.kstest\n if (isinstance(params, dict)):\n positional_parameters = _scipy_distribution_positional_args_from_dict(\n distribution, params)\n else:\n positional_parameters = params\n\n # K-S Test\n ks_result = stats.kstest(column, distribution,\n args=positional_parameters)\n\n return {\n \"success\": 
ks_result[1] >= p_value,\n \"result\": {\n \"observed_value\": ks_result[1],\n \"details\": {\n \"expected_params\": positional_parameters,\n \"observed_ks_result\": ks_result\n }\n }\n }\n\n @DocInherit\n @MetaPandasDataset.column_aggregate_expectation\n def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(self, column, partition_object=None, p=0.05, bootstrap_samples=None, bootstrap_sample_size=None,\n result_format=None, include_config=False, catch_exceptions=None, meta=None):\n column = self[column]\n\n if not is_valid_continuous_partition_object(partition_object):\n raise ValueError(\"Invalid continuous partition object.\")\n\n # TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object\n if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):\n raise ValueError(\"Partition endpoints must be finite.\")\n\n if \"tail_weights\" in partition_object and np.sum(partition_object[\"tail_weights\"]) > 0:\n raise ValueError(\"Partition cannot have tail weights -- endpoints must be finite.\")\n\n test_cdf = np.append(np.array([0]), np.cumsum(\n partition_object['weights']))\n\n def estimated_cdf(x):\n return np.interp(x, partition_object['bins'], test_cdf)\n\n if bootstrap_samples is None:\n bootstrap_samples = 1000\n\n if bootstrap_sample_size is None:\n # Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've\n # compressed via a partition.\n\n # Sampling too few elements will make the test insensitive to significant differences, especially\n # for nonoverlapping ranges.\n bootstrap_sample_size = len(partition_object['weights']) * 2\n\n results = [stats.kstest(\n np.random.choice(column, size=bootstrap_sample_size, replace=True),\n estimated_cdf)[1]\n for k in range(bootstrap_samples)]\n\n test_result = (1 + sum(x >= p for x in results)) / \\\n (bootstrap_samples + 1)\n\n hist, bin_edges = np.histogram(column, partition_object['bins'])\n below_partition = len(\n np.where(column < partition_object['bins'][0])[0])\n above_partition = len(\n np.where(column > partition_object['bins'][-1])[0])\n\n # Expand observed partition to report, if necessary\n if below_partition > 0 and above_partition > 0:\n observed_bins = [np.min(column)] + \\\n partition_object['bins'] + [np.max(column)]\n observed_weights = np.concatenate(\n ([below_partition], hist, [above_partition])) / len(column)\n elif below_partition > 0:\n observed_bins = [np.min(column)] + partition_object['bins']\n observed_weights = np.concatenate(\n ([below_partition], hist)) / len(column)\n elif above_partition > 0:\n observed_bins = partition_object['bins'] + [np.max(column)]\n observed_weights = np.concatenate(\n (hist, [above_partition])) / len(column)\n else:\n observed_bins = partition_object['bins']\n observed_weights = hist / len(column)\n\n observed_cdf_values = np.cumsum(observed_weights)\n\n return_obj = {\n \"success\": test_result > p,\n \"result\": {\n \"observed_value\": test_result,\n \"details\": {\n \"bootstrap_samples\": bootstrap_samples,\n \"bootstrap_sample_size\": bootstrap_sample_size,\n \"observed_partition\": {\n \"bins\": observed_bins,\n \"weights\": observed_weights.tolist()\n },\n \"expected_partition\": {\n \"bins\": partition_object['bins'],\n \"weights\": partition_object['weights']\n },\n \"observed_cdf\": {\n \"x\": observed_bins,\n \"cdf_values\": [0] + observed_cdf_values.tolist()\n },\n \"expected_cdf\": {\n \"x\": 
partition_object['bins'],\n \"cdf_values\": test_cdf.tolist()\n }\n }\n }\n }\n\n return return_obj\n\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_equal(self,\n column_A,\n column_B,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n return column_A == column_B\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_A_to_be_greater_than_B(self,\n column_A,\n column_B,\n or_equal=None,\n parse_strings_as_datetimes=None,\n allow_cross_type_comparisons=None,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n # FIXME\n if allow_cross_type_comparisons == True:\n raise NotImplementedError\n\n if parse_strings_as_datetimes:\n temp_column_A = column_A.map(parse)\n temp_column_B = column_B.map(parse)\n\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n if or_equal == True:\n return temp_column_A >= temp_column_B\n else:\n return temp_column_A > temp_column_B\n\n @DocInherit\n @MetaPandasDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_in_set(self,\n column_A,\n column_B,\n value_pairs_set,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n temp_df = pd.DataFrame({\"A\": column_A, \"B\": column_B})\n value_pairs_set = {(x, y) for x, y in value_pairs_set}\n\n results = []\n for i, t in temp_df.iterrows():\n if pd.isnull(t[\"A\"]):\n a = None\n else:\n a = t[\"A\"]\n\n if pd.isnull(t[\"B\"]):\n b = None\n else:\n b = t[\"B\"]\n\n results.append((a, b) in value_pairs_set)\n\n return pd.Series(results, temp_df.index)\n\n @DocInherit\n @MetaPandasDataset.multicolumn_map_expectation\n def expect_multicolumn_values_to_be_unique(self,\n column_list,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None, include_config=False, catch_exceptions=None, meta=None\n ):\n threshold = len(column_list.columns)\n # Do not dropna here, since we have separately dealt with na in decorator\n return column_list.nunique(dropna=False, axis=1) >= threshold\n"
] | [
[
"scipy.stats.kstest",
"pandas.concat",
"pandas.Series",
"pandas.isnull",
"numpy.random.choice",
"numpy.min",
"numpy.cumsum",
"pandas.DataFrame",
"pandas.Timedelta",
"numpy.concatenate",
"numpy.max",
"numpy.where",
"numpy.interp",
"numpy.count_nonzero",
"numpy.array",
"numpy.histogram",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
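The pandas_dataset.py entry above wraps expectation bodies with column_map_expectation, which hands the implementation only the non-null values of a column and turns the returned boolean Series into a pass/fail decision via the mostly threshold. The snippet below is a stripped-down sketch of that flow using a hypothetical regex expectation; it does not import great_expectations and omits result formatting.

import numpy as np
import pandas as pd

def values_match_regex(nonnull_values: pd.Series, regex: str) -> pd.Series:
    # Stand-in for a column_map_expectation body: one boolean per non-null value.
    return nonnull_values.astype(str).str.contains(regex)

column = pd.Series(["abc", "abd", None, "xyz"])
nonnull = column[~column.isnull()]

success_flags = values_match_regex(nonnull, r"^ab")
element_count = len(column)                       # 4 rows in total
nonnull_count = len(nonnull)                      # 3 non-null values considered
success_count = int(np.count_nonzero(success_flags))

mostly = 0.6                                      # pass if >= 60% of non-null values match
percent_success = success_count / nonnull_count
print(percent_success >= mostly)                  # True: 2 of 3 non-null values match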
victimsnino/ReactivePlusPlus | [
"bb187cc52936bce7c1ef4899d7dbb9c970cef291"
] | [
"ci/create_graphs_for_benchmark_data.py"
] | [
"import plotly.offline as pyo\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport pandas as pd\nimport plotly.graph_objects as go\n\ndef rindex(lst, value):\n return len(lst) - lst[::-1].index(value) - 1\n \ndashboard = open(\"./gh-pages/benchmark.html\", 'w')\ndashboard.write(\"<html><head></head><body>\" + \"\\n\")\ndashboard.write(\"<p> TIP: Each graph can be zoomed in via selection of interested region! Double-click to return to original zoom mode </p>\")\nadd_js = True\n\ndef dump_plot(fig, name):\n global add_js\n global dashboard\n\n dashboard.write(f\"<details> <summary><b>{name}</b></summary>\")\n dashboard.write(pyo.plot(fig, include_plotlyjs=add_js, output_type='div'))\n dashboard.write(\"</details><br>\")\n\n add_js = False\n\n\nresults = pd.read_csv(\"./gh-pages/results.csv\", index_col=\"id\")\nall_commits = list(results[\"commit\"].unique())\ntake_last=20\n# duplicate last row to fix issue with splines\nresults = pd.concat([results, results[results['commit'] == results[\"commit\"].unique()[-1]]]).reset_index(drop=True)\n\ncolormap = px.colors.qualitative.Plotly\nfor platform, data in results.groupby(\"platform\", sort=False, as_index=False):\n dashboard.write(f\"<h2>{platform} </h2>\")\n for name, bench_data in data.groupby(\"benchmark_name\", sort=False, as_index=False):\n fig = go.Figure()\n for i, (test_case, test_cases_data) in enumerate(bench_data.groupby(\"test_case\", sort=False, as_index=False)):\n for source, source_data in test_cases_data.groupby(\"source\", sort=False, as_index=False):\n commit_indexes=[all_commits.index(c) for c in source_data[\"commit\"]]\n fig.add_trace(go.Scatter(x=commit_indexes,\n y=source_data[\"value\"],\n line_shape='spline',\n mode='lines+markers',\n marker_color=colormap[i],\n line_color=colormap[i],\n line_dash='solid' if source == 'rpp' else 'dot',\n name=f'{test_case}, {source}'))\n if source == 'rpp':\n fig.add_trace(go.Scatter(\n x=commit_indexes + commit_indexes[::-1],\n y=pd.concat([source_data['lowerBound'],\n source_data['upperBound'][::-1]]),\n fill='toself',\n fillcolor=colormap[i],\n line_color=colormap[i],\n name=f'{test_case}, {source}',\n showlegend=False,\n mode=\"lines\",\n opacity=0.3,\n line_shape='spline',\n hoverinfo='skip'\n ))\n\n min_val = bench_data.groupby(\"commit\", sort=False)[\"value\"].agg([\"min\"])[-take_last:].min().values[0]\n max_val = bench_data.groupby(\"commit\", sort=False)[\"value\"].agg([\"max\"])[-take_last:].max().values[0]\n diff = (max_val - min_val) * 0.05\n min_val -= diff\n max_val += diff\n fig.update_layout(\n hovermode=\"x unified\",\n title_x=0.5,\n title=name,\n xaxis_title=\"Commit\",\n yaxis_title=\"ns/iter\",\n legend_title=\"Legend Title\",\n xaxis=dict(\n tickmode='array',\n tickvals=list(range(0, len(all_commits))),\n ticktext=all_commits,\n tickangle=-35,\n rangeslider=dict(visible=True)\n ),\n yaxis=dict(\n # autorange=True,\n fixedrange=False\n ))\n\n fig['layout']['xaxis'].update(range=[len(all_commits)-take_last, len(all_commits)])\n fig['layout']['yaxis'].update(range=[min_val, max_val])\n \n dump_plot(fig, name)\n\n\ndashboard.write(\"</body></html>\" + \"\\n\")\ndashboard.close()\n"
] | [
[
"pandas.concat",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
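The create_graphs_for_benchmark_data.py entry above renders each benchmark as a Plotly line chart with commit hashes as tick labels, a range slider, and a default view limited to the most recent commits. The sketch below shows that layout pattern on a made-up five-commit history; the commit hashes, timings, and output file name are placeholders.

import pandas as pd
import plotly.graph_objects as go
import plotly.offline as pyo

# Toy benchmark history; commits and timings are invented for illustration.
df = pd.DataFrame({
    "commit": ["a1", "b2", "c3", "d4", "e5"],
    "value": [120.0, 118.5, 130.2, 125.0, 119.7],
})

take_last = 3
fig = go.Figure(go.Scatter(x=list(range(len(df))), y=df["value"],
                           mode="lines+markers", line_shape="spline",
                           name="benchmark"))
fig.update_layout(
    xaxis=dict(tickmode="array",
               tickvals=list(range(len(df))),
               ticktext=list(df["commit"]),
               rangeslider=dict(visible=True)),
    yaxis=dict(fixedrange=False),
)
# Default the visible window to the most recent commits, like the dashboard script.
fig["layout"]["xaxis"].update(range=[len(df) - take_last, len(df)])

with open("benchmark_preview.html", "w") as f:
    f.write(pyo.plot(fig, include_plotlyjs=True, output_type="div"))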
broadinstitute/tissue_purifier | [
"989ce9d58bba99a3f1c49743eed22dcc64e5f159",
"989ce9d58bba99a3f1c49743eed22dcc64e5f159",
"989ce9d58bba99a3f1c49743eed22dcc64e5f159"
] | [
"src/tissue_purifier/utils/nms_util.py",
"src/tissue_purifier/data/dataset.py",
"src/tissue_purifier/models/_optim_scheduler.py"
] | [
"import torch\nimport numpy\nfrom typing import Union, List, Any\n\n\nclass NonMaxSuppression:\n \"\"\"\n Given a set of bounding box defined over possibly different tissue\n Use Intersection_over_Minimum criteria to filter out overlapping proposals.\n \"\"\"\n\n @staticmethod\n @torch.no_grad()\n def compute_nm_mask(score: Union[torch.Tensor, numpy.ndarray],\n ids: Union[torch.Tensor, numpy.ndarray, List[Any]],\n patches_xywh: Union[torch.Tensor, numpy.ndarray],\n iom_threshold: float) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n Filter the proposals according to their score and their Intersection over Minimum.\n\n Args:\n score: score used to sort the proposals of shape (N)\n ids: vector or list of shape (N) with the (tissue) id.\n IoMIN is always zero between patches with different (tissue) ids.\n patches_xywh: coordinates with the proposals of shape (N, 4) where 4 stand for x,y,w,h.\n iom_threshold: threshold of Intersection over Minimum. If IoM is larger than this value the proposals\n will be suppressed during NMS. Only the proposal with larger score will survive.\n\n Returns:\n (nms_mask_n, iomin_nn) where nms_mask_n is a boolean tensor of shape (N) with True\n if the proposal survived NMS and iomin_nn with the value of the IoMIN among all possible pairs.\n \"\"\"\n\n def _to_numpy(_x):\n if isinstance(_x, torch.Tensor):\n return _x.detach().cpu().numpy()\n elif isinstance(_x, numpy.ndarray):\n return _x\n elif isinstance(_x, list):\n return numpy.array(_x)\n\n def _to_torch(_x):\n if isinstance(_x, torch.Tensor):\n return _x\n elif isinstance(_x, numpy.ndarray):\n return torch.from_numpy(_x)\n else:\n raise Exception(\"Expected a torch.tensor or a numpy.ndarray. Received {0}\".format(type(_x)))\n\n # the tissue ids can be a list of string. Therefore I can not convert to torch tensor directly.\n ids_numpy = _to_numpy(ids)\n assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4\n assert score.shape == ids_numpy.shape == patches_xywh[:, 0].shape\n\n # this is O(N^2) algorithm (all boxes compared to all other boxes) but it is very simple\n x, y, w, h = _to_torch(patches_xywh).unbind(dim=-1)\n overlap_measure_tmp_nn = NonMaxSuppression._compute_iomin(x=x, y=y, w=w, h=h)\n\n mask_same_id_nn_numpy = (ids_numpy == ids_numpy[:, None])\n mask_same_id_nn = _to_torch(mask_same_id_nn_numpy).to(device=overlap_measure_tmp_nn.device)\n overlap_measure_nn = overlap_measure_tmp_nn * mask_same_id_nn # if ids are different IoMIN = 0\n\n binarized_overlap_nn = (overlap_measure_nn > iom_threshold).float()\n nms_mask_n = NonMaxSuppression.perform_nms_selection(mask_overlap_nn=binarized_overlap_nn,\n score_n=score,\n possible_n=torch.ones_like(score).bool())\n return nms_mask_n, overlap_measure_nn\n\n @staticmethod\n def perform_nms_selection(mask_overlap_nn: torch.Tensor,\n score_n: torch.Tensor,\n possible_n: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given a set of n proposals and the (n x n) binarized mask which describes if two proposals are\n mutually exclusive it performs the greedy NMS in parallel (if possible).\n\n Args:\n mask_overlap_nn: Binarized overlap matrix with 1 if IoMIN > threshold and 0 otherwise, i.e 1 means that\n two proposals are incompatible, 0 means that they are compatible.\n score_n: Score of the proposal. 
Higher score proposal have precedence.\n possible_n: Vector with 1 if the proposal can be chosen and 0 otherwise.\n\n Note:\n The algorithm terminates when there are no more suitable proposals\n (because they have all been suppressed by higher scoring ones).\n\n Returns:\n mask_nms_n: A tensor with the same shape as :attr:'score_n'. The entries are 1 if that proposal\n has been selected (i.e. survived NMS) and 0 otherwise.\n \"\"\"\n # reshape\n score_1n = score_n.unsqueeze(-2)\n possible_1n = possible_n.unsqueeze(-2)\n idx_n1 = torch.arange(start=0, end=score_n.shape[-1], step=1, device=score_n.device).view(-1, 1).long()\n selected_n1 = torch.zeros_like(score_n).unsqueeze(dim=-1)\n\n # Greedy algorithm in a loop\n n_iter = 0\n while possible_1n.sum() > 0:\n n_iter += 1\n score_mask_nn = mask_overlap_nn * (score_1n * possible_1n)\n index_n1 = torch.max(score_mask_nn, keepdim=True, dim=-1)[1]\n selected_n1 += possible_1n.transpose(dim0=-1, dim1=-2) * (idx_n1 == index_n1)\n blocks_1n = torch.sum(mask_overlap_nn * selected_n1, keepdim=True, dim=-2)\n possible_1n *= (blocks_1n == 0)\n mask_selected_n = selected_n1.squeeze(dim=-1).bool()\n # print(\"DEBUG nms performed in \", n_iter)\n # print(\"DEBUG nms. Mask \", mask_selected_n.shape, mask_selected_n.sum(), mask_selected_n.dtype)\n return mask_selected_n\n\n @staticmethod\n def _unroll_and_compare(x_tmp: torch.Tensor, label: str) -> torch.Tensor:\n \"\"\" Given a vector of size: (*, n) creates an output of size (*, n, n)\n obtained by comparing all vector entries with all other vector entries\n The comparison is either: MIN,MAX \"\"\"\n if label == \"MAX\":\n y_tmp = torch.max(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))\n elif label == \"MIN\":\n y_tmp = torch.min(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))\n else:\n raise Exception(\"label is unknown. It is \", label)\n return y_tmp\n\n @staticmethod\n def _compute_iomin(\n x: torch.Tensor,\n y: torch.Tensor,\n w: torch.Tensor,\n h: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given x,y,w,h compute the Intersection over Min Area (IoMin) among all possible pairs.\n\n Args:\n x: torch.Tensor of shape: (n) with the x-coordinate\n y: torch.Tensor of shape: (n) with the y-coordinate\n w: torch.Tensor of shape: (n) with the width\n h: torch.Tensor of shape: (n) with the height\n\n Returns:\n A matrix of shape (n, n) with the IoMIN\n \"\"\"\n\n assert x.shape == y.shape == w.shape == h.shape\n\n # compute x1,x3,y1,y3 and area\n x1 = x\n x3 = x + w\n y1 = y\n y3 = y + h\n area = w * h\n\n min_area_nn = NonMaxSuppression._unroll_and_compare(area, \"MIN\")\n xi1_nn = NonMaxSuppression._unroll_and_compare(x1, \"MAX\")\n yi1_nn = NonMaxSuppression._unroll_and_compare(y1, \"MAX\")\n xi3_nn = NonMaxSuppression._unroll_and_compare(x3, \"MIN\")\n yi3_nn = NonMaxSuppression._unroll_and_compare(y3, \"MIN\")\n\n intersection_area_nn = torch.clamp(xi3_nn - xi1_nn, min=0) * torch.clamp(yi3_nn - yi1_nn, min=0)\n return intersection_area_nn / min_area_nn\n",
"from typing import List, Optional, Tuple, Union, NamedTuple, Callable, Any\nimport torch\nimport collections.abc\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass MetadataCropperDataset(NamedTuple):\n f_name: Union[str, int]\n loc_x: Union[int, float]\n loc_y: Union[int, float]\n moran: Union[float, None]\n\n\nclass CropperTensor(torch.nn.Module):\n \"\"\"\n Base class for cropping a tensor and returning the crops and its coordinates, i.e. (crops, x_loc, y_loc)\n This does NOT fit into a standard transform since it returns a tuple and not just an augmented tensor.\n \"\"\"\n\n def __init__(\n self,\n crop_size: int = 224,\n strategy: str = 'random',\n stride: int = 200,\n n_crops: int = 10,\n random_order: bool = True,\n criterium_fn: Callable = None,\n **kargs,\n ):\n \"\"\"\n Args:\n crop_size: int, the size in pixel of the sliding tiling windows\n strategy: str, can be either 'random' or 'tiling' or 'identity'\n stride: Used only when :attr:'strategy' is 'tiling'.\n Displacement among consecutive sliding window. This allow to control the overlap between crops.\n n_crops: int, the size of crops to generate from a single image.\n random_order: Used only when :attr:'strategy' is 'tiling'.\n If true the crops are shuffled before being returned.\n criterium_fn: Callable which returns true if it is a valid crop, return False otherwise\n \"\"\"\n super().__init__()\n self.crop_size_ = crop_size\n self.strategy_ = strategy\n self.stride_ = stride\n self.n_crops_ = n_crops\n self.random_order_ = random_order\n self.criterium_fn_ = criterium_fn\n self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)\n\n @staticmethod\n def _assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn):\n assert isinstance(crop_size, int)\n assert isinstance(stride, int)\n assert isinstance(n_crops, int)\n assert isinstance(random_order, bool)\n assert isinstance(criterium_fn, collections.abc.Callable)\n assert strategy == 'random' or strategy == 'tiling' or strategy == 'identity'\n\n def forward(\n self,\n tensor: torch.Tensor,\n crop_size: int = None,\n strategy: str = None,\n stride: int = None,\n n_crops: int = None,\n random_order: bool = None,\n criterium_fn: Callable = None) -> (List[torch.Tensor], List[int], List[int]):\n\n # All parameters default to the one used during initialization if they are not specified\n crop_size = self.crop_size_ if crop_size is None else crop_size\n strategy = self.strategy_ if strategy is None else strategy\n stride = self.stride_ if stride is None else stride\n n_crops = self.n_crops_ if n_crops is None else n_crops\n random_order = self.random_order_ if random_order is None else random_order\n criterium_fn = self.criterium_fn_ if criterium_fn is None else criterium_fn\n\n crops, x_locs, y_locs = self._crop(tensor, crop_size, strategy, stride, n_crops, random_order, criterium_fn)\n return crops, x_locs, y_locs\n\n @staticmethod\n def reapply_crops(tensor, patches_xywh) -> (List[torch.Tensor], List[int], List[int]):\n raise NotImplementedError\n\n def _crop(self,\n tensor,\n crop_size: int,\n strategy: str,\n stride: int,\n n_crops: int,\n random_order: bool,\n criterium_fn: Callable) -> (List[torch.Tensor], List[int], List[int]):\n \"\"\" This must be overwritten in derived class \"\"\"\n raise NotImplementedError\n\n def __repr__(self) -> str:\n \"\"\" This must be overwritten in derived class \"\"\"\n raise NotImplementedError\n\n\nclass CropperDenseTensor(CropperTensor):\n SAFETY_FACTOR = 3\n\n def 
__init__(self, min_threshold_value: float, min_threshold_fraction: float, **kargs):\n \"\"\"\n Args:\n min_threshold_value: binarize a crop according to\n :math:'tensor.sum(dim=-3, keepdim=True) > min_threshold_value'\n min_threshold_fraction: A crop with a fraction of True entry below this value is considered\n empty and disregarded.\n \"\"\"\n assert isinstance(min_threshold_value, float)\n self.min_threshold_value = min_threshold_value\n assert isinstance(min_threshold_fraction, float)\n self.min_threshold_fraction = min_threshold_fraction\n\n def criterium_fn(potential_crops):\n masks = potential_crops.sum(dim=-3, keepdim=False) > min_threshold_value\n number_of_true = masks.flatten(start_dim=-2).sum(dim=-1)\n area_of_crops = masks.shape[-1] * masks.shape[-2]\n return number_of_true.float() > area_of_crops * min_threshold_fraction\n\n super().__init__(criterium_fn=criterium_fn,\n **kargs)\n\n def __repr__(self):\n return self.__class__.__name__ + '(crop_size={0}, strategy={1}, stride={2}, random_order={3}, \\\n min_threshold_value={4}, min_threshold_fraction={5})'.format(self.crop_size_,\n self.strategy_,\n self.stride_,\n self.random_order_,\n self.min_threshold_value,\n self.min_threshold_fraction)\n\n @staticmethod\n def reapply_crops(tensor, patches_xywh) -> (List[torch.Tensor], List[int], List[int]):\n assert isinstance(patches_xywh, torch.LongTensor)\n assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4\n x_patch, y_patch, w_patch, h_patch = patches_xywh.chunk(chunks=4, dim=-1) # each one has shape (batch, 1)\n\n crops = []\n for ix, iy, iw, ih, in zip(x_patch, y_patch, w_patch, h_patch):\n tensor_tmp = tensor.narrow(dim=-2, start=ix.item(), length=iw.item())\n crop = tensor_tmp.narrow(dim=-1, start=iy.item(), length=ih.item())\n crops.append(crop.clone())\n return crops, x_patch.squeeze(-1).tolist(), y_patch.squeeze(-1).tolist()\n\n def _crop(self,\n tensor: torch.Tensor,\n crop_size: int,\n strategy: str,\n stride: int,\n n_crops: int,\n random_order: bool,\n criterium_fn: Callable) -> (List[torch.Tensor], List[int], List[int]):\n\n assert isinstance(tensor, torch.Tensor)\n self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)\n\n if strategy == 'identity':\n return [tensor]*n_crops, [0]*n_crops, [0]*n_crops\n\n elif strategy == 'tiling' or strategy == 'random':\n\n w_img, h_img = tensor.shape[-2:]\n if strategy == 'tiling':\n # generate a random starting point\n x_corner_list, y_corner_list = [], []\n i0 = torch.randint(low=0, high=stride, size=[1]).item()\n j0 = torch.randint(low=0, high=stride, size=[1]).item()\n for i in range(i0, w_img-crop_size, stride):\n for j in range(j0, h_img-crop_size, stride):\n x_corner_list.append(i)\n y_corner_list.append(j)\n\n x_corner = torch.tensor(x_corner_list, device=tensor.device, dtype=torch.long)\n y_corner = torch.tensor(y_corner_list, device=tensor.device, dtype=torch.long)\n\n if random_order:\n index_shuffle = torch.randperm(n=x_corner.shape[0], dtype=torch.long, device=x_corner.device)\n x_corner = x_corner[index_shuffle]\n y_corner = y_corner[index_shuffle]\n elif strategy == 'random':\n # create two tensors (x_corner, y_corner) with the location of the bottom left corner of the crop\n x_corner = torch.randint(\n low=0,\n high=max(1, w_img - crop_size),\n size=[n_crops * self.SAFETY_FACTOR],\n device=tensor.device,\n dtype=torch.long,\n ) # low is included, high is excluded\n\n y_corner = torch.randint(\n low=0,\n high=max(1, h_img - crop_size),\n size=[n_crops * 
self.SAFETY_FACTOR],\n device=tensor.device,\n dtype=torch.long,\n ) # low is included, high is excluded\n else:\n raise Exception(\"strategy is not recognized\", strategy)\n\n # compute the crops\n crops, x_locs, y_locs = [], [], []\n for ix, iy in zip(x_corner, y_corner):\n tensor_tmp = torch.narrow(tensor, dim=-2, start=ix, length=crop_size)\n crop = torch.narrow(tensor_tmp, dim=-1, start=iy, length=crop_size)\n if self.criterium_fn_(crop):\n crops.append(crop.clone())\n x_locs.append(ix.item())\n y_locs.append(iy.item())\n\n # return at most n_crops items\n return crops[:n_crops], x_locs[:n_crops], y_locs[:n_crops]\n\n\nclass CropperSparseTensor(CropperTensor):\n SAFETY_FACTOR = 5\n\n def __init__(self,\n n_element_min: int = 100,\n **kargs,\n ):\n \"\"\"\n Args:\n n_element_min: create crops with (at least) this number of elements (i.e. cells or genes)\n \"\"\"\n assert isinstance(n_element_min, int)\n self.n_element_min = n_element_min\n\n def criterium_fn(n_elements):\n return n_elements >= n_element_min\n\n super().__init__(criterium_fn=criterium_fn,\n **kargs)\n\n def __repr__(self):\n return self.__class__.__name__ + '(crop_size={0}, strategy={1}, stride={2}, random_order={3}, \\\n n_element_min={4})'.format(self.crop_size_,\n self.strategy_,\n self.stride_,\n self.random_order_,\n self.n_element_min)\n\n @staticmethod\n def reapply_crops(sparse_tensor, patches_xywh) -> (List[torch.sparse.Tensor], List[int], List[int]):\n assert isinstance(patches_xywh, torch.Tensor)\n assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4\n assert isinstance(sparse_tensor, torch.sparse.Tensor)\n codes: torch.Tensor\n x_pixel: torch.Tensor\n y_pixel: torch.Tensor\n codes, x_pixel, y_pixel = sparse_tensor.indices() # each has shape (n_element)\n values = sparse_tensor.values()\n ch, w_img, h_img = sparse_tensor.size()\n\n x_patch, y_patch, w_patch, h_patch = patches_xywh.chunk(chunks=4, dim=-1) # each one has shape (batch, 1)\n\n mask = (x_pixel >= x_patch) * \\\n (x_pixel < x_patch + w_patch) * \\\n (y_pixel >= y_patch) * \\\n (y_pixel < y_patch + h_patch) # shape (batch, n_element)\n\n assert mask.shape[0] == x_patch.shape[0] == y_patch.shape[0] == w_patch.shape[0] == h_patch.shape[0]\n\n crops = []\n for n in range(mask.shape[0]):\n mask_n = mask[n] # shape (n_element)\n codes_n = codes[mask_n]\n x_pixel_n = x_pixel[mask_n] - x_patch[n, 0]\n y_pixel_n = y_pixel[mask_n] - y_patch[n, 0]\n values_n = values[mask_n]\n\n crops.append(\n torch.sparse_coo_tensor(\n indices=torch.stack((codes_n, x_pixel_n, y_pixel_n), dim=0),\n values=values_n,\n size=(ch, w_patch[n, 0], h_patch[n, 0]),\n device=x_pixel.device,\n requires_grad=False,\n ).coalesce()\n )\n return crops, x_patch.squeeze(-1).tolist(), y_patch.squeeze(-1).tolist()\n\n def _crop(self,\n sparse_tensor,\n crop_size: int,\n strategy: str,\n stride: int,\n n_crops: int,\n random_order: bool,\n criterium_fn: Callable) -> Tuple[list, list, list]:\n\n if strategy == 'identity':\n return [sparse_tensor]*n_crops, [0]*n_crops, [0]*n_crops\n\n self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)\n assert sparse_tensor.is_sparse\n\n # this might break the code if num_worked>0 in dataloader\n # if torch.cuda.is_available():\n # sparse_tensor = sparse_tensor.cuda()\n\n codes, x_pixel, y_pixel = sparse_tensor.indices() # each has shape (n_elements)\n values = sparse_tensor.values()\n\n ch, w_img, h_img = sparse_tensor.size()\n\n if strategy == 'tiling':\n # generate a random starting point\n x_corner_list, 
y_corner_list = [], []\n i0 = torch.randint(low=-crop_size, high=0, size=[1]).item()\n j0 = torch.randint(low=-crop_size, high=0, size=[1]).item()\n for i in range(i0, w_img, stride):\n for j in range(j0, h_img, stride):\n x_corner_list.append(i)\n y_corner_list.append(j)\n\n x_corner = torch.tensor(x_corner_list, device=x_pixel.device, dtype=x_pixel.dtype).view(-1, 1)\n y_corner = torch.tensor(y_corner_list, device=x_pixel.device, dtype=x_pixel.dtype).view(-1, 1)\n\n if random_order:\n index_shuffle = torch.randperm(n=x_corner.shape[0], dtype=torch.long, device=x_corner.device)\n x_corner = x_corner[index_shuffle]\n y_corner = y_corner[index_shuffle]\n\n elif strategy == 'random':\n x_corner = torch.randint(\n low=0,\n high=max(1, sparse_tensor.shape[-2] - crop_size),\n size=[n_crops * self.SAFETY_FACTOR],\n device=x_pixel.device,\n dtype=x_pixel.dtype,\n ).view(-1, 1) # low is included, high is excluded\n\n y_corner = torch.randint(\n low=0,\n high=max(1, sparse_tensor.shape[-1] - crop_size),\n size=[n_crops * self.SAFETY_FACTOR],\n device=y_pixel.device,\n dtype=y_pixel.dtype,\n ).view(-1, 1) # low is included, high is excluded\n\n else:\n raise Exception(\"strategy is not recognized\", strategy)\n\n element_mask = (x_pixel >= x_corner) * \\\n (x_pixel < x_corner + crop_size) * \\\n (y_pixel >= y_corner) * \\\n (y_pixel < y_corner + crop_size) # shape: (n_crops * SAFETY_FACTOR, n_elements)\n\n n_elements = (values * element_mask).sum(dim=-1) # shape (n_crops * SAFETY_FACTOR)\n valid_patch = criterium_fn(n_elements)\n n_valid_patches = valid_patch.sum().item()\n if n_valid_patches < n_crops:\n # import warnings\n # warnings.warn(\"Warning. Not enough valid crops found. Change the parameters. \")\n print(\"Warning. Only {0} valid crops found when requested {1}. 
\\\n Change the parameters.\".format(n_valid_patches, n_crops))\n n_max = min(n_crops, n_valid_patches)\n\n ix = x_corner[valid_patch, 0][: n_max] # shape: n_max\n iy = y_corner[valid_patch, 0][: n_max] # shape: n_max\n mask = element_mask[valid_patch][: n_max] # shape: n_max, element_in_sparse_array\n dense_crop_shape = (ch, crop_size, crop_size)\n\n crops = []\n for n in range(n_max):\n mask_n = mask[n]\n codes_n = codes[mask_n]\n x_pixel_n = x_pixel[mask_n] - ix[n]\n y_pixel_n = y_pixel[mask_n] - iy[n]\n values_n = values[mask_n]\n\n crops.append(\n torch.sparse_coo_tensor(\n indices=torch.stack((codes_n, x_pixel_n, y_pixel_n), dim=0),\n values=values_n,\n size=dense_crop_shape,\n device=x_pixel.device,\n requires_grad=False,\n ).coalesce()\n )\n\n x_locs = [ix[n].item() for n in range(n_max)]\n y_locs = [iy[n].item() for n in range(n_max)]\n return crops, x_locs, y_locs\n\n\nclass CropperDataset(Dataset):\n \"\"\"\n Dataset with imgs, labels, metadata and possibly a cropper for cropping img on the fly\n \"\"\"\n\n def __init__(\n self,\n imgs: Union[\n List[torch.Tensor],\n List[torch.sparse.Tensor],\n List[\"SparseImage\"],\n ],\n labels: List[Any],\n metadatas: List[MetadataCropperDataset],\n cropper: Optional[CropperTensor] = None,\n ):\n \"\"\"\n Args:\n imgs: (list of) images representing spatial data.\n labels: (list of) labels.\n metadatas: (list of) metadata.\n cropper: Callable which crops the image on the fly\n \"\"\"\n assert isinstance(imgs, list)\n assert isinstance(labels, list)\n assert isinstance(metadatas, list)\n assert len(imgs) == len(labels) == len(metadatas), (\n \"These number should be the same {0} {1} {2}\".format(len(imgs),\n len(labels),\n len(metadatas))\n )\n assert len(imgs) >= 1, \"I can not create a dataset with less than 1 image.\"\n\n # check that all sparse_images have a _categories_to_code before putting them together into a dataset.\n if hasattr(imgs[0], '_categories_to_codes'):\n list_of_cat_to_code_dict = [img._categories_to_codes for img in imgs]\n for i in range(len(list_of_cat_to_code_dict)-1):\n assert list_of_cat_to_code_dict[i] == list_of_cat_to_code_dict[i+1], \\\n \"The sparse images have different cat_to_code dictionaries {0} and {1}. \\\n These images can not be combined into a dataset. 
\\\n You can re-create the sparse images and specify the cat_to_code dictionary \\\n to be used.\".format(list_of_cat_to_code_dict[i], list_of_cat_to_code_dict[i+1])\n print(\"All cat_to_codes dictionaries are identical {0}\".format(list_of_cat_to_code_dict[-1]))\n\n unique_y_labels = list(sorted(set(labels)))\n unique_y_codes = [i for i in range(len(unique_y_labels))]\n self._labels_to_codes = dict(zip(unique_y_labels, unique_y_codes))\n self.codes = [self._labels_to_codes[label] for label in labels] # list of integers\n self.metadatas = metadatas\n self.imgs = imgs\n self.cropper = cropper\n if self.cropper is None:\n self.duplicating_factor = 1\n self.n_crops_per_tissue = None\n elif self.cropper.strategy_ == 'random':\n # If n_crops >= batch_size then a single tissue generates all crops for the mini_batch.\n # This results in a very imbalanced mini_batch.\n # Here, we implement a trick for generating more balanced mini_batches.\n # We pretend to have more tissues and generate fewer crops from each tissue resulting in the same overall\n # number of crops but a more diverse mini_batch.\n # See __len__ and __getitem__ for how this trick is implemented.\n tmp_n_crops = self.cropper.n_crops_\n while tmp_n_crops > 10 and tmp_n_crops % 2 == 0:\n tmp_n_crops /= 2\n self.n_crops_per_tissue = int(tmp_n_crops)\n self.duplicating_factor = int(self.cropper.n_crops_ // self.n_crops_per_tissue)\n\n def to(self, device: torch.device) -> \"CropperDataset\":\n \"\"\" Move the images to a particular device \"\"\"\n self.imgs = [img.to(device) for img in self.imgs]\n return self\n\n def __len__(self):\n # We pretend that the dataset contains extra samples.\n # Note that the data_loader will generate RANDOM indices between 0 and this (inflated) length\n return len(self.imgs) * self.duplicating_factor\n\n def __getitem__(self, index: int) -> Union[\n Tuple[torch.Tensor, int, MetadataCropperDataset],\n List[Tuple[torch.Tensor, int, MetadataCropperDataset]]]:\n\n # Remap the index from the inflated interval to the original interval.\n new_index = index % len(self.imgs) # this is strictly in [0, len(self.imgs))\n\n if self.cropper is None:\n img = self.imgs[new_index]\n code = self.codes[new_index]\n metadata = self.metadatas[new_index]\n return img, code, metadata\n\n else:\n code_base = self.codes[new_index]\n crop_list, loc_x_list, loc_y_list = self.cropper(self.imgs[new_index], n_crops=self.n_crops_per_tissue)\n\n metadata_base: MetadataCropperDataset = self.metadatas[new_index]\n\n return [(crop, code_base, MetadataCropperDataset(f_name=metadata_base.f_name,\n loc_x=metadata_base.loc_x + x_loc,\n loc_y=metadata_base.loc_y + y_loc,\n moran=None)) for\n crop, x_loc, y_loc in zip(crop_list, loc_x_list, loc_y_list)]\n\n\nclass CollateFnListTuple:\n @staticmethod\n @torch.no_grad()\n def __call__(data):\n \"\"\"\n Args:\n data: Output of the batchloader calling the __getitem__ method i.e.:\n Either: List[Tuple]\n Or: List[List[Tuple]\n\n Returns:\n List[imgs], List[labels], List[Metadata]\n \"\"\"\n if isinstance(data, list) and isinstance(data[0], list):\n # I have to flatten a list of list\n data = [val for sublist in data for val in sublist]\n\n tuple_imgs, tuple_labels, tuple_metadata = zip(*data)\n return list(tuple_imgs), list(tuple_labels), list(tuple_metadata)\n\n\nclass DataLoaderWithLoad(DataLoader):\n def load(self, index: Union[List[int], torch.Tensor]):\n tmp = []\n for idx in index:\n tmp.append(self.dataset.__getitem__(idx))\n return self.collate_fn(tmp)\n",
"from typing import Tuple\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\ndef linear_warmup_and_cosine_protocol(\n f_values: Tuple[float, float, float],\n x_milestones: Tuple[int, int, int, int]):\n \"\"\"\n There are 5 regions:\n 1. constant at f0 for x < x0\n 2. linear increase from f0 to f1 for x0 < x < x1\n 3. constant at f1 for x1 < x < x2\n 4. cosine protocol from f1 to f2 for x2 < x < x3\n 5. constant at f2 for x > x3\n\n If you want a linear_ramp followed by a cosine_decay only simply set:\n 1. x0=0 (to eliminate the first constant piece)\n 2. x2=x1 (to eliminate the second constant piece)\n 3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)\n \"\"\"\n assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]\n\n def fn(step):\n if step <= x_milestones[0]:\n return float(f_values[0])\n elif (step > x_milestones[0]) and (step <= x_milestones[1]):\n m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))\n return float(f_values[0]) + m * float(step - x_milestones[0])\n elif (step > x_milestones[1]) and (step <= x_milestones[2]):\n return float(f_values[1])\n elif (step > x_milestones[2]) and (step <= x_milestones[3]):\n progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2])) # in (0,1)\n tmp = 0.5 * (1.0 + math.cos(math.pi * progress)) # in (1,0)\n return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])\n else:\n return float(f_values[2])\n\n return fn\n\n\nclass LARS(Optimizer):\n \"\"\"\n Extends SGD in PyTorch with LARS scaling from the paper\n 'Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>'_.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)\n eps (float, optional): eps for division denominator (default: 1e-8)\n\n Example:\n >>> model = torch.nn.Linear(10, 1)\n >>> input = torch.Tensor(10)\n >>> target = torch.Tensor([1.])\n >>> loss_fn = lambda input, target: (input - target) ** 2\n >>> #\n >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n Note:\n The application of momentum in the SGD part is modified according to\n the PyTorch standards. LARS scaling fits into the equation in the\n following fashion.\n .. math::\n \\begin{aligned}\n g_{t+1} & = \\text{lars_lr} * (\\beta * p_{t} + g_{t+1}), \\\\\n v_{t+1} & = \\\\mu * v_{t} + g_{t+1}, \\\\\n p_{t+1} & = p_{t} - \\text{lr} * v_{t+1},\n \\\\end{aligned}\n where :math:`p`, :math:`g`, :math:`v`, :math:`\\\\mu` and :math:`\\beta` denote the\n parameters, gradient, velocity, momentum, and weight decay respectively.\n The :math:`lars_lr` is defined by Eq. 6 in the paper.\n The Nesterov version is analogously modified.\n .. warning::\n Parameters with weight decay set to 0 will automatically be excluded from\n layer-wise LR scaling. 
This is to ensure consistency with papers like SimCLR\n and BYOL.\n \"\"\"\n\n def __init__(\n self,\n params,\n lr=None,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n trust_coefficient=0.001,\n eps=1e-8,\n ):\n if lr is None or lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n trust_coefficient=trust_coefficient,\n eps=eps,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n # exclude scaling for params with 0 weight decay\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n d_p = p.grad\n p_norm = torch.norm(p.data)\n g_norm = torch.norm(p.grad.data)\n\n # lars scaling + weight decay part\n if weight_decay != 0:\n if p_norm != 0 and g_norm != 0:\n lars_lr = p_norm / (g_norm + p_norm * weight_decay + group[\"eps\"])\n lars_lr *= group[\"trust_coefficient\"]\n\n d_p = d_p.add(p, alpha=weight_decay)\n d_p *= lars_lr\n\n # sgd part\n if momentum != 0:\n param_state = self.state[p]\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.clone(d_p).detach()\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n p.add_(d_p, alpha=-group[\"lr\"])\n\n return loss\n"
] | [
[
"torch.max",
"torch.sum",
"torch.zeros_like",
"torch.from_numpy",
"torch.no_grad",
"torch.arange",
"torch.clamp",
"numpy.array",
"torch.ones_like"
],
[
"torch.randint",
"torch.randperm",
"torch.narrow",
"torch.tensor",
"torch.no_grad",
"torch.stack"
],
[
"torch.clone",
"torch.norm",
"torch.no_grad",
"torch.enable_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
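The tissue_purifier entry above centres on greedy non-maximum suppression driven by Intersection-over-Minimum (IoMIN) between patch proposals. Below is a minimal self-contained sketch of that pairwise IoMIN computation on toy boxes; the coordinates and the 0.5 threshold are illustrative values, not taken from the repository.

import torch

# Pairwise Intersection-over-Minimum on toy xywh boxes, mirroring the row's
# NonMaxSuppression._compute_iomin / _unroll_and_compare logic.
xywh = torch.tensor([[0., 0., 50., 50.],
                     [10., 10., 50., 50.],
                     [200., 200., 50., 50.]])
x, y, w, h = xywh.unbind(dim=-1)
x1, y1, x3, y3, area = x, y, x + w, y + h, w * h

min_area = torch.min(area.unsqueeze(-1), area.unsqueeze(-2))   # "MIN" comparison
xi1 = torch.max(x1.unsqueeze(-1), x1.unsqueeze(-2))            # "MAX" comparison
yi1 = torch.max(y1.unsqueeze(-1), y1.unsqueeze(-2))
xi3 = torch.min(x3.unsqueeze(-1), x3.unsqueeze(-2))
yi3 = torch.min(y3.unsqueeze(-1), y3.unsqueeze(-2))

intersection = torch.clamp(xi3 - xi1, min=0) * torch.clamp(yi3 - yi1, min=0)
iomin = intersection / min_area        # (3, 3) matrix; box 0 vs box 1 gives 1600/2500 = 0.64
print(iomin)
print(iomin > 0.5)                     # pairs that would compete during greedy NMS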
Sergio0694/sepconv-gan | [
"82d908ed5c3dd55d7b2f8603450dac5108751a3b"
] | [
"training/networks/discriminators/vgg19.py"
] | [
"import tensorflow as tf\n\ndef get_network(x):\n '''Gets a discriminator network with the shared base of the VGG19 network.\n\n x(tf.Tensor) -- the VGG19 base network\n '''\n\n with tf.variable_scope('VGG19_top', None, [x], reuse=tf.AUTO_REUSE):\n conv1 = tf.layers.conv2d(x, 512, 3, activation=tf.nn.leaky_relu, padding='same')\n conv2 = tf.layers.conv2d(conv1, 512, 3, activation=tf.nn.leaky_relu, padding='same') + x\n pool = tf.layers.max_pooling2d(conv2, 3, 2, padding='valid')\n flat = tf.reshape(pool, [pool.shape[0], -1])\n d1 = tf.layers.dense(flat, 2048, activation=tf.nn.leaky_relu)\n dropout1 = tf.layers.dropout(d1, 0.8)\n d2 = tf.layers.dense(dropout1, 2048, activation=tf.nn.leaky_relu)\n dropout2 = tf.layers.dropout(d2, 0.8)\n d3 = tf.layers.dense(dropout2, 1)\n return d3"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.layers.dropout",
"tensorflow.reshape",
"tensorflow.layers.max_pooling2d",
"tensorflow.layers.dense",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
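The sepconv-gan entry stores only the discriminator head that sits on top of shared VGG19 features. The sketch below shows how that head might be wired into a TF 1.x graph and a GAN-style loss; the import path is inferred from the listed file path, and the placeholder shape and loss are assumptions, not code from the repository.

import tensorflow as tf  # TensorFlow 1.x, matching the row's tf.layers / tf.AUTO_REUSE calls
from training.networks.discriminators.vgg19 import get_network  # path from the row; importability is an assumption

# The head reshapes with pool.shape[0], so the batch size must be static.
features = tf.placeholder(tf.float32, shape=[4, 12, 20, 512])   # stand-in VGG19 feature map
logits = get_network(features)                                   # shape (4, 1), one score per sample

# Illustrative GAN-style loss on the returned logits (not taken from the repository).
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits))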
juanjosegarciaripoll/seeq | [
"3554550c3348fbaae398737cf4ae5510a34d6665"
] | [
"seeq/test/test_parametric_control.py"
] | [
"\nfrom seeq.control import *\n\nimport unittest\n\nclass TestQControl(unittest.TestCase):\n π = np.pi\n σz = np.array([[1., 0.],[0., -1.]])\n σx = np.array([[0., 1.],[1., 0.]])\n σy = np.array([[0., -1.j],[1.j, 0.]])\n ψ0 = np.eye(2)\n\n def test_nothing(self):\n \"\"\"For a qubit to remain the same, we do nothing.\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x * (self.σx @ ψ)\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n\n def test_nothing2(self):\n \"\"\"For a qubit to remain the same, we cancel the frequency.\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x[0] * (self.σx @ ψ) + (1.0 - x[1]) * (self.σz @ ψ)\n r = parametric_control([1.0, 0.1], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 2)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n self.assertAlmostEqual(r.x[1], 1.0, delta=1e-7)\n\n def test_qubit_flip(self):\n \"\"\"Construct a π/2 pulse.\"\"\"\n Ug = -1j*self.σy\n H = lambda t, x, ψ: (x * self.σy) @ ψ\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, tol=1e-9, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], self.π/2., delta=1e-7)\n\n def test_nothing_derivative(self):\n \"\"\"For a qubit to remain the same, we do nothing (with gradients).\"\"\"\n Ug = np.eye(2)\n H = lambda t, x, ψ: x * (self.σx @ ψ)\n dH = lambda t, x, ψ: [self.σx @ ψ]\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, dH=dH, tol=1e-8, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], 0.0, delta=1e-7)\n\n def test_qubit_flip_derivative(self):\n \"\"\"Construct a π/2 pulse (with gradients).\"\"\"\n Ug = -1j*self.σy\n H = lambda t, x, ψ: (x * self.σy) @ ψ\n dH = lambda t, x, ψ: [self.σy @ ψ]\n r = parametric_control([1.0], H, self.ψ0, T=1.0, Ug=Ug, dH=dH, tol=1e-9, method='expm')\n self.assertEqual(len(r.x), 1)\n self.assertAlmostEqual(r.x[0], self.π/2., delta=1e-7)\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom seeq.states import *\n"
] | [
[
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
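The seeq tests above optimise a single control amplitude so the evolution matches a target gate; test_qubit_flip expects the optimum x = π/2 for H = x·σy over T = 1. The standalone check below verifies that fixed point directly with scipy, without importing seeq.

import numpy as np
from scipy.linalg import expm

# Standalone check of what test_qubit_flip asserts: a constant control x applied to
# H = x*σy for T = 1 reproduces the target gate Ug = -1j*σy exactly when x = π/2,
# which is the optimum parametric_control is expected to find.
σy = np.array([[0., -1.j], [1.j, 0.]])
Ug = -1j * σy

x = np.pi / 2
U = expm(-1j * x * 1.0 * σy)       # propagator of a time-independent Hamiltonian over T = 1

print(np.allclose(U, Ug))          # True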
wangdingyan/hybridUQ | [
"c141a4bec0e716a12444f7e9ab0d7c975df93184",
"c141a4bec0e716a12444f7e9ab0d7c975df93184"
] | [
"chemprop/utils/uclass.py",
"chemprop/conformal/conformal.py"
] | [
"import numpy as np\n\n\nclass uncertainty:\n def __init__(self):\n pass\n\n\nclass uncertainties:\n\n def __init__(self):\n self.uncertainty_collection = {}\n self.uncertainty_count = {}\n self.norm_func = {'MinMax' : lambda x: (x-np.min(x)) / (np.max(x)-np.min(x)),\n 'Zscore' : lambda x: (x-np.mean(x)) / np.std(x),\n 'Simple' : lambda x: x,\n 'Argsort': lambda x: np.argsort(np.argsort(x))}\n\n def add_unc(self,\n uc_name,\n uc_value):\n\n if uc_name not in self.uncertainty_collection:\n self.uncertainty_count[uc_name] = 1\n self.uncertainty_collection[uc_name] = uc_value\n else:\n self.uncertainty_count[uc_name] += 1\n self.uncertainty_collection[uc_name] += uc_value\n\n def mean(self):\n for name in self.uncertainty_collection:\n self.uncertainty_collection[name] = self.uncertainty_collection[name] / self.uncertainty_count[name]\n\n def get_dict(self):\n return self.uncertainty_collection\n\n def get_names(self):\n return set(self.uncertainty_collection.keys())\n\n def simply_add(self, name_list):\n return np.sum([self.uncertainty_collection[name] for name in name_list], axis=0)\n\n def ensemble(self, weights=None, norm_methods=None):\n if norm_methods is None:\n norm_methods = self.norm_func\n\n output = {}\n for weight_name in weights:\n if np.sum(list(weights[weight_name].values())) == 0:\n weights[weight_name] = {k:1.0 for k in weights[weight_name].keys()}\n\n for norm_name in norm_methods:\n output[f'{weight_name}_{norm_name}'] = np.mean([self.norm_func[norm_name](self.uncertainty_collection[uc_name])\\\n *weights[weight_name].get(uc_name, 0.) for uc_name in self.get_names()], axis=0)\n return output\n\n\n",
"import numpy as np\nfrom sklearn.base import BaseEstimator\nfrom chemprop.conformal.nonconformist import *\n\n\n# class MCP:\n# def __init__(self,\n# y_calibrate: np.array,\n# y_calibrate_hat: np.array,\n# calibrate_error_estimated: np.array,\n# p=0.9):\n#\n# assert y_calibrate.shape == y_calibrate_hat.shape == calibrate_error_estimated.shape\n# assert len(y_calibrate.shape) == 1\n# self.y_calibrate = y_calibrate\n# self.y_calibrate_hat = y_calibrate_hat\n# self.calibrate_error_estimated = calibrate_error_estimated\n# self.nonconformity_values = np.abs(self.y_calibrate-self.y_calibrate_hat) / self.calibrate_error_estimated\n# self.p = p\n# self.alpha = np.sort(self.nonconformity_values)[int(len(self.nonconformity_values)*self.p)]\n#\n# def predict(self,\n# y_predicted_hat: np.array,\n# predicted_error_estimated: np.array):\n# return y_predicted_hat-self.alpha*predicted_error_estimated, \\\n# y_predicted_hat+self.alpha*predicted_error_estimated\n#\n# def evaluate(self,\n# y_predicted: np.array,\n# y_predicted_hat: np.array,\n# predicted_error_estimated:np.array):\n# intervals = self.alpha*predicted_error_estimated\n# absolute_errors = np.abs(y_predicted-y_predicted_hat)\n# reliability = (intervals > absolute_errors).mean()\n# efficiency = 2*np.mean(intervals)\n# return reliability, efficiency\n\n\nclass RegressorNC():\n def __init__(self,\n err_func=AbsErrorErrFunc(),\n normalizer=lambda x: np.exp(x),\n beta=1e-6):\n self.error_func = err_func\n self.normalizer = normalizer\n self.beta = beta\n\n def score(self,\n prediction,\n y,\n error_est=None,):\n n_test = prediction.shape[0]\n if error_est is not None:\n norm = self.normalizer(error_est) + self.beta\n else:\n norm = np.ones(n_test)\n if prediction.ndim > 1:\n ret_val = self.error_func.apply(prediction, y)\n else:\n ret_val = self.error_func.apply(prediction, y) / norm\n return ret_val\n\n def predict(self,\n prediction,\n nc,\n significance,\n error_est=None):\n n_test = prediction.shape[0]\n if error_est is not None:\n norm = self.normalizer(error_est) + self.beta\n else:\n norm = np.ones(n_test)\n\n intervals = np.zeros((n_test, 2))\n err_dist = self.error_func.apply_inverse(nc, significance)\n err_dist = np.hstack([err_dist] * n_test)\n if prediction.ndim > 1:\n intervals[:, 0] = prediction[:, 0] - err_dist[0, :]\n intervals[:, 1] = prediction[:, -1] + err_dist[1, :]\n else:\n err_dist *= norm\n intervals[:, 0] = prediction - err_dist[0, :]\n intervals[:, 1] = prediction + err_dist[1, :]\n return intervals\n\n def eval(self,\n interval,\n y):\n reliability = ((interval[:, 0] < y) * (y < interval[:, 1])).mean()\n efficiency = (np.abs(interval[:, 0]-interval[:, 1])).mean()\n return reliability, efficiency\n\n\ndef conformal_pipeline(significance,\n calibrate_y,\n calibrate_prediction,\n test_prediction,\n calibrate_error_est=None,\n test_error_est=None,\n err_func=AbsErrorErrFunc(),\n beta=1e-6,\n normalizer=lambda x: np.exp(x)):\n regressor = RegressorNC(err_func=err_func,\n normalizer=normalizer,\n beta=beta)\n nc = regressor.score(calibrate_prediction, calibrate_y, calibrate_error_est)\n interval = regressor.predict(test_prediction, nc, significance, test_error_est)\n return interval\n\n\n\n\n\n"
] | [
[
"numpy.min",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.argsort",
"numpy.sum"
],
[
"numpy.hstack",
"numpy.abs",
"numpy.ones",
"numpy.exp",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
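The hybridUQ entry implements normalized split-conformal regression intervals (RegressorNC / conformal_pipeline). Below is a minimal numpy-only sketch of the same recipe; the synthetic data and the np.quantile call are assumptions standing in for AbsErrorErrFunc.apply_inverse, whose exact quantile convention may differ.

import numpy as np

# Normalized absolute-error nonconformity on a calibration set, then symmetric
# prediction intervals on test points, as in RegressorNC with normalizer=np.exp.
rng = np.random.default_rng(0)
y_cal = rng.normal(size=500)
y_cal_hat = y_cal + rng.normal(scale=0.3, size=500)
err_est_cal = np.full(500, -1.2)                  # per-sample error estimate on a log scale

norm_cal = np.exp(err_est_cal) + 1e-6             # normalizer(err) + beta
nc = np.abs(y_cal - y_cal_hat) / norm_cal         # RegressorNC.score for 1-D predictions

significance = 0.1
q = np.quantile(nc, 1.0 - significance)           # conformal quantile

y_test_hat = np.array([0.0, 1.5])
norm_test = np.exp(np.full(2, -1.2)) + 1e-6
intervals = np.stack([y_test_hat - q * norm_test,
                      y_test_hat + q * norm_test], axis=1)
print(intervals)                                   # ~90% coverage under exchangeability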
HSU-S21-CS232/final-th150 | [
"cf0004c7a9e72b08a0c1c9985c8c43e83a0fb650"
] | [
"EZ Queue/screen.py"
] | [
"import os\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom PIL import ImageGrab\n\n\nclass Screen(object):\n\n WINDOW_NAME = 'data'\n\n def __init__(self):\n self.image = None\n self.data = None\n self.event = None\n\n @property\n def inverted_image_size(self):\n return (self.image.size[1], self.image.size[0])\n\n def normalize_data(self):\n self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2BGR)\n\n def get_data(self):\n self.image = ImageGrab.grab()\n\n self.data = np.array(\n self.image.getdata(), dtype='uint8'\n ).reshape(self.inverted_image_size + (3,))\n\n self.normalize_data()\n\n def get_match(self, template):\n return cv2.matchTemplate(\n self.data, template.data, cv2.TM_CCOEFF_NORMED)\n\n def initialize_window(self):\n cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.WINDOW_NAME, 800, 600)\n\n def show_data(self, gray=False):\n cv2.imshow(self.WINDOW_NAME, self.data)\n cv2.waitKey(1)\n\n def draw_rectangle(self, point, size):\n cv2.rectangle(\n self.data, point,\n (point[0] + size[0], point[1] + size[1]),\n (0, 0, 255), 2)\n\n def capture(self):\n while True:\n self.get_data()\n location = self.check_template()\n if location:\n self.event.callback(location)\n break\n if (not location and self.event.timeout > 0\n and time.time() >= self.event.timeout):\n self.event.timeout_callback()\n break\n\n def assign_event(self, event):\n self.event = event\n self.capture()\n\n def check_template(self):\n match = self.get_match(self.event.template)\n locations = np.where(match >= self.event.template.threshold)\n\n try:\n location = next(zip(*locations[::-1]))\n except StopIteration:\n return\n\n return location if location else None\n"
] | [
[
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
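The screen-capture entry locates a template in a grabbed frame with cv2.matchTemplate and a score threshold. The self-contained sketch below reproduces that matching step on synthetic arrays so it runs without ImageGrab; the 0.9 threshold and image sizes are illustrative.

import cv2
import numpy as np

# Correlate a template against a frame with TM_CCOEFF_NORMED and keep the first
# location whose score clears the threshold, as in Screen.check_template.
rng = np.random.default_rng(0)
frame = rng.integers(0, 256, size=(200, 300, 3), dtype=np.uint8)
template = rng.integers(0, 256, size=(20, 20, 3), dtype=np.uint8)
frame[50:70, 100:120] = template                  # embed the template so an exact match exists

match = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
locations = np.where(match >= 0.9)

try:
    location = next(zip(*locations[::-1]))        # (x, y) of the first hit, as in the row
except StopIteration:
    location = None
print(location)                                    # (100, 50)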
HeyLey/catboost | [
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2",
"f472aed90604ebe727537d9d4a37147985e10ec2"
] | [
"contrib/python/numpy/numpy/lib/tests/test_type_check.py",
"contrib/libs/onnx/onnx/mapping.py",
"contrib/python/numpy/numpy/lib/shape_base.py",
"contrib/python/numpy/numpy/matrixlib/tests/test_regression.py",
"contrib/python/numpy/numpy/polynomial/hermite_e.py",
"contrib/python/numpy/numpy/core/tests/test_longdouble.py",
"contrib/python/numpy/numpy/distutils/command/build_clib.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.compat import long\nfrom numpy.testing import (\n TestCase, assert_, assert_equal, assert_array_equal, run_module_suite\n )\nfrom numpy.lib.type_check import (\n common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,\n nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close\n )\n\n\ndef assert_all(x):\n assert_(np.all(x), x)\n\n\nclass TestCommonType(TestCase):\n def test_basic(self):\n ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)\n af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)\n af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)\n af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)\n acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)\n acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)\n assert_(common_type(ai32) == np.float64)\n assert_(common_type(af16) == np.float16)\n assert_(common_type(af32) == np.float32)\n assert_(common_type(af64) == np.float64)\n assert_(common_type(acs) == np.csingle)\n assert_(common_type(acd) == np.cdouble)\n\n\nclass TestMintypecode(TestCase):\n\n def test_default_1(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype), 'd')\n assert_equal(mintypecode('f'), 'f')\n assert_equal(mintypecode('d'), 'd')\n assert_equal(mintypecode('F'), 'F')\n assert_equal(mintypecode('D'), 'D')\n\n def test_default_2(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype+'f'), 'f')\n assert_equal(mintypecode(itype+'d'), 'd')\n assert_equal(mintypecode(itype+'F'), 'F')\n assert_equal(mintypecode(itype+'D'), 'D')\n assert_equal(mintypecode('ff'), 'f')\n assert_equal(mintypecode('fd'), 'd')\n assert_equal(mintypecode('fF'), 'F')\n assert_equal(mintypecode('fD'), 'D')\n assert_equal(mintypecode('df'), 'd')\n assert_equal(mintypecode('dd'), 'd')\n #assert_equal(mintypecode('dF',savespace=1),'F')\n assert_equal(mintypecode('dF'), 'D')\n assert_equal(mintypecode('dD'), 'D')\n assert_equal(mintypecode('Ff'), 'F')\n #assert_equal(mintypecode('Fd',savespace=1),'F')\n assert_equal(mintypecode('Fd'), 'D')\n assert_equal(mintypecode('FF'), 'F')\n assert_equal(mintypecode('FD'), 'D')\n assert_equal(mintypecode('Df'), 'D')\n assert_equal(mintypecode('Dd'), 'D')\n assert_equal(mintypecode('DF'), 'D')\n assert_equal(mintypecode('DD'), 'D')\n\n def test_default_3(self):\n assert_equal(mintypecode('fdF'), 'D')\n #assert_equal(mintypecode('fdF',savespace=1),'F')\n assert_equal(mintypecode('fdD'), 'D')\n assert_equal(mintypecode('fFD'), 'D')\n assert_equal(mintypecode('dFD'), 'D')\n\n assert_equal(mintypecode('ifd'), 'd')\n assert_equal(mintypecode('ifF'), 'F')\n assert_equal(mintypecode('ifD'), 'D')\n assert_equal(mintypecode('idF'), 'D')\n #assert_equal(mintypecode('idF',savespace=1),'F')\n assert_equal(mintypecode('idD'), 'D')\n\n\nclass TestIsscalar(TestCase):\n\n def test_basic(self):\n assert_(np.isscalar(3))\n assert_(not np.isscalar([3]))\n assert_(not np.isscalar((3,)))\n assert_(np.isscalar(3j))\n assert_(np.isscalar(long(10)))\n assert_(np.isscalar(4.0))\n\n\nclass TestReal(TestCase):\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(y, np.real(y))\n\n def test_cmplx(self):\n y = np.random.rand(10,)+1j*np.random.rand(10,)\n assert_array_equal(y.real, np.real(y))\n\n\nclass TestImag(TestCase):\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(0, np.imag(y))\n\n def test_cmplx(self):\n y = np.random.rand(10,)+1j*np.random.rand(10,)\n assert_array_equal(y.imag, 
np.imag(y))\n\n\nclass TestIscomplex(TestCase):\n\n def test_fail(self):\n z = np.array([-1, 0, 1])\n res = iscomplex(z)\n assert_(not np.sometrue(res, axis=0))\n\n def test_pass(self):\n z = np.array([-1j, 1, 0])\n res = iscomplex(z)\n assert_array_equal(res, [1, 0, 0])\n\n\nclass TestIsreal(TestCase):\n\n def test_pass(self):\n z = np.array([-1, 0, 1j])\n res = isreal(z)\n assert_array_equal(res, [1, 1, 0])\n\n def test_fail(self):\n z = np.array([-1j, 1, 0])\n res = isreal(z)\n assert_array_equal(res, [0, 1, 1])\n\n\nclass TestIscomplexobj(TestCase):\n\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(not iscomplexobj(z))\n z = np.array([-1j, 0, -1])\n assert_(iscomplexobj(z))\n\n\nclass TestIsrealobj(TestCase):\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(isrealobj(z))\n z = np.array([-1j, 0, -1])\n assert_(not isrealobj(z))\n\n\nclass TestIsnan(TestCase):\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isnan(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((1.,))/0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((-1.,))/0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array((0.,))/0.) == 1)\n\n def test_integer(self):\n assert_all(np.isnan(1) == 0)\n\n def test_complex(self):\n assert_all(np.isnan(1+1j) == 0)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array(0+0j)/0.) == 1)\n\n\nclass TestIsfinite(TestCase):\n # Fixme, wrong place, isfinite now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isfinite(z) == 1\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((1.,))/0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((-1.,))/0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((0.,))/0.) == 0)\n\n def test_integer(self):\n assert_all(np.isfinite(1) == 1)\n\n def test_complex(self):\n assert_all(np.isfinite(1+1j) == 1)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array(1+1j)/0.) == 0)\n\n\nclass TestIsinf(TestCase):\n # Fixme, wrong place, isinf now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isinf(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((1.,))/0.) == 1)\n\n def test_posinf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(1.,)/0.) == 1)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((-1.,))/0.) == 1)\n\n def test_neginf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(-1.)/0.) == 1)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((0.,))/0.) 
== 0)\n\n\nclass TestIsposinf(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isposinf(np.array((-1., 0, 1))/0.)\n assert_(vals[0] == 0)\n assert_(vals[1] == 0)\n assert_(vals[2] == 1)\n\n\nclass TestIsneginf(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isneginf(np.array((-1., 0, 1))/0.)\n assert_(vals[0] == 1)\n assert_(vals[1] == 0)\n assert_(vals[2] == 0)\n\n\nclass TestNanToNum(TestCase):\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = nan_to_num(np.array((-1., 0, 1))/0.)\n assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))\n assert_(vals[1] == 0)\n assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))\n\n def test_integer(self):\n vals = nan_to_num(1)\n assert_all(vals == 1)\n vals = nan_to_num([1])\n assert_array_equal(vals, np.array([1], np.int))\n\n def test_complex_good(self):\n vals = nan_to_num(1+1j)\n assert_all(vals == 1+1j)\n\n def test_complex_bad(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(0+1.j)/0.\n vals = nan_to_num(v)\n # !! This is actually (unexpectedly) zero\n assert_all(np.isfinite(vals))\n\n def test_complex_bad2(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(-1+1.j)/0.\n vals = nan_to_num(v)\n assert_all(np.isfinite(vals))\n # Fixme\n #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))\n # !! This is actually (unexpectedly) positive\n # !! inf. Comment out for now, and see if it\n # !! changes\n #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))\n\n\nclass TestRealIfClose(TestCase):\n\n def test_basic(self):\n a = np.random.rand(10)\n b = real_if_close(a+1e-15j)\n assert_all(isrealobj(b))\n assert_array_equal(a, b)\n b = real_if_close(a+1e-7j)\n assert_all(iscomplexobj(b))\n b = real_if_close(a+1e-7j, tol=1e-6)\n assert_all(isrealobj(b))\n\n\nclass TestArrayConversion(TestCase):\n\n def test_asfarray(self):\n a = asfarray(np.array([1, 2, 3]))\n assert_equal(a.__class__, np.ndarray)\n assert_(np.issubdtype(a.dtype, np.float))\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom onnx import TensorProto\nimport numpy as np # type: ignore\n\nTENSOR_TYPE_TO_NP_TYPE = {\n int(TensorProto.FLOAT): np.dtype('float32'),\n int(TensorProto.UINT8): np.dtype('uint8'),\n int(TensorProto.INT8): np.dtype('int8'),\n int(TensorProto.UINT16): np.dtype('uint16'),\n int(TensorProto.INT16): np.dtype('int16'),\n int(TensorProto.INT32): np.dtype('int32'),\n int(TensorProto.INT64): np.dtype('int64'),\n int(TensorProto.BOOL): np.dtype('bool'),\n int(TensorProto.FLOAT16): np.dtype('float16'),\n int(TensorProto.DOUBLE): np.dtype('float64'),\n int(TensorProto.COMPLEX64): np.dtype('complex64'),\n int(TensorProto.COMPLEX128): np.dtype('complex128'),\n int(TensorProto.UINT32): np.dtype('uint32'),\n int(TensorProto.UINT64): np.dtype('uint64'),\n int(TensorProto.STRING): np.dtype('str'),\n}\n\nNP_TYPE_TO_TENSOR_TYPE = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}\n\nTENSOR_TYPE_TO_STORAGE_TENSOR_TYPE = {\n int(TensorProto.FLOAT): int(TensorProto.FLOAT),\n int(TensorProto.UINT8): int(TensorProto.INT32),\n int(TensorProto.INT8): int(TensorProto.INT32),\n int(TensorProto.UINT16): int(TensorProto.INT32),\n int(TensorProto.INT16): int(TensorProto.INT32),\n int(TensorProto.INT32): int(TensorProto.INT32),\n int(TensorProto.INT64): int(TensorProto.INT64),\n int(TensorProto.BOOL): int(TensorProto.INT32),\n int(TensorProto.FLOAT16): int(TensorProto.UINT16),\n int(TensorProto.BFLOAT16): int(TensorProto.UINT16),\n int(TensorProto.DOUBLE): int(TensorProto.DOUBLE),\n int(TensorProto.COMPLEX64): int(TensorProto.FLOAT),\n int(TensorProto.COMPLEX128): int(TensorProto.DOUBLE),\n int(TensorProto.UINT32): int(TensorProto.UINT32),\n int(TensorProto.UINT64): int(TensorProto.UINT64),\n int(TensorProto.STRING): int(TensorProto.STRING),\n}\n\nSTORAGE_TENSOR_TYPE_TO_FIELD = {\n int(TensorProto.FLOAT): 'float_data',\n int(TensorProto.INT32): 'int32_data',\n int(TensorProto.INT64): 'int64_data',\n int(TensorProto.UINT16): 'int32_data',\n int(TensorProto.DOUBLE): 'double_data',\n int(TensorProto.COMPLEX64): 'float_data',\n int(TensorProto.COMPLEX128): 'double_data',\n int(TensorProto.UINT32): 'uint64_data',\n int(TensorProto.UINT64): 'uint64_data',\n int(TensorProto.STRING): 'string_data',\n int(TensorProto.BOOL): 'int32_data',\n}\n",
"from __future__ import division, absolute_import, print_function\n\nimport warnings\n\nimport numpy.core.numeric as _nx\nfrom numpy.core.numeric import (\n asarray, zeros, outer, concatenate, isscalar, array, asanyarray\n )\nfrom numpy.core.fromnumeric import product, reshape\nfrom numpy.core import vstack, atleast_3d\n\n\n__all__ = [\n 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',\n 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'\n ]\n\n\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`\n is a 1-D slice of `arr` along `axis`.\n\n Parameters\n ----------\n func1d : function\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray\n Input array.\n args : any\n Additional arguments to `func1d`.\n kwargs: any\n Additional named arguments to `func1d`.\n\n .. versionadded:: 1.9.0\n\n\n Returns\n -------\n apply_along_axis : ndarray\n The output array. The shape of `outarr` is identical to the shape of\n `arr`, except along the `axis` dimension, where the length of `outarr`\n is equal to the size of the return value of `func1d`. If `func1d`\n returns a scalar `outarr` will have one fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> def my_func(a):\n ... \\\"\\\"\\\"Average first and last element of a 1-D array\\\"\\\"\\\"\n ... return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([ 4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([ 2., 5., 8.])\n\n For a function that doesn't return a scalar, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n \"\"\"\n arr = asarray(arr)\n nd = arr.ndim\n if axis < 0:\n axis += nd\n if (axis >= nd):\n raise ValueError(\"axis must be less than arr.ndim; axis=%d, rank=%d.\"\n % (axis, nd))\n ind = [0]*(nd-1)\n i = zeros(nd, 'O')\n indlist = list(range(nd))\n indlist.remove(axis)\n i[axis] = slice(None, None)\n outshape = asarray(arr.shape).take(indlist)\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n # if res is a number, then we have a smaller output array\n if isscalar(res):\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(ind)] = res\n Ntot = product(outshape)\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= outshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n outarr[tuple(ind)] = res\n k += 1\n return outarr\n else:\n Ntot = product(outshape)\n holdshape = outshape\n outshape = list(arr.shape)\n outshape[axis] = len(res)\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(i.tolist())] = res\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= holdshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n outarr[tuple(i.tolist())] = res\n k += 1\n return 
outarr\n\n\ndef apply_over_axes(func, a, axes):\n \"\"\"\n Apply a function repeatedly over multiple axes.\n\n `func` is called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n ------\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been availabe since\n version 1.7.0.\n\n Examples\n --------\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n \"\"\"\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\n\ndef expand_dims(a, axis):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis, corresponding to a given position in the array shape.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int\n Position (amongst axes) where new axis is to be inserted.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n doc.indexing, atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n \"\"\"\n a = asarray(a)\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\nrow_stack = vstack\n\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 
1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n \"\"\"\n arrays = []\n for v in tup:\n arr = array(v, copy=False, subok=True)\n if arr.ndim < 2:\n arr = array(arr, copy=False, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\ndef dstack(tup):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n Takes a sequence of arrays and stack them along the third axis\n to make a single array. Rebuilds arrays divided by `dsplit`.\n This is a simple way to stack 2D arrays (images) into a single\n 3D array for processing.\n\n Parameters\n ----------\n tup : sequence of arrays\n Arrays to stack. All of them must have the same shape along all\n but the third axis.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack : Join a sequence of arrays along a new axis.\n vstack : Stack along first axis.\n hstack : Stack along second axis.\n concatenate : Join a sequence of arrays along an existing axis.\n dsplit : Split array along third axis.\n\n Notes\n -----\n Equivalent to ``np.concatenate(tup, axis=2)``.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n \"\"\"\n return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if len(_nx.shape(sub_arys[i])) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. 
The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]\n\n \"\"\"\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try:\n # handle scalar case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError:\n # indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = ([0] +\n extras * [Neach_section+1] +\n (Nsections-extras) * [Neach_section])\n div_points = _nx.array(section_sizes).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]\n end = div_points[i + 1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n return sub_arys\n\n\ndef split(ary,indices_or_sections,axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. 
Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([ 0., 1., 2.]),\n array([ 3., 4.]),\n array([ 5.]),\n array([ 6., 7.]),\n array([], dtype=float64)]\n\n \"\"\"\n try:\n len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError(\n 'array split does not result in an equal division')\n res = array_split(ary, indices_or_sections, axis)\n return res\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [ 12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [ 10., 11.],\n [ 14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [ 12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [ 11.],\n [ 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if len(ary.shape) > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. 
``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]]),\n array([[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]),\n array([[ 12., 13., 14., 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[ 0., 1.],\n [ 2., 3.]]]),\n array([[[ 4., 5.],\n [ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [ 12., 13.]]]),\n array([[[ 2., 3.],\n [ 6., 7.]],\n [[ 10., 11.],\n [ 14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [ 12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[ 11.],\n [ 15.]]]),\n array([], dtype=float64)]\n\n \"\"\"\n if len(_nx.shape(ary)) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\ndef get_array_prepare(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_prepare__) for i, x in enumerate(args)\n if hasattr(x, '__array_prepare__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef get_array_wrap(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. 
If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef kron(a, b):\n \"\"\"\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[ 1., 1., 0., 0.],\n [ 1., 1., 0., 0.],\n [ 0., 0., 1., 1.],\n [ 0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n \"\"\"\n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n nd = ndb\n if (ndb != nda):\n if (ndb > nda):\n as_ = (1,)*(ndb-nda) + as_\n else:\n bs = (1,)*(nda-ndb) + bs\n nd = nda\n result = outer(a, b).reshape(as_+bs)\n axis = nd-1\n for _ in range(nd):\n result = concatenate(result, axis=axis)\n wrapper = get_array_prepare(a, b)\n if wrapper is not None:\n result = wrapper(result)\n wrapper = get_array_wrap(a, b)\n if wrapper is not None:\n result = wrapper(result)\n return result\n\n\ndef tile(A, reps):\n \"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Note : Although tile may be used for broadcasting, it is strongly\n recommended to use numpy's broadcasting operations and functions.\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n broadcast_to : Broadcast an array to a new shape\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4]])\n \"\"\"\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):\n # Fixes the problem that the function does not make a copy if A is a\n # numpy array and the repetitions are 1 in all dimensions\n return _nx.array(A, copy=True, subok=True, ndmin=d)\n else:\n # Note that no copy of zero-sized arrays is made. However since they\n # have no data there is no risk of an inadvertent overwrite.\n c = _nx.array(A, copy=False, subok=True, ndmin=d)\n if (d < c.ndim):\n tup = (1,)*(c.ndim-d) + tup\n shape_out = tuple(s*t for s, t in zip(c.shape, tup))\n n = c.size\n if n > 0:\n for dim_in, nrep in zip(c.shape, tup):\n if nrep != 1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n n //= dim_in\n return c.reshape(shape_out)\n",
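The shape-manipulation routines documented in the source above (apply_along_axis, array_split, kron, tile) are easiest to verify through their own docstring examples. The short standalone sketch below is illustrative only, assuming nothing beyond a standard NumPy installation; it exercises three of the documented behaviours: the output shape of apply_along_axis for scalar- versus array-returning functions, array_split's tolerance of a section count that does not evenly divide the axis, and the kron index relation kt = it*st + jt.

import numpy as np

# apply_along_axis: a scalar-returning func1d drops the sliced axis,
# while an array-returning func1d keeps the input's number of dimensions.
b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.apply_along_axis(lambda a: (a[0] + a[-1]) * 0.5, 0, b))   # [4. 5. 6.]
print(np.apply_along_axis(sorted, 1, np.array([[8, 1, 7], [4, 3, 9]])))

# array_split accepts a section count that does not divide the axis length;
# split() with the same arguments would raise ValueError.
print(np.array_split(np.arange(8.0), 3))   # sub-arrays of length 3, 3, 2

# kron: element (k0, k1) of the product equals a[i0, i1] * c[j0, j1]
# with kt = it * st + jt, matching the block layout in the docstring.
a = np.array([[1, 2], [3, 4]])
c = np.array([[0, 5], [6, 7]])
k = np.kron(a, c)
assert k[3, 1] == a[1, 0] * c[1, 1]   # k0 = 1*2 + 1, k1 = 0*2 + 1
print(k)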
"from __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.testing import TestCase, run_module_suite, assert_, assert_equal\n\nrlevel = 1\n\nclass TestRegression(TestCase):\n def test_kron_matrix(self, level=rlevel):\n # Ticket #71\n x = np.matrix('[1 0; 1 0]')\n assert_equal(type(np.kron(x, x)), type(x))\n\n def test_matrix_properties(self,level=rlevel):\n # Ticket #125\n a = np.matrix([1.0], dtype=float)\n assert_(type(a.real) is np.matrix)\n assert_(type(a.imag) is np.matrix)\n c, d = np.matrix([0.0]).nonzero()\n assert_(type(c) is np.ndarray)\n assert_(type(d) is np.ndarray)\n\n def test_matrix_multiply_by_1d_vector(self, level=rlevel):\n # Ticket #473\n def mul():\n np.mat(np.eye(2))*np.ones(2)\n\n self.assertRaises(ValueError, mul)\n\n def test_matrix_std_argmax(self,level=rlevel):\n # Ticket #83\n x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))\n self.assertEqual(x.std().shape, ())\n self.assertEqual(x.argmax().shape, ())\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\"\nObjects for dealing with Hermite_e series.\n\nThis module provides a number of objects (mostly functions) useful for\ndealing with Hermite_e series, including a `HermiteE` class that\nencapsulates the usual arithmetic operations. (General information\non how this module represents and works with such polynomials is in the\ndocstring for its \"parent\" sub-package, `numpy.polynomial`).\n\nConstants\n---------\n- `hermedomain` -- Hermite_e series default domain, [-1,1].\n- `hermezero` -- Hermite_e series that evaluates identically to 0.\n- `hermeone` -- Hermite_e series that evaluates identically to 1.\n- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.\n\nArithmetic\n----------\n- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.\n- `hermeadd` -- add two Hermite_e series.\n- `hermesub` -- subtract one Hermite_e series from another.\n- `hermemul` -- multiply two Hermite_e series.\n- `hermediv` -- divide one Hermite_e series by another.\n- `hermeval` -- evaluate a Hermite_e series at given points.\n- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.\n- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.\n- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.\n- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.\n\nCalculus\n--------\n- `hermeder` -- differentiate a Hermite_e series.\n- `hermeint` -- integrate a Hermite_e series.\n\nMisc Functions\n--------------\n- `hermefromroots` -- create a Hermite_e series with specified roots.\n- `hermeroots` -- find the roots of a Hermite_e series.\n- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.\n- `hermevander2d` -- Vandermonde-like matrix for 2D power series.\n- `hermevander3d` -- Vandermonde-like matrix for 3D power series.\n- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.\n- `hermeweight` -- Hermite_e weight function.\n- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.\n- `hermefit` -- least-squares fit returning a Hermite_e series.\n- `hermetrim` -- trim leading coefficients from a Hermite_e series.\n- `hermeline` -- Hermite_e series of given straight line.\n- `herme2poly` -- convert a Hermite_e series to a polynomial.\n- `poly2herme` -- convert a polynomial to a Hermite_e series.\n\nClasses\n-------\n- `HermiteE` -- A Hermite_e series class.\n\nSee also\n--------\n`numpy.polynomial`\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport warnings\nimport numpy as np\nimport numpy.linalg as la\n\nfrom . 
import polyutils as pu\nfrom ._polybase import ABCPolyBase\n\n__all__ = [\n 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',\n 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',\n 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',\n 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',\n 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',\n 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',\n 'hermegauss', 'hermeweight']\n\nhermetrim = pu.trimcoef\n\n\ndef poly2herme(pol):\n \"\"\"\n poly2herme(pol)\n\n Convert a polynomial to a Hermite series.\n\n Convert an array representing the coefficients of a polynomial (relative\n to the \"standard\" basis) ordered from lowest degree to highest, to an\n array of the coefficients of the equivalent Hermite series, ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n pol : array_like\n 1-D array containing the polynomial coefficients\n\n Returns\n -------\n c : ndarray\n 1-D array containing the coefficients of the equivalent Hermite\n series.\n\n See Also\n --------\n herme2poly\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import poly2herme\n >>> poly2herme(np.arange(4))\n array([ 2., 10., 2., 3.])\n\n \"\"\"\n [pol] = pu.as_series([pol])\n deg = len(pol) - 1\n res = 0\n for i in range(deg, -1, -1):\n res = hermeadd(hermemulx(res), pol[i])\n return res\n\n\ndef herme2poly(c):\n \"\"\"\n Convert a Hermite series to a polynomial.\n\n Convert an array representing the coefficients of a Hermite series,\n ordered from lowest degree to highest, to an array of the coefficients\n of the equivalent polynomial (relative to the \"standard\" basis) ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n c : array_like\n 1-D array containing the Hermite series coefficients, ordered\n from lowest order term to highest.\n\n Returns\n -------\n pol : ndarray\n 1-D array containing the coefficients of the equivalent polynomial\n (relative to the \"standard\" basis) ordered from lowest order term\n to highest.\n\n See Also\n --------\n poly2herme\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import herme2poly\n >>> herme2poly([ 2., 10., 2., 3.])\n array([ 0., 1., 2., 3.])\n\n \"\"\"\n from .polynomial import polyadd, polysub, polymulx\n\n [c] = pu.as_series([c])\n n = len(c)\n if n == 1:\n return c\n if n == 2:\n return c\n else:\n c0 = c[-2]\n c1 = c[-1]\n # i is the current degree of c1\n for i in range(n - 1, 1, -1):\n tmp = c0\n c0 = polysub(c[i - 2], c1*(i - 1))\n c1 = polyadd(tmp, polymulx(c1))\n return polyadd(c0, polymulx(c1))\n\n#\n# These are constant arrays are of integer type so as to be compatible\n# with the widest range of other types, such as Decimal.\n#\n\n# Hermite\nhermedomain = np.array([-1, 1])\n\n# Hermite coefficients representing zero.\nhermezero = np.array([0])\n\n# Hermite coefficients representing one.\nhermeone = np.array([1])\n\n# Hermite coefficients representing the identity x.\nhermex = np.array([0, 1])\n\n\ndef hermeline(off, scl):\n \"\"\"\n Hermite series whose graph is a straight line.\n\n\n\n Parameters\n ----------\n off, scl : scalars\n The specified line is given by ``off + scl*x``.\n\n Returns\n -------\n y : 
ndarray\n This module's representation of the Hermite series for\n ``off + scl*x``.\n\n See Also\n --------\n polyline, chebline\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeline\n >>> from numpy.polynomial.hermite_e import hermeline, hermeval\n >>> hermeval(0,hermeline(3, 2))\n 3.0\n >>> hermeval(1,hermeline(3, 2))\n 5.0\n\n \"\"\"\n if scl != 0:\n return np.array([off, scl])\n else:\n return np.array([off])\n\n\ndef hermefromroots(roots):\n \"\"\"\n Generate a HermiteE series with given roots.\n\n The function returns the coefficients of the polynomial\n\n .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),\n\n in HermiteE form, where the `r_n` are the roots specified in `roots`.\n If a zero has multiplicity n, then it must appear in `roots` n times.\n For instance, if 2 is a root of multiplicity three and 3 is a root of\n multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The\n roots can appear in any order.\n\n If the returned coefficients are `c`, then\n\n .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)\n\n The coefficient of the last term is not generally 1 for monic\n polynomials in HermiteE form.\n\n Parameters\n ----------\n roots : array_like\n Sequence containing the roots.\n\n Returns\n -------\n out : ndarray\n 1-D array of coefficients. If all roots are real then `out` is a\n real array, if some of the roots are complex, then `out` is complex\n even if all the coefficients in the result are real (see Examples\n below).\n\n See Also\n --------\n polyfromroots, legfromroots, lagfromroots, hermfromroots,\n chebfromroots.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval\n >>> coef = hermefromroots((-1, 0, 1))\n >>> hermeval((-1, 0, 1), coef)\n array([ 0., 0., 0.])\n >>> coef = hermefromroots((-1j, 1j))\n >>> hermeval((-1j, 1j), coef)\n array([ 0.+0.j, 0.+0.j])\n\n \"\"\"\n if len(roots) == 0:\n return np.ones(1)\n else:\n [roots] = pu.as_series([roots], trim=False)\n roots.sort()\n p = [hermeline(-r, 1) for r in roots]\n n = len(p)\n while n > 1:\n m, r = divmod(n, 2)\n tmp = [hermemul(p[i], p[i+m]) for i in range(m)]\n if r:\n tmp[0] = hermemul(tmp[0], p[-1])\n p = tmp\n n = m\n return p[0]\n\n\ndef hermeadd(c1, c2):\n \"\"\"\n Add one Hermite series to another.\n\n Returns the sum of two Hermite series `c1` + `c2`. 
The arguments\n are sequences of coefficients ordered from lowest order term to\n highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the Hermite series of their sum.\n\n See Also\n --------\n hermesub, hermemul, hermediv, hermepow\n\n Notes\n -----\n Unlike multiplication, division, etc., the sum of two Hermite series\n is a Hermite series (without having to \"reproject\" the result onto\n the basis set) so addition, just like that of \"standard\" polynomials,\n is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeadd\n >>> hermeadd([1, 2, 3], [1, 2, 3, 4])\n array([ 2., 4., 6., 4.])\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if len(c1) > len(c2):\n c1[:c2.size] += c2\n ret = c1\n else:\n c2[:c1.size] += c1\n ret = c2\n return pu.trimseq(ret)\n\n\ndef hermesub(c1, c2):\n \"\"\"\n Subtract one Hermite series from another.\n\n Returns the difference of two Hermite series `c1` - `c2`. The\n sequences of coefficients are from lowest order term to highest, i.e.,\n [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Hermite series coefficients representing their difference.\n\n See Also\n --------\n hermeadd, hermemul, hermediv, hermepow\n\n Notes\n -----\n Unlike multiplication, division, etc., the difference of two Hermite\n series is a Hermite series (without having to \"reproject\" the result\n onto the basis set) so subtraction, just like that of \"standard\"\n polynomials, is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermesub\n >>> hermesub([1, 2, 3, 4], [1, 2, 3])\n array([ 0., 0., 0., 4.])\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if len(c1) > len(c2):\n c1[:c2.size] -= c2\n ret = c1\n else:\n c2 = -c2\n c2[:c1.size] += c1\n ret = c2\n return pu.trimseq(ret)\n\n\ndef hermemulx(c):\n \"\"\"Multiply a Hermite series by x.\n\n Multiply the Hermite series `c` by x, where x is the independent\n variable.\n\n\n Parameters\n ----------\n c : array_like\n 1-D array of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the result of the multiplication.\n\n Notes\n -----\n The multiplication uses the recursion relationship for Hermite\n polynomials in the form\n\n .. math::\n\n xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)))\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermemulx\n >>> hermemulx([1, 2, 3])\n array([ 2., 7., 2., 3.])\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n # The zero series needs special treatment\n if len(c) == 1 and c[0] == 0:\n return c\n\n prd = np.empty(len(c) + 1, dtype=c.dtype)\n prd[0] = c[0]*0\n prd[1] = c[0]\n for i in range(1, len(c)):\n prd[i + 1] = c[i]\n prd[i - 1] += c[i]*i\n return prd\n\n\ndef hermemul(c1, c2):\n \"\"\"\n Multiply one Hermite series by another.\n\n Returns the product of two Hermite series `c1` * `c2`. 
The arguments\n are sequences of coefficients, from lowest order \"term\" to highest,\n e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Hermite series coefficients representing their product.\n\n See Also\n --------\n hermeadd, hermesub, hermediv, hermepow\n\n Notes\n -----\n In general, the (polynomial) product of two C-series results in terms\n that are not in the Hermite polynomial basis set. Thus, to express\n the product as a Hermite series, it is necessary to \"reproject\" the\n product onto said basis set, which may produce \"unintuitive\" (but\n correct) results; see Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermemul\n >>> hermemul([1, 2, 3], [0, 1, 2])\n array([ 14., 15., 28., 7., 6.])\n\n \"\"\"\n # s1, s2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n\n if len(c1) > len(c2):\n c = c2\n xs = c1\n else:\n c = c1\n xs = c2\n\n if len(c) == 1:\n c0 = c[0]*xs\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]*xs\n c1 = c[1]*xs\n else:\n nd = len(c)\n c0 = c[-2]*xs\n c1 = c[-1]*xs\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = hermesub(c[-i]*xs, c1*(nd - 1))\n c1 = hermeadd(tmp, hermemulx(c1))\n return hermeadd(c0, hermemulx(c1))\n\n\ndef hermediv(c1, c2):\n \"\"\"\n Divide one Hermite series by another.\n\n Returns the quotient-with-remainder of two Hermite series\n `c1` / `c2`. The arguments are sequences of coefficients from lowest\n order \"term\" to highest, e.g., [1,2,3] represents the series\n ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n [quo, rem] : ndarrays\n Of Hermite series coefficients representing the quotient and\n remainder.\n\n See Also\n --------\n hermeadd, hermesub, hermemul, hermepow\n\n Notes\n -----\n In general, the (polynomial) division of one Hermite series by another\n results in quotient and remainder terms that are not in the Hermite\n polynomial basis set. Thus, to express these results as a Hermite\n series, it is necessary to \"reproject\" the results onto the Hermite\n basis set, which may produce \"unintuitive\" (but correct) results; see\n Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermediv\n >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])\n (array([ 1., 2., 3.]), array([ 0.]))\n >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])\n (array([ 1., 2., 3.]), array([ 1., 2.]))\n\n \"\"\"\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if c2[-1] == 0:\n raise ZeroDivisionError()\n\n lc1 = len(c1)\n lc2 = len(c2)\n if lc1 < lc2:\n return c1[:1]*0, c1\n elif lc2 == 1:\n return c1/c2[-1], c1[:1]*0\n else:\n quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)\n rem = c1\n for i in range(lc1 - lc2, - 1, -1):\n p = hermemul([0]*i + [1], c2)\n q = rem[-1]/p[-1]\n rem = rem[:-1] - q*p[:-1]\n quo[i] = q\n return quo, pu.trimseq(rem)\n\n\ndef hermepow(c, pow, maxpower=16):\n \"\"\"Raise a Hermite series to a power.\n\n Returns the Hermite series `c` raised to the power `pow`. 
The\n argument `c` is a sequence of coefficients ordered from low to high.\n i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``\n\n Parameters\n ----------\n c : array_like\n 1-D array of Hermite series coefficients ordered from low to\n high.\n pow : integer\n Power to which the series will be raised\n maxpower : integer, optional\n Maximum power allowed. This is mainly to limit growth of the series\n to unmanageable size. Default is 16\n\n Returns\n -------\n coef : ndarray\n Hermite series of power.\n\n See Also\n --------\n hermeadd, hermesub, hermemul, hermediv\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermepow\n >>> hermepow([1, 2, 3], 2)\n array([ 23., 28., 46., 12., 9.])\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n power = int(pow)\n if power != pow or power < 0:\n raise ValueError(\"Power must be a non-negative integer.\")\n elif maxpower is not None and power > maxpower:\n raise ValueError(\"Power is too large\")\n elif power == 0:\n return np.array([1], dtype=c.dtype)\n elif power == 1:\n return c\n else:\n # This can be made more efficient by using powers of two\n # in the usual way.\n prd = c\n for i in range(2, power + 1):\n prd = hermemul(prd, c)\n return prd\n\n\ndef hermeder(c, m=1, scl=1, axis=0):\n \"\"\"\n Differentiate a Hermite_e series.\n\n Returns the series coefficients `c` differentiated `m` times along\n `axis`. At each iteration the result is multiplied by `scl` (the\n scaling factor is for use in a linear change of variable). The argument\n `c` is an array of coefficients from low to high degree along each\n axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``\n while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)\n + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1\n is ``y``.\n\n Parameters\n ----------\n c : array_like\n Array of Hermite_e series coefficients. If `c` is multidimensional\n the different axis correspond to different variables with the\n degree in each axis given by the corresponding index.\n m : int, optional\n Number of derivatives taken, must be non-negative. (Default: 1)\n scl : scalar, optional\n Each differentiation is multiplied by `scl`. The end result is\n multiplication by ``scl**m``. This is for use in a linear change of\n variable. (Default: 1)\n axis : int, optional\n Axis over which the derivative is taken. (Default: 0).\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n der : ndarray\n Hermite series of the derivative.\n\n See Also\n --------\n hermeint\n\n Notes\n -----\n In general, the result of differentiating a Hermite series does not\n resemble the same operation on a power series. 
Thus the result of this\n function may be \"unintuitive,\" albeit correct; see Examples section\n below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeder\n >>> hermeder([ 1., 1., 1., 1.])\n array([ 1., 2., 3.])\n >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)\n array([ 1., 2., 3.])\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=1)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n cnt, iaxis = [int(t) for t in [m, axis]]\n\n if cnt != m:\n raise ValueError(\"The order of derivation must be integer\")\n if cnt < 0:\n raise ValueError(\"The order of derivation must be non-negative\")\n if iaxis != axis:\n raise ValueError(\"The axis must be integer\")\n if not -c.ndim <= iaxis < c.ndim:\n raise ValueError(\"The axis is out of range\")\n if iaxis < 0:\n iaxis += c.ndim\n\n if cnt == 0:\n return c\n\n c = np.rollaxis(c, iaxis)\n n = len(c)\n if cnt >= n:\n return c[:1]*0\n else:\n for i in range(cnt):\n n = n - 1\n c *= scl\n der = np.empty((n,) + c.shape[1:], dtype=c.dtype)\n for j in range(n, 0, -1):\n der[j - 1] = j*c[j]\n c = der\n c = np.rollaxis(c, 0, iaxis + 1)\n return c\n\n\ndef hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):\n \"\"\"\n Integrate a Hermite_e series.\n\n Returns the Hermite_e series coefficients `c` integrated `m` times from\n `lbnd` along `axis`. At each iteration the resulting series is\n **multiplied** by `scl` and an integration constant, `k`, is added.\n The scaling factor is for use in a linear change of variable. (\"Buyer\n beware\": note that, depending on what one is doing, one may want `scl`\n to be the reciprocal of what one might expect; for more information,\n see the Notes section below.) The argument `c` is an array of\n coefficients from low to high degree along each axis, e.g., [1,2,3]\n represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]\n represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +\n 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.\n\n Parameters\n ----------\n c : array_like\n Array of Hermite_e series coefficients. If c is multidimensional\n the different axis correspond to different variables with the\n degree in each axis given by the corresponding index.\n m : int, optional\n Order of integration, must be positive. (Default: 1)\n k : {[], list, scalar}, optional\n Integration constant(s). The value of the first integral at\n ``lbnd`` is the first value in the list, the value of the second\n integral at ``lbnd`` is the second value, etc. If ``k == []`` (the\n default), all constants are set to zero. If ``m == 1``, a single\n scalar can be given instead of a list.\n lbnd : scalar, optional\n The lower bound of the integral. (Default: 0)\n scl : scalar, optional\n Following each integration the result is *multiplied* by `scl`\n before the integration constant is added. (Default: 1)\n axis : int, optional\n Axis over which the integral is taken. (Default: 0).\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n S : ndarray\n Hermite_e series coefficients of the integral.\n\n Raises\n ------\n ValueError\n If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or\n ``np.isscalar(scl) == False``.\n\n See Also\n --------\n hermeder\n\n Notes\n -----\n Note that the result of each integration is *multiplied* by `scl`.\n Why is this important to note? Say one is making a linear change of\n variable :math:`u = ax + b` in an integral relative to `x`. Then\n .. 
math::`dx = du/a`, so one will need to set `scl` equal to\n :math:`1/a` - perhaps not what one would have first thought.\n\n Also note that, in general, the result of integrating a C-series needs\n to be \"reprojected\" onto the C-series basis set. Thus, typically,\n the result of this function is \"unintuitive,\" albeit correct; see\n Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeint\n >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.\n array([ 1., 1., 1., 1.])\n >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0\n array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])\n >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.\n array([ 2., 1., 1., 1.])\n >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1\n array([-1., 1., 1., 1.])\n >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)\n array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=1)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if not np.iterable(k):\n k = [k]\n cnt, iaxis = [int(t) for t in [m, axis]]\n\n if cnt != m:\n raise ValueError(\"The order of integration must be integer\")\n if cnt < 0:\n raise ValueError(\"The order of integration must be non-negative\")\n if len(k) > cnt:\n raise ValueError(\"Too many integration constants\")\n if iaxis != axis:\n raise ValueError(\"The axis must be integer\")\n if not -c.ndim <= iaxis < c.ndim:\n raise ValueError(\"The axis is out of range\")\n if iaxis < 0:\n iaxis += c.ndim\n\n if cnt == 0:\n return c\n\n c = np.rollaxis(c, iaxis)\n k = list(k) + [0]*(cnt - len(k))\n for i in range(cnt):\n n = len(c)\n c *= scl\n if n == 1 and np.all(c[0] == 0):\n c[0] += k[i]\n else:\n tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)\n tmp[0] = c[0]*0\n tmp[1] = c[0]\n for j in range(1, n):\n tmp[j + 1] = c[j]/(j + 1)\n tmp[0] += k[i] - hermeval(lbnd, tmp)\n c = tmp\n c = np.rollaxis(c, 0, iaxis + 1)\n return c\n\n\ndef hermeval(x, c, tensor=True):\n \"\"\"\n Evaluate an HermiteE series at points x.\n\n If `c` is of length `n + 1`, this function returns the value:\n\n .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)\n\n The parameter `x` is converted to an array only if it is a tuple or a\n list, otherwise it is treated as a scalar. In either case, either `x`\n or its elements must support multiplication and addition both with\n themselves and with the elements of `c`.\n\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If\n `c` is multidimensional, then the shape of the result depends on the\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\n x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that\n scalars have shape (,).\n\n Trailing zeros in the coefficients will be used in the evaluation, so\n they should be avoided if efficiency is a concern.\n\n Parameters\n ----------\n x : array_like, compatible object\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\n it is left unchanged and treated as a scalar. In either case, `x`\n or its elements must support addition and multiplication with\n with themselves and with the elements of `c`.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree n are contained in c[n]. If `c` is multidimensional the\n remaining indices enumerate multiple polynomials. 
In the two\n dimensional case the coefficients may be thought of as stored in\n the columns of `c`.\n tensor : boolean, optional\n If True, the shape of the coefficient array is extended with ones\n on the right, one for each dimension of `x`. Scalars have dimension 0\n for this action. The result is that every column of coefficients in\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\n over the columns of `c` for the evaluation. This keyword is useful\n when `c` is multidimensional. The default value is True.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n values : ndarray, algebra_like\n The shape of the return value is described above.\n\n See Also\n --------\n hermeval2d, hermegrid2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n The evaluation uses Clenshaw recursion, aka synthetic division.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeval\n >>> coef = [1,2,3]\n >>> hermeval(1, coef)\n 3.0\n >>> hermeval([[1,2],[3,4]], coef)\n array([[ 3., 14.],\n [ 31., 54.]])\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=0)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,)*x.ndim)\n\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n nd = len(c)\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = c[-i] - c1*(nd - 1)\n c1 = tmp + c1*x\n return c0 + c1*x\n\n\ndef hermeval2d(x, y, c):\n \"\"\"\n Evaluate a 2-D HermiteE series at points (x, y).\n\n This function returns the values:\n\n .. math:: p(x,y) = \\\\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)\n\n The parameters `x` and `y` are converted to arrays only if they are\n tuples or a lists, otherwise they are treated as a scalars and they\n must have the same shape after conversion. In either case, either `x`\n and `y` or their elements must support multiplication and addition both\n with themselves and with the elements of `c`.\n\n If `c` is a 1-D array a one is implicitly appended to its shape to make\n it 2-D. The shape of the result will be c.shape[2:] + x.shape.\n\n Parameters\n ----------\n x, y : array_like, compatible objects\n The two dimensional series is evaluated at the points `(x, y)`,\n where `x` and `y` must have the same shape. If `x` or `y` is a list\n or tuple, it is first converted to an ndarray, otherwise it is left\n unchanged and if it isn't an ndarray it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficient of the term\n of multi-degree i,j is contained in ``c[i,j]``. If `c` has\n dimension greater than two the remaining indices enumerate multiple\n sets of coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points formed with\n pairs of corresponding values from `x` and `y`.\n\n See Also\n --------\n hermeval, hermegrid2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n try:\n x, y = np.array((x, y), copy=0)\n except:\n raise ValueError('x, y are incompatible')\n\n c = hermeval(x, c)\n c = hermeval(y, c, tensor=False)\n return c\n\n\ndef hermegrid2d(x, y, c):\n \"\"\"\n Evaluate a 2-D HermiteE series on the Cartesian product of x and y.\n\n This function returns the values:\n\n .. 
math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)\n\n where the points `(a, b)` consist of all pairs formed by taking\n `a` from `x` and `b` from `y`. The resulting points form a grid with\n `x` in the first dimension and `y` in the second.\n\n The parameters `x` and `y` are converted to arrays only if they are\n tuples or a lists, otherwise they are treated as a scalars. In either\n case, either `x` and `y` or their elements must support multiplication\n and addition both with themselves and with the elements of `c`.\n\n If `c` has fewer than two dimensions, ones are implicitly appended to\n its shape to make it 2-D. The shape of the result will be c.shape[2:] +\n x.shape.\n\n Parameters\n ----------\n x, y : array_like, compatible objects\n The two dimensional series is evaluated at the points in the\n Cartesian product of `x` and `y`. If `x` or `y` is a list or\n tuple, it is first converted to an ndarray, otherwise it is left\n unchanged and, if it isn't an ndarray, it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\n greater than two the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points in the Cartesian\n product of `x` and `y`.\n\n See Also\n --------\n hermeval, hermeval2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n c = hermeval(x, c)\n c = hermeval(y, c)\n return c\n\n\ndef hermeval3d(x, y, z, c):\n \"\"\"\n Evaluate a 3-D Hermite_e series at points (x, y, z).\n\n This function returns the values:\n\n .. math:: p(x,y,z) = \\\\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)\n\n The parameters `x`, `y`, and `z` are converted to arrays only if\n they are tuples or a lists, otherwise they are treated as a scalars and\n they must have the same shape after conversion. In either case, either\n `x`, `y`, and `z` or their elements must support multiplication and\n addition both with themselves and with the elements of `c`.\n\n If `c` has fewer than 3 dimensions, ones are implicitly appended to its\n shape to make it 3-D. The shape of the result will be c.shape[3:] +\n x.shape.\n\n Parameters\n ----------\n x, y, z : array_like, compatible object\n The three dimensional series is evaluated at the points\n `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If\n any of `x`, `y`, or `z` is a list or tuple, it is first converted\n to an ndarray, otherwise it is left unchanged and if it isn't an\n ndarray it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficient of the term of\n multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension\n greater than 3 the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the multidimensional polynomial on points formed with\n triples of corresponding values from `x`, `y`, and `z`.\n\n See Also\n --------\n hermeval, hermeval2d, hermegrid2d, hermegrid3d\n\n Notes\n -----\n\n .. 
versionadded::1.7.0\n\n \"\"\"\n try:\n x, y, z = np.array((x, y, z), copy=0)\n except:\n raise ValueError('x, y, z are incompatible')\n\n c = hermeval(x, c)\n c = hermeval(y, c, tensor=False)\n c = hermeval(z, c, tensor=False)\n return c\n\n\ndef hermegrid3d(x, y, z, c):\n \"\"\"\n Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.\n\n This function returns the values:\n\n .. math:: p(a,b,c) = \\\\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)\n\n where the points `(a, b, c)` consist of all triples formed by taking\n `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form\n a grid with `x` in the first dimension, `y` in the second, and `z` in\n the third.\n\n The parameters `x`, `y`, and `z` are converted to arrays only if they\n are tuples or a lists, otherwise they are treated as a scalars. In\n either case, either `x`, `y`, and `z` or their elements must support\n multiplication and addition both with themselves and with the elements\n of `c`.\n\n If `c` has fewer than three dimensions, ones are implicitly appended to\n its shape to make it 3-D. The shape of the result will be c.shape[3:] +\n x.shape + y.shape + z.shape.\n\n Parameters\n ----------\n x, y, z : array_like, compatible objects\n The three dimensional series is evaluated at the points in the\n Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a\n list or tuple, it is first converted to an ndarray, otherwise it is\n left unchanged and, if it isn't an ndarray, it is treated as a\n scalar.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\n greater than two the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points in the Cartesian\n product of `x` and `y`.\n\n See Also\n --------\n hermeval, hermeval2d, hermegrid2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n c = hermeval(x, c)\n c = hermeval(y, c)\n c = hermeval(z, c)\n return c\n\n\ndef hermevander(x, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degree.\n\n Returns the pseudo-Vandermonde matrix of degree `deg` and sample points\n `x`. The pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., i] = He_i(x),\n\n where `0 <= i <= deg`. The leading indices of `V` index the elements of\n `x` and the last index is the degree of the HermiteE polynomial.\n\n If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the\n array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and\n ``hermeval(x, c)`` are the same up to roundoff. This equivalence is\n useful both for least squares fitting and for the evaluation of a large\n number of HermiteE series of the same degree and sample points.\n\n Parameters\n ----------\n x : array_like\n Array of points. The dtype is converted to float64 or complex128\n depending on whether any of the elements are complex. If `x` is\n scalar it is converted to a 1-D array.\n deg : int\n Degree of the resulting matrix.\n\n Returns\n -------\n vander : ndarray\n The pseudo-Vandermonde matrix. The shape of the returned matrix is\n ``x.shape + (deg + 1,)``, where The last index is the degree of the\n corresponding HermiteE polynomial. 
The dtype will be the same as\n the converted `x`.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermevander\n >>> x = np.array([-1, 0, 1])\n >>> hermevander(x, 3)\n array([[ 1., -1., 0., 2.],\n [ 1., 0., -1., -0.],\n [ 1., 1., 0., -2.]])\n\n \"\"\"\n ideg = int(deg)\n if ideg != deg:\n raise ValueError(\"deg must be integer\")\n if ideg < 0:\n raise ValueError(\"deg must be non-negative\")\n\n x = np.array(x, copy=0, ndmin=1) + 0.0\n dims = (ideg + 1,) + x.shape\n dtyp = x.dtype\n v = np.empty(dims, dtype=dtyp)\n v[0] = x*0 + 1\n if ideg > 0:\n v[1] = x\n for i in range(2, ideg + 1):\n v[i] = (v[i-1]*x - v[i-2]*(i - 1))\n return np.rollaxis(v, 0, v.ndim)\n\n\ndef hermevander2d(x, y, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\n\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\n points `(x, y)`. The pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),\n\n where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of\n `V` index the points `(x, y)` and the last index encodes the degrees of\n the HermiteE polynomials.\n\n If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`\n correspond to the elements of a 2-D coefficient array `c` of shape\n (xdeg + 1, ydeg + 1) in the order\n\n .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...\n\n and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same\n up to roundoff. This equivalence is useful both for least squares\n fitting and for the evaluation of a large number of 2-D HermiteE\n series of the same degrees and sample points.\n\n Parameters\n ----------\n x, y : array_like\n Arrays of point coordinates, all of the same shape. The dtypes\n will be converted to either float64 or complex128 depending on\n whether any of the elements are complex. Scalars are converted to\n 1-D arrays.\n deg : list of ints\n List of maximum degrees of the form [x_deg, y_deg].\n\n Returns\n -------\n vander2d : ndarray\n The shape of the returned matrix is ``x.shape + (order,)``, where\n :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same\n as the converted `x` and `y`.\n\n See Also\n --------\n hermevander, hermevander3d. hermeval2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n ideg = [int(d) for d in deg]\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\n if is_valid != [1, 1]:\n raise ValueError(\"degrees must be non-negative integers\")\n degx, degy = ideg\n x, y = np.array((x, y), copy=0) + 0.0\n\n vx = hermevander(x, degx)\n vy = hermevander(y, degy)\n v = vx[..., None]*vy[..., None,:]\n return v.reshape(v.shape[:-2] + (-1,))\n\n\ndef hermevander3d(x, y, z, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\n\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\n points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,\n then Hehe pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),\n\n where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading\n indices of `V` index the points `(x, y, z)` and the last index encodes\n the degrees of the HermiteE polynomials.\n\n If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns\n of `V` correspond to the elements of a 3-D coefficient array `c` of\n shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order\n\n .. math:: c_{000}, c_{001}, c_{002},... 
, c_{010}, c_{011}, c_{012},...\n\n and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the\n same up to roundoff. This equivalence is useful both for least squares\n fitting and for the evaluation of a large number of 3-D HermiteE\n series of the same degrees and sample points.\n\n Parameters\n ----------\n x, y, z : array_like\n Arrays of point coordinates, all of the same shape. The dtypes will\n be converted to either float64 or complex128 depending on whether\n any of the elements are complex. Scalars are converted to 1-D\n arrays.\n deg : list of ints\n List of maximum degrees of the form [x_deg, y_deg, z_deg].\n\n Returns\n -------\n vander3d : ndarray\n The shape of the returned matrix is ``x.shape + (order,)``, where\n :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will\n be the same as the converted `x`, `y`, and `z`.\n\n See Also\n --------\n hermevander, hermevander3d. hermeval2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n ideg = [int(d) for d in deg]\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\n if is_valid != [1, 1, 1]:\n raise ValueError(\"degrees must be non-negative integers\")\n degx, degy, degz = ideg\n x, y, z = np.array((x, y, z), copy=0) + 0.0\n\n vx = hermevander(x, degx)\n vy = hermevander(y, degy)\n vz = hermevander(z, degz)\n v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]\n return v.reshape(v.shape[:-3] + (-1,))\n\n\ndef hermefit(x, y, deg, rcond=None, full=False, w=None):\n \"\"\"\n Least squares fit of Hermite series to data.\n\n Return the coefficients of a HermiteE series of degree `deg` that is\n the least squares fit to the data values `y` given at points `x`. If\n `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D\n multiple fits are done, one for each column of `y`, and the resulting\n coefficients are stored in the corresponding columns of a 2-D return.\n The fitted polynomial(s) are in the form\n\n .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),\n\n where `n` is `deg`.\n\n Parameters\n ----------\n x : array_like, shape (M,)\n x-coordinates of the M sample points ``(x[i], y[i])``.\n y : array_like, shape (M,) or (M, K)\n y-coordinates of the sample points. Several data sets of sample\n points sharing the same x-coordinates can be fitted at once by\n passing in a 2D-array that contains one dataset per column.\n deg : int or 1-D array_like\n Degree(s) of the fitting polynomials. If `deg` is a single integer\n all terms up to and including the `deg`'th term are included in the\n fit. For Numpy versions >= 1.11 a list of integers specifying the\n degrees of the terms to include may be used instead.\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller than\n this relative to the largest singular value will be ignored. The\n default value is len(x)*eps, where eps is the relative precision of\n the float type, about 2e-16 in most cases.\n full : bool, optional\n Switch determining nature of return value. When it is False (the\n default) just the coefficients are returned, when True diagnostic\n information from the singular value decomposition is also returned.\n w : array_like, shape (`M`,), optional\n Weights. If not None, the contribution of each point\n ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the\n weights are chosen so that the errors of the products ``w[i]*y[i]``\n all have the same variance. 
The default value is None.\n\n Returns\n -------\n coef : ndarray, shape (M,) or (M, K)\n Hermite coefficients ordered from low to high. If `y` was 2-D,\n the coefficients for the data in column k of `y` are in column\n `k`.\n\n [residuals, rank, singular_values, rcond] : list\n These values are only returned if `full` = True\n\n resid -- sum of squared residuals of the least squares fit\n rank -- the numerical rank of the scaled Vandermonde matrix\n sv -- singular values of the scaled Vandermonde matrix\n rcond -- value of `rcond`.\n\n For more details, see `linalg.lstsq`.\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is\n deficient. The warning is only raised if `full` = False. The\n warnings can be turned off by\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', RankWarning)\n\n See Also\n --------\n chebfit, legfit, polyfit, hermfit, polyfit\n hermeval : Evaluates a Hermite series.\n hermevander : pseudo Vandermonde matrix of Hermite series.\n hermeweight : HermiteE weight function.\n linalg.lstsq : Computes a least-squares fit from the matrix.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution is the coefficients of the HermiteE series `p` that\n minimizes the sum of the weighted squared errors\n\n .. math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\n\n where the :math:`w_j` are the weights. This problem is solved by\n setting up the (typically) overdetermined matrix equation\n\n .. math:: V(x) * c = w * y,\n\n where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`\n are the coefficients to be solved for, and the elements of `y` are the\n observed values. This equation is then solved using the singular value\n decomposition of `V`.\n\n If some of the singular values of `V` are so small that they are\n neglected, then a `RankWarning` will be issued. This means that the\n coefficient values may be poorly determined. Using a lower order fit\n will usually get rid of the warning. The `rcond` parameter can also be\n set to a value smaller than its default, but the resulting fit may be\n spurious and have large contributions from roundoff error.\n\n Fits using HermiteE series are probably most useful when the data can\n be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE\n weight. In that case the weight ``sqrt(w(x[i])`` should be used\n together with data values ``y[i]/sqrt(w(x[i])``. The weight function is\n available as `hermeweight`.\n\n References\n ----------\n .. 
[1] Wikipedia, \"Curve fitting\",\n http://en.wikipedia.org/wiki/Curve_fitting\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermefik, hermeval\n >>> x = np.linspace(-10, 10)\n >>> err = np.random.randn(len(x))/10\n >>> y = hermeval(x, [1, 2, 3]) + err\n >>> hermefit(x, y, 2)\n array([ 1.01690445, 1.99951418, 2.99948696])\n\n \"\"\"\n x = np.asarray(x) + 0.0\n y = np.asarray(y) + 0.0\n deg = np.asarray(deg)\n\n # check arguments.\n if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:\n raise TypeError(\"deg must be an int or non-empty 1-D array of int\")\n if deg.min() < 0:\n raise ValueError(\"expected deg >= 0\")\n if x.ndim != 1:\n raise TypeError(\"expected 1D vector for x\")\n if x.size == 0:\n raise TypeError(\"expected non-empty vector for x\")\n if y.ndim < 1 or y.ndim > 2:\n raise TypeError(\"expected 1D or 2D array for y\")\n if len(x) != len(y):\n raise TypeError(\"expected x and y to have same length\")\n\n if deg.ndim == 0:\n lmax = deg\n order = lmax + 1\n van = hermevander(x, lmax)\n else:\n deg = np.sort(deg)\n lmax = deg[-1]\n order = len(deg)\n van = hermevander(x, lmax)[:, deg]\n\n # set up the least squares matrices in transposed form\n lhs = van.T\n rhs = y.T\n if w is not None:\n w = np.asarray(w) + 0.0\n if w.ndim != 1:\n raise TypeError(\"expected 1D vector for w\")\n if len(x) != len(w):\n raise TypeError(\"expected x and w to have same length\")\n # apply weights. Don't use inplace operations as they\n # can cause problems with NA.\n lhs = lhs * w\n rhs = rhs * w\n\n # set rcond\n if rcond is None:\n rcond = len(x)*np.finfo(x.dtype).eps\n\n # Determine the norms of the design matrix columns.\n if issubclass(lhs.dtype.type, np.complexfloating):\n scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))\n else:\n scl = np.sqrt(np.square(lhs).sum(1))\n scl[scl == 0] = 1\n\n # Solve the least squares problem.\n c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)\n c = (c.T/scl).T\n\n # Expand c to include non-fitted coefficients which are set to zero\n if deg.ndim > 0:\n if c.ndim == 2:\n cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)\n else:\n cc = np.zeros(lmax+1, dtype=c.dtype)\n cc[deg] = c\n c = cc\n\n # warn on rank reduction\n if rank != order and not full:\n msg = \"The fit may be poorly conditioned\"\n warnings.warn(msg, pu.RankWarning)\n\n if full:\n return c, [resids, rank, s, rcond]\n else:\n return c\n\n\ndef hermecompanion(c):\n \"\"\"\n Return the scaled companion matrix of c.\n\n The basis polynomials are scaled so that the companion matrix is\n symmetric when `c` is an HermiteE basis polynomial. This provides\n better eigenvalue estimates than the unscaled case and for basis\n polynomials the eigenvalues are guaranteed to be real if\n `numpy.linalg.eigvalsh` is used to obtain them.\n\n Parameters\n ----------\n c : array_like\n 1-D array of HermiteE series coefficients ordered from low to high\n degree.\n\n Returns\n -------\n mat : ndarray\n Scaled companion matrix of dimensions (deg, deg).\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n if len(c) < 2:\n raise ValueError('Series must have maximum degree of at least 1.')\n if len(c) == 2:\n return np.array([[-c[0]/c[1]]])\n\n n = len(c) - 1\n mat = np.zeros((n, n), dtype=c.dtype)\n scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))\n scl = np.multiply.accumulate(scl)[::-1]\n top = mat.reshape(-1)[1::n+1]\n bot = mat.reshape(-1)[n::n+1]\n top[...] = np.sqrt(np.arange(1, n))\n bot[...] 
= top\n mat[:, -1] -= scl*c[:-1]/c[-1]\n return mat\n\n\ndef hermeroots(c):\n \"\"\"\n Compute the roots of a HermiteE series.\n\n Return the roots (a.k.a. \"zeros\") of the polynomial\n\n .. math:: p(x) = \\\\sum_i c[i] * He_i(x).\n\n Parameters\n ----------\n c : 1-D array_like\n 1-D array of coefficients.\n\n Returns\n -------\n out : ndarray\n Array of the roots of the series. If all the roots are real,\n then `out` is also real, otherwise it is complex.\n\n See Also\n --------\n polyroots, legroots, lagroots, hermroots, chebroots\n\n Notes\n -----\n The root estimates are obtained as the eigenvalues of the companion\n matrix, Roots far from the origin of the complex plane may have large\n errors due to the numerical instability of the series for such\n values. Roots with multiplicity greater than 1 will also show larger\n errors as the value of the series near such points is relatively\n insensitive to errors in the roots. Isolated roots near the origin can\n be improved by a few iterations of Newton's method.\n\n The HermiteE series basis polynomials aren't powers of `x` so the\n results of this function may seem unintuitive.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots\n >>> coef = hermefromroots([-1, 0, 1])\n >>> coef\n array([ 0., 2., 0., 1.])\n >>> hermeroots(coef)\n array([-1., 0., 1.])\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n if len(c) <= 1:\n return np.array([], dtype=c.dtype)\n if len(c) == 2:\n return np.array([-c[0]/c[1]])\n\n m = hermecompanion(c)\n r = la.eigvals(m)\n r.sort()\n return r\n\n\ndef _normed_hermite_e_n(x, n):\n \"\"\"\n Evaluate a normalized HermiteE polynomial.\n\n Compute the value of the normalized HermiteE polynomial of degree ``n``\n at the points ``x``.\n\n\n Parameters\n ----------\n x : ndarray of double.\n Points at which to evaluate the function\n n : int\n Degree of the normalized HermiteE function to be evaluated.\n\n Returns\n -------\n values : ndarray\n The shape of the return value is described above.\n\n Notes\n -----\n .. versionadded:: 1.10.0\n\n This function is needed for finding the Gauss points and integration\n weights for high degrees. The values of the standard HermiteE functions\n overflow when n >= 207.\n\n \"\"\"\n if n == 0:\n return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))\n\n c0 = 0.\n c1 = 1./np.sqrt(np.sqrt(2*np.pi))\n nd = float(n)\n for i in range(n - 1):\n tmp = c0\n c0 = -c1*np.sqrt((nd - 1.)/nd)\n c1 = tmp + c1*x*np.sqrt(1./nd)\n nd = nd - 1.0\n return c0 + c1*x\n\n\ndef hermegauss(deg):\n \"\"\"\n Gauss-HermiteE quadrature.\n\n Computes the sample points and weights for Gauss-HermiteE quadrature.\n These sample points and weights will correctly integrate polynomials of\n degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`\n with the weight function :math:`f(x) = \\exp(-x^2/2)`.\n\n Parameters\n ----------\n deg : int\n Number of sample points and weights. It must be >= 1.\n\n Returns\n -------\n x : ndarray\n 1-D ndarray containing the sample points.\n y : ndarray\n 1-D ndarray containing the weights.\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n The results have only been tested up to degree 100, higher degrees may\n be problematic. The weights are determined by using the fact that\n\n .. 
math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))\n\n where :math:`c` is a constant independent of :math:`k` and :math:`x_k`\n is the k'th root of :math:`He_n`, and then scaling the results to get\n the right value when integrating 1.\n\n \"\"\"\n ideg = int(deg)\n if ideg != deg or ideg < 1:\n raise ValueError(\"deg must be a non-negative integer\")\n\n # first approximation of roots. We use the fact that the companion\n # matrix is symmetric in this case in order to obtain better zeros.\n c = np.array([0]*deg + [1])\n m = hermecompanion(c)\n x = la.eigvalsh(m)\n\n # improve roots by one application of Newton\n dy = _normed_hermite_e_n(x, ideg)\n df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)\n x -= dy/df\n\n # compute the weights. We scale the factor to avoid possible numerical\n # overflow.\n fm = _normed_hermite_e_n(x, ideg - 1)\n fm /= np.abs(fm).max()\n w = 1/(fm * fm)\n\n # for Hermite_e we can also symmetrize\n w = (w + w[::-1])/2\n x = (x - x[::-1])/2\n\n # scale w to get the right value\n w *= np.sqrt(2*np.pi) / w.sum()\n\n return x, w\n\n\ndef hermeweight(x):\n \"\"\"Weight function of the Hermite_e polynomials.\n\n The weight function is :math:`\\exp(-x^2/2)` and the interval of\n integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are\n orthogonal, but not normalized, with respect to this weight function.\n\n Parameters\n ----------\n x : array_like\n Values at which the weight function will be computed.\n\n Returns\n -------\n w : ndarray\n The weight function at `x`.\n\n Notes\n -----\n\n .. versionadded::1.7.0\n\n \"\"\"\n w = np.exp(-.5*x**2)\n return w\n\n\n#\n# HermiteE series class\n#\n\nclass HermiteE(ABCPolyBase):\n \"\"\"An HermiteE series class.\n\n The HermiteE class provides the standard Python numerical methods\n '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the\n attributes and methods listed in the `ABCPolyBase` documentation.\n\n Parameters\n ----------\n coef : array_like\n HermiteE coefficients in order of increasing degree, i.e,\n ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``.\n domain : (2,) array_like, optional\n Domain to use. The interval ``[domain[0], domain[1]]`` is mapped\n to the interval ``[window[0], window[1]]`` by shifting and scaling.\n The default value is [-1, 1].\n window : (2,) array_like, optional\n Window, see `domain` for its use. The default value is [-1, 1].\n\n .. versionadded:: 1.6.0\n\n \"\"\"\n # Virtual Functions\n _add = staticmethod(hermeadd)\n _sub = staticmethod(hermesub)\n _mul = staticmethod(hermemul)\n _div = staticmethod(hermediv)\n _pow = staticmethod(hermepow)\n _val = staticmethod(hermeval)\n _int = staticmethod(hermeint)\n _der = staticmethod(hermeder)\n _fit = staticmethod(hermefit)\n _line = staticmethod(hermeline)\n _roots = staticmethod(hermeroots)\n _fromroots = staticmethod(hermefromroots)\n\n # Virtual properties\n nickname = 'herme'\n domain = np.array(hermedomain)\n window = np.array(hermedomain)\n",
"from __future__ import division, absolute_import, print_function\n\nimport locale\n\nimport numpy as np\nfrom numpy.testing import (\n run_module_suite, assert_, assert_equal, dec, assert_raises,\n assert_array_equal, TestCase, temppath,\n)\nfrom numpy.compat import sixu\nfrom test_print import in_foreign_locale\n\nlongdouble_longer_than_double = (np.finfo(np.longdouble).eps\n < np.finfo(np.double).eps)\n\n\n_o = 1 + np.finfo(np.longdouble).eps\nstring_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))\ndel _o\n\n\ndef test_scalar_extraction():\n \"\"\"Confirm that extracting a value doesn't convert to python float\"\"\"\n o = 1 + np.finfo(np.longdouble).eps\n a = np.array([o, o, o])\n assert_equal(a[1], o)\n\n\n# Conversions string -> long double\n\n\ndef test_repr_roundtrip():\n o = 1 + np.finfo(np.longdouble).eps\n assert_equal(np.longdouble(repr(o)), o,\n \"repr was %s\" % repr(o))\n\n\ndef test_unicode():\n np.longdouble(sixu(\"1.2\"))\n\n\ndef test_string():\n np.longdouble(\"1.2\")\n\n\ndef test_bytes():\n np.longdouble(b\"1.2\")\n\n\n@in_foreign_locale\ndef test_fromstring_foreign():\n f = 1.234\n a = np.fromstring(repr(f), dtype=float, sep=\" \")\n assert_equal(a[0], f)\n\n\[email protected](string_to_longdouble_inaccurate, \"Need strtold_l\")\ndef test_repr_roundtrip_bytes():\n o = 1 + np.finfo(np.longdouble).eps\n assert_equal(np.longdouble(repr(o).encode(\"ascii\")), o)\n\n\n@in_foreign_locale\ndef test_repr_roundtrip_foreign():\n o = 1.5\n assert_equal(o, np.longdouble(repr(o)))\n\n\ndef test_bogus_string():\n assert_raises(ValueError, np.longdouble, \"spam\")\n assert_raises(ValueError, np.longdouble, \"1.0 flub\")\n\n\[email protected](string_to_longdouble_inaccurate, \"Need strtold_l\")\ndef test_fromstring():\n o = 1 + np.finfo(np.longdouble).eps\n s = (\" \" + repr(o))*5\n a = np.array([o]*5)\n assert_equal(np.fromstring(s, sep=\" \", dtype=np.longdouble), a,\n err_msg=\"reading '%s'\" % s)\n\n\n@in_foreign_locale\ndef test_fromstring_best_effort_float():\n assert_equal(np.fromstring(\"1,234\", dtype=float, sep=\" \"),\n np.array([1.]))\n\n\n@in_foreign_locale\ndef test_fromstring_best_effort():\n assert_equal(np.fromstring(\"1,234\", dtype=np.longdouble, sep=\" \"),\n np.array([1.]))\n\n\ndef test_fromstring_bogus():\n assert_equal(np.fromstring(\"1. 2. 3. flop 4.\", dtype=float, sep=\" \"),\n np.array([1., 2., 3.]))\n\n\ndef test_fromstring_empty():\n assert_equal(np.fromstring(\"xxxxx\", sep=\"x\"),\n np.array([]))\n\n\ndef test_fromstring_missing():\n assert_equal(np.fromstring(\"1xx3x4x5x6\", sep=\"x\"),\n np.array([1]))\n\n\nclass FileBased(TestCase):\n\n ldbl = 1 + np.finfo(np.longdouble).eps\n tgt = np.array([ldbl]*5)\n out = ''.join([repr(t) + '\\n' for t in tgt])\n\n def test_fromfile_bogus(self):\n with temppath() as path:\n with open(path, 'wt') as f:\n f.write(\"1. 2. 3. 
flop 4.\\n\")\n res = np.fromfile(path, dtype=float, sep=\" \")\n assert_equal(res, np.array([1., 2., 3.]))\n\n @dec.knownfailureif(string_to_longdouble_inaccurate, \"Need strtold_l\")\n def test_fromfile(self):\n with temppath() as path:\n with open(path, 'wt') as f:\n f.write(self.out)\n res = np.fromfile(path, dtype=np.longdouble, sep=\"\\n\")\n assert_equal(res, self.tgt)\n\n @dec.knownfailureif(string_to_longdouble_inaccurate, \"Need strtold_l\")\n def test_genfromtxt(self):\n with temppath() as path:\n with open(path, 'wt') as f:\n f.write(self.out)\n res = np.genfromtxt(path, dtype=np.longdouble)\n assert_equal(res, self.tgt)\n\n @dec.knownfailureif(string_to_longdouble_inaccurate, \"Need strtold_l\")\n def test_loadtxt(self):\n with temppath() as path:\n with open(path, 'wt') as f:\n f.write(self.out)\n res = np.loadtxt(path, dtype=np.longdouble)\n assert_equal(res, self.tgt)\n\n @dec.knownfailureif(string_to_longdouble_inaccurate, \"Need strtold_l\")\n def test_tofile_roundtrip(self):\n with temppath() as path:\n self.tgt.tofile(path, sep=\" \")\n res = np.fromfile(path, dtype=np.longdouble, sep=\" \")\n assert_equal(res, self.tgt)\n\n\n@in_foreign_locale\ndef test_fromstring_foreign():\n s = \"1.234\"\n a = np.fromstring(s, dtype=np.longdouble, sep=\" \")\n assert_equal(a[0], np.longdouble(s))\n\n\n@in_foreign_locale\ndef test_fromstring_foreign_sep():\n a = np.array([1, 2, 3, 4])\n b = np.fromstring(\"1,2,3,4,\", dtype=np.longdouble, sep=\",\")\n assert_array_equal(a, b)\n\n\n@in_foreign_locale\ndef test_fromstring_foreign_value():\n b = np.fromstring(\"1,234\", dtype=np.longdouble, sep=\" \")\n assert_array_equal(b[0], 1)\n\n\n# Conversions long double -> string\n\n\ndef test_repr_exact():\n o = 1 + np.finfo(np.longdouble).eps\n assert_(repr(o) != '1')\n\n\[email protected](longdouble_longer_than_double, \"BUG #2376\")\[email protected](string_to_longdouble_inaccurate, \"Need strtold_l\")\ndef test_format():\n o = 1 + np.finfo(np.longdouble).eps\n assert_(\"{0:.40g}\".format(o) != '1')\n\n\[email protected](longdouble_longer_than_double, \"BUG #2376\")\[email protected](string_to_longdouble_inaccurate, \"Need strtold_l\")\ndef test_percent():\n o = 1 + np.finfo(np.longdouble).eps\n assert_(\"%.40g\" % o != '1')\n\n\[email protected](longdouble_longer_than_double, \"array repr problem\")\[email protected](string_to_longdouble_inaccurate, \"Need strtold_l\")\ndef test_array_repr():\n o = 1 + np.finfo(np.longdouble).eps\n a = np.array([o])\n b = np.array([1], dtype=np.longdouble)\n if not np.all(a != b):\n raise ValueError(\"precision loss creating arrays\")\n assert_(repr(a) != repr(b))\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\" Modified version of build_clib that handles fortran source files.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nfrom glob import glob\nimport shutil\nfrom distutils.command.build_clib import build_clib as old_build_clib\nfrom distutils.errors import DistutilsSetupError, DistutilsError, \\\n DistutilsFileError\n\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer_group\nfrom numpy.distutils.misc_util import filter_sources, has_f_sources,\\\n has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \\\n get_numpy_include_dirs\n\n# Fix Python distutils bug sf #1718574:\n_l = old_build_clib.user_options\nfor _i in range(len(_l)):\n if _l[_i][0] in ['build-clib', 'build-temp']:\n _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:]\n#\n\nclass build_clib(old_build_clib):\n\n description = \"build C/C++/F libraries used by Python extensions\"\n\n user_options = old_build_clib.user_options + [\n ('fcompiler=', None,\n \"specify the Fortran compiler type\"),\n ('inplace', 'i', 'Build in-place'),\n ('parallel=', 'j',\n \"number of parallel jobs\"),\n ]\n\n boolean_options = old_build_clib.boolean_options + ['inplace']\n\n def initialize_options(self):\n old_build_clib.initialize_options(self)\n self.fcompiler = None\n self.inplace = 0\n self.parallel = None\n\n def finalize_options(self):\n if self.parallel:\n try:\n self.parallel = int(self.parallel)\n except ValueError:\n raise ValueError(\"--parallel/-j argument must be an integer\")\n old_build_clib.finalize_options(self)\n self.set_undefined_options('build', ('parallel', 'parallel'))\n\n def have_f_sources(self):\n for (lib_name, build_info) in self.libraries:\n if has_f_sources(build_info.get('sources', [])):\n return True\n return False\n\n def have_cxx_sources(self):\n for (lib_name, build_info) in self.libraries:\n if has_cxx_sources(build_info.get('sources', [])):\n return True\n return False\n\n def run(self):\n if not self.libraries:\n return\n\n # Make sure that library sources are complete.\n languages = []\n\n # Make sure that extension sources are complete.\n self.run_command('build_src')\n\n for (lib_name, build_info) in self.libraries:\n l = build_info.get('language', None)\n if l and l not in languages: languages.append(l)\n\n from distutils.ccompiler import new_compiler\n self.compiler = new_compiler(compiler=self.compiler,\n dry_run=self.dry_run,\n force=self.force)\n self.compiler.customize(self.distribution,\n need_cxx=self.have_cxx_sources())\n\n libraries = self.libraries\n self.libraries = None\n self.compiler.customize_cmd(self)\n self.libraries = libraries\n\n self.compiler.show_customization()\n\n if self.have_f_sources():\n from numpy.distutils.fcompiler import new_fcompiler\n self._f_compiler = new_fcompiler(compiler=self.fcompiler,\n verbose=self.verbose,\n dry_run=self.dry_run,\n force=self.force,\n requiref90='f90' in languages,\n c_compiler=self.compiler)\n if self._f_compiler is not None:\n self._f_compiler.customize(self.distribution)\n\n libraries = self.libraries\n self.libraries = None\n self._f_compiler.customize_cmd(self)\n self.libraries = libraries\n\n self._f_compiler.show_customization()\n else:\n self._f_compiler = None\n\n self.build_libraries(self.libraries)\n\n if self.inplace:\n for l in self.distribution.installed_libraries:\n libname = self.compiler.library_filename(l.name)\n source = os.path.join(self.build_clib, libname)\n target = os.path.join(l.target_dir, libname)\n self.mkpath(l.target_dir)\n shutil.copy(source, target)\n\n def 
get_source_files(self):\n self.check_library_list(self.libraries)\n filenames = []\n for lib in self.libraries:\n filenames.extend(get_lib_source_files(lib))\n return filenames\n\n def build_libraries(self, libraries):\n for (lib_name, build_info) in libraries:\n self.build_a_library(build_info, lib_name, libraries)\n\n def build_a_library(self, build_info, lib_name, libraries):\n # default compilers\n compiler = self.compiler\n fcompiler = self._f_compiler\n\n sources = build_info.get('sources')\n if sources is None or not is_sequence(sources):\n raise DistutilsSetupError((\"in 'libraries' option (library '%s'), \" +\n \"'sources' must be present and must be \" +\n \"a list of source filenames\") % lib_name)\n sources = list(sources)\n\n c_sources, cxx_sources, f_sources, fmodule_sources \\\n = filter_sources(sources)\n requiref90 = not not fmodule_sources or \\\n build_info.get('language', 'c')=='f90'\n\n # save source type information so that build_ext can use it.\n source_languages = []\n if c_sources: source_languages.append('c')\n if cxx_sources: source_languages.append('c++')\n if requiref90: source_languages.append('f90')\n elif f_sources: source_languages.append('f77')\n build_info['source_languages'] = source_languages\n\n lib_file = compiler.library_filename(lib_name,\n output_dir=self.build_clib)\n depends = sources + build_info.get('depends', [])\n if not (self.force or newer_group(depends, lib_file, 'newer')):\n log.debug(\"skipping '%s' library (up-to-date)\", lib_name)\n return\n else:\n log.info(\"building '%s' library\", lib_name)\n\n config_fc = build_info.get('config_fc', {})\n if fcompiler is not None and config_fc:\n log.info('using additional config_fc from setup script '\\\n 'for fortran compiler: %s' \\\n % (config_fc,))\n from numpy.distutils.fcompiler import new_fcompiler\n fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,\n verbose=self.verbose,\n dry_run=self.dry_run,\n force=self.force,\n requiref90=requiref90,\n c_compiler=self.compiler)\n if fcompiler is not None:\n dist = self.distribution\n base_config_fc = dist.get_option_dict('config_fc').copy()\n base_config_fc.update(config_fc)\n fcompiler.customize(base_config_fc)\n\n # check availability of Fortran compilers\n if (f_sources or fmodule_sources) and fcompiler is None:\n raise DistutilsError(\"library %s has Fortran sources\"\\\n \" but no Fortran compiler found\" % (lib_name))\n\n if fcompiler is not None:\n fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []\n fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []\n\n macros = build_info.get('macros')\n include_dirs = build_info.get('include_dirs')\n if include_dirs is None:\n include_dirs = []\n extra_postargs = build_info.get('extra_compiler_args') or []\n\n include_dirs.extend(get_numpy_include_dirs())\n # where compiled F90 module files are:\n module_dirs = build_info.get('module_dirs') or []\n module_build_dir = os.path.dirname(lib_file)\n if requiref90: self.mkpath(module_build_dir)\n\n if compiler.compiler_type=='msvc':\n # this hack works around the msvc compiler attributes\n # problem, msvc uses its own convention :(\n c_sources += cxx_sources\n cxx_sources = []\n\n objects = []\n if c_sources:\n log.info(\"compiling C sources\")\n objects = compiler.compile(c_sources,\n output_dir=self.build_temp,\n macros=macros,\n include_dirs=include_dirs,\n debug=self.debug,\n extra_postargs=extra_postargs)\n\n if cxx_sources:\n log.info(\"compiling C++ sources\")\n cxx_compiler = 
compiler.cxx_compiler()\n cxx_objects = cxx_compiler.compile(cxx_sources,\n output_dir=self.build_temp,\n macros=macros,\n include_dirs=include_dirs,\n debug=self.debug,\n extra_postargs=extra_postargs)\n objects.extend(cxx_objects)\n\n if f_sources or fmodule_sources:\n extra_postargs = []\n f_objects = []\n\n if requiref90:\n if fcompiler.module_dir_switch is None:\n existing_modules = glob('*.mod')\n extra_postargs += fcompiler.module_options(\\\n module_dirs, module_build_dir)\n\n if fmodule_sources:\n log.info(\"compiling Fortran 90 module sources\")\n f_objects += fcompiler.compile(fmodule_sources,\n output_dir=self.build_temp,\n macros=macros,\n include_dirs=include_dirs,\n debug=self.debug,\n extra_postargs=extra_postargs)\n\n if requiref90 and self._f_compiler.module_dir_switch is None:\n # move new compiled F90 module files to module_build_dir\n for f in glob('*.mod'):\n if f in existing_modules:\n continue\n t = os.path.join(module_build_dir, f)\n if os.path.abspath(f)==os.path.abspath(t):\n continue\n if os.path.isfile(t):\n os.remove(t)\n try:\n self.move_file(f, module_build_dir)\n except DistutilsFileError:\n log.warn('failed to move %r to %r' \\\n % (f, module_build_dir))\n\n if f_sources:\n log.info(\"compiling Fortran sources\")\n f_objects += fcompiler.compile(f_sources,\n output_dir=self.build_temp,\n macros=macros,\n include_dirs=include_dirs,\n debug=self.debug,\n extra_postargs=extra_postargs)\n else:\n f_objects = []\n\n objects.extend(f_objects)\n\n # assume that default linker is suitable for\n # linking Fortran object files\n compiler.create_static_lib(objects, lib_name,\n output_dir=self.build_clib,\n debug=self.debug)\n\n # fix library dependencies\n clib_libraries = build_info.get('libraries', [])\n for lname, binfo in libraries:\n if lname in clib_libraries:\n clib_libraries.extend(binfo.get('libraries', []))\n if clib_libraries:\n build_info['libraries'] = clib_libraries\n"
] | [
[
"numpy.lib.type_check.iscomplexobj",
"numpy.imag",
"numpy.lib.type_check.isreal",
"numpy.issubdtype",
"numpy.all",
"numpy.testing.assert_equal",
"numpy.lib.type_check.nan_to_num",
"numpy.lib.type_check.common_type",
"numpy.compat.long",
"numpy.lib.type_check.isrealobj",
"numpy.real",
"numpy.isnan",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.array",
"numpy.sometrue",
"numpy.lib.type_check.iscomplex",
"numpy.testing.run_module_suite",
"numpy.isfinite",
"numpy.lib.type_check.mintypecode",
"numpy.testing.assert_array_equal",
"numpy.lib.type_check.real_if_close",
"numpy.isscalar",
"numpy.isinf"
],
[
"numpy.dtype"
],
[
"numpy.core.numeric.concatenate",
"numpy.core.numeric.multiply",
"numpy.core.numeric.zeros",
"numpy.core.numeric.empty",
"numpy.core.atleast_3d",
"numpy.core.fromnumeric.product",
"numpy.core.numeric.array",
"numpy.core.numeric.asarray",
"numpy.core.numeric.outer",
"numpy.core.numeric.asanyarray",
"numpy.core.numeric.shape",
"numpy.core.numeric.isscalar",
"numpy.core.numeric.swapaxes",
"numpy.core.fromnumeric.reshape"
],
[
"numpy.matrix",
"numpy.testing.run_module_suite",
"numpy.eye",
"numpy.kron",
"numpy.ones",
"numpy.random.uniform"
],
[
"numpy.rollaxis",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.asarray",
"numpy.all",
"numpy.exp",
"numpy.square",
"numpy.arange",
"numpy.multiply.accumulate",
"numpy.finfo",
"numpy.zeros",
"numpy.linalg.lstsq",
"numpy.iterable",
"numpy.array",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"numpy.linalg.eigvalsh",
"numpy.empty"
],
[
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.fromfile",
"numpy.testing.dec.knownfailureif",
"numpy.finfo",
"numpy.testing.assert_array_equal",
"numpy.longdouble",
"numpy.all",
"numpy.fromstring",
"numpy.testing.assert_raises",
"numpy.testing.temppath",
"numpy.testing.assert_",
"numpy.compat.sixu",
"numpy.genfromtxt",
"numpy.array",
"numpy.loadtxt"
],
[
"numpy.distutils.misc_util.filter_sources",
"numpy.distutils.log.debug",
"numpy.distutils.misc_util.get_numpy_include_dirs",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.log.warn",
"numpy.distutils.misc_util.get_lib_source_files",
"numpy.distutils.log.info",
"numpy.distutils.fcompiler.new_fcompiler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.11",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yusonghust/gcn | [
"4cacba4bd3d889a2139b19385774b2ee1cde80d4"
] | [
"graph.py"
] | [
"# -*- coding: utf-8 -*-\nimport networkx as nx\nimport numpy as np\nfrom utils import sparse_to_tuple\nimport scipy.sparse as sp\n\nclass Graph():\n def __init__(self,edgelist,weighted,directed,labelfile,featurefile):\n self.edgelist = edgelist\n self.weighted = weighted\n self.directed = directed\n self.G = self.build_graph()\n self.node_list = list(self.G.nodes())\n self.look_up = {}\n self.node_size = 0\n for node in self.node_list:\n self.look_up[node] = self.node_size\n self.node_size += 1\n self.labels = self.read_node_labels(labelfile)\n if featurefile is None:\n self.features = np.identity(n=len(self.node_list))\n #scipy.sparse.coo_matrix: A sparse matrix in COOrdinate format.\n #Where A[i[k], j[k]] = data[k].\n self.features = sparse_to_tuple(sp.coo_matrix(self.features))\n else:\n self.features = self.read_node_features(featurefile)\n\n\n def build_graph(self):\n '''\n Reads the input network using networkx.\n '''\n if self.weighted:\n G = nx.read_edgelist(self.edgelist, nodetype=int, data=(('weight',float),), create_using=nx.DiGraph())\n else:\n G = nx.read_edgelist(self.edgelist, nodetype=int, create_using=nx.DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n\n if not self.directed:\n G = G.to_undirected()\n return G\n\n def read_node_labels(self,filename):\n '''\n read node labels\n '''\n fin = open(filename, 'r')\n while 1:\n l = fin.readline()\n if l == '':\n break\n vec = l.split()\n self.G.nodes[int(vec[0])]['label'] = vec[1:]\n fin.close()\n\n def read_node_features(self,filename):\n '''\n read node features\n '''\n fin = open(filename, 'r')\n for l in fin.readlines():\n vec = l.split()\n self.G.nodes[int(vec[0])]['feature'] = np.array([float(x) for x in vec[1:]])\n fin.close()\n\n\n"
] | [
[
"scipy.sparse.coo_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
joesider9/forecasting_library | [
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197",
"db07ff8f0f2693983058d49004f2fc6f8849d197"
] | [
"Fuzzy_clustering/ver_tf2/Models_predict_manager.py",
"Fuzzy_clustering/version3/project_manager/PredictModelManager/CombineModelPredict.py",
"Fuzzy_clustering/version2/sklearn_models/sklearn_models_skopt.py",
"Fuzzy_clustering/version3/FeatureSelectionManager/Feature_selection_linearsearch.py",
"Fuzzy_clustering/ver_tf2/Combine_predict_model.py",
"Fuzzy_clustering/ver_tf2/test_modules.py",
"Fuzzy_clustering/version2/feature_selection_manager/feature_selection_permutation.py",
"Fuzzy_clustering/version3/project_manager/ProbaDataManager.py",
"Fuzzy_clustering/ver_tf2/Models_train_manager (DESKTOP-NDKJEQV's conflicted copy 2020-09-23).py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport logging, shutil, glob\nimport pymongo, joblib\nfrom Fuzzy_clustering.ver_tf2.Clusterer import clusterer\nfrom Fuzzy_clustering.ver_tf2.Cluster_predict_regressors import cluster_predict\nfrom Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict\nfrom Fuzzy_clustering.ver_tf2.Combine_predict_model import Combine_overall_predict\nfrom Fuzzy_clustering.ver_tf2.util_database import write_database\n\nclass ModelPredictManager_ver2(object):\n\n def __init__(self, path_model):\n self.istrained = False\n self.path_model = path_model\n try:\n self.load()\n except:\n pass\n\n def init(self, static_data, data_variables, use_db=False):\n self.data_variables = data_variables\n self.static_data = static_data\n self.thres_split = static_data['clustering']['thres_split']\n self.thres_act = static_data['clustering']['thres_act']\n self.n_clusters = static_data['clustering']['n_clusters']\n self.rated = static_data['rated']\n self.var_imp = static_data['clustering']['var_imp']\n self.var_lin = static_data['clustering']['var_lin']\n self.var_nonreg = static_data['clustering']['var_nonreg']\n\n self.create_logger()\n self.use_db = use_db\n if use_db:\n self.db = self.open_db()\n\n\n def open_db(self):\n try:\n myclient = pymongo.MongoClient(\n \"mongodb://\" + self.static_data['url'] + \":\" + self.static_data['port'] + \"/\")\n\n project_db = myclient[self.static_data['_id']]\n except:\n self.logger.info('Cannot open Database')\n self.use_db = False\n project_db = None\n raise ConnectionError('Cannot open Database')\n self.logger.info('Open Database successfully')\n return project_db\n\n def load_data(self):\n data_path = self.static_data['path_data']\n X = pd.read_csv(os.path.join(data_path, 'dataset_X_test.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n if os.path.exists(os.path.join(data_path, 'dataset_y_test.csv')):\n y = pd.read_csv(os.path.join(data_path, 'dataset_y_test.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n else:\n y=None\n\n if os.path.exists(os.path.join(data_path, 'dataset_cnn_test.pickle')):\n X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn_test.pickle'))\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n else:\n X_cnn = np.array([])\n\n if os.path.exists(os.path.join(data_path, 'dataset_lstm_test.pickle')):\n X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm_test.pickle'))\n else:\n X_lstm = np.array([])\n\n self.logger.info('Data loaded successfully')\n return X, X_cnn, X_lstm, y\n\n\n def create_logger(self):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.INFO)\n handler = logging.FileHandler(os.path.join(self.static_data['path_project'], 'log_model_evaluation.log'), 'a')\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n self.logger.addHandler(handler)\n\n\n def predict_regressors(self, X_test, X_cnn_test, X_lstm_test, y_test=None):\n data_path = self.static_data['path_data']\n pred_cluster = dict()\n X_test = pd.DataFrame(self.sc.transform(X_test.values), columns=X_test.columns, index=X_test.index)\n if not hasattr(self, 'clusterer'):\n self.clusterer = clusterer(self.static_data['path_fuzzy_models'],\n self.static_data['clustering']['cluster_file'], self.static_data['type'])\n act_test = self.clusterer.compute_activations(X_test)\n act_test = 
self.check_if_all_nans(act_test)\n for clust in self.regressors.keys():\n if clust == 'Global':\n if len(self.regressors['Global']['models']) > 0:\n predict_module = global_predict(self.static_data)\n pred_cluster['Global'] = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm=X_lstm_test)\n if y_test is not None:\n pred_cluster['Global']['metrics'] = predict_module.evaluate(pred_cluster['Global'], self.scale_y.transform(y_test.values))\n pred_cluster['Global']['dates'] = X_test.index\n pred_cluster['Global']['index'] = np.arange(0, X_test.shape[0])\n else:\n dates = X_test.index[act_test[clust] >= self.thres_act]\n nind = np.where(act_test[clust] >= self.thres_act)[0]\n nind.sort()\n\n x = X_test.loc[dates]\n if y_test is not None:\n targ = y_test.loc[dates].values\n else:\n targ = None\n if len(X_cnn_test.shape) > 1:\n x_cnn = X_cnn_test[nind]\n else:\n x_cnn = np.array([])\n if len(X_lstm_test.shape) > 1:\n x_lstm = X_lstm_test[nind]\n else:\n x_lstm = np.array([])\n predict_module = cluster_predict(self.static_data, clust)\n pred_cluster[clust] = predict_module.predict(x.values, X_cnn=x_cnn, X_lstm=x_lstm)\n if targ is not None and targ.shape[0]>0:\n pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster[clust], self.scale_y.transform(targ))\n pred_cluster[clust]['dates'] = dates\n pred_cluster[clust]['index'] = nind\n predictions = dict()\n result_clust = pd.DataFrame()\n for clust in pred_cluster.keys():\n for method in pred_cluster[clust].keys():\n if not method in {'dates', 'index', 'metrics'}:\n if not method in predictions.keys():\n predictions[method] = pd.DataFrame(index=X_test.index, columns=[cl for cl in pred_cluster.keys()])\n predictions[method].loc[pred_cluster[clust]['dates'], clust] = pred_cluster[clust][method].ravel()\n elif method in {'metrics'}:\n result_clust = pd.concat([result_clust, pred_cluster[clust][method]['mae'].rename(clust)], axis=1)\n\n combine_overall = Combine_overall_predict(self.static_data)\n predictions_final = combine_overall.predict(pred_cluster, predictions)\n\n for method, pred in predictions_final.items():\n pred = self.scale_y.inverse_transform(pred.reshape(-1, 1))\n pred[np.where(pred<0)] = 0\n predictions_final[method] = pred\n\n if y_test is not None:\n result_clust.to_csv(os.path.join(data_path, 'result_of_clusters.csv'))\n\n return predictions_final\n\n def compute_metrics(self, pred, y):\n if self.rated is None:\n rated = y.ravel()\n else:\n rated = self.rated\n err = np.abs(pred.ravel() - y.ravel()) / rated\n sse = np.sum(np.square(pred.ravel() - y.ravel()))\n rms = np.sqrt(np.mean(np.square(err)))\n mae = np.mean(err)\n mse = sse / y.shape[0]\n\n return [sse, rms, mae, mse]\n\n def evaluate(self, pred_all, y):\n result = pd.DataFrame(index=[method for method in pred_all.keys()], columns=['sse', 'rms', 'mae', 'mse'])\n for method, pred in pred_all.items():\n if isinstance(pred, pd.DataFrame):\n result.loc[method] = self.compute_metrics(pred.values, y)\n else:\n result.loc[method] = self.compute_metrics(pred, y)\n\n return result\n\n def predict(self):\n if self.istrained:\n X, X_cnn, X_lstm, y = self.load_data()\n\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n X_cnn = X_cnn[index]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm)\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, 
index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n return predictions_final\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def predict_online(self, X, X_cnn= np.array([]), X_lstm= np.array([])):\n if len(X_cnn.shape)>1:\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n if self.istrained:\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = X.index[np.where(X['flux'] > 1e-8)[0]]\n X = X.loc[index]\n X_cnn = X_cnn[np.where(X['flux'] > 1e-8)[0]]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm)\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n return predictions_final\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def evaluate_all(self):\n data_path = self.static_data['path_data']\n if self.istrained:\n X, X_cnn, X_lstm, y = self.load_data()\n y_test = y.copy()\n indices = X.index\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n index = indices[index]\n else:\n index = indices\n\n predictions_final_temp = self.predict_regressors(X, X_cnn, X_lstm, y)\n\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, index=indices, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n\n if y_test is not None:\n result_all = self.evaluate(predictions_final, y_test.values)\n result_all.to_csv(os.path.join(data_path, 'result_final.csv'))\n joblib.dump(predictions_final, os.path.join(data_path, 'predictions_final.pickle'))\n y_test.to_csv(os.path.join(data_path, 'target_test.csv'))\n else:\n raise ModuleNotFoundError('Model %s is not trained', self.static_data['_id'])\n\n def check_if_all_nans(self, activations):\n\n\n if activations.isna().all(axis=1).any() == True:\n indices = activations.index[activations.isna().all(axis=1).to_numpy().ravel()]\n if indices.shape[0]>50:\n raise RuntimeError('Too many nans. 
Please check your model')\n for ind in indices:\n act = activations.loc[ind]\n clust = act.idxmax()\n activations.loc[ind, clust] = 0.1\n\n return activations\n\n def load(self):\n if os.path.exists(os.path.join(self.path_model, 'manager' + '.pickle')):\n try:\n f = open(os.path.join(self.path_model, 'manager' + '.pickle'), 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n if 'path_model' in tmp_dict.keys():\n del tmp_dict['path_model']\n self.__dict__.update(tmp_dict)\n except:\n raise ValueError('Cannot find model for %s', self.path_model)\n else:\n raise ValueError('Cannot find model for %s', self.path_model)\n\nif __name__ == '__main__':\n from util_database import write_database\n from Fuzzy_clustering.ver_tf2.Projects_train_manager import ProjectsTrainManager\n\n static_data = write_database()\n project_manager = ProjectsTrainManager(static_data)\n project_manager.initialize()\n project_manager.create_datasets(project_manager.data_eval, test=True)\n project = [pr for pr in project_manager.group_static_data if pr['_id'] == 'Lach'][0]\n static_data = project['static_data']\n\n model = ModelPredictManager(static_data['path_model'])\n model.init(project['static_data'], project_manager.data_variables)\n model.evaluate_all()",
"import os\nimport numpy as np\nimport joblib\nfrom Fuzzy_clustering.version3.project_manager.PredictModelManager.Sklearn_combine_predict import sklearn_model_predict\n\nclass CombineModelPredict(object):\n def __init__(self, static_data):\n self.static_data = static_data\n self.istrained = False\n self.model_dir = os.path.join(self.static_data['path_model'], 'Combine_module')\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n\n self.model_type = self.static_data['type']\n self.combine_methods = self.static_data['combine_methods']\n methods = [method for method in self.static_data['project_methods'].keys() if\n self.static_data['project_methods'][method] == True]\n\n\n\n try:\n self.load(self.model_dir)\n except:\n pass\n self.methods = []\n for method in methods:\n if method == 'RBF_ALL_CNN':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN', 'RBF-CNN'])\n elif method == 'RBF_ALL':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN'])\n else:\n self.methods.append(method)\n self.methods += self.combine_methods\n self.weight_size_full = len(self.methods)\n self.weight_size = len(self.combine_methods)\n self.rated = self.static_data['rated']\n self.n_jobs = self.static_data['sklearn']['njobs']\n self.data_dir = self.static_data['path_data']\n\n def bcp_predict(self, X, w):\n preds = []\n for inp in X:\n inp=inp.reshape(-1,1)\n mask=~np.isnan(inp)\n pred = np.matmul(w[mask.T]/np.sum(w[mask.T]), inp[mask])\n preds.append(pred)\n\n return np.array(preds)\n\n def predict(self, predictions):\n if self.istrained==True:\n pred_combine = dict()\n self.combine_methods = [method for method in self.combine_methods if method in predictions.keys()]\n combine_method = 'average'\n for method in self.methods:\n pred = predictions[method].mean(axis=1).values.astype('float').reshape(-1, 1)\n pred[np.where(pred < 0)] = 0\n pred_combine['average_' + method] = pred\n\n combine_method = 'bcp'\n for method in self.combine_methods:\n if 'bcp_'+method in self.models.keys():\n pred = self.bcp_predict(predictions[method].values.astype('float'), self.models['bcp_'+method])\n pred[np.where(pred < 0)] = 0\n pred_combine['bcp_' + method] = pred\n\n for method in self.combine_methods:\n X_pred = predictions[method].values.astype('float')\n X_pred[np.where(np.isnan(X_pred))] = 0\n mlp_model = sklearn_model_predict(self.model_dir + '/' + method, self.rated, 'mlp', self.n_jobs)\n if mlp_model.istrained == True:\n pred = mlp_model.predict(X_pred)\n pred[np.where(pred < 0)] = 0\n pred_combine['mlp_' + method] = pred\n else:\n raise ImportError('Combine overall model seems not trained')\n\n return pred_combine\n\n def load(self, pathname):\n cluster_dir = os.path.join(pathname)\n if os.path.exists(os.path.join(cluster_dir, 'combine_models.pickle')):\n try:\n f = open(os.path.join(cluster_dir, 'combine_models.pickle'), 'rb')\n tmp_dict = joblib.load(f)\n f.close()\n del tmp_dict['model_dir']\n self.__dict__.update(tmp_dict)\n except:\n raise ImportError('Cannot open RLS model')\n else:\n raise ImportError('Cannot find RLS model')",
"import logging\nimport os\nimport pickle\n\nimport joblib\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.svm import NuSVR\nfrom sklearn.svm import SVR\n\nfrom Fuzzy_clustering.ver_tf2.skopt import forest_minimize\nfrom Fuzzy_clustering.ver_tf2.skopt.space import Categorical\nfrom Fuzzy_clustering.ver_tf2.skopt.space import Integer\nfrom Fuzzy_clustering.ver_tf2.skopt.space import Real\nfrom Fuzzy_clustering.ver_tf2.skopt.utils import use_named_args\n\n\nclass sklearn_model(object):\n\n def __init__(self, static_data, cluster_dir, rated, model_type, njobs, init_params=None, path_group=None):\n self.static_data = static_data\n self.path_group = path_group\n self.init_params = init_params\n self.njobs = njobs\n self.rated = rated\n self.cluster = os.path.basename(cluster_dir)\n self.model_dir = os.path.join(cluster_dir, str.upper(model_type))\n self.istrained = False\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n self.model_type = model_type\n self.optimizer = 'optuna'\n\n logger = logging.getLogger('skopt_train_' + '_' + self.model_type + self.cluster)\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(os.path.join(cluster_dir, 'log_skopt_train_' + self.cluster + '.log'), 'w')\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n logger.addHandler(handler)\n self.logger = logger\n try:\n self.load(self.model_dir)\n except:\n pass\n\n def create_model(self):\n if 'xgb' in str.lower(self.model_type):\n\n params = [\n Real(0.4, 1, name=\"colsample_bylevel\"),\n Real(0.4, 1, name=\"colsample_bytree\"),\n Real(0.01, 1, name=\"gamma\"),\n Real(0.00001, 1, name=\"learning_rate\"),\n Integer(1, 100, name=\"max_depth\"),\n Real(1, 10, name=\"min_child_weight\"),\n Real(0.001, 2, name=\"reg_alpha\"),\n Real(0.4, 1, name=\"subsample\"),\n ]\n model = xgb.XGBRegressor(objective=\"reg:squarederror\", random_state=42, n_jobs=self.njobs)\n elif 'rf' in str.lower(self.model_type):\n params = [\n Integer(1, 80, name=\"max_depth\"),\n Categorical(['auto', 'sqrt', 'log2', None, 0.8, 0.6, 0.4], name=\"max_features\"),\n Integer(1, 250, name=\"min_samples_leaf\"),\n Integer(2, 250, name=\"min_samples_split\"),\n ]\n model = RandomForestRegressor(n_estimators=500, random_state=42)\n elif str.lower(self.model_type) == 'svm':\n\n params = [\n Real(1e-3, 10, name=\"gamma\"),\n Real(1e-2, 1e5, name=\"C\"),\n ]\n model = SVR(max_iter=1000000)\n elif str.lower(self.model_type) == 'nusvm':\n\n params = [\n Real(1e-3, 10, name=\"gamma\"),\n Real(1e-2, 1e5, name=\"C\"),\n Real(0.01, 0.99, name=\"nu\"),\n ]\n model = NuSVR(max_iter=1000000)\n elif 'mlp' in str.lower(self.model_type):\n params = [\n Integer(5, 800, name=\"hidden_layer_sizes\"),\n Real(1e-5, 1e-1, name=\"alpha\")]\n\n model = MLPRegressor(max_iter=4000, early_stopping=True)\n\n return params, model\n\n def apply_params(self, X, y, model, **params):\n\n model.set_params(**params)\n model.fit(X, y.ravel())\n return model\n\n def compute_metrics(self, pred, y, rated):\n if rated is None:\n rated = y.ravel()\n else:\n rated = 1\n err = np.abs(pred.ravel() - y.ravel()) / rated\n sse = np.sum(np.square(pred.ravel() - y.ravel()))\n rms = np.sqrt(np.mean(np.square(err)))\n mae = np.mean(err)\n mse = sse / y.shape[0]\n\n return [sse, rms, mae, mse]\n\n 
def fit_model1(self, model, params, cvs):\n model.set_params(**params)\n rms_val = []\n rms_test = []\n for cv in cvs:\n model.fit(cv[0], cv[1].ravel())\n ypred = model.predict(cv[2]).ravel()\n if self.rated is None:\n acc = np.mean(np.abs(ypred - cv[3].ravel()) / cv[3].ravel())\n else:\n acc = np.mean(np.abs(ypred - cv[3].ravel()))\n rms_val.append(acc)\n ypred = model.predict(cv[4]).ravel()\n if self.rated is None:\n acc = np.mean(np.abs(ypred - cv[5].ravel()) / cv[5].ravel())\n else:\n acc = np.mean(np.abs(ypred - cv[5].ravel()))\n rms_test.append(acc)\n\n return 0.4 * np.mean(rms_val) + 0.6 * np.mean(rms_test), np.mean(rms_test)\n\n def train(self, cvs, init_params=[]):\n X = np.vstack((cvs[0][0], cvs[0][2], cvs[0][4]))\n\n if len(cvs[0][1].shape) == 1 and len(cvs[0][5].shape) == 1:\n y = np.hstack((cvs[0][1], cvs[0][3], cvs[0][5]))\n else:\n y = np.vstack((cvs[0][1], cvs[0][3], cvs[0][5])).ravel()\n self.D, self.N = X.shape\n print('training...')\n print('%s training...begin for %s ', self.model_type, self.cluster)\n self.logger.info('%s training...begin for %s ', self.model_type, self.cluster)\n self.logger.info('Begin train for model %s', self.model_type)\n\n params, model = self.create_model()\n rated = self.rated\n\n @use_named_args(params)\n def fit_model(**params):\n model.set_params(**params)\n rms_val = []\n rms_test = []\n for cv in cvs:\n model.fit(cv[0], cv[1].ravel())\n if rated is None:\n ypred = model.predict(cv[2]).ravel()\n rms_val.append(np.sqrt(np.mean(np.square(np.abs(ypred - cv[3].ravel()) / cv[3].ravel()))))\n ypred = model.predict(cv[4]).ravel()\n rms_test.append(np.sqrt(np.mean(np.square(np.abs(ypred - cv[5].ravel()) / cv[5].ravel()))))\n else:\n ypred = model.predict(cv[2]).ravel()\n rms_val.append(np.sqrt(np.mean(np.square(np.abs(ypred - cv[3].ravel())))))\n ypred = model.predict(cv[4]).ravel()\n rms_test.append(np.sqrt(np.mean(np.square(np.abs(ypred - cv[5].ravel())))))\n\n return 0.4 * np.mean(rms_val) + 0.6 * np.mean(rms_test)\n\n gp_result = forest_minimize(func=fit_model,\n dimensions=params,\n n_calls=30,\n n_random_starts=41,\n x0=self.init_params,\n n_jobs=self.njobs)\n best_params = dict()\n for param, value in zip(params, gp_result.x):\n best_params[param.name] = value\n self.best_params = best_params\n self.model = model\n self.accuracy, self.acc_test = self.fit_model1(self.model, self.best_params, cvs)\n\n self.model.set_params(**best_params)\n self.model.fit(X, y.ravel())\n self.logger.info('Best params')\n self.logger.info(self.best_params)\n self.logger.info('Final mae %s', str(self.acc_test))\n self.logger.info('Final rms %s', str(self.accuracy))\n self.logger.info('finish train for model %s', self.model_type)\n self.istrained = True\n self.save(self.model_dir)\n\n return self.to_dict()\n\n def train_TL(self, cvs, params):\n self.best_params = params\n X = np.vstack((cvs[0][0], cvs[0][2], cvs[0][4]))\n\n if len(cvs[0][1].shape) == 1 and len(cvs[0][5].shape) == 1:\n y = np.hstack((cvs[0][1], cvs[0][3], cvs[0][5]))\n else:\n y = np.vstack((cvs[0][1], cvs[0][3], cvs[0][5])).ravel()\n self.D, self.N = X.shape\n print('training...')\n print('%s training...begin for %s ', self.model_type, self.cluster)\n self.logger.info('%s training...begin for %s ', self.model_type, self.cluster)\n self.logger.info('Begin train for model %s', self.model_type)\n\n params, model = self.create_model()\n\n self.model = model\n self.accuracy, self.acc_test = self.fit_model1(self.model, self.best_params, cvs)\n self.model.set_params(**self.best_params)\n 
self.model.fit(X, y.ravel())\n self.logger.info('Best params')\n self.logger.info(self.best_params)\n self.logger.info('Final mae %s', str(self.acc_test))\n self.logger.info('Final rms %s', str(self.accuracy))\n self.logger.info('finish train for model %s', self.model_type)\n self.istrained = True\n self.save(self.model_dir)\n\n return self.to_dict()\n\n def to_dict(self):\n dict = {}\n for k in self.__dict__.keys():\n if k not in ['logger', 'model']:\n dict[k] = self.__dict__[k]\n return dict\n\n def predict(self, X):\n self.load(self.model_dir)\n return self.model.predict(X)\n\n def load(self, model_dir):\n self.model = joblib.load(os.path.join(model_dir, 'model.pkl'))\n if os.path.exists(os.path.join(model_dir, 'model_all' + '.pickle')):\n try:\n f = open(os.path.join(model_dir, 'model_all' + '.pickle'), 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n del tmp_dict['model_dir']\n self.__dict__.update(tmp_dict)\n except:\n raise ImportError('Cannot open model_skopt model')\n else:\n raise ImportError('Cannot find model_skopt model')\n\n def save(self, model_dir):\n joblib.dump(self.model, os.path.join(model_dir, 'model.pkl'))\n f = open(os.path.join(model_dir, 'model_all' + '.pickle'), 'wb')\n dict = {}\n for k in self.__dict__.keys():\n if k not in ['logger']:\n dict[k] = self.__dict__[k]\n pickle.dump(dict, f)\n f.close()\n",
"import numpy as np\nimport pandas as pd\nimport logging, os, joblib\nfrom sklearn.linear_model import ElasticNetCV, LinearRegression\nfrom sklearn.decomposition import PCA\nimport copy\n\nclass FS(object):\n def __init__(self, static_data, model_path, njobs, inner_jobs, path_group=None):\n self.static_data = static_data\n self.path_group =path_group\n self.njobs=njobs\n self.inner_jobs=inner_jobs\n self.log_dir=os.path.join(model_path, 'FS/PERM')\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n def reduce_dim(self, cvs):\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n for i in range(3):\n cvs[i][0] = cvs[i][0][:, self.features]\n cvs[i][2] = cvs[i][2][:, self.features]\n cvs[i][4] = cvs[i][4][:, self.features]\n\n X_train = cvs[0][0]\n y_train = cvs[0][1].reshape(-1, 1)\n X_val = cvs[0][2]\n y_val = cvs[0][3].reshape(-1, 1)\n X_test = cvs[0][4]\n y_test = cvs[0][5].reshape(-1, 1)\n\n X_train = np.vstack((X_train, X_val, X_test))\n y_train = np.vstack((y_train, y_val, y_test))\n\n\n reduction = np.linspace(48, self.N_tot, self.N_tot - 48) / np.logspace(0, 0.3, self.N_tot - 48)\n n_components = reduction[int(X_train.shape[1]-48-1)]\n pca = PCA(n_components=int(n_components))\n pca.fit(X_train)\n\n return pca\n\n def fit(self, cvs):\n # logger = logging.getLogger('log_fs_permutation')\n # logger.setLevel(logging.INFO)\n # handler = logging.FileHandler(os.path.join(self.log_dir, 'log_fs_perm.log'), 'w')\n # handler.setLevel(logging.INFO)\n #\n # # create a logging format\n # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # handler.setFormatter(formatter)\n #\n # # add the handlers to the logger\n # logger.addHandler(handler)\n\n print()\n print('Training the model (Fitting to the training data) ')\n # logger.info('Training the feature extraction ')\n\n method = 'rf'\n scale_y = joblib.load(os.path.join(self.static_data['path_data'], 'Y_scaler.pickle'))\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n X_train = cvs[0][0]\n y_train = cvs[0][1].reshape(-1, 1)\n X_val = cvs[0][2]\n y_val = cvs[0][3].reshape(-1, 1)\n X_test = cvs[0][4]\n y_test = scale_y.inverse_transform(cvs[0][5].reshape(-1, 1)).ravel()\n rated = self.static_data['rated']\n if rated is None:\n rated =y_test\n else:\n rated = 1\n X_train = np.vstack((X_train, X_val))\n y_train = np.vstack((y_train, y_val))\n\n self.N_tot = X_train.shape[1]\n\n if np.std(y_train.ravel())>1e-3:\n regressor = ElasticNetCV(cv=5, random_state=42, max_iter=500000)\n else:\n regressor = LinearRegression()\n\n regressor.fit(X_train[:, 0].reshape(-1, 1), y_train.ravel())\n # regressor = sklearn_model(self.static_data, self.log_dir, 1, method, njobs, FS=True, path_group=self.path_group)\n # regressor.train(cvs)\n\n features = np.arange(cvs[0][0].shape[1])\n\n pred = regressor.predict(X_test[:, 0].reshape(-1, 1)).reshape(-1, 1)\n acc_test = np.mean(np.abs(scale_y.inverse_transform(pred).ravel() - y_test) / rated)\n\n # cv_result = regressor.cv_results.nlargest(10, 'acc')['params'].to_list()\n flag = True\n\n cvs_temp = 
copy.deepcopy(cvs)\n\n remove_features = []\n keep_features = [0]\n for f in features[1:]:\n\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n features_temp = keep_features + [f]\n if np.std(y_train.ravel()) > 1e-3:\n regressor = ElasticNetCV(cv=5, random_state=42, max_iter=500000)\n else:\n regressor = LinearRegression()\n\n regressor.fit(X_train[:, features_temp], y_train.ravel())\n pred = regressor.predict(X_test[:, features_temp]).reshape(-1, 1)\n acc_test_new = np.mean(np.abs(scale_y.inverse_transform(pred).ravel() - y_test.ravel()) /rated)\n\n # cv_result = reg_temp.cv_results.nlargest(5, 'acc')['params'].to_list()\n if (acc_test_new - acc_test) < 0 :\n # logger.info('Remove feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n print('ADD feature ',str(f),' accuracy:', str(acc_test_new))\n # logger.info('ADD feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n keep_features.append(f)\n acc_test = acc_test_new\n else:\n remove_features.append(f)\n flag = True\n while flag==True and len(remove_features)>0:\n flag=False\n rm_feats = copy.deepcopy(remove_features)\n for f in rm_feats:\n\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n features_temp = keep_features + [f]\n if np.std(y_train.ravel()) > 1e-3:\n regressor = ElasticNetCV(cv=5, random_state=42, max_iter=500000)\n else:\n regressor = LinearRegression()\n regressor.fit(X_train[:, features_temp], y_train.ravel())\n pred = regressor.predict(X_test[:, features_temp]).reshape(-1, 1)\n acc_test_new = np.mean(np.abs(scale_y.inverse_transform(pred).ravel() - y_test.ravel()) /rated)\n\n # cv_result = reg_temp.cv_results.nlargest(5, 'acc')['params'].to_list()\n if (acc_test_new - acc_test) < 0 :\n # logger.info('Remove feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n print('ADD feature ',str(f),' accuracy:', str(acc_test_new))\n # logger.info('ADD feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n keep_features.append(f)\n acc_test = acc_test_new\n remove_features.remove(f)\n flag = True\n features = np.array(keep_features)\n self.features = features\n\n\n if self.features.shape[0]>48:\n pca = self.reduce_dim(cvs_temp)\n else:\n pca = None\n if self.features.shape[0] <= 1:\n self.features = np.stack((self.features, remove_features[:2-self.features.shape[0]])).ravel()\n # logger.info('Number of variables %s', str(self.features.shape[0]))\n # logger.info('Finish the feature extraction ')\n return features, pca\n#\n# def test_fs_permute(cvs, X_test1, y_test1, cluster_dir):\n#\n# logger = logging.getLogger('log_rbf_cnn_test.log')\n# logger.setLevel(logging.INFO)\n# handler = logging.FileHandler(os.path.join(cluster_dir, 'log_rbf_cnn_test.log'), 'a')\n# handler.setLevel(logging.INFO)\n#\n# # create a logging format\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# handler.setFormatter(formatter)\n#\n# # add the handlers to the logger\n# logger.addHandler(handler)\n#\n# rated = None\n#\n# static_data = write_database()\n#\n# logger.info('Permutation Evaluation')\n# logger.info('/n')\n# method = 
'svm'\n# model_sklearn = sklearn_model(self.static_data, cluster_dir, rated, method, static_data['sklearn']['njobs'])\n# model_sklearn.train(cvs)\n# pred = model_sklearn.predict(X_test1)\n#\n# metrics_svm = model_sklearn.compute_metrics(pred, y_test1, rated)\n# logger.info('before feature selection metrics')\n# logger.info('sse, %s rms %s, mae %s, mse %s', *metrics_svm)\n#\n# fs = FS(cluster_dir, static_data['sklearn']['njobs'])\n# features = fs.fit(cvs)\n# logger.info('Number of variables %s', str(features.shape[0]))\n#\n# for i in range(3):\n# cvs[i][0] = cvs[i][0][:,features]\n# cvs[i][2] = cvs[i][2][:,features]\n# cvs[i][4] = cvs[i][4][:,features]\n#\n# model_sklearn = sklearn_model(self.static_data, cluster_dir, rated, method, static_data['sklearn']['njobs'])\n# model_sklearn.train(cvs)\n# pred = model_sklearn.predict(X_test1[:,features])\n#\n# metrics_svm = model_sklearn.compute_metrics(pred, y_test1, rated)\n# logger.info('After feature selection metrics')\n# logger.info('sse, %s rms %s, mae %s, mse %s', *metrics_svm)\n",
"import os\nimport numpy as np\nimport joblib\nfrom Fuzzy_clustering.ver_tf2.Sklearn_predict import sklearn_model_predict\nfrom Fuzzy_clustering.ver_tf2.LSTM_predict_3d import LSTM_3d_predict\n\nclass Combine_overall_predict(object):\n def __init__(self, static_data):\n self.istrained = False\n self.model_dir = os.path.join(static_data['path_model'], 'Combine_module')\n try:\n self.load(self.model_dir)\n except:\n pass\n self.static_data = static_data\n self.model_type = static_data['type']\n self.combine_methods = static_data['combine_methods']\n self.methods = []\n for method in static_data['project_methods'].keys():\n if self.static_data['project_methods'][method]['Global'] == True and static_data['project_methods'][method][\n 'status'] == 'train':\n if method == 'ML_RBF_ALL_CNN':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN', 'RBF-CNN'])\n elif method == 'ML_RBF_ALL':\n self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN'])\n else:\n self.methods.append(method)\n self.methods += self.combine_methods\n\n self.rated = static_data['rated']\n self.n_jobs = 2 * static_data['njobs']\n\n self.data_dir = self.static_data['path_data']\n\n def bcp_predict(self, X, w):\n preds = []\n for inp in X:\n inp=inp.reshape(-1,1)\n mask=~np.isnan(inp)\n pred = np.matmul(w[mask.T]/np.sum(w[mask.T]), inp[mask])\n preds.append(pred)\n\n return np.array(preds)\n\n\n def lstm_predict(self, X, full=False):\n if full:\n cluster_dir = os.path.join(self.model_dir, 'LSTM_best')\n else:\n cluster_dir = os.path.join(self.model_dir, 'LSTM_combine')\n\n lstm_model = LSTM_3d_predict(self.static_data, self.rated, cluster_dir)\n if lstm_model.istrained==True:\n model = lstm_model.predict(X)\n else:\n raise ImportError('Cannot find LSTM for overall combine')\n\n return model\n\n def predict(self, pred_cluster, predictions, lstm=False):\n if self.istrained==True:\n pred_combine = dict()\n\n combine_method = 'average'\n for method in self.methods:\n if method in predictions.keys():\n pred = predictions[method].mean(axis=1).values.astype('float').reshape(-1, 1)\n pred[np.where(pred < 0)] = 0\n pred_combine['average_' + method] = pred\n\n if hasattr(self, 'models'):\n combine_method = 'bcp'\n for method in self.combine_methods:\n if 'bcp_'+method in self.models.keys():\n pred = self.bcp_predict(predictions[method].values.astype('float'), self.models['bcp_'+method])\n pred[np.where(pred < 0)] = 0\n pred_combine['bcp_' + method] = pred\n\n for method in self.combine_methods:\n X_pred = predictions[method].values.astype('float')\n X_pred[np.where(np.isnan(X_pred))] = 0\n X_pred /= 20\n mlp_model = sklearn_model_predict(self.model_dir + '/' + method, self.rated, 'mlp', self.n_jobs)\n if mlp_model.istrained == True:\n pred = mlp_model.predict(X_pred)\n pred[np.where(pred < 0)] = 0\n pred_combine['mlp_' + method] = 20 * pred\n if lstm:\n X = np.array([])\n combine_method = 'lstm_full'\n N = predictions['average'].values.shape[0]\n for clust in pred_cluster.keys():\n x = np.array([])\n for method in pred_cluster[clust]:\n if method in self.methods:\n tmp = np.zeros([N, 1])\n try:\n tmp[pred_cluster[clust]['index']] = pred_cluster[clust][method]\n except:\n tmp[pred_cluster[clust]['index']] = pred_cluster[clust][method].reshape(-1, 1)\n if x.shape[0] == 0:\n x = tmp\n else:\n x = np.hstack((x, tmp))\n if X.shape[0] == 0:\n X = np.copy(x)\n elif len(X.shape) == 2:\n X = np.stack((X, x))\n else:\n X = np.vstack((X, x[np.newaxis, :, :]))\n X = np.transpose(X, [1, 0, 2]).astype('float')\n\n pred = self.lstm_predict(X, 
full=True)\n pred[np.where(pred < 0)] = 0\n pred_combine[combine_method] = 20 * pred\n\n X = np.array([])\n combine_method = 'lstm_combine'\n\n for clust in pred_cluster.keys():\n x = np.array([])\n for method in pred_cluster[clust]:\n if method in self.combine_methods:\n tmp = np.zeros([N, 1])\n try:\n tmp[pred_cluster[clust]['index']] = pred_cluster[clust][method]\n except:\n tmp[pred_cluster[clust]['index']] = pred_cluster[clust][method].reshape(-1, 1)\n if x.shape[0] == 0:\n x = tmp\n else:\n x = np.hstack((x, tmp))\n if X.shape[0] == 0:\n X = np.copy(x)\n elif len(X.shape) == 2:\n X = np.stack((X, x))\n else:\n X = np.vstack((X, x[np.newaxis, :, :]))\n X = np.transpose(X, [1, 0, 2]).astype('float')\n pred = self.lstm_predict(X)\n pred[np.where(pred < 0)] = 0\n pred_combine[combine_method] = 20 * pred\n else:\n raise ImportError('Combine overall model seems not trained')\n\n return pred_combine\n\n def load(self, pathname):\n cluster_dir = os.path.join(pathname)\n if os.path.exists(os.path.join(cluster_dir, 'combine_models.pickle')):\n try:\n f = open(os.path.join(cluster_dir, 'combine_models.pickle'), 'rb')\n tmp_dict = joblib.load(f)\n f.close()\n del tmp_dict['model_dir']\n self.__dict__.update(tmp_dict)\n except:\n raise ImportError('Cannot open RLS model')\n else:\n raise ImportError('Cannot find RLS model')",
"import joblib, os\nimport pandas as pd\nimport numpy as np\nfrom Fuzzy_clustering.ver_tf2.Models_train_manager import ModelTrainManager\n\nmodel_path = 'D:/models/my_projects/APE_net_ver1/pv/APE_net/model_ver0'\nrule = 'rule.8'\n\ncluster_dir = os.path.join(model_path, 'Regressor_layer/' + rule)\ndata_path = os.path.join(cluster_dir, 'data')\n\nstatic_data = joblib.load(os.path.join(model_path, 'static_data.pickle'))\nmodel = ModelTrainManager(path_model=model_path)\nmodel.load()\n\n\ndef split_test_data(X, y, act, X_cnn=np.array([]), X_lstm=np.array([]), test_indices=None):\n N_tot, D = X.shape\n if not test_indices is None:\n X_test = X.loc[test_indices['dates_test']]\n y_test = y.loc[test_indices['dates_test']]\n act_test = act.loc[test_indices['dates_test']]\n\n X = X.loc[test_indices['dates_train']]\n y = y.loc[test_indices['dates_train']]\n act = act.loc[test_indices['dates_train']]\n\n if len(X_cnn.shape) > 1:\n X_cnn_test = X_cnn[test_indices['indices_test']]\n X_cnn = X_cnn[test_indices['indices_train']]\n else:\n X_cnn_test = np.array([])\n\n if len(X_lstm.shape) > 1:\n X_lstm_test = X_lstm[test_indices['indices_test']]\n X_lstm = X_lstm[test_indices['indices_train']]\n else:\n X_lstm_test = np.array([])\n else:\n X_test = pd.DataFrame([])\n y_test = pd.DataFrame([])\n act_test = pd.DataFrame([])\n X_cnn_test = np.array([])\n X_lstm_test = np.array([])\n\n N_test = X_test.shape[0]\n return X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test\n\ndef load_data():\n X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n act = pd.read_csv(os.path.join(data_path, 'dataset_act.csv'), index_col=0, header=0, parse_dates=True,\n dayfirst=True)\n\n if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):\n X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))\n if X_cnn.shape[1] == 6:\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n else:\n X_cnn = np.array([])\n\n if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):\n X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))\n else:\n X_lstm = np.array([])\n if os.path.exists(os.path.join(data_path, 'test_indices.pickle')):\n test_indices = joblib.load(os.path.join(data_path, 'test_indices.pickle'))\n else:\n test_indices = None\n\n return X, y, act, X_cnn, X_lstm, test_indices\n\n\ndef test_combine_module():\n from Fuzzy_clustering.ver_tf2.Combine_module_train import combine_model\n\n X, y, act, X_cnn, X_lstm, test_indices = load_data()\n X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test = split_test_data(X, y,\n act,\n X_cnn=X_cnn,\n X_lstm=X_lstm,\n test_indices=test_indices)\n comb_model = combine_model(static_data, cluster_dir, model.sc)\n comb_model.istrained = False\n comb_model.train(X_test, y_test, act_test, X_cnn_test, X_lstm_test)\n\ndef test_cluster_module():\n from Fuzzy_clustering.ver_tf2.Cluster_train_regressors import cluster_train\n\n cluster_model = cluster_train(static_data, rule, model.sc)\n cluster_model.istrained=False\n cluster_model.fit()\n\nif __name__ == '__main__':\n # test_combine_module()\n test_cluster_module()",
"import copy\nimport joblib\nimport os\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nfrom Fuzzy_clustering.version2.sklearn_models.sklearn_models_optuna import sklearn_model\n\n\nclass FS(object):\n def __init__(self, static_data, model_path, njobs, inner_jobs, path_group=None):\n self.static_data = static_data\n self.path_group = path_group\n self.njobs = njobs\n self.inner_jobs = inner_jobs\n self.log_dir = os.path.join(model_path, 'FS/PERM')\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n def reduce_dim(self, cvs):\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n for i in range(3):\n cvs[i][0] = cvs[i][0][:, self.features]\n cvs[i][2] = cvs[i][2][:, self.features]\n cvs[i][4] = cvs[i][4][:, self.features]\n\n X_train = cvs[0][0]\n y_train = cvs[0][1].reshape(-1, 1)\n X_val = cvs[0][2]\n y_val = cvs[0][3].reshape(-1, 1)\n X_test = cvs[0][4]\n y_test = cvs[0][5].reshape(-1, 1)\n\n X_train = np.vstack((X_train, X_val, X_test))\n y_train = np.vstack((y_train, y_val, y_test))\n\n reduction = np.linspace(48, self.N_tot, self.N_tot - 48) / np.logspace(0, 0.3, self.N_tot - 48)\n n_components = reduction[int(X_train.shape[1] - 48 - 1)]\n pca = PCA(n_components=n_components)\n pca.fit(X_train)\n\n return pca\n\n def fit(self, cvs):\n # logger = logging.getLogger('log_fs_permutation')\n # logger.setLevel(logging.INFO)\n # handler = logging.FileHandler(os.path.join(self.log_dir, 'log_fs_perm.log'), 'w')\n # handler.setLevel(logging.INFO)\n #\n # # create a logging format\n # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # handler.setFormatter(formatter)\n #\n # # add the handlers to the logger\n # logger.addHandler(handler)\n\n print()\n print('Training the model (Fitting to the training data) ')\n # logger.info('Training the feature extraction ')\n\n method = 'rf'\n\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n regressor = sklearn_model(self.static_data, self.log_dir, 1, method, njobs, FS=True, path_group=self.path_group)\n regressor.train(cvs)\n\n self.N_tot = cvs[0][0].shape[1]\n\n features = np.arange(cvs[0][0].shape[1])\n np.random.shuffle(features)\n # features=features[np.argsort(regressor.model.feature_importances_)[::-1]]\n\n acc_test = regressor.acc_test\n\n # cv_result = regressor.cv_results.nlargest(10, 'acc')['params'].to_list()\n flag = True\n\n cvs_temp = copy.deepcopy(cvs)\n\n remove_features = []\n keep_features = []\n unchecked = np.copy(features)\n while flag:\n for f in unchecked:\n\n ncpus = joblib.load(os.path.join(self.path_group, 'total_cpus.pickle'))\n gpu_status = joblib.load(os.path.join(self.path_group, 'gpu_status.pickle'))\n\n njobs = int(ncpus - gpu_status)\n cpu_status = njobs\n joblib.dump(cpu_status, os.path.join(self.path_group, 'cpu_status.pickle'))\n\n features_temp = np.hstack(\n (np.array(keep_features), np.delete(unchecked, np.where(unchecked == f)))).astype('int')\n reg_temp = sklearn_model(self.static_data, os.path.join(self.log_dir, 'temp'), 1, method, njobs,\n FS=True, 
path_group=self.path_group)\n for i in range(3):\n cvs_temp[i][0] = copy.deepcopy(cvs[i][0][:, features_temp])\n cvs_temp[i][2] = copy.deepcopy(cvs[i][2][:, features_temp])\n cvs_temp[i][4] = copy.deepcopy(cvs[i][4][:, features_temp])\n reg_temp.train(cvs_temp)\n\n # cv_result = reg_temp.cv_results.nlargest(5, 'acc')['params'].to_list()\n if (reg_temp.acc_test - acc_test) < -0.005:\n # logger.info('Remove feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n print('Remove feature ', str(f), ' accuracy: ', str(reg_temp.acc_test))\n remove_features.append(f)\n unchecked = np.delete(unchecked, np.where(unchecked == f))\n acc_test = reg_temp.acc_test\n break\n else:\n print('ADD feature ', str(f), ' accuracy:', str(reg_temp.acc_test))\n # logger.info('ADD feature %s accuracy: %s', str(f), str(reg_temp.acc_test))\n keep_features.append(f)\n unchecked = np.delete(unchecked, np.where(unchecked == f))\n\n if unchecked.shape[0] == 0:\n flag = False\n else:\n np.random.shuffle(unchecked)\n\n features = np.array(keep_features)\n self.features = features\n\n if self.features.shape[0] > 48:\n pca = self.reduce_dim(cvs)\n else:\n pca = None\n # logger.info('Number of variables %s', str(self.features.shape[0]))\n # logger.info('Finish the feature extraction ')\n return features, pca\n#\n# def test_fs_permute(cvs, X_test1, y_test1, cluster_dir):\n#\n# logger = logging.getLogger('log_rbf_cnn_test.log')\n# logger.setLevel(logging.INFO)\n# handler = logging.FileHandler(os.path.join(cluster_dir, 'log_rbf_cnn_test.log'), 'a')\n# handler.setLevel(logging.INFO)\n#\n# # create a logging format\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# handler.setFormatter(formatter)\n#\n# # add the handlers to the logger\n# logger.addHandler(handler)\n#\n# rated = None\n#\n# static_data = write_database()\n#\n# logger.info('Permutation Evaluation')\n# logger.info('/n')\n# method = 'svm'\n# model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])\n# model_sklearn.train(cvs)\n# pred = model_sklearn.predict(X_test1)\n#\n# metrics_svm = model_sklearn.compute_metrics(pred, y_test1, rated)\n# logger.info('before feature selection metrics')\n# logger.info('sse, %s rms %s, mae %s, mse %s', *metrics_svm)\n#\n# fs = FS(cluster_dir, static_data['sklearn']['njobs'])\n# features = fs.fit(cvs)\n# logger.info('Number of variables %s', str(features.shape[0]))\n#\n# for i in range(3):\n# cvs[i][0] = cvs[i][0][:,features]\n# cvs[i][2] = cvs[i][2][:,features]\n# cvs[i][4] = cvs[i][4][:,features]\n#\n# model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])\n# model_sklearn.train(cvs)\n# pred = model_sklearn.predict(X_test1[:,features])\n#\n# metrics_svm = model_sklearn.compute_metrics(pred, y_test1, rated)\n# logger.info('After feature selection metrics')\n# logger.info('sse, %s rms %s, mae %s, mse %s', *metrics_svm)\n",
"import joblib, os\nimport pandas as pd\nimport numpy as np\nfrom Fuzzy_clustering.version3.project_manager.PredictModelManager.FullClusterPredictManager import FullClusterPredictManager\nfrom Fuzzy_clustering.version3.project_manager.PredictModelManager.FullModelPredictManager import FullModelPredictManager\nfrom Fuzzy_clustering.version3.project_manager.Proba_Model_manager import proba_model_manager\n\n\nclass ProbaDataManager(object):\n\n def __init__(self, static_data):\n self.path_model = static_data['path_model']\n self.static_data = static_data\n\n def prepare_data(self):\n clusters_predict_manager = FullClusterPredictManager(self.path_model, self.static_data)\n pred_cluster, predictions_cluster, y_all, y, index, index_all = clusters_predict_manager.predict_clusters(test = False)\n model_predict_manager = FullModelPredictManager(self.path_model, self.static_data)\n predictions_final_temp = model_predict_manager.predict_model(pred_cluster, predictions_cluster, scale=False)\n predictions_final = dict()\n for method, pred in predictions_final_temp.items():\n pred_temp = pd.DataFrame(0, index=index_all, columns=[method])\n pred_temp.loc[index, method] = pred\n predictions_final[method] = pred_temp\n proba_model = proba_model_manager(self.static_data)\n if not proba_model.istrained:\n from sklearn.model_selection import train_test_split\n scale_y = joblib.load(os.path.join(self.static_data['path_data'], 'Y_scaler.pickle'))\n X_pred = np.array([])\n for method, pred in predictions_final.items():\n if X_pred.shape[0] == 0:\n X_pred = scale_y.transform(predictions_final[method].reshape(-1, 1))\n else:\n X_pred = np.hstack((X_pred, scale_y.transform(predictions_final[method].reshape(-1, 1))))\n X_pred[np.where(X_pred < 0)] = 0\n\n cvs = []\n for _ in range(3):\n X_train1, X_test1, y_train1, y_test1 = train_test_split(X_pred, y_all, test_size=0.15)\n X_train, X_val, y_train, y_val = train_test_split(X_train1, y_train1, test_size=0.15)\n cvs.append([X_train, y_train, X_val, y_val, X_test1, y_test1])\n\n joblib.dump(X_pred, os.path.join(self.static_data['path_data'], 'cvs_proba.pickle'))\n",
"import os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport logging, shutil, glob\nimport pymongo, joblib\nfrom joblib import Parallel, delayed\nfrom Fuzzy_clustering.ver_tf2.Clusterer_optimize_deep import cluster_optimize, clusterer\nfrom sklearn.preprocessing import MinMaxScaler\nfrom Fuzzy_clustering.ver_tf2.Cluster_train_regressors import cluster_train\nfrom Fuzzy_clustering.ver_tf2.Global_train_regressor import global_train\nfrom Fuzzy_clustering.ver_tf2.Cluster_train_regressor_TL import cluster_train_tl\nfrom Fuzzy_clustering.ver_tf2.Global_train_regressor_TL import global_train_tl\nfrom Fuzzy_clustering.ver_tf2.NWP_sampler import nwp_sampler\nfrom Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict\nfrom Fuzzy_clustering.ver_tf2.Cluster_predict_regressors import cluster_predict\nfrom Fuzzy_clustering.ver_tf2.Combine_train_model import Combine_train\n\nimport time\n# for timing\nfrom contextlib import contextmanager\nfrom timeit import default_timer\n\n\n\n@contextmanager\ndef elapsed_timer():\n start = default_timer()\n elapser = lambda: default_timer() - start\n yield lambda: elapser()\n end = default_timer()\n elapser = lambda: end-start\n\n\nclass ModelTrainManager(object):\n\n def __init__(self, path_model):\n self.istrained = False\n self.path_model = path_model\n try:\n self.load()\n except:\n pass\n\n def init(self, static_data, data_variables, use_db=False):\n self.data_variables = data_variables\n self.static_data = static_data\n self.thres_split = static_data['clustering']['thres_split']\n self.thres_act = static_data['clustering']['thres_act']\n self.n_clusters = static_data['clustering']['n_clusters']\n self.rated = static_data['rated']\n self.var_imp = static_data['clustering']['var_imp']\n self.var_lin = static_data['clustering']['var_lin']\n self.var_nonreg = static_data['clustering']['var_nonreg']\n\n self.create_logger()\n self.use_db = use_db\n if use_db:\n self.db = self.open_db()\n\n def open_db(self):\n try:\n myclient = pymongo.MongoClient(\"mongodb://\" + self.static_data['url'] + \":\" + self.static_data['port'] + \"/\")\n\n project_db = myclient[self.static_data['_id']]\n except:\n self.logger.info('Cannot open Database')\n self.use_db=False\n project_db=None\n raise ConnectionError('Cannot open Database')\n self.logger.info('Open Database successfully')\n return project_db\n\n def create_logger(self):\n self.logger = logging.getLogger(self.static_data['_id'])\n self.logger.setLevel(logging.INFO)\n handler = logging.FileHandler(os.path.join(self.path_model, 'log_model.log'), 'a')\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n self.logger.addHandler(handler)\n\n def merge_old_data(self,X, y, X_cnn=np.array([]), X_lstm=np.array([])):\n data_path=self.static_data['path_data']\n if os.path.exists(os.path.join(data_path,'dataset_X.csv')):\n X1 = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n y1 = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n try:\n X=X.append(X1)\n y=y.append(y1)\n X=X.round(4)\n y=y.round(4)\n X['target'] = y\n X=X.drop_duplicates()\n y = X['target'].copy(deep=True)\n y = y.to_frame()\n y.columns=['target']\n X = X.drop(columns='target')\n except ImportError:\n raise AssertionError('Cannot merge the 
historical data with the new ones')\n X.to_csv(os.path.join(data_path, 'dataset_X.csv'))\n y.to_csv(os.path.join(data_path, 'dataset_y.csv'))\n if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):\n X_3d = joblib.load(os.path.join(self.static_data['path_data'], 'dataset_cnn.pickle'))\n X_cnn = np.vstack([X_cnn, X_3d])\n joblib.dump(X_cnn, os.path.join(self.static_data['path_data'], 'dataset_cnn.pickle'))\n if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):\n X_2d = joblib.load(os.path.join(self.static_data['path_data'], 'dataset_lstm.pickle'))\n X_lstm = np.vstack([X_lstm, X_2d])\n joblib.dump(X_lstm, os.path.join(self.static_data['path_data'], 'dataset_lstm.pickle'))\n\n self.logger.info('Data merged successfully')\n\n return X, y, X_cnn, X_lstm\n\n def load_data(self):\n data_path = self.static_data['path_data']\n X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)\n\n if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):\n X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))\n X_cnn = X_cnn.transpose([0, 2, 3, 1])\n else:\n X_cnn = np.array([])\n\n if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):\n X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))\n else:\n X_lstm = np.array([])\n\n self.logger.info('Data loaded successfully')\n return X, y, X_cnn, X_lstm\n\n def backup(self,hor=None):\n #TODO write to backup checking the version of the model (if there are previous versions, write current model in different folder)\n if hor is None:\n for filename in glob.glob(os.path.join(self.path_model, '*.*')):\n shutil.copy(filename, self.static_data['path_backup'])\n else:\n for filename in glob.glob(os.path.join(self.path_model, '*.*')):\n shutil.copy(filename, os.path.join(self.static_data['path_backup'],'hor_'+str(hor)))\n\n def scale(self,X):\n self.sc = MinMaxScaler(feature_range=(0, 1)).fit(X.values)\n self.save()\n return pd.DataFrame(self.sc.transform(X.values),columns=X.columns,index=X.index)\n\n def train_fuzzy_clustering(self, X, y1):\n N, D = X.shape\n n_split = int(np.round(N * 0.7))\n X_test = X.iloc[n_split + 1:]\n y_test = y1.iloc[n_split + 1:]\n\n X_train = X.iloc[:n_split]\n y_train = y1.iloc[:n_split]\n optimizer = cluster_optimize(self.static_data)\n if self.rated is None:\n rated = None\n else:\n rated = 20\n if self.static_data['type'] == 'fa':\n optimizer.run(X_train, y_train, X_test, y_test, rated, num_samples=300)\n else:\n optimizer.run(X_train, y_train, X_test, y_test, rated)\n self.save()\n\n def find_clusters_for_training(self, X_new, train):\n act_new = self.clusterer.compute_activations(X_new)\n\n if len(self.var_nonreg) > 0:\n X_new = X_new.drop(columns=self.var_nonreg).copy(deep=True)\n\n train_clust = []\n if not len(train) == 0:\n for clust in train:\n indices = act_new[clust].index[act_new[clust] >= self.thres_act].tolist()\n if len(indices) > 0:\n inputs = X_new.loc[act_new[clust] >= self.thres_act]\n cluster_dir = os.path.join(self.path_model, 'Regressor_layer/' + clust)\n if not os.path.exists(cluster_dir):\n os.makedirs(cluster_dir)\n if not os.path.exists(os.path.join(cluster_dir, 'data')):\n os.makedirs(os.path.join(cluster_dir, 'data'))\n if not inputs.shape[0] == 0:\n train_clust.append(clust)\n else:\n for clust in act_new.columns:\n indices = act_new[clust].index[act_new[clust] >= 
self.thres_act].tolist()\n if len(indices) > 0:\n inputs = X_new.loc[act_new[clust] >= self.thres_act]\n cluster_dir = os.path.join(self.path_model, 'Regressor_layer/' + clust)\n if not os.path.exists(cluster_dir):\n os.makedirs(cluster_dir)\n if not os.path.exists(os.path.join(cluster_dir, 'data')):\n os.makedirs(os.path.join(cluster_dir, 'data'))\n if not inputs.shape[0] == 0:\n train_clust.append(clust)\n\n return train_clust\n\n def split_test_data(self, activations, X1, y1, X_cnn, X_lstm):\n split_indices = []\n for clust in activations.columns:\n indices = activations[clust].index[activations[clust] >= self.thres_act].tolist()\n if len(indices) > 0:\n if len(indices) > 1000:\n n_split = int(np.round(len(indices) * 0.75))\n split_indices.append(indices[n_split + 1])\n else:\n n_split = int(np.round(len(indices) * 0.85))\n split_indices.append(indices[n_split + 1])\n split_test = pd.Series(split_indices).min()\n\n X_test = X1.loc[split_test:]\n if X_test.shape[0] > 0.35 * X1.shape[0]:\n split_test = None\n self.split_test = split_test\n return split_test\n\n def save_global_data(self, activations, X1, y1, X_cnn, X_lstm):\n # VARIABLES USED ONLY FOR CLUSTERING\n if len(self.var_nonreg) > 0:\n X1 = X1.drop(columns=self.var_nonreg).copy(deep=True)\n split_test = self.split_test\n\n self.logger.info('Save datasets for global model')\n\n cluster_dir=os.path.join(self.static_data['path_model'], 'Global_regressor')\n cluster_data_dir = os.path.join(cluster_dir, 'data')\n if not os.path.exists(cluster_data_dir):\n os.makedirs(cluster_data_dir)\n act = activations\n inputs = X1\n targets = y1\n\n inputs = inputs.drop(targets.index[pd.isnull(targets).values.ravel()])\n targets = targets.drop(targets.index[pd.isnull(targets).values.ravel()])\n\n targets = targets.drop(inputs.index[pd.isnull(inputs).any(1).values.ravel()])\n inputs = inputs.drop(inputs.index[pd.isnull(inputs).any(1).values.ravel()])\n if not split_test is None:\n test_indices = dict()\n test_indices['dates_train'] = inputs.index[inputs.index < split_test]\n test_ind = np.where(inputs.index < split_test)[0]\n test_ind.sort()\n test_indices['indices_train'] = test_ind\n\n test_indices['dates_test'] = inputs.index[inputs.index >= split_test]\n test_ind = np.where(inputs.index>=split_test)[0]\n test_ind.sort()\n test_indices['indices_test'] = test_ind\n joblib.dump(test_indices, os.path.join(cluster_data_dir, 'test_indices.pickle'))\n\n if not self.static_data['train_online']:\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n self.logger.info('Data saved for global model')\n if len(X_cnn.shape) > 1:\n x_cnn = X_cnn\n joblib.dump(x_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n\n if len(X_lstm.shape) > 1:\n x_lstm = X_lstm\n joblib.dump(x_lstm, os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n else:\n if not os.path.exists(os.path.join(cluster_data_dir, 'dataset_X.csv')):\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n if len(X_cnn.shape) > 1:\n x_cnn = X_cnn\n joblib.dump(x_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n if len(X_lstm.shape) > 1:\n x_lstm = X_lstm\n joblib.dump(x_lstm, os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n self.logger.info('Data saved for for global model')\n else:\n 
self.logger.info('load data from previous train loop for global model')\n x_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'), index_col=0, header=[0], parse_dates=True, dayfirst=True)\n y_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'), index_col=0, header=[0],\n parse_dates=True, dayfirst=True)\n act_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'), index_col=0, header=[0],\n parse_dates=True, dayfirst=True)\n try:\n self.logger.info('Merge data from previous train loop for global model')\n inputs = x_old.append(inputs)\n targets = y_old.append(targets)\n act = act_old.append(act)\n inputs = inputs.round(6)\n targets = targets.round(6)\n act = act.round(6)\n inputs['target'] = targets\n inputs['activation'] = act\n inputs = inputs.drop_duplicates()\n targets = inputs['target'].copy(deep=True)\n act = inputs['activation'].copy(deep=True)\n targets = targets.to_frame()\n act = act.to_frame()\n targets.columns = ['target']\n act.columns = ['activation']\n inputs = inputs.drop(columns=['target', 'activation'])\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n if os.path.exists(os.path.join(cluster_data_dir, 'dataset_cnn.pickle')):\n x_cnn = joblib.load(os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n X_cnn = np.vstack([x_cnn, X_cnn])\n joblib.dump(X_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n if os.path.exists(os.path.join(cluster_data_dir, 'dataset_lstm.pickle')):\n x_lstm = joblib.load(os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n X_lstm = np.vstack([x_lstm, X_lstm])\n joblib.dump(X_lstm, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n self.logger.info('Data merged and saved for global model')\n except ImportError:\n print('Cannot merge the historical data with the new ones')\n self.logger.info('/n')\n\n def save_cluster_data(self, activations, X1, y1, X_cnn, X_lstm, train_clust_list):\n # VARIABLES USED ONLY FOR CLUSTERING\n if len(self.var_nonreg) > 0:\n X1 = X1.drop(columns=self.var_nonreg).copy(deep=True)\n split_test = self.split_test_data(activations, X1, y1, X_cnn, X_lstm)\n\n for clust in train_clust_list:\n self.logger.info('Save datasets for ' + clust)\n\n cluster_dir = os.path.join(self.path_model, 'Regressor_layer/' + clust)\n cluster_data_dir = os.path.join(cluster_dir, 'data')\n if (not os.path.exists(os.path.join(cluster_data_dir, 'dataset_X.csv')) and not self.static_data['train_online']) or \\\n (self.static_data['recreate_datasets'] and not self.static_data['train_online']):\n nind = np.where(activations[clust] >= self.thres_act)[0]\n nind.sort()\n\n act = activations.loc[activations[clust] >= self.thres_act, clust]\n inputs = X1.loc[activations[clust] >= self.thres_act]\n targets = y1.loc[activations[clust] >= self.thres_act]\n\n inputs = inputs.drop(targets.index[pd.isnull(targets).values.ravel()])\n targets = targets.drop(targets.index[pd.isnull(targets).values.ravel()])\n\n targets = targets.drop(inputs.index[pd.isnull(inputs).any(1).values.ravel()])\n inputs = inputs.drop(inputs.index[pd.isnull(inputs).any(1).values.ravel()])\n if not split_test is None:\n test_indices = dict()\n test_indices['dates_train'] = inputs.index[inputs.index < split_test]\n test_ind = np.where(inputs.index < split_test)[0]\n test_ind.sort()\n test_indices['indices_train'] = test_ind\n\n test_indices['dates_test'] = inputs.index[inputs.index >= 
split_test]\n test_ind = np.where(inputs.index>=split_test)[0]\n test_ind.sort()\n test_indices['indices_test'] = test_ind\n joblib.dump(test_indices, os.path.join(cluster_data_dir, 'test_indices.pickle'))\n\n if not self.static_data['train_online']:\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n self.logger.info('Data saved for cluster %s', clust)\n if len(X_cnn.shape) > 1:\n x_cnn = X_cnn[nind]\n joblib.dump(x_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n\n if len(X_lstm.shape) > 1:\n x_lstm = X_lstm[nind]\n joblib.dump(x_lstm, os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n elif self.static_data['train_online']:\n if not os.path.exists(os.path.join(cluster_data_dir, 'dataset_X.csv')):\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n if len(X_cnn.shape) > 1:\n x_cnn = X_cnn[nind]\n joblib.dump(x_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n if len(X_lstm.shape) > 1:\n x_lstm = X_lstm[nind]\n joblib.dump(x_lstm, os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n self.logger.info('Data saved for cluster %s', clust)\n else:\n self.logger.info('load data from previous train loop for cluster %s', clust)\n x_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'), index_col=0, header=[0], parse_dates=True, dayfirst=True)\n y_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'), index_col=0, header=[0],\n parse_dates=True, dayfirst=True)\n act_old = pd.read_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'), index_col=0, header=[0],\n parse_dates=True, dayfirst=True)\n try:\n self.logger.info('Merge data from previous train loop for cluster %s', clust)\n inputs = x_old.append(inputs)\n targets = y_old.append(targets)\n act = act_old.append(act)\n inputs = inputs.round(6)\n targets = targets.round(6)\n act = act.round(6)\n inputs['target'] = targets\n inputs['activation'] = act\n inputs = inputs.drop_duplicates()\n targets = inputs['target'].copy(deep=True)\n act = inputs['activation'].copy(deep=True)\n targets = targets.to_frame()\n act = act.to_frame()\n targets.columns = ['target']\n act.columns = ['activation']\n inputs = inputs.drop(columns=['target', 'activation'])\n inputs.to_csv(os.path.join(cluster_data_dir, 'dataset_X.csv'))\n targets.to_csv(os.path.join(cluster_data_dir, 'dataset_y.csv'))\n act.to_csv(os.path.join(cluster_data_dir, 'dataset_act.csv'))\n if os.path.exists(os.path.join(cluster_data_dir, 'dataset_cnn.pickle')):\n x_cnn = joblib.load(os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n X_cnn = np.vstack([x_cnn, X_cnn])\n joblib.dump(X_cnn, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n if os.path.exists(os.path.join(cluster_data_dir, 'dataset_lstm.pickle')):\n x_lstm = joblib.load(os.path.join(cluster_data_dir, 'dataset_lstm.pickle'))\n X_lstm = np.vstack([x_lstm, X_lstm])\n joblib.dump(X_lstm, os.path.join(cluster_data_dir, 'dataset_cnn.pickle'))\n\n self.logger.info('Data merged and saved for cluster %s', clust)\n except ImportError:\n print('Cannot merge the historical data with the new ones')\n self.logger.info('/n')\n\n def train(self, train=[]):\n\n X, y, X_cnn, X_lstm = self.load_data()\n if y.isna().any().values[0]:\n X = X.drop(y.index[np.where(y.isna())[0]])\n if len(X_cnn.shape) > 
1:\n X_cnn = np.delete(X_cnn, np.where(y.isna())[0], axis=0)\n if len(X_lstm.shape) > 1:\n X_lstm = np.delete(X_lstm, np.where(y.isna())[0], axis=0)\n y = y.drop(y.index[np.where(y.isna())[0]])\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux']>1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n X_new=X.copy(deep=True)\n\n if self.static_data['train_online']:\n X, y, X_cnn, X_lstm = self.merge_old_data(X, y, X_cnn=X_cnn, X_lstm=X_lstm)\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n X1 = self.scale(X)\n\n self.scale_y = MinMaxScaler(feature_range=(.1, 20)).fit(y.values)\n\n X_new = pd.DataFrame(self.sc.transform(X_new.values), columns=X_new.columns, index=X_new.index)\n\n y1 = pd.DataFrame(self.scale_y.transform(y.values), columns=y.columns, index=y.index)\n\n if not self.static_data['clustering']['is_clustering_trained'] and not os.path.exists(os.path.join(self.static_data['path_fuzzy_models'],self.static_data['clustering']['cluster_file'])):\n self.train_fuzzy_clustering(X1, y1)\n\n self.clusterer=clusterer(self.static_data['path_fuzzy_models'],self.static_data['clustering']['cluster_file'],self.static_data['type'])\n self.logger.info('Clusters created')\n\n train_clust_list = self.find_clusters_for_training(X_new, train)\n\n activations = self.clusterer.compute_activations(X1)\n\n self.save_cluster_data(activations, X1, y1, X_cnn, X_lstm, train_clust_list)\n self.save_global_data(activations, X1, y1, X_cnn, X_lstm)\n\n # Obsolete\n # if self.static_data['type'] in {'wind', 'pv'}:\n # create_nwp_sampler = nwp_sampler(self.static_data)\n # if create_nwp_sampler.istrained == False:\n # create_nwp_sampler.train(X1, X_cnn, gpu_id=self.static_data['CNN']['gpus'][0])\n\n self.regressors=dict()\n glob_regressor = global_train(self.static_data, self.sc)\n if glob_regressor.istrained==False:\n self.logger.info('Global regressor is training..')\n self.regressors['Global'] = glob_regressor.fit()\n self.logger.info('Global regressor trained..')\n self.save()\n else:\n self.regressors['Global'] = glob_regressor.to_dict()\n with elapsed_timer() as eval_elapsed:\n for clust in train_clust_list:\n t = time.process_time()\n print('Begin training of ' + clust)\n self.logger.info('Begin training of ' + clust)\n\n clust_regressor = cluster_train(self.static_data, clust, self.sc)\n if clust_regressor.istrained==False:\n self.regressors[clust] = clust_regressor.fit()\n else:\n self.regressors[clust] = clust_regressor.to_dict()\n\n self.save()\n\n print('time %s' % str(eval_elapsed() / 60))\n self.logger.info('time %s', str((eval_elapsed() - t) / 60))\n print('finish training of ' + clust)\n self.logger.info('finish training of ' + clust)\n\n t=eval_elapsed()\n self.predict_regressors(X1, y1, X_cnn, X_lstm)\n\n combine_model_ = Combine_train(self.static_data)\n self.combine_model = combine_model_.train()\n\n self.istrained = True\n self.full_trained = True\n\n self.save()\n\n def train_tl_rules(self, static_data, clust, gpu, rule_model):\n\n clust_regressor = cluster_train_tl(static_data, clust, self.sc, gpu)\n regressor = clust_regressor.fit(rule_model=rule_model)\n return (clust, regressor)\n\n def train_TL(self, path_model_tl, train=[]):\n model_tl = self.load_to_transfer(path_model_tl)\n static_data_tl = self.static_data['tl_project']['static_data']\n self.sc = model_tl['sc']\n 
self.scale_y = model_tl['scale_y']\n X, y, X_cnn, X_lstm = self.load_data()\n if y.isna().any().values[0]:\n X = X.drop(y.index[np.where(y.isna())[0]])\n if len(X_cnn.shape) > 1:\n X_cnn = np.delete(X_cnn, np.where(y.isna())[0], axis=0)\n if len(X_lstm.shape) > 1:\n X_lstm = np.delete(X_lstm, np.where(y.isna())[0], axis=0)\n y = y.drop(y.index[np.where(y.isna())[0]])\n if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n X_new = X.copy(deep=True)\n\n if self.static_data['train_online']:\n X, y, X_cnn, X_lstm = self.merge_old_data(X, y, X_cnn=X_cnn, X_lstm=X_lstm)\n if self.static_data['type'] == 'pv':\n index = np.where(X['flux'] > 1e-8)[0]\n X = X.iloc[index]\n y = y.iloc[index]\n X_cnn = X_cnn[index]\n X1 = self.scale(X)\n # Obsolete\n # create_nwp_sampler = nwp_sampler(self.static_data)\n # if create_nwp_sampler.istrained == False:\n # create_nwp_sampler.train(X1, X_cnn, gpu_id=self.static_data['CNN']['gpus'][0])\n\n self.scale_y = MinMaxScaler(feature_range=(.1, 20)).fit(y.values)\n\n X_new = pd.DataFrame(self.sc.transform(X_new.values), columns=X_new.columns, index=X_new.index)\n\n y1 = pd.DataFrame(self.scale_y.transform(y.values), columns=y.columns, index=y.index)\n\n fuzzy_file = os.path.join(static_data_tl['path_fuzzy_models'], static_data_tl['clustering']['cluster_file'])\n fmodel = joblib.load(fuzzy_file)\n joblib.dump(fmodel, os.path.join(self.static_data['path_fuzzy_models'], self.static_data['clustering']['cluster_file']))\n\n self.clusterer = clusterer(self.static_data['path_fuzzy_models'],\n self.static_data['clustering']['cluster_file'], self.static_data['type'])\n self.logger.info('Clusters created')\n\n train_clust_list = self.find_clusters_for_training(X_new, train)\n\n activations = self.clusterer.compute_activations(X1)\n\n self.save_cluster_data(activations, X1, y1, X_cnn, X_lstm, train_clust_list)\n self.save_global_data(activations, X1, y1, X_cnn, X_lstm)\n self.regressors = dict()\n\n gpus = np.tile(self.static_data['CNN']['gpus'], len(train_clust_list))\n glob_regressor = global_train_tl(self.static_data, self.sc, gpus[0])\n if glob_regressor.istrained == False:\n self.logger.info('Global regressor is training..')\n\n self.regressors['Global'] = glob_regressor.fit(rule_model=model_tl['regressors']['Global'])\n self.logger.info('Global regressor trained')\n else:\n self.regressors['Global'] = glob_regressor.to_dict()\n with elapsed_timer() as eval_elapsed:\n for k, clust in enumerate(train_clust_list):\n t = time.process_time()\n print('Begin training of ' +clust)\n self.logger.info('Begin training of ' + clust)\n\n\n clust_regressor = cluster_train_tl(self.static_data, clust, self.sc, gpus[k])\n if clust_regressor.istrained==False:\n self.regressors[clust] = clust_regressor.fit(rule_model=model_tl['regressors'][clust])\n else:\n self.regressors[clust] = clust_regressor.to_dict()\n\n print('time %s' % str(eval_elapsed() / 60))\n self.logger.info('time %s', str((eval_elapsed() - t) / 60))\n print('finish training of ' + clust)\n self.logger.info('finish training of ' + clust)\n self.save()\n t = eval_elapsed()\n self.predict_regressors(X1, y1, X_cnn, X_lstm)\n combine_model_ = Combine_train(self.static_data)\n self.combine_model = combine_model_.train()\n self.istrained = True\n self.full_trained = True\n self.save()\n\n def predict_regressors(self, X1, y1, X_cnn, X_lstm):\n data_path = self.static_data['path_data']\n if not 
self.split_test is None:\n X_test = X1.loc[X1.index >= self.split_test]\n y_test = y1.loc[X1.index >= self.split_test]\n test_ind = np.where(X1.index >= self.split_test)[0]\n test_ind.sort()\n if len(X_cnn.shape) > 1:\n X_cnn_test = X_cnn[test_ind]\n else:\n X_cnn_test = np.array([])\n if len(X_lstm.shape) > 1:\n X_lstm_test = X_lstm[test_ind]\n else:\n X_lstm_test = np.array([])\n\n pred_cluster = dict()\n act_test = self.clusterer.compute_activations(X_test)\n for clust in self.regressors.keys():\n if clust == 'Global':\n if len(self.regressors['Global']['models']) > 0:\n predict_module = global_predict(self.static_data)\n pred_cluster['Global'] = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm=X_lstm_test)\n pred_cluster['Global']['metrics'] = predict_module.evaluate(pred_cluster['Global'], y_test.values)\n pred_cluster['Global']['dates'] = X_test.index\n pred_cluster['Global']['index'] = np.arange(0, X_test.shape[0])\n else:\n dates = X_test.index[act_test[clust] >= self.thres_act]\n nind = np.where(act_test[clust] >= self.thres_act)[0]\n nind.sort()\n\n x = X_test.loc[dates]\n targ = y_test.loc[dates].values\n if len(X_cnn_test.shape) > 1:\n x_cnn = X_cnn_test[nind]\n else:\n x_cnn = np.array([])\n if len(X_lstm_test.shape) > 1:\n x_lstm = X_lstm_test[nind]\n else:\n x_lstm = np.array([])\n predict_module = cluster_predict(self.static_data, clust)\n pred_cluster[clust] = predict_module.predict(x.values, X_cnn=x_cnn, X_lstm=x_lstm)\n pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster[clust], targ)\n pred_cluster[clust]['dates'] = dates\n pred_cluster[clust]['index'] = nind\n predictions = dict()\n result_clust = pd.DataFrame()\n for clust in pred_cluster.keys():\n for method in pred_cluster[clust].keys():\n if not method in {'dates', 'index', 'metrics'}:\n if not method in predictions.keys():\n predictions[method] = pd.DataFrame(index=X_test.index, columns=[cl for cl in pred_cluster.keys()])\n predictions[method].loc[pred_cluster[clust]['dates'], clust] = pred_cluster[clust][method].ravel()\n elif method in {'metrics'}:\n result_clust = pd.concat([result_clust, pred_cluster[clust][method]['mae'].rename(clust)], axis=1)\n result_clust.to_csv(os.path.join(data_path, 'result_of_clusters.csv'))\n joblib.dump(pred_cluster, os.path.join(data_path, 'predictions_by_cluster.pickle'))\n joblib.dump(predictions, os.path.join(data_path, 'predictions_by_method.pickle'))\n y_test.to_csv(os.path.join(data_path, 'target_test.csv'))\n else:\n self.static_data['combine_methods'] = ['average']\n\n def load(self):\n if os.path.exists(os.path.join(self.path_model, 'manager' + '.pickle')):\n try:\n f = open(os.path.join(self.path_model, 'manager' + '.pickle'), 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n if 'path_model' in tmp_dict.keys():\n del tmp_dict['path_model']\n self.__dict__.update(tmp_dict)\n except:\n raise ValueError('Cannot find model for %s', self.path_model)\n else:\n raise ValueError('Cannot find model for %s', self.path_model)\n\n def load_to_transfer(self, path_model):\n if os.path.exists(os.path.join(path_model, 'manager' + '.pickle')):\n try:\n f = open(os.path.join(path_model, 'manager' + '.pickle'), 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n return tmp_dict\n except:\n raise ValueError('Cannot find model for %s', path_model)\n else:\n raise ValueError('Cannot find model for %s', path_model)\n\n def save(self):\n f = open(os.path.join(self.path_model, 'manager' + '.pickle'), 'wb')\n dict = {}\n for k in self.__dict__.keys():\n if k not in 
['logger','db', 'path_model', 'static_data','thres_act','thres_split','use_db']:\n dict[k] = self.__dict__[k]\n pickle.dump(dict, f)\n f.close()\n\nif __name__ == '__main__':\n from util_database import write_database\n from Fuzzy_clustering.ver_tf2.Projects_train_manager import ProjectsTrainManager\n\n static_data = write_database()\n project_manager = ProjectsTrainManager(static_data)\n project_manager.initialize()\n project_manager.create_datasets()\n project_manager.create_projects_relations()\n project = [pr for pr in project_manager.group_static_data if pr['_id'] == 'Lach'][0]\n static_data = project['static_data']\n\n model = ModelTrainManager(static_data['path_model'])\n model.init(project['static_data'], project_manager.data_variables)\n model.train()"
] | [
[
"numpy.square",
"numpy.arange",
"pandas.DataFrame",
"numpy.mean",
"numpy.array",
"numpy.where"
],
[
"numpy.isnan",
"numpy.array",
"numpy.where",
"numpy.sum"
],
[
"sklearn.ensemble.RandomForestRegressor",
"numpy.hstack",
"numpy.square",
"sklearn.svm.NuSVR",
"sklearn.svm.SVR",
"numpy.mean",
"numpy.vstack",
"sklearn.neural_network.MLPRegressor"
],
[
"numpy.linspace",
"numpy.logspace",
"numpy.arange",
"sklearn.linear_model.ElasticNetCV",
"numpy.stack",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"numpy.vstack"
],
[
"numpy.hstack",
"numpy.isnan",
"numpy.vstack",
"numpy.stack",
"numpy.copy",
"numpy.transpose",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
],
[
"numpy.array",
"pandas.DataFrame"
],
[
"numpy.linspace",
"numpy.logspace",
"numpy.arange",
"numpy.random.shuffle",
"numpy.copy",
"numpy.array",
"numpy.where",
"sklearn.decomposition.PCA",
"numpy.vstack"
],
[
"numpy.array",
"numpy.where",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame"
],
[
"pandas.Series",
"pandas.isnull",
"numpy.arange",
"numpy.vstack",
"pandas.DataFrame",
"numpy.round",
"numpy.array",
"numpy.where",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Johnson-Lsx/espnet | [
"01214cff08cdd737bcab93dd62e127169394d073",
"01214cff08cdd737bcab93dd62e127169394d073"
] | [
"espnet/nets/pytorch_backend/transducer/transformer_decoder.py",
"test/test_e2e_asr_sa_transducer.py"
] | [
"\"\"\"Decoder definition for transformer-transducer models.\"\"\"\n\nimport torch\n\nfrom espnet.nets.pytorch_backend.transducer.blocks import build_blocks\nfrom espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork\nfrom espnet.nets.pytorch_backend.transducer.utils import check_state\nfrom espnet.nets.pytorch_backend.transducer.utils import pad_batch_state\nfrom espnet.nets.pytorch_backend.transducer.utils import pad_sequence\nfrom espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm\nfrom espnet.nets.pytorch_backend.transformer.mask import subsequent_mask\nfrom espnet.nets.transducer_decoder_interface import TransducerDecoderInterface\n\n\nclass DecoderTT(TransducerDecoderInterface, torch.nn.Module):\n \"\"\"Decoder module for transformer-transducer models.\n\n Args:\n odim (int): dimension of outputs\n edim (int): dimension of encoder outputs\n jdim (int): dimension of joint-space\n dec_arch (list): list of layer definitions\n input_layer (str): input layer type\n repeat_block (int): repeat provided blocks N times if N > 1\n joint_activation_type (str) joint network activation type\n positional_encoding_type (str): positional encoding type\n positionwise_layer_type (str): linear\n positionwise_activation_type (str): positionwise activation type\n dropout_rate_embed (float): dropout rate for embedding layer (if specified)\n blank (int): blank symbol ID\n\n \"\"\"\n\n def __init__(\n self,\n odim,\n edim,\n jdim,\n dec_arch,\n input_layer=\"embed\",\n repeat_block=0,\n joint_activation_type=\"tanh\",\n positional_encoding_type=\"abs_pos\",\n positionwise_layer_type=\"linear\",\n positionwise_activation_type=\"relu\",\n dropout_rate_embed=0.0,\n blank=0,\n ):\n \"\"\"Construct a Decoder object for transformer-transducer models.\"\"\"\n torch.nn.Module.__init__(self)\n\n self.embed, self.decoders, ddim, _ = build_blocks(\n \"decoder\",\n odim,\n input_layer,\n dec_arch,\n repeat_block=repeat_block,\n positional_encoding_type=positional_encoding_type,\n positionwise_layer_type=positionwise_layer_type,\n positionwise_activation_type=positionwise_activation_type,\n dropout_rate_embed=dropout_rate_embed,\n padding_idx=blank,\n )\n\n self.after_norm = LayerNorm(ddim)\n\n self.joint_network = JointNetwork(odim, edim, ddim, jdim, joint_activation_type)\n\n self.dunits = ddim\n self.odim = odim\n\n self.blank = blank\n\n def init_state(self, batch_size=None, device=None, dtype=None):\n \"\"\"Initialize decoder states.\n\n Args:\n init_tensor (torch.Tensor): batch of input features (B, dec_dim)\n\n Returns:\n state (list): batch of decoder decoder states [L x None]\n\n \"\"\"\n state = [None] * len(self.decoders)\n\n return state\n\n def forward(self, tgt, tgt_mask, memory):\n \"\"\"Forward transformer-transducer decoder.\n\n Args:\n tgt (torch.Tensor): input token ids, int64 (batch, maxlen_out)\n if input_layer == \"embed\"\n input tensor\n (batch, maxlen_out, #mels) in the other cases\n tgt_mask (torch.Tensor): input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n memory (torch.Tensor): encoded memory, float32 (batch, maxlen_in, feat)\n\n Return:\n z (torch.Tensor): joint output (batch, maxlen_in, maxlen_out, odim)\n tgt_mask (torch.Tensor): score mask before softmax (batch, maxlen_out)\n\n \"\"\"\n tgt = self.embed(tgt)\n\n tgt, tgt_mask = self.decoders(tgt, tgt_mask)\n tgt = self.after_norm(tgt)\n\n h_enc = memory.unsqueeze(2)\n h_dec = tgt.unsqueeze(1)\n\n z = self.joint_network(h_enc, 
h_dec)\n\n return z, tgt_mask\n\n def score(self, hyp, cache):\n \"\"\"Forward one step.\n\n Args:\n hyp (dataclass): hypothesis\n cache (dict): states cache\n\n Returns:\n y (torch.Tensor): decoder outputs (1, dec_dim)\n (list): decoder states\n [L x (1, max_len, dec_dim)]\n lm_tokens (torch.Tensor): token id for LM (1)\n\n \"\"\"\n device = next(self.parameters()).device\n\n tgt = torch.tensor(hyp.yseq).unsqueeze(0).to(device=device)\n lm_tokens = tgt[:, -1]\n\n str_yseq = \"\".join([str(x) for x in hyp.yseq])\n\n if str_yseq in cache:\n y, new_state = cache[str_yseq]\n else:\n tgt_mask = subsequent_mask(len(hyp.yseq)).unsqueeze(0).to(device=device)\n\n state = check_state(hyp.dec_state, (tgt.size(1) - 1), self.blank)\n\n tgt = self.embed(tgt)\n\n new_state = []\n for s, decoder in zip(state, self.decoders):\n tgt, tgt_mask = decoder(tgt, tgt_mask, cache=s)\n new_state.append(tgt)\n\n y = self.after_norm(tgt[:, -1])\n\n cache[str_yseq] = (y, new_state)\n\n return y[0], new_state, lm_tokens\n\n def batch_score(self, hyps, batch_states, cache):\n \"\"\"Forward batch one step.\n\n Args:\n hyps (list): batch of hypotheses\n batch_states (list): decoder states\n [L x (B, max_len, dec_dim)]\n cache (dict): states cache\n\n Returns:\n batch_y (torch.Tensor): decoder output (B, dec_dim)\n batch_states (list): decoder states\n [L x (B, max_len, dec_dim)]\n lm_tokens (torch.Tensor): batch of token ids for LM (B)\n\n \"\"\"\n final_batch = len(hyps)\n device = next(self.parameters()).device\n\n process = []\n done = [None for _ in range(final_batch)]\n\n for i, hyp in enumerate(hyps):\n str_yseq = \"\".join([str(x) for x in hyp.yseq])\n\n if str_yseq in cache:\n done[i] = (*cache[str_yseq], hyp.yseq)\n else:\n process.append((str_yseq, hyp.yseq, hyp.dec_state))\n\n if process:\n batch = len(process)\n _tokens = pad_sequence([p[1] for p in process], self.blank)\n _states = [p[2] for p in process]\n\n batch_tokens = torch.LongTensor(_tokens).view(batch, -1).to(device=device)\n tgt_mask = (\n subsequent_mask(batch_tokens.size(-1))\n .unsqueeze(0)\n .expand(batch, -1, -1)\n .to(device=device)\n )\n\n dec_state = self.init_state()\n dec_state = self.create_batch_states(\n dec_state,\n _states,\n _tokens,\n )\n\n tgt = self.embed(batch_tokens)\n\n next_state = []\n for s, decoder in zip(dec_state, self.decoders):\n tgt, tgt_mask = decoder(tgt, tgt_mask, cache=s)\n next_state.append(tgt)\n\n tgt = self.after_norm(tgt[:, -1])\n\n j = 0\n for i in range(final_batch):\n if done[i] is None:\n new_state = self.select_state(next_state, j)\n\n done[i] = (tgt[j], new_state, process[j][2])\n cache[process[j][0]] = (tgt[j], new_state)\n\n j += 1\n\n batch_states = self.create_batch_states(\n batch_states, [d[1] for d in done], [d[2] for d in done]\n )\n batch_y = torch.stack([d[0] for d in done])\n\n lm_tokens = (\n torch.LongTensor([hyp.yseq[-1] for hyp in hyps])\n .view(final_batch)\n .to(device=device)\n )\n\n return batch_y, batch_states, lm_tokens\n\n def select_state(self, batch_states, idx):\n \"\"\"Get decoder state from batch of states, for given id.\n\n Args:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n idx (int): index to extract state from batch of states\n\n Returns:\n state_idx (list): decoder states for given id\n [L x (1, max_len, dec_dim)]\n\n \"\"\"\n if batch_states[0] is None:\n return batch_states\n\n state_idx = [batch_states[layer][idx] for layer in range(len(self.decoders))]\n\n return state_idx\n\n def create_batch_states(self, batch_states, l_states, 
l_tokens):\n \"\"\"Create batch of decoder states.\n\n Args:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n l_states (list): list of decoder states\n [B x [L x (1, max_len, dec_dim)]]\n l_tokens (list): list of token sequences for batch\n\n Returns:\n batch_states (list): batch of decoder states\n [L x (B, max_len, dec_dim)]\n\n \"\"\"\n if batch_states[0] is None:\n return batch_states\n\n max_len = max([len(t) for t in l_tokens])\n\n for layer in range(len(self.decoders)):\n batch_states[layer] = pad_batch_state(\n [s[layer] for s in l_states], max_len, self.blank\n )\n\n return batch_states\n",
"# coding: utf-8\n\nimport argparse\n\nimport pytest\nimport torch\n\nfrom espnet.nets.beam_search_transducer import BeamSearchTransducer\nfrom espnet.nets.pytorch_backend.e2e_asr_transducer import E2E\nfrom espnet.nets.pytorch_backend.transducer.blocks import build_blocks\n\n\ndef make_train_args(**kwargs):\n train_defaults = dict(\n transformer_init=\"pytorch\",\n etype=\"transformer\",\n transformer_enc_input_layer=\"conv2d\",\n transformer_enc_self_attn_type=\"selfattn\",\n transformer_enc_positional_encoding_type=\"abs_pos\",\n transformer_enc_pw_activation_type=\"relu\",\n transformer_enc_conv_mod_activation_type=\"relu\",\n enc_block_arch=[{\"type\": \"transformer\", \"d_hidden\": 2, \"d_ff\": 2, \"heads\": 1}],\n enc_block_repeat=1,\n dtype=\"transformer\",\n transformer_dec_input_layer=\"embed\",\n dec_block_arch=[{\"type\": \"transformer\", \"d_hidden\": 2, \"d_ff\": 2, \"heads\": 1}],\n dec_block_repeat=1,\n transformer_dec_pw_activation_type=\"relu\",\n dropout_rate_embed_decoder=0.0,\n joint_dim=2,\n joint_activation_type=\"tanh\",\n mtlalpha=1.0,\n trans_type=\"warp-transducer\",\n rnnt_mode=\"rnnt_mode\",\n char_list=[\"a\", \"e\", \"i\", \"o\", \"u\"],\n sym_space=\"<space>\",\n sym_blank=\"<blank>\",\n report_cer=False,\n report_wer=False,\n search_type=\"default\",\n score_norm_transducer=False,\n beam_size=1,\n nbest=1,\n verbose=0,\n outdir=None,\n rnnlm=None,\n )\n train_defaults.update(kwargs)\n\n return argparse.Namespace(**train_defaults)\n\n\ndef make_recog_args(**kwargs):\n recog_defaults = dict(\n batchsize=0,\n beam_size=1,\n nbest=1,\n verbose=0,\n search_type=\"default\",\n nstep=1,\n max_sym_exp=2,\n u_max=5,\n prefix_alpha=2,\n score_norm_transducer=True,\n rnnlm=None,\n )\n recog_defaults.update(kwargs)\n\n return argparse.Namespace(**recog_defaults)\n\n\ndef get_default_scope_inputs():\n bs = 2\n idim = 12\n odim = 5\n\n ilens = [12, 4]\n olens = [5, 4]\n\n return bs, idim, odim, ilens, olens\n\n\ndef prepare(args):\n bs, idim, odim, ilens, olens = get_default_scope_inputs()\n n_token = odim - 1\n\n model = E2E(idim, odim, args)\n\n x = torch.randn(bs, max(ilens), idim)\n y = (torch.rand(bs, max(olens)) * n_token % n_token).long()\n\n for i in range(bs):\n x[i, ilens[i] :] = -1\n y[i, olens[i] :] = model.ignore_id\n\n data = {}\n uttid_list = []\n for i in range(bs):\n data[\"utt%d\" % i] = {\n \"input\": [{\"shape\": [ilens[i], idim]}],\n \"output\": [{\"shape\": [olens[i]]}],\n }\n uttid_list.append(\"utt%d\" % i)\n\n return model, x, torch.tensor(ilens), y, data, uttid_list\n\n\[email protected](\n \"train_dic, recog_dic\",\n [\n ({}, {}),\n ({\"enc_block_repeat\": 2}, {}),\n ({\"dec_block_repeat\": 2}, {}),\n (\n {\n \"enc_block_arch\": [\n {\n \"type\": \"conformer\",\n \"d_hidden\": 2,\n \"d_ff\": 2,\n \"heads\": 1,\n \"macaron_style\": True,\n \"use_conv_mod\": True,\n \"conv_mod_kernel\": 1,\n }\n ],\n \"transformer_enc_input_layer\": \"vgg2l\",\n \"transformer_enc_self_attn_type\": \"rel_self_attn\",\n \"transformer_enc_positional_encoding_type\": \"rel_pos\",\n },\n {},\n ),\n (\n {\n \"enc_block_arch\": [\n {\n \"type\": \"conformer\",\n \"d_hidden\": 2,\n \"d_ff\": 2,\n \"heads\": 2,\n \"macaron_style\": False,\n \"use_conv_mod\": True,\n \"conv_mod_kernel\": 1,\n \"transformer_enc_pw_activation_type\": \"swish\",\n \"transformer_enc_conv_mod_activation_type\": \"relu\",\n }\n ],\n },\n {\"transformer_dec_pw_activation_type\": \"swish\"},\n ),\n (\n {\n \"enc_block_arch\": [\n {\n \"type\": \"tdnn\",\n \"idim\": 2,\n \"odim\": 2,\n 
\"ctx_size\": 2,\n \"dilation\": 1,\n \"stride\": 1,\n \"dropout-rate\": 0.3,\n \"use-relu\": True,\n \"use-batch-norm\": True,\n },\n {\n \"type\": \"transformer\",\n \"d_hidden\": 2,\n \"d_ff\": 2,\n \"heads\": 1,\n \"dropout-rate\": 0.3,\n \"att-dropout-rate\": 0.2,\n \"pos-dropout-rate\": 0.1,\n },\n ],\n },\n {},\n ),\n (\n {\n \"enc_block_arch\": [\n {\n \"type\": \"tdnn\",\n \"idim\": 2,\n \"odim\": 2,\n \"ctx_size\": 2,\n \"dilation\": 1,\n \"stride\": 1,\n \"dropout-rate\": 0.3,\n \"use-relu\": True,\n \"use-batch-norm\": True,\n },\n {\n \"type\": \"conformer\",\n \"d_hidden\": 2,\n \"d_ff\": 2,\n \"heads\": 1,\n \"macaron_style\": False,\n \"use_conv_mod\": False,\n },\n ],\n \"transformer_enc_input_layer\": \"linear\",\n \"transformer_enc_self_attn_type\": \"rel_self_attn\",\n \"transformer_enc_positional_encoding_type\": \"rel_pos\",\n },\n {},\n ),\n (\n {\n \"enc_block_arch\": [\n {\n \"type\": \"tdnn\",\n \"idim\": 2,\n \"odim\": 2,\n \"ctx_size\": 2,\n \"dilation\": 1,\n \"stride\": 1,\n }\n ]\n },\n {},\n ),\n (\n {\n \"dec_block_arch\": [\n {\"type\": \"causal-conv1d\", \"idim\": 2, \"odim\": 2, \"kernel_size\": 3},\n {\"type\": \"transformer\", \"d_hidden\": 2, \"d_ff\": 2, \"heads\": 1},\n ]\n },\n {},\n ),\n ({\"transformer_enc_pw_activation_type\": \"swish\"}, {}),\n ({\"transformer_enc_pw_activation_type\": \"hardtanh\"}, {}),\n ({\"transformer_dec_pw_activation_type\": \"swish\"}, {}),\n ({\"transformer_dec_pw_activation_type\": \"hardtanh\"}, {}),\n ({\"transformer_enc_positional_encoding_type\": \"scaled_abs_pos\"}, {}),\n ({\"joint_activation_type\": \"relu\"}, {}),\n ({\"joint_activation_type\": \"swish\"}, {}),\n ({\"transformer_enc_input_layer\": \"vgg2l\"}, {}),\n ({\"transformer_enc_input_layer\": \"linear\"}, {}),\n ({\"report_cer\": True, \"report_wer\": True}, {}),\n ({\"report_cer\": True, \"beam_size\": 2}, {}),\n ({}, {\"beam_size\": 2}),\n ({}, {\"beam_size\": 2, \"nbest\": 2, \"score_norm_transducer\": False}),\n ({}, {\"beam_size\": 2, \"search_type\": \"nsc\", \"nstep\": 3, \"prefix_alpha\": 1}),\n ({}, {\"beam_size\": 2, \"search_type\": \"tsd\", \"max_sym_exp\": 3}),\n ({}, {\"beam_size\": 2, \"search_type\": \"alsd\"}),\n ({}, {\"beam_size\": 2, \"search_type\": \"alsd\", \"u_max\": 10}),\n ],\n)\ndef test_sa_transducer_trainable_and_decodable(train_dic, recog_dic):\n train_args = make_train_args(**train_dic)\n recog_args = make_recog_args(**recog_dic)\n\n model, x, ilens, y, data, uttid_list = prepare(train_args)\n\n optim = torch.optim.Adam(model.parameters(), 0.01)\n loss = model(x, ilens, y)\n\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n beam_search = BeamSearchTransducer(\n decoder=model.decoder,\n beam_size=recog_args.beam_size,\n lm=None,\n lm_weight=0.0,\n search_type=recog_args.search_type,\n max_sym_exp=recog_args.max_sym_exp,\n u_max=recog_args.u_max,\n nstep=recog_args.nstep,\n prefix_alpha=recog_args.prefix_alpha,\n score_norm=recog_args.score_norm_transducer,\n )\n\n with torch.no_grad():\n nbest = model.recognize(x[0, : ilens[0]].numpy(), beam_search)\n\n print(y[0])\n print(nbest[0][\"yseq\"][1:-1])\n\n\ndef test_calculate_plot_attention():\n from espnet.nets.pytorch_backend.transformer import plot\n\n train_args = make_train_args(report_cer=True)\n\n model, x, ilens, y, data, uttid_list = prepare(train_args)\n\n attn_dict = model.calculate_all_attentions(x[0:1], ilens[0:1], y[0:1])\n plot.plot_multi_head_attention(data, uttid_list, attn_dict, \"/tmp/espnet-test\")\n\n\ndef test_invalid_input_layer_type():\n 
architecture = [\n {\n \"type\": \"transformer\",\n \"d_hidden\": 2,\n \"d_ff\": 2,\n \"heads\": 1,\n },\n ]\n\n with pytest.raises(NotImplementedError):\n _, _, _ = build_blocks(\"encoder\", 4, \"foo\", architecture)\n\n\ndef test_invalid_architecture_layer_type():\n\n with pytest.raises(NotImplementedError):\n _, _, _ = build_blocks(\"encoder\", 4, \"linear\", [{\"type\": \"foo\"}])\n\n\ndef test_invalid_block():\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"encoder\", 4, \"linear\", [{\"foo\": \"foo\"}])\n\n\ndef test_invalid_block_arguments():\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"encoder\", 4, \"linear\", [{\"type\": \"transformer\"}])\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"encoder\", 4, \"linear\", [{\"type\": \"conformer\"}])\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\n \"encoder\",\n 4,\n \"linear\",\n [\n {\n \"type\": \"conformer\",\n \"d_hidden\": 4,\n \"d_ff\": 8,\n \"heads\": 1,\n \"macaron_style\": False,\n \"use_conv_mod\": True,\n }\n ],\n )\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"decoder\", 4, \"embed\", [{\"type\": \"conformer\"}])\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"encoder\", 4, \"linear\", [{\"type\": \"tdnn\"}])\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\"decoder\", 4, \"embed\", [{\"type\": \"causal-conv1d\"}])\n\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\n \"encoder\",\n 4,\n \"embed\",\n [\n {\n \"type\": \"transformer\",\n \"d_hidden\": 2,\n \"d_ff\": 8,\n \"heads\": 1,\n },\n ],\n positional_encoding_type=\"rel_pos\",\n self_attn_type=\"self_attn\",\n )\n\n\ndef test_invalid_block_io():\n with pytest.raises(ValueError):\n _, _, _ = build_blocks(\n \"encoder\",\n 4,\n \"linear\",\n [\n {\n \"type\": \"transformer\",\n \"d_hidden\": 2,\n \"d_ff\": 8,\n \"heads\": 1,\n },\n {\n \"type\": \"transformer\",\n \"d_hidden\": 4,\n \"d_ff\": 8,\n \"heads\": 1,\n },\n ],\n )\n"
] | [
[
"torch.stack",
"torch.LongTensor",
"torch.nn.Module.__init__",
"torch.tensor"
],
[
"torch.no_grad",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rgap/storm | [
"5f477d6fa58c6c1ec8d8e2b57c3b21844cae17ac"
] | [
"storm_kit/mpc/control/control_utils.py"
] | [
"#\n# MIT License\n#\n# Copyright (c) 2020-2021 NVIDIA CORPORATION.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.#\nimport math\n\nimport numpy as np\nimport torch\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nimport ghalton\n\n\ndef scale_ctrl(ctrl, action_lows, action_highs, squash_fn='clamp'):\n if len(ctrl.shape) == 1:\n ctrl = ctrl[np.newaxis, :, np.newaxis]\n act_half_range = (action_highs - action_lows) / 2.0\n act_mid_range = (action_highs + action_lows) / 2.0\n if squash_fn == 'clamp':\n # ctrl = torch.clamp(ctrl, action_lows[0], action_highs[0])\n ctrl = torch.max(torch.min(ctrl, action_highs), action_lows)\n return ctrl\n elif squash_fn == 'clamp_rescale':\n ctrl = torch.clamp(ctrl, -1.0, 1.0)\n elif squash_fn == 'tanh':\n ctrl = torch.tanh(ctrl)\n elif squash_fn == 'identity':\n return ctrl\n return act_mid_range.unsqueeze(0) + ctrl * act_half_range.unsqueeze(0)\n\n#######################\n## STOMP Covariance ##\n#######################\n\ndef get_stomp_cov(horizon, d_action,\n tensor_args={'device':torch.device('cpu'),'dtype':torch.float32},\n cov_mode='vel', RETURN_R=False):\n \"\"\" Computes the covariance matrix following STOMP motion planner\n\n Coefficients from here: https://en.wikipedia.org/wiki/Finite_difference_coefficient\n More info here: https://github.com/ros-industrial/stomp_ros/blob/7fe40fbe6ad446459d8d4889916c64e276dbf882/stomp_core/src/utils.cpp#L36\n \"\"\"\n acc_fd_array = [0,-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12, 0]\n #acc_fd_array = [1/90, -3/20, 3/2, -49/18, 3/2 , -3/20, 1/90 ]\n\n #jerk_fd_array = [0, 1 / 12.0, -17 / 12.0, 46 / 12.0, -46 / 12.0, 17 / 12.0, -1 / 12.0]\n jerk_fd_array = [1 / 8.0, -1, 13/8, 0 , -13/8, 1, -1/8]\n\n #snap_fd_array = [-1/6, 2.0, -13/2, 28/3, -13/2, 2, -1/6]\n snap_fd_array = [0, 1, -4, 6, -4, 1, 0]\n #vel_fd_array = [0, 1.0/12.0 , -2.0/3.0 , 0 , 2.0/3.0 , -1.0/12.0 , 0 ]\n vel_fd_array = [0, 0 , 1, -2 , 1,0, 0 ]\n \n fd_array = acc_fd_array\n A = torch.zeros((d_action * horizon, d_action * horizon), device=tensor_args['device'],dtype=torch.float64)\n\n\n if(cov_mode == 'vel'):\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3,4):\n #print(j)\n index = i + j\n if(index < 0):\n index = 0\n continue\n if(index >= horizon):\n index = horizon - 1\n continue\n A[k * horizon + i,k * horizon + index] = fd_array[j + 3]\n elif(cov_mode == 'acc'):\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3,4):\n #print(j)\n index = i + j\n if(index < 
0):\n index = 0\n continue\n if(index >= horizon):\n index = horizon - 1\n continue\n if(index >= horizon/2):\n #print(k * horizon + index - horizon//2)\n A[k * horizon + i,k * horizon - index - horizon//2 -1] = fd_array[j + 3] #* float((horizon-index) / horizon)\n else:\n A[k * horizon + i,k * horizon + index] = fd_array[j + 3] #* float(index/horizon) \n #plt.imshow(A)\n #plt.show()\n\n R = torch.matmul(A.transpose(-2,-1), A)\n #print(R[:horizon, :horizon])\n #plt.imshow(R)\n #plt.show()\n #print(R)\n #print(torch.det(R))\n \n cov = torch.inverse(R)\n cov = cov / torch.max(torch.abs(cov))\n #plt.imshow(cov)\n #plt.show()\n\n # also compute the cholesky decomposition:\n scale_tril = torch.zeros((d_action * horizon, d_action * horizon), **tensor_args)\n scale_tril = torch.linalg.cholesky(cov)\n '''\n k = 0\n act_cov_matrix = cov[k * horizon:k * horizon + horizon, k * horizon:k * horizon + horizon]\n print(act_cov_matrix.shape)\n print(torch.det(act_cov_matrix))\n local_cholesky = matrix_cholesky(act_cov_matrix)\n for k in range(d_action):\n \n scale_tril[k * horizon:k * horizon + horizon,k * horizon:k * horizon + horizon] = local_cholesky\n '''\n cov = cov.to(**tensor_args)\n scale_tril = scale_tril.to(**tensor_args) #* 0.1\n scale_tril = scale_tril / torch.max(scale_tril)\n if(RETURN_R):\n return cov, scale_tril, R\n return cov, scale_tril\n \n\n\n#######################\n## Gaussian Sampling ##\n#######################\n\n\ndef generate_noise(cov, shape, base_seed, filter_coeffs=None, device=torch.device('cpu')):\n \"\"\"\n Generate correlated Gaussian samples using autoregressive process\n \"\"\"\n torch.manual_seed(base_seed)\n beta_0, beta_1, beta_2 = filter_coeffs\n N = cov.shape[0]\n m = MultivariateNormal(loc=torch.zeros(N).to(device), covariance_matrix=cov)\n eps = m.sample(sample_shape=shape)\n # eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)\n if filter_coeffs is not None:\n for i in range(2, eps.shape[1]):\n eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]\n return eps \n\ndef generate_noise_np(cov, shape, base_seed, filter_coeffs=None):\n \"\"\"\n Generate correlated noisy samples using autoregressive process\n \"\"\"\n np.random.seed(base_seed)\n beta_0, beta_1, beta_2 = filter_coeffs\n N = cov.shape[0]\n eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)\n if filter_coeffs is not None:\n for i in range(2, eps.shape[1]):\n eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]\n return eps \n\n###########################\n## Quasi-Random Sampling ##\n###########################\n\ndef generate_prime_numbers(num):\n def is_prime(n):\n for j in range(2, ((n //2) + 1),1):\n if n % j == 0:\n return False\n return True\n\n primes = [0] * num #torch.zeros(num, device=device)\n primes[0] = 2\n curr_num = 1\n for i in range(1, num):\n while True:\n curr_num += 2\n if is_prime(curr_num):\n primes[i] = curr_num\n break\n \n return primes\n\ndef generate_van_der_corput_sample(idx, base):\n f, r = 1.0, 0\n while idx > 0:\n f /= base*1.0\n r += f * (idx % base)\n idx = idx // base\n return r\n\ndef generate_van_der_corput_samples_batch(idx_batch, base):\n inp_device = idx_batch.device\n batch_size = idx_batch.shape[0]\n f = 1.0 #torch.ones(batch_size, device=inp_device)\n r = torch.zeros(batch_size, device=inp_device)\n while torch.any(idx_batch > 0):\n f /= base*1.0\n r += f * (idx_batch % base) #* (idx_batch > 0)\n idx_batch = idx_batch // base\n return r\n\n\n# def 
generate_van_der_corput_samples_batch_2(idx_batch, bases):\n# inp_device = idx_batch.device\n# batch_size = idx_batch.shape[0]\n# f = torch.ones(batch_size, device=inp_device)\n# r = torch.zeros(batch_size, device=inp_device)\n \n# while torch.any(idx_batch > 0):\n# f /= bases*1.0\n# r += f * (idx_batch % base) #* (idx_batch > 0)\n# idx_batch = idx_batch // base\n \n# return r\n\ndef generate_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):\n if not use_ghalton:\n samples = torch.zeros(num_samples, ndims, device=device, dtype=float_dtype)\n if not bases:\n bases = generate_prime_numbers(ndims)\n idx_batch = torch.arange(1,num_samples+1, device=device)\n for dim in range(ndims):\n samples[:, dim] = generate_van_der_corput_samples_batch(idx_batch, bases[dim])\n else:\n \n if ndims <= 100:\n perms = ghalton.EA_PERMS[:ndims]\n sequencer = ghalton.GeneralizedHalton(perms)\n else:\n sequencer = ghalton.GeneralizedHalton(ndims, seed_val)\n samples = torch.tensor(sequencer.get(num_samples), device=device, dtype=float_dtype)\n return samples\n\n\ndef generate_gaussian_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):\n uniform_halton_samples = generate_halton_samples(num_samples, ndims, bases, use_ghalton, seed_val, device, float_dtype)\n\n gaussian_halton_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_halton_samples - 1)\n \n return gaussian_halton_samples\n\n\ndef generate_gaussian_sobol_samples(num_samples, ndims, seed_val, device=torch.device('cpu'), float_dtype=torch.float64):\n soboleng = torch.quasirandom.SobolEngine(dimension=ndims, scramble=True, seed=seed_val)\n uniform_sobol_samples = soboleng.draw(num_samples).to(device)\n\n gaussian_sobol_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_sobol_samples - 1)\n return gaussian_sobol_samples\n \n########################\n## Gaussian Utilities ##\n########################\n\n\ndef gaussian_logprob(mean, cov, x, cov_type=\"full\"):\n \"\"\"\n Calculate gaussian log prob for given input batch x\n Parameters\n ----------\n mean (np.ndarray): [N x num_samples] batch of means\n cov (np.ndarray): [N x N] covariance matrix\n x (np.ndarray): [N x num_samples] batch of sample values\n\n Returns\n --------\n log_prob (np.ndarray): [num_sampls] log probability of each sample\n \"\"\"\n N = cov.shape[0]\n if cov_type == \"diagonal\":\n cov_diag = cov.diagonal()\n cov_inv = np.diag(1.0 / cov_diag)\n cov_logdet = np.sum(np.log(cov_diag))\n else:\n cov_logdet = np.log(np.linalg.det(cov))\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n mahalanobis_dist = -0.5 * np.sum((diff @ cov_inv) * diff, axis=1)\n const1 = -0.5 * N * np.log(2.0 * np.pi) \n const2 = -0.5*cov_logdet\n log_prob = mahalanobis_dist + const1 + const2\n return log_prob\n\ndef gaussian_logprobgrad(mean, cov, x, cov_type=\"full\"):\n if cov_type == \"diagonal\":\n cov_inv = np.diag(1.0/cov.diagonal())\n else:\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n grad = diff @ cov_inv\n return grad\n\ndef gaussian_entropy(cov=None, L=None): #, cov_type=\"full\"):\n \"\"\"\n Entropy of multivariate gaussian given either covariance\n or cholesky decomposition of covariance\n \n \"\"\"\n if cov is not None:\n inp_device = cov.device\n cov_logdet = torch.log(torch.det(cov))\n # print(np.linalg.det(cov.cpu().numpy()))\n 
# print(torch.det(cov))\n N = cov.shape[0]\n\n else:\n inp_device = L.device\n cov_logdet = 2.0 * torch.sum(torch.log(torch.diagonal(L)))\n N = L.shape[0]\n # if cov_type == \"diagonal\":\n # cov_logdet = np.sum(np.log(cov.diagonal())) \n # else:\n # cov_logdet = np.log(np.linalg.det(cov))\n\n term1 = 0.5 * cov_logdet\n # pi = torch.tensor([math.pi], device=inp_device)\n # pre-calculate 1.0 + torch.log(2.0*pi) = 2.837877066\n term2 = 0.5 * N * 2.837877066\n\n ent = term1 + term2\n return ent.to(inp_device)\n\ndef gaussian_kl(mean0, cov0, mean1, cov1, cov_type=\"full\"):\n \"\"\"\n KL-divergence between Gaussians given mean and covariance\n KL(p||q) = E_{p}[log(p) - log(q)]\n\n \"\"\"\n N = cov0.shape[0]\n if cov_type == \"diagonal\":\n cov1_diag = cov1.diagonal()\n cov1_inv = np.diag(1.0 / cov1_diag)\n cov0_logdet = np.sum(np.log(cov0.diagonal()))\n cov1_logdet = np.sum(np.log(cov1_diag))\n else:\n cov1_inv = np.linalg.inv(cov1)\n cov0_logdet = np.log(np.linalg.det(cov0))\n cov1_logdet = np.log(np.linalg.det(cov1))\n\n term1 = 0.5 * np.trace(cov1_inv @ cov0)\n diff = (mean1 - mean0).T\n mahalanobis_dist = 0.5 * np.sum((diff @ cov1_inv) * diff, axis=1)\n term3 = 0.5 * (-1.0*N + cov1_logdet - cov0_logdet)\n return term1 + mahalanobis_dist + term3\n\n\n\ndef cost_to_go(cost_seq, gamma_seq):\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if torch.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted cost sequence\n # cost_seq = torch.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq = torch.fliplr(torch.cumsum(torch.fliplr(cost_seq), axis=-1)) # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\ndef cost_to_go_np(cost_seq, gamma_seq):\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if np.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted reward sequence\n cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\n\n############\n##Cholesky##\n############\ndef matrix_cholesky(A):\n L = torch.zeros_like(A) \n for i in range(A.shape[-1]):\n for j in range(i+1):\n s = 0.0\n for k in range(j):\n s = s + L[i,k] * L[j,k] \n \n L[i,j] = torch.sqrt(A[i,i] - s) if (i == j) else \\\n (1.0 / L[j,j] * (A[i,j] - s))\n return L\n\n# Batched Cholesky decomp\ndef batch_cholesky(A):\n L = torch.zeros_like(A)\n\n for i in range(A.shape[-1]):\n for j in range(i+1):\n s = 0.0\n for k in range(j):\n s = s + L[...,i,k] * L[...,j,k]\n\n L[...,i,j] = torch.sqrt(A[...,i,i] - s) if (i == j) else \\\n (1.0 / L[...,j,j] * (A[...,i,j] - s))\n return L\n"
] | [
[
"numpy.diag",
"torch.abs",
"torch.max",
"torch.zeros",
"numpy.cumsum",
"torch.tanh",
"torch.device",
"numpy.trace",
"torch.quasirandom.SobolEngine",
"torch.erfinv",
"torch.fliplr",
"torch.sqrt",
"torch.inverse",
"torch.tensor",
"numpy.linalg.det",
"torch.arange",
"numpy.zeros",
"numpy.log",
"torch.linalg.cholesky",
"numpy.linalg.inv",
"torch.min",
"torch.zeros_like",
"numpy.sum",
"torch.diagonal",
"numpy.random.seed",
"torch.manual_seed",
"torch.det",
"torch.any",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SamuelCahyawijaya/fast-transformers | [
"6ae8ed4cc50bd037968db4f5062e4d328aae73fe",
"6ae8ed4cc50bd037968db4f5062e4d328aae73fe",
"6ae8ed4cc50bd037968db4f5062e4d328aae73fe",
"6ae8ed4cc50bd037968db4f5062e4d328aae73fe"
] | [
"tests/sparse_product/test_clustered_sparse_product_backward_cpu.py",
"tests/sparse_product/test_sparse_product_backward_gpu.py",
"tests/recurrent/test_transformer_encoder.py",
"tests/aggregate/test_clustered_aggregate_cpu.py"
] | [
"#\n# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/\n# Written by Angelos Katharopoulos <[email protected]>,\n# Apoorv Vyas <[email protected]>\n#\n\nimport os\nfrom os import getenv\nimport time\nimport unittest\n\nimport torch\nfrom torch.nn.init import normal_\n\nfrom fast_transformers.aggregate import aggregate, broadcast\nfrom fast_transformers.hashing import compute_hashes\nfrom fast_transformers.clustering.hamming import cluster\nfrom fast_transformers.sparse_product import clustered_sparse_dot_product\n\ndef cluster_queries(Q, query_lengths, C, I, B):\n N, H, L, E = Q.shape\n planes = Q.new_empty((B, E+1))\n normal_(planes)\n planes[:, -1] = 0\n hashes = compute_hashes(Q.view(N*H*L, E), planes).view(N, H, L)\n # Cluster the hashes and return the cluster index per query\n groups, counts = cluster(\n hashes,\n query_lengths,\n clusters=C,\n iterations=I,\n bits=B\n )\n\n return groups, counts\n\n\nclass TestSparseProductBackward(unittest.TestCase):\n @property\n def device(self):\n return \"cpu\"\n\n def _zero_grad(self, Q, K):\n for x in [Q, K]:\n if x.grad is not None:\n x.grad[...] = 0\n\n def test_simple_grad(self):\n N = 2\n H = 2\n L = 1000\n E = 32\n S = 1000\n k = 32\n C = 50\n I = 5\n B = 16\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n topk_broadcast = broadcast(\n topk.float(),\n groups,\n torch.ones_like(counts, dtype=torch.float32),\n torch.zeros((N, H, L, k), device=Q.device)\n )\n\n\n self._zero_grad(Q, K)\n QK_full = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK_selected = QK_full[\n torch.arange(N).view(N, 1, 1, 1).to(self.device),\n torch.arange(H).view(1, H, 1, 1).to(self.device),\n torch.arange(L).view(1, 1, L, 1).to(self.device),\n topk_broadcast.long()\n ]\n\n QK_selected.sum().backward()\n grad = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n\n self._zero_grad(Q, K)\n QK_selected_hat = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n lengths\n )\n\n QK_selected_hat.sum().backward()\n grad_hat = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n self.assertLess(\n torch.abs(QK_selected - QK_selected_hat).max(),\n 1e-4\n )\n for g1, g2 in zip(grad, grad_hat):\n self.assertLess(\n torch.abs(g1 - g2).max(),\n 1e-4\n )\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_forward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n C = 100\n I = 10\n B = 32\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n\n self._zero_grad(Q, K)\n n_runs = 10\n s = time.time()\n for i in range(n_runs):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n e = time.time()\n t_full = (e - s) / n_runs\n\n self._zero_grad(Q, K)\n s = time.time()\n for i in range(n_runs):\n QK = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n 
lengths\n )\n e = time.time()\n t_sparse = (e - s) / n_runs\n print(\"Benchmark Forward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_forward_backward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n C = 100\n I = 10\n B = 32\n\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n lengths = torch.full((N,), L, dtype=torch.int32).to(self.device)\n groups, counts = cluster_queries(Q, lengths, C, I, B)\n Q_grouped = aggregate(Q, groups, 1/counts.float())\n QK = torch.einsum(\"nhle,nhse->nhls\", Q_grouped, K)\n _, topk = torch.topk(QK, k, dim=-1)\n topk = topk.contiguous()\n\n self._zero_grad(Q, K)\n n_runs = 10\n s = time.time()\n for i in range(n_runs):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK.sum().backward()\n e = time.time()\n t_full = (e - s) / n_runs\n\n self._zero_grad(Q, K)\n s = time.time()\n for i in range(n_runs):\n QK = clustered_sparse_dot_product(\n Q, K, topk,\n groups, counts,\n lengths\n )\n QK.sum().backward()\n e = time.time()\n t_sparse = (e - s) / n_runs\n print(\"Benchmark Forward-Backward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"#\n# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/\n# Written by Angelos Katharopoulos <[email protected]>,\n# Apoorv Vyas <[email protected]>\n#\n\nimport os\nfrom os import getenv\nimport unittest\n\nimport torch\n\nfrom fast_transformers.sparse_product import sparse_dot_product\n\n\nclass TestSparseProductBackward(unittest.TestCase):\n @property\n def device(self):\n return \"cuda\"\n\n @classmethod\n def setUpClass(cls):\n if not torch.cuda.is_available():\n raise unittest.SkipTest(\"No CUDA capable device detected\")\n\n def _zero_grad(self, Q, K):\n for x in [Q, K]:\n if x.grad is not None:\n x.grad[...] = 0\n\n def test_simple_grad(self):\n N = 2\n H = 4\n L = 100\n S = 100\n E = 32\n k = 10\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n topk = torch.round(\n torch.cumsum(torch.rand(N, H, L, k)*10, dim=-1)\n ).long().to(self.device)\n\n self._zero_grad(Q, K)\n QK_full = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK_selected = QK_full[\n torch.arange(N).view(N, 1, 1, 1).to(self.device),\n torch.arange(H).view(1, H, 1, 1).to(self.device),\n torch.arange(L).view(1, 1, L, 1).to(self.device),\n topk\n ]\n QK_selected.sum().backward()\n grad = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n self._zero_grad(Q, K)\n QK_selected_hat = sparse_dot_product(Q, K, topk)\n QK_selected_hat.sum().backward()\n grad_hat = [torch.clone(Q.grad), torch.clone(K.grad)]\n\n self.assertLess(\n torch.abs(QK_selected - QK_selected_hat).max(),\n 1e-4\n )\n for g1, g2 in zip(grad, grad_hat):\n self.assertLess(\n torch.abs(g1 - g2).max(),\n 1e-4\n )\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_backward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n topk = torch.round(\n torch.cumsum(torch.rand(N, H, L, k)*(S//k), dim=-1)\n ).long().to(self.device)\n\n self._zero_grad(Q, K)\n for i in range(2000):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK.sum().backward()\n self._zero_grad(Q, K)\n\n s = torch.cuda.Event(enable_timing=True)\n e = torch.cuda.Event(enable_timing=True)\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n s.record()\n QK.sum().backward()\n e.record()\n torch.cuda.synchronize()\n t_full = s.elapsed_time(e)\n\n self._zero_grad(Q, K)\n for i in range(2000):\n QK = sparse_dot_product(Q, K, topk)\n QK.sum().backward()\n self._zero_grad(Q, K)\n\n s = torch.cuda.Event(enable_timing=True)\n e = torch.cuda.Event(enable_timing=True)\n QK = sparse_dot_product(Q, K, topk)\n s.record()\n QK.sum().backward()\n e.record()\n torch.cuda.synchronize()\n t_sparse = s.elapsed_time(e)\n print(\"Benchmark Backward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\n @unittest.skipUnless(os.getenv(\"BENCHMARK_TESTS\", \"\"), \"no benchmarks\")\n def test_benchmark_forward_backward(self):\n N = 12\n H = 8\n L = 1024\n S = 1024\n E = 32\n k = 32\n Q = torch.randn(N, H, L, E).to(self.device).requires_grad_(True)\n K = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)\n topk = torch.round(\n torch.cumsum(torch.rand(N, H, L, k)*(S//k), dim=-1)\n ).long().to(self.device)\n\n self._zero_grad(Q, K)\n for i in range(2000):\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK.sum().backward()\n self._zero_grad(Q, K)\n\n s = torch.cuda.Event(enable_timing=True)\n e = 
torch.cuda.Event(enable_timing=True)\n s.record()\n QK = torch.einsum(\"nhle,nhse->nhls\", Q, K)\n QK.sum().backward()\n e.record()\n torch.cuda.synchronize()\n t_full = s.elapsed_time(e)\n\n self._zero_grad(Q, K)\n for i in range(2000):\n QK = sparse_dot_product(Q, K, topk)\n QK.sum().backward()\n self._zero_grad(Q, K)\n\n s = torch.cuda.Event(enable_timing=True)\n e = torch.cuda.Event(enable_timing=True)\n s.record()\n QK = sparse_dot_product(Q, K, topk)\n QK.sum().backward()\n e.record()\n torch.cuda.synchronize()\n t_sparse = s.elapsed_time(e)\n print(\"Benchmark Forward-Backward: T_Full: {}, T_Sparse: {}\".format(t_full, t_sparse))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"#\n# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/\n# Written by Angelos Katharopoulos <[email protected]>,\n# Apoorv Vyas <[email protected]>\n#\n\n\nimport unittest\n\nimport torch\n\nfrom fast_transformers.recurrent.attention import RecurrentAttentionLayer, \\\n RecurrentFullAttention, RecurrentLinearAttention\nfrom fast_transformers.recurrent.transformers import \\\n RecurrentTransformerEncoderLayer, RecurrentTransformerEncoder\n\n\nclass TestRecurrentTransformerEncoder(unittest.TestCase):\n def test_full_attention_forward(self):\n d_model = 128\n n_heads = 4\n transformer = RecurrentTransformerEncoder([\n RecurrentTransformerEncoderLayer(\n RecurrentAttentionLayer(\n RecurrentFullAttention(),\n d_model,\n n_heads\n ),\n d_model,\n n_heads\n )\n for i in range(6)\n ])\n\n xs = []\n memory = None\n for i in range(7):\n x, memory = transformer(torch.rand(10, d_model), state=memory)\n xs.append(x)\n for i in range(7):\n self.assertEqual(xs[i].shape, (10, d_model))\n self.assertEqual(len(memory), 6)\n for i in range(6):\n self.assertEqual(len(memory[i]), 2)\n self.assertEqual(memory[i][0].shape, (10, n_heads, 7, 32))\n self.assertEqual(memory[i][1].shape, (10, n_heads, 7, 32))\n\n def test_linear_attention_forward(self):\n d_model = 128\n n_heads = 4\n d_head = d_model // n_heads\n transformer = RecurrentTransformerEncoder([\n RecurrentTransformerEncoderLayer(\n RecurrentAttentionLayer(\n RecurrentLinearAttention(d_head),\n d_model,\n n_heads\n ),\n d_model,\n n_heads\n )\n for i in range(6)\n ])\n\n xs = []\n memory = None\n for i in range(7):\n x, memory = transformer(torch.rand(10, d_model), state=memory)\n xs.append(x)\n for i in range(7):\n self.assertEqual(xs[i].shape, (10, d_model))\n self.assertEqual(len(memory), 6)\n for i in range(6):\n self.assertEqual(len(memory[i]), 2)\n self.assertEqual(memory[i][0].shape, (10, n_heads, 32, 32))\n self.assertEqual(memory[i][1].shape, (10, n_heads, 32))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"#\n# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/\n# Written by Angelos Katharopoulos <[email protected]>,\n# Apoorv Vyas <[email protected]>\n#\n\n\nimport unittest\nimport os\nimport numpy as np\nimport time\n\nimport torch\n\ntry:\n from fast_transformers.aggregate import clustered_aggregate, \\\n clustered_broadcast\nexcept ImportError:\n pass\n\n\nclass TestAggregateCPU(unittest.TestCase):\n\n def test_aggregate(self):\n N = 2\n H = 4\n L = 80\n E = 2\n C = 4\n\n for i in range(30):\n C = np.random.randint(5, 10)\n L = np.random.randint(1, 30) * C\n E = np.random.randint(10, 128)\n if os.getenv(\"VERBOSE_TESTS\", \"\"):\n print((\"Testing: N H L E C: \"\n \"{} {} {} {} {}\").format(N, H, L, E, C))\n\n x = torch.rand((N, H, L, E)).cpu()\n g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int().cpu()\n f = torch.ones(N, H, C).cpu() * (C / L)\n counts = torch.ones_like(f, dtype=torch.int32) * (L // C)\n y = torch.zeros(N, H, C, E).cpu()\n lengths = torch.full((N,), L, dtype=torch.int32).to(x.device)\n\n sorted_g, sorted_gi = torch.sort(g.view(N*H, -1), dim=-1)\n sorted_rev_gi = torch.argsort(sorted_gi, dim=-1)\n\n q_offset = torch.arange(N*H, device=x.device).unsqueeze(-1) * L\n q_flat = (sorted_gi + q_offset).reshape(-1)\n\n # sorted queries, keys, values\n s_x = x.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E)\n y = clustered_aggregate(\n s_x, sorted_g.view(N, H, -1), f, lengths, y\n )\n for i in range(C):\n self.assertLess(\n torch.abs(\n x[:, :, i::C, :].mean(2) - y[:, :, i, :]\n ).max().item(),\n 1e-6\n )\n\n def test_aggregate_masked(self):\n N = 10\n H = 3\n L = 40\n E = 32\n C = 4\n\n for i in range(30):\n C = np.random.randint(5, 10)\n L = np.random.randint(2, 30) * C\n E = np.random.randint(10, 128)\n if os.getenv(\"VERBOSE_TESTS\", \"\"):\n print((\"Testing: N H L E C: \"\n \"{} {} {} {} {}\").format(N, H, L, E, C))\n\n x = torch.rand((N, H, L, E)).cpu()\n g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int().cpu()\n g[:, :, -C:] = C + 1\n c = (L // C) - 1\n\n lengths = torch.full((N,), L-C, dtype=torch.int32).to(x.device)\n f = torch.ones(N, H, C).cpu() / float(c)\n counts = torch.ones_like(f, dtype=torch.int32) * c\n y = torch.zeros(N, H, C, E).cpu()\n\n sorted_g, sorted_gi = torch.sort(g.view(N*H, -1), dim=-1)\n sorted_rev_gi = torch.argsort(sorted_gi, dim=-1)\n\n q_offset = torch.arange(N*H, device=x.device).unsqueeze(-1) * L\n q_flat = (sorted_gi + q_offset).reshape(-1)\n\n # sorted queries, keys, values\n s_x = x.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E)\n y = clustered_aggregate(\n s_x, sorted_g.view(N, H, -1), f, lengths, y\n )\n\n for i in range(C):\n x_m = x[:, :, i::C, :][:, :, :-1, :].mean(2)\n self.assertLess(\n torch.abs(\n x_m - y[:, :, i, :]\n ).max().item(),\n 1e-6\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.abs",
"torch.full",
"torch.zeros",
"torch.einsum",
"torch.clone",
"torch.randn",
"torch.nn.init.normal_",
"torch.arange",
"torch.topk",
"torch.ones_like"
],
[
"torch.abs",
"torch.cuda.synchronize",
"torch.einsum",
"torch.clone",
"torch.cuda.Event",
"torch.randn",
"torch.rand",
"torch.cuda.is_available",
"torch.arange"
],
[
"torch.rand"
],
[
"torch.abs",
"torch.ones",
"torch.full",
"torch.zeros",
"torch.rand",
"torch.arange",
"torch.argsort",
"torch.ones_like",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
caitsithx/dogs-vs-cats-redux | [
"3ff588cac9048a3c9f5a76de842a9cd2a4140218"
] | [
"cscreendataset.py"
] | [
"import os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision import transforms\n\nimport settings\n\n# import transforms\n\nDATA_DIR = settings.DATA_DIR\nTRAIN_DIR = DATA_DIR + '/train-640'\nTEST_DIR = DATA_DIR + '/test-640'\n\n\ndef pil_load(img_path):\n with open(img_path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\nclass CommonDataSet(data.Dataset):\n def __init__(self, file_list_path, train_data=True, has_label=True,\n transform=None, split=0.8):\n df_train = pd.read_csv(file_list_path)\n df_value = df_train.values\n df_value = np.random.permutation(df_value)\n if has_label:\n split_index = int(df_value.shape[0] * split)\n if train_data:\n split_data = df_value[:split_index]\n else:\n split_data = df_value[split_index:]\n # print(split_data.shape)\n file_names = [None] * split_data.shape[0]\n labels = []\n\n for index, line in enumerate(split_data):\n f = line[0]\n labels.append(line[1:])\n file_names[index] = os.path.join(TRAIN_DIR, str(f) + '.jpg')\n\n else:\n file_names = [None] * df_train.values.shape[0]\n for index, line in enumerate(df_train.values):\n f = line[0]\n file_names[index] = TEST_DIR + '/' + str(int(f)) + '.jpg'\n # print(filenames[:100])\n self.transform = transform\n self.num = len(file_names)\n self.file_names = file_names\n self.train_data = train_data\n self.has_label = has_label\n\n if has_label:\n self.labels = np.array(labels, dtype=np.float32)\n\n def __getitem__(self, index):\n img = pil_load(self.file_names[index])\n if self.transform is not None:\n img = self.transform(img)\n\n if self.has_label:\n label = self.labels[index]\n return img, label, self.file_names[index]\n else:\n return img, self.file_names[index]\n\n def __len__(self):\n return self.num\n\n\ndef randomRotate(img):\n d = random.randint(0, 4) * 90\n img2 = img.rotate(d, resample=Image.NEAREST)\n return img2\n\n\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Scale(320),\n transforms.RandomSizedCrop(224),\n # transforms.Scale(224),\n transforms.RandomHorizontalFlip(),\n transforms.Lambda(lambda x: randomRotate(x)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'trainv3': transforms.Compose([\n transforms.Scale(480),\n transforms.RandomSizedCrop(299),\n transforms.RandomHorizontalFlip(),\n transforms.Lambda(lambda x: randomRotate(x)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'valid': transforms.Compose([\n transforms.Scale(224),\n # transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'validv3': transforms.Compose([\n transforms.Scale(299),\n # transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'test': transforms.Compose([\n transforms.Scale(224),\n # transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'testv3': transforms.Compose([\n transforms.Scale(299),\n # transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n}\n\n'''\ndsets = {x: 
datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'valid']}\ndset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=batch_size,\n shuffle=True, num_workers=4)\n for x in ['train', 'valid']}\n\ndset_sizes = {x: len(dsets[x]) for x in ['train', 'valid']}\ndset_classes = dsets['train'].classes\nsave_array(CLASSES_FILE, dset_classes)\n'''\n\n\ndef get_train_loader(model, batch_size=16, shuffle=True):\n if model.name.startswith('inception'):\n transkey = 'trainv3'\n else:\n transkey = 'train'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n print(\"train batch_size %d \" % batch_size)\n dset = CommonDataSet(DATA_DIR + '/train_labels.csv',\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\ndef get_val_loader(model, batch_size=16, shuffle=True):\n if model.name.startswith('inception'):\n transkey = 'validv3'\n else:\n transkey = 'valid'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n # train_v2.csv\n dset = CommonDataSet(DATA_DIR + '/train_labels.csv', train_data=False,\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\ndef get_test_loader(model, batch_size=16, shuffle=False):\n if model.name.startswith('inception'):\n transkey = 'testv3'\n else:\n transkey = 'test'\n if hasattr(model, 'batch_size'):\n batch_size = model.batch_size\n\n dset = CommonDataSet(DATA_DIR + '/sample_submission.csv', has_label=False,\n transform=data_transforms[transkey])\n dloader = torch.utils.data.DataLoader(dset, batch_size=batch_size,\n shuffle=shuffle, num_workers=4)\n dloader.num = dset.num\n return dloader\n\n\nif __name__ == '__main__':\n loader = get_train_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, label, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n loader = get_val_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, label, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n loader = get_test_loader()\n print(loader.num)\n for i, data in enumerate(loader):\n img, fn = data\n # print(fn)\n # print(label)\n if i > 10:\n break\n"
] | [
[
"numpy.random.permutation",
"numpy.array",
"pandas.read_csv",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
akashAD98/detectron2 | [
"295fbb8b96eda271869fc6955280d16596781766"
] | [
"detectron2/layers/batch_norm.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport torch\nimport torch.distributed as dist\nfrom fvcore.nn.distributed import differentiable_all_reduce\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..utils import comm, env\n\nfrom .wrappers import BatchNorm2d\n\n\nclass FrozenBatchNorm2d(nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n It contains non-trainable buffers called\n \"weight\" and \"bias\", \"running_mean\", \"running_var\",\n initialized to perform identity transformation.\n\n The pre-trained backbone models from Caffe2 only contain \"weight\" and \"bias\",\n which are computed from the original four parameters of BN.\n The affine transform `x * weight + bias` will perform the equivalent\n computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.\n When loading a backbone model from Caffe2, \"running_mean\" and \"running_var\"\n will be left unchanged as identity transformation.\n\n Other pre-trained backbone models may contain all 4 parameters.\n\n The forward is implemented by `F.batch_norm(..., training=False)`.\n \"\"\"\n\n _version = 3\n\n def __init__(self, num_features, eps=1e-5):\n super().__init__()\n self.num_features = num_features\n self.eps = eps\n self.register_buffer(\"weight\", torch.ones(num_features))\n self.register_buffer(\"bias\", torch.zeros(num_features))\n self.register_buffer(\"running_mean\", torch.zeros(num_features))\n self.register_buffer(\"running_var\", torch.ones(num_features) - eps)\n\n def forward(self, x):\n if x.requires_grad:\n # When gradients are needed, F.batch_norm will use extra memory\n # because its backward op computes gradients for weight/bias as well.\n scale = self.weight * (self.running_var + self.eps).rsqrt()\n bias = self.bias - self.running_mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n out_dtype = x.dtype # may be half\n return x * scale.to(out_dtype) + bias.to(out_dtype)\n else:\n # When gradients are not needed, F.batch_norm is a single fused op\n # and provide more optimization opportunities.\n return F.batch_norm(\n x,\n self.running_mean,\n self.running_var,\n self.weight,\n self.bias,\n training=False,\n eps=self.eps,\n )\n\n def _load_from_state_dict(\n self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n ):\n version = local_metadata.get(\"version\", None)\n\n if version is None or version < 2:\n # No running_mean/var in early versions\n # This will silent the warnings\n if prefix + \"running_mean\" not in state_dict:\n state_dict[prefix + \"running_mean\"] = torch.zeros_like(self.running_mean)\n if prefix + \"running_var\" not in state_dict:\n state_dict[prefix + \"running_var\"] = torch.ones_like(self.running_var)\n\n super()._load_from_state_dict(\n state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n )\n\n def __repr__(self):\n return \"FrozenBatchNorm2d(num_features={}, eps={})\".format(self.num_features, self.eps)\n\n @classmethod\n def convert_frozen_batchnorm(cls, module):\n \"\"\"\n Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.\n\n Args:\n module (torch.nn.Module):\n\n Returns:\n If module is BatchNorm/SyncBatchNorm, returns a new module.\n Otherwise, in-place convert module and return it.\n\n Similar to convert_sync_batchnorm in\n https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py\n \"\"\"\n bn_module = nn.modules.batchnorm\n bn_module = 
(bn_module.BatchNorm2d, bn_module.SyncBatchNorm)\n res = module\n if isinstance(module, bn_module):\n res = cls(module.num_features)\n if module.affine:\n res.weight.data = module.weight.data.clone().detach()\n res.bias.data = module.bias.data.clone().detach()\n res.running_mean.data = module.running_mean.data\n res.running_var.data = module.running_var.data\n res.eps = module.eps\n else:\n for name, child in module.named_children():\n new_child = cls.convert_frozen_batchnorm(child)\n if new_child is not child:\n res.add_module(name, new_child)\n return res\n\n\ndef get_norm(norm, out_channels):\n \"\"\"\n Args:\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\n or a callable that takes a channel number and returns\n the normalization layer as a nn.Module.\n\n Returns:\n nn.Module or None: the normalization layer\n \"\"\"\n if norm is None:\n return None\n if isinstance(norm, str):\n if len(norm) == 0:\n return None\n norm = {\n \"BN\": BatchNorm2d,\n # Fixed in https://github.com/pytorch/pytorch/pull/36382\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\n \"FrozenBN\": FrozenBatchNorm2d,\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\n # for debugging:\n \"nnSyncBN\": nn.SyncBatchNorm,\n \"naiveSyncBN\": NaiveSyncBatchNorm,\n }[norm]\n return norm(out_channels)\n\n\nclass NaiveSyncBatchNorm(BatchNorm2d):\n \"\"\"\n In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient\n when the batch size on each worker is different.\n (e.g., when scale augmentation is used, or when it is applied to mask head).\n\n This is a slower but correct alternative to `nn.SyncBatchNorm`.\n\n Note:\n There isn't a single definition of Sync BatchNorm.\n\n When ``stats_mode==\"\"``, this module computes overall statistics by using\n statistics of each worker with equal weight. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (N, H, W). This mode does not support inputs with zero batch size.\n\n When ``stats_mode==\"N\"``, this module computes overall statistics by weighting\n the statistics of each worker by their ``N``. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (H, W). It is slower than ``stats_mode==\"\"``.\n\n Even though the result of this module may not be the true statistics of all samples,\n it may still be reasonable because it might be preferrable to assign equal weights\n to all workers, regardless of their (H, W) dimension, instead of putting larger weight\n on larger images. 
From preliminary experiments, little difference is found between such\n a simplified implementation and an accurate computation of overall mean & variance.\n \"\"\"\n\n def __init__(self, *args, stats_mode=\"\", **kwargs):\n super().__init__(*args, **kwargs)\n assert stats_mode in [\"\", \"N\"]\n self._stats_mode = stats_mode\n\n def forward(self, input):\n if comm.get_world_size() == 1 or not self.training:\n return super().forward(input)\n\n B, C = input.shape[0], input.shape[1]\n\n half_input = input.dtype == torch.float16\n if half_input:\n # fp16 does not have good enough numerics for the reduction here\n input = input.float()\n mean = torch.mean(input, dim=[0, 2, 3])\n meansqr = torch.mean(input * input, dim=[0, 2, 3])\n\n if self._stats_mode == \"\":\n assert B > 0, 'SyncBatchNorm(stats_mode=\"\") does not support zero batch size.'\n vec = torch.cat([mean, meansqr], dim=0)\n vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())\n mean, meansqr = torch.split(vec, C)\n momentum = self.momentum\n else:\n if B == 0:\n vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)\n vec = vec + input.sum() # make sure there is gradient w.r.t input\n else:\n vec = torch.cat(\n [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0\n )\n vec = differentiable_all_reduce(vec * B)\n\n total_batch = vec[-1].detach()\n momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0\n mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero\n\n var = meansqr - mean * mean\n invstd = torch.rsqrt(var + self.eps)\n scale = self.weight * invstd\n bias = self.bias - mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n\n self.running_mean += momentum * (mean.detach() - self.running_mean)\n self.running_var += momentum * (var.detach() - self.running_var)\n ret = input * scale + bias\n if half_input:\n ret = ret.half()\n return ret\n"
] | [
[
"torch.nn.functional.batch_norm",
"torch.mean",
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.zeros_like",
"torch.rsqrt",
"torch.split",
"torch.nn.GroupNorm",
"torch.distributed.get_world_size",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DeuroIO/Deuro-tensorflow | [
"7d0fa4948a6232976c4828ef9041f92993503fd5",
"7d0fa4948a6232976c4828ef9041f92993503fd5",
"7d0fa4948a6232976c4828ef9041f92993503fd5",
"7d0fa4948a6232976c4828ef9041f92993503fd5",
"7d0fa4948a6232976c4828ef9041f92993503fd5",
"7d0fa4948a6232976c4828ef9041f92993503fd5"
] | [
"tensorflow/contrib/distribute/python/mirrored_strategy.py",
"tensorflow/python/ops/ragged/ragged_conversion_ops.py",
"tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py",
"tensorflow/python/ops/losses/losses_impl.py",
"tensorflow/python/kernel_tests/scatter_ops_test.py",
"tensorflow/python/keras/optimizer_v2/adam.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class MirroredStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nfrom functools import partial\nimport threading\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import shared_variable_creator\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import device_util\nfrom tensorflow.python.training import distribute as distribute_lib\nfrom tensorflow.python.util import nest\n\n\n# TODO(josh11b): Replace asserts in this file with if ...: raise ...\n\n\[email protected]\ndef _enter_graph(g):\n if context.executing_eagerly():\n with g.as_default(), context.eager_mode():\n yield\n else:\n with g.as_default():\n yield\n\n\ndef _cpu_device(device):\n cpu_device = tf_device.DeviceSpec.from_string(device)\n cpu_device.merge_from(tf_device.DeviceSpec(device_type=\"CPU\", device_index=0))\n return cpu_device.to_string()\n\n\nclass _RequestedStop(Exception):\n pass\n\n\n# _call_for_each_replica and _reduce_non_distributed_value are not members of\n# MirroredStrategy so that they are generally not allowed to use anything\n# specific to MirroredStrategy and thus can be shared with other distribution\n# strategies.\n\n\n# TODO(yuefengz): maybe create a common class for those who need to call this\n# _call_for_each_replica.\ndef _call_for_each_replica(distribution, fn, args, kwargs):\n \"\"\"Run `fn` in separate threads, once per replica/worker device.\n\n Args:\n distribution: the DistributionStrategy object.\n fn: function to run (will be run once per device, each in its own thread).\n args: positional arguments for `fn`\n kwargs: keyword arguments for `fn`.\n\n Returns:\n Merged return value of `fn` across all replicas.\n\n Raises:\n RuntimeError: If fn() calls get_replica_context().merge_call() a different\n number of times from the available devices.\n \"\"\"\n # TODO(josh11b): Add this option once we add synchronization to variable\n # creation. 
Until then, this is pretty unsafe to use.\n run_concurrently = False\n if not context.executing_eagerly():\n # Needed for per-thread device, etc. contexts in graph mode.\n ops.get_default_graph().switch_to_thread_local()\n\n coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))\n\n shared_variable_store = {}\n\n # TODO(isaprykin): Create these threads once instead of during every run()\n # call.\n threads = []\n for index, d in enumerate(distribution.extended.worker_devices):\n variable_creator_fn = shared_variable_creator.make_fn(\n shared_variable_store, index)\n t = MirroredExtended._MirroredReplicaThread( # pylint: disable=protected-access\n distribution, coord, d, variable_creator_fn, fn,\n *values.select_device(d, args), **values.select_device(d, kwargs))\n threads.append(t)\n\n for t in threads:\n t.start()\n\n # When `fn` starts `should_run` event is set on _MirroredReplicaThread\n # (`MRT`) threads. The execution waits until\n # `MRT.has_paused` is set, which indicates that either `fn` is\n # complete or a `get_replica_context().merge_call()` is called. If `fn` is\n # complete, then `MRT.done` is set to True. Otherwise, arguments\n # of `get_replica_context().merge_call` from all paused threads are grouped\n # and the `merge_fn` is performed. Results of the\n # `get_replica_context().merge_call` are then set to `MRT.merge_result`.\n # Each such `get_replica_context().merge_call` call returns the\n # `MRT.merge_result` for that thread when `MRT.should_run` event\n # is reset again. Execution of `fn` resumes.\n\n try:\n with coord.stop_on_exception():\n all_done = False\n while not all_done and not coord.should_stop():\n done = []\n if run_concurrently:\n for t in threads:\n t.should_run.set()\n for t in threads:\n t.has_paused.wait()\n t.has_paused.clear()\n if coord.should_stop():\n return None\n done.append(t.done)\n else:\n for t in threads:\n t.should_run.set()\n t.has_paused.wait()\n t.has_paused.clear()\n if coord.should_stop():\n return None\n done.append(t.done)\n if coord.should_stop():\n return None\n all_done = all(done)\n if not all_done:\n if any(done):\n raise RuntimeError(\"Some replicas made a different number of \"\n \"replica_context().merge_call() calls.\")\n # get_replica_context().merge_call() case\n merge_args = values.regroup({t.device: t.merge_args for t in threads})\n merge_kwargs = values.regroup(\n {t.device: t.merge_kwargs for t in threads})\n # We capture the name_scope of the MRT when we call merge_fn\n # to ensure that if we have opened a name scope in the MRT,\n # it will be respected when executing the merge function. 
We only\n # capture the name_scope from the first MRT and assume it is\n # the same for all other MRTs.\n mtt_captured_name_scope = threads[0].captured_name_scope\n with ops.name_scope(mtt_captured_name_scope):\n merge_result = threads[0].merge_fn(distribution, *merge_args,\n **merge_kwargs)\n for t in threads:\n t.merge_result = values.select_device(t.device, merge_result)\n finally:\n for t in threads:\n t.should_run.set()\n coord.join(threads)\n\n return values.regroup({t.device: t.main_result for t in threads})\n\n\ndef _reduce_non_distributed_value(extended, reduce_op, value, destinations):\n \"\"\"Reduce a non-DistributedValue `value` to `destinations`.\"\"\"\n if isinstance(value, values.DistributedValues):\n raise ValueError(\"You are passing a `DistributedValue` to \"\n \"`_reduce_non_distributed_value`, which is not allowed.\")\n\n # If the same value is present on all replicas then the PerReplica value will\n # be a single value. We also handle the case when `value` is a single value\n # and equal to 0.\n if value == 0:\n return 0\n # If there is only a single value and the reduce op is MEAN,\n # that value should be on all destinations.\n if reduce_op == reduce_util.ReduceOp.MEAN:\n return value\n\n cross_device_ops_lib.validate_destinations(destinations)\n # We do not support a reduce op of SUM if the value is the same across\n # all replicas. We call this as part of assign functions for MirroredVariables\n # and summing up identical values across replicas is not clearly defined.\n if (len(extended.worker_devices) != 1 or\n not cross_device_ops_lib.check_destinations(destinations)):\n raise ValueError(\"A non-DistributedValues value %s cannot be reduced with \"\n \"the given reduce op %s.\" % (value, reduce_op))\n # TODO(anjalisridhar): Moves these methods to a device utility file?\n devices = cross_device_ops_lib.get_devices_from(destinations)\n if len(devices) == 1:\n with ops.device(devices[0]):\n return array_ops.identity(value)\n else:\n value_updates = {}\n for d in devices:\n with ops.device(d):\n value_updates[d] = array_ops.identity(value)\n return values.Mirrored(value_updates)\n\n\ndef _create_mirrored_variable(devices, real_mirrored_creator, *args, **kwargs): # pylint: disable=g-missing-docstring\n # Figure out what collections this variable should be added to.\n # We'll add the MirroredVariable to those collections instead.\n collections = kwargs.pop(\"collections\", None)\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n kwargs[\"collections\"] = []\n\n # Get synchronization value\n synchronization = kwargs.get(\"synchronization\",\n variable_scope.VariableSynchronization.ON_WRITE)\n if synchronization == variable_scope.VariableSynchronization.NONE:\n raise ValueError(\"`NONE` variable synchronization mode is not \"\n \"supported with `Mirrored` distribution strategy. 
Please\"\n \" change the `synchronization` for variable: \" +\n kwargs[\"name\"])\n elif synchronization == variable_scope.VariableSynchronization.ON_READ:\n # Variables that are to be synced on read are replica local.\n is_replica_local = True\n kwargs[\"trainable\"] = False\n elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or\n synchronization == variable_scope.VariableSynchronization.AUTO):\n # `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`.\n is_replica_local = False\n else:\n raise ValueError(\"Invalid variable synchronization mode: \" +\n synchronization + \" for variable: \" + kwargs[\"name\"])\n\n # Get aggregation value\n aggregation = kwargs.pop(\"aggregation\",\n variable_scope.VariableAggregation.NONE)\n if aggregation not in (\n variable_scope.VariableAggregation.NONE,\n variable_scope.VariableAggregation.SUM,\n variable_scope.VariableAggregation.MEAN,\n variable_scope.VariableAggregation.ONLY_FIRST_REPLICA\n ):\n raise ValueError(\"Invalid variable aggregation mode: \" + aggregation +\n \" for variable: \" + kwargs[\"name\"])\n\n # Ignore user-specified caching device, not needed for mirrored variables.\n kwargs.pop(\"caching_device\", None)\n\n # TODO(josh11b,apassos): It would be better if variable initialization\n # was never recorded on the tape instead of having to do this manually\n # here.\n with tape.stop_recording():\n index = real_mirrored_creator(devices, *args, **kwargs)\n\n if is_replica_local:\n result = values.ReplicaLocalVariable(\n index, index[devices[0]], aggregation)\n else:\n result = values.MirroredVariable(index, index[devices[0]], aggregation)\n\n # Add the wrapped variable to the requested collections.\n # The handling of eager mode and the global step matches\n # ResourceVariable._init_from_args().\n if not context.executing_eagerly():\n g = ops.get_default_graph()\n # If \"trainable\" is True, next_creator() will add the member variables\n # to the TRAINABLE_VARIABLES collection, so we manually remove\n # them and replace with the MirroredVariable. 
We can't set\n # \"trainable\" to False for next_creator() since that causes functions\n # like implicit_gradients to skip those variables.\n if kwargs.get(\"trainable\", True):\n collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)\n l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)\n for v in index.values():\n if v in l:\n l.remove(v)\n g.add_to_collections(collections, result)\n elif ops.GraphKeys.GLOBAL_STEP in collections:\n ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)\n\n return result\n\n\nclass CoreMirroredStrategy(distribute_lib.DistributionStrategy):\n \"\"\"Mirrors vars to distribute across multiple devices and machines.\n\n *** core version ***\n\n This strategy uses one replica per device and sync replication for its\n multi-GPU version.\n\n When `cluster_spec` is given by the `configure` method., it turns into the\n mulit-worker version that works on multiple workers with in-graph replication.\n Note: `configure` will be called by higher-level APIs if running in\n distributed environment.\n\n There are several important concepts for distributed TensorFlow, e.g.\n `client`, `job`, 'task', `cluster`, `in-graph replication` and\n 'synchronous training' and they have already been defined in the\n [TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).\n The distribution strategy inherits these concepts as well and in addition to\n that we also clarify several more concepts:\n\n * **In-graph replication**: the `client` creates a single `tf.Graph` that\n specifies tasks for devices on all workers. The `client` then creates a\n client session which will talk to the `master` service of a `worker`. Then\n the `master` will partition the graph and distribute the work to all\n participating workers.\n * **Worker**: A `worker` is a TensorFlow `task` that usually maps to one\n physical machine. We will have multiple `worker`s with different `task`\n index. They all do similar things except for one worker checkpointing model\n variables, writing summaries, etc. in addition to its ordinary work.\n\n The multi-worker version of this class maps one replica to one device on a\n worker. It mirrors all model variables on all replicas. For example, if you\n have two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of\n the model variables on these 8 GPUs. Then like in MirroredStrategy, each\n replica performs their computation with their own copy of variables unless in\n cross-replica model where variable or tensor reduction happens.\n\n Args:\n devices: a list of device strings.\n num_gpus: number of GPUs. For local training, either specify `devices` or\n `num_gpus`. In distributed training, this must be specified as number of\n GPUs on each worker.\n num_gpus_per_worker: number of GPUs per worker. This is the same as\n `num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be\n specified.\n cross_device_ops: optional, a descedant of `CrossDeviceOps`. 
If this is not\n set, the `configure` method will try to find the best one.\n auto_shard_dataset: whether to auto-shard the dataset when there are\n multiple workers.\n \"\"\"\n\n def __init__(self,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n extended = CoreMirroredExtended(\n self, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops, auto_shard_dataset)\n super(CoreMirroredStrategy, self).__init__(extended)\n\n\nclass CoreMirroredExtended(distribute_lib.DistributionStrategyExtended):\n \"\"\"Implementation of CoreMirroredStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n super(CoreMirroredExtended, self).__init__(container_strategy)\n self._cross_device_ops = cross_device_ops\n self._auto_shard_dataset = auto_shard_dataset\n # Remember num GPUs which might be needed by `configure` method.\n if num_gpus is not None and num_gpus_per_worker is not None:\n raise ValueError(\n \"You cannot specify both `num_gpus` and `num_gpus_per_worker`.\")\n if num_gpus is not None:\n self._num_gpus = num_gpus\n else:\n self._num_gpus = num_gpus_per_worker\n\n self._initialize_local(self._num_gpus, devices)\n\n def _initialize_local(self, num_gpus, devices):\n \"\"\"Initializes the object for local training.\"\"\"\n self._cluster_spec = None\n # Convert `num_gpus` into `devices`, shouldn't specify both.\n if devices is None:\n if num_gpus is None:\n num_gpus = context.num_gpus()\n if num_gpus == 0:\n devices = [\"/device:CPU:0\"]\n else:\n devices = [\"/device:GPU:%d\" % d for d in range(num_gpus)]\n elif num_gpus is not None:\n raise ValueError(\"Must only specify one of `devices` and `num_gpus`.\")\n self._num_gpus = num_gpus\n # TODO(yuefengz): consider setting the default device.\n\n assert devices, \"Must specify at least one device.\"\n assert len(set(devices)) == len(devices), (\n \"No duplicates allowed in `devices` argument.\")\n # TODO(josh11b): Require at least 2 devices?\n self._devices = [device_util.resolve(d) for d in devices]\n self._canonical_device_set = set(self._devices)\n self._device_index = values.PerReplica(\n {d: i for i, d in enumerate(devices)})\n\n def _initialize_multi_worker(self, num_gpus, cluster_spec):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n self._cluster_spec = cluster_spec\n\n self._workers = []\n for job in [\"chief\", \"worker\"]:\n for task in range(len(cluster_spec.as_dict().get(job, []))):\n self._workers.append(\"/job:%s/task:%d\" % (job, task))\n\n if num_gpus is None:\n raise ValueError(\"`num_gpus` is required if `cluster_spec` is given.\")\n if num_gpus > 0:\n self._worker_devices = [\n (worker, [\n device_util.canonicalize(worker + \"/device:GPU:%d\" % gpu)\n for gpu in range(num_gpus)\n ]) for worker in self._workers\n ]\n else:\n self._worker_devices = [\n (worker, [device_util.canonicalize(worker, \"/device:CPU:0\")])\n for worker in self._workers\n ]\n\n devices = nest.flatten([l for _, l in self._worker_devices])\n\n # Setting `_default_device` will add a device scope in the\n # distribution.scope. We set the default device to the first worker. When\n # users specify device under distribution.scope by\n # with tf.device(\"/cpu:0\"):\n # ...\n # their ops will end up on the cpu device of its first worker, e.g.\n # \"/job:worker/task:0/device:CPU:0\". 
Note this is not used in replica mode.\n self._default_device = self._workers[0]\n\n assert devices, \"Must specify at least one device.\"\n assert len(set(devices)) == len(devices), (\n \"No duplicates allowed in `devices` argument.\")\n # TODO(josh11b): Require at least 2 devices?\n self._devices = [device_util.resolve(d) for d in devices]\n self._canonical_device_set = set(self._devices)\n self._device_index = values.PerReplica(\n {d: i for i, d in enumerate(devices)})\n\n def _create_variable(self, next_creator, *args, **kwargs):\n \"\"\"Create a mirrored variable. See `DistributionStrategy.scope`.\"\"\"\n colocate_with = kwargs.pop(\"colocate_with\", None)\n devices = self._get_devices_from(colocate_with)\n\n def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring\n index = {}\n for i, d in enumerate(devices):\n with ops.device(d):\n if i > 0:\n # Give replicas meaningful distinct names:\n var0name = index[devices[0]].name.split(\":\")[0]\n # We append a / to variable names created on replicas with id > 0 to\n # ensure that we ignore the name scope and instead use the given\n # name as the absolute name of the variable.\n kwargs[\"name\"] = \"%s/replica_%d/\" % (var0name, i)\n # Initialize replicas with the same value:\n def initial_value_fn(device=d):\n if context.executing_eagerly():\n init_value = index[devices[0]].value()\n return array_ops.identity(init_value)\n else:\n with ops.device(device):\n init_value = index[devices[0]].initial_value\n return array_ops.identity(init_value)\n kwargs[\"initial_value\"] = initial_value_fn\n with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):\n # Don't record operations (e.g. other variable reads) during\n # variable creation.\n with tape.stop_recording():\n v = next_creator(*args, **kwargs)\n assert not isinstance(v, values.DistributedVariable)\n index[d] = v\n return index\n\n return _create_mirrored_variable(devices, _real_mirrored_creator, *args,\n **kwargs)\n\n def _distribute_dataset(self, dataset_fn):\n if self._cluster_spec:\n return values.MultiWorkerDataset(\n partial(self._call_dataset_fn, dataset_fn), self._worker_devices,\n auto_shard=self._auto_shard_dataset)\n else:\n return values.PerReplicaDataset(\n self._call_dataset_fn(dataset_fn), self._devices)\n\n def _make_dataset_iterator(self, dataset):\n if self._cluster_spec:\n worker_device_pairs = self._worker_devices\n else:\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n return values.DatasetIterator(dataset, worker_device_pairs,\n self._num_replicas_in_sync)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n input_contexts = []\n if self._cluster_spec:\n num_workers = len(self._worker_devices)\n worker_device_pairs = self._worker_devices\n else:\n num_workers = 1\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n return values.InputFunctionIterator(\n input_fn, worker_device_pairs, input_contexts)\n\n # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.\n def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,\n initial_loop_values=None):\n if initial_loop_values is None:\n initial_loop_values = {}\n initial_loop_values = nest.flatten(initial_loop_values)\n\n ctx = values.MultiStepContext()\n def 
body(i, *args):\n \"\"\"A wrapper around `fn` to create the while loop body.\"\"\"\n del args\n fn_inputs = iterator.get_next()\n if not isinstance(fn_inputs, tuple):\n fn_inputs = (fn_inputs,)\n fn_result = fn(ctx, fn_inputs)\n for (name, output) in ctx.last_step_outputs.items():\n # Convert all outputs to tensors, potentially from `DistributedValues`.\n ctx.last_step_outputs[name] = self._unwrap(output)\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs\n\n # We capture the control_flow_context at this point, before we run `fn`\n # inside a while_loop. This is useful in cases where we might need to exit\n # these contexts and get back to the outer context to do some things, for\n # e.g. create an op which should be evaluated only once at the end of the\n # loop on the host. One such usage is in creating metrics' value op.\n self._outer_control_flow_context = (\n ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access\n\n cond = lambda i, *args: i < iterations\n i = constant_op.constant(0)\n loop_result = control_flow_ops.while_loop(\n cond, body, [i] + initial_loop_values, name=\"\",\n parallel_iterations=1, back_prop=False, swap_memory=False,\n return_same_structure=True)\n del self._outer_control_flow_context\n\n ctx.run_op = control_flow_ops.group(loop_result)\n\n # Convert the last_step_outputs from a list to the original dict structure\n # of last_step_outputs.\n last_step_tensor_outputs = loop_result[1:]\n last_step_tensor_outputs_dict = nest.pack_sequence_as(\n ctx.last_step_outputs, last_step_tensor_outputs)\n\n for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access\n output = last_step_tensor_outputs_dict[name]\n # For outputs that have already been reduced, wrap them in a Mirrored\n # container, else in a PerReplica container.\n if reduce_op is None:\n last_step_tensor_outputs_dict[name] = values.regroup(\n {d: t for d, t in zip(self._devices, output)}, values.PerReplica)\n else:\n assert len(output) == 1\n last_step_tensor_outputs_dict[name] = output[0]\n\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access\n return ctx\n\n def _broadcast_to(self, tensor, destinations):\n # This is both a fast path for Python constants, and a way to delay\n # converting Python values to a tensor until we know what type it\n # should be converted to. Otherwise we have trouble with:\n # global_step.assign_add(1)\n # since the `1` gets broadcast as an int32 but global_step is int64.\n if isinstance(tensor, (float, int)):\n return tensor\n # TODO(josh11b): In eager mode, use one thread per device, or async mode.\n return self._get_cross_device_ops().broadcast(\n tensor, destinations or self._devices)\n\n def _call_for_each_replica(self, fn, args, kwargs):\n return _call_for_each_replica(self._container_strategy(), fn, args, kwargs)\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n del task_type, task_id\n\n if session_config:\n session_config.isolate_session_state = True\n\n if cluster_spec:\n self._initialize_multi_worker(self._num_gpus, cluster_spec)\n\n if self._cross_device_ops is None:\n if self._cluster_spec:\n # It currently cannot detect the toplogy of remote workers. 
So we\n # hard-code the multi-worker all-reduce algorithm for now.\n if len(self._workers) == 1:\n # The default is \"nccl\".\n self._cross_device_ops = (\n cross_device_ops_lib.AllReduceCrossDeviceOps())\n else:\n # The default is hierarchical reduce and broadcast.\n self._cross_device_ops = cross_device_ops_lib.MultiWorkerAllReduce(\n self._workers, self._num_gpus)\n else:\n self._cross_device_ops = cross_device_ops_lib.choose_the_best(\n self._devices, session_config=session_config)\n\n def _get_cross_device_ops(self):\n if self._cross_device_ops is None:\n self._cross_device_ops = (\n cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps())\n return self._cross_device_ops\n\n def _reduce_to(self, reduce_op, value, destinations):\n assert not isinstance(value, values.Mirrored)\n if not isinstance(value, values.DistributedValues):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return _reduce_non_distributed_value(self, reduce_op, value,\n destinations)\n return self._get_cross_device_ops().reduce(\n reduce_op, value, destinations=destinations)\n\n def _batch_reduce_to(self, reduce_op, value_destination_pairs):\n return self._get_cross_device_ops().batch_reduce(reduce_op,\n value_destination_pairs)\n\n def _update(self, var, fn, args, kwargs, group):\n # TODO(josh11b): In eager mode, use one thread per device.\n assert isinstance(var, values.DistributedVariable)\n updates = {}\n for d, v in var._index.items(): # pylint: disable=protected-access\n name = \"update_%d\" % self._device_index.get(d)\n with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):\n # If args and kwargs are not mirrored, the value is returned as is.\n updates[d] = fn(v,\n *values.select_device_mirrored(d, args),\n **values.select_device_mirrored(d, kwargs))\n return values.update_regroup(self, updates, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n assert isinstance(colocate_with, list)\n # TODO(josh11b): In eager mode, use one thread per device.\n updates = {}\n for d in colocate_with:\n name = \"update_%d\" % self._device_index.get(d)\n with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):\n updates[d] = fn(*values.select_device_mirrored(d, args),\n **values.select_device_mirrored(d, kwargs))\n return values.update_regroup(self, updates, group)\n\n def read_var(self, replica_local_var):\n \"\"\"Read the aggregate value of a replica-local variable.\"\"\"\n if isinstance(replica_local_var, values.ReplicaLocalVariable):\n return replica_local_var._get_cross_replica() # pylint: disable=protected-access\n assert isinstance(replica_local_var, values.Mirrored)\n return array_ops.identity(replica_local_var.get())\n\n def _unwrap(self, val):\n if isinstance(val, values.DistributedValues):\n # Return in a deterministic order.\n if set(val.devices) == self._canonical_device_set:\n return [val.get(device=d) for d in self._devices]\n return [val.get(device=d) for d in sorted(val.devices)]\n return [val]\n\n def value_container(self, val):\n return values.value_container(val)\n\n @property\n def _num_replicas_in_sync(self):\n return len(self._devices)\n\n @property\n def worker_devices(self):\n # Make a copy to prevent users from accidentally mutating our copy.\n return list(self._devices)\n\n @property\n def parameter_devices(self):\n return list(self._devices)\n\n @property\n 
def experimental_between_graph(self):\n return False\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return True\n\n @property\n def should_save_summary(self):\n return True\n\n def non_slot_devices(self, var_list):\n del var_list\n return list(self._devices)\n\n def _get_devices_from(self, colocate_with=None):\n if colocate_with is None:\n return self._devices\n else:\n return cross_device_ops_lib.get_devices_from(colocate_with)\n\n class _MirroredReplicaThread(threading.Thread):\n \"\"\"A thread that runs() a function on a device.\"\"\"\n\n def __init__(self, dist, coord, device, variable_creator_fn, fn, *args,\n **kwargs):\n super(CoreMirroredExtended._MirroredReplicaThread, self).__init__() # pylint: disable=protected-access\n self.coord = coord\n self.distribution = dist\n self.device = device\n self.replica_id = dist.worker_devices.index(device)\n self.variable_creator_fn = variable_creator_fn\n # State needed to run and return the results of `fn`.\n self.main_fn = fn\n self.main_args = args\n self.main_kwargs = kwargs\n self.main_result = None\n self.done = False\n # State needed to run the next merge_call() (if any) requested via\n # ReplicaContext.\n self.merge_fn = None\n self.merge_args = None\n self.merge_kwargs = None\n self.merge_result = None\n self.captured_name_scope = None\n # We use a thread.Event for the main thread to signal when this\n # thread should start running (`should_run`), and another for\n # this thread to transfer control back to the main thread\n # (`has_paused`, either when it gets to a\n # `get_replica_context().merge_call` or when `fn` returns). In\n # either case the event starts cleared, is signaled by calling\n # set(). The receiving thread waits for the signal by calling\n # wait() and then immediately clearing the event using clear().\n self.should_run = threading.Event()\n self.has_paused = threading.Event()\n # These fields have to do with inheriting various contexts from the\n # parent thread:\n # pylint: disable=protected-access\n self.context_mode = context.context()._eager_context.mode\n if not context.context()._context_handle:\n context.context()._initialize_handle_and_devices()\n self.context_device_policy = (\n pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(\n context.context()._context_handle))\n self.graph = ops.get_default_graph()\n self._variable_creator_stack = self.graph._variable_creator_stack[:]\n self._captured_var_scope = variable_scope.get_variable_scope()\n # Adding a \"/\" at end lets us re-enter this scope later.\n self._name_scope = self.graph.get_name_scope()\n if self._name_scope:\n self._name_scope += \"/\"\n if self.replica_id > 0:\n if not self._name_scope:\n self._name_scope = \"\"\n self._name_scope += \"replica_%d/\" % self.replica_id\n\n def run(self):\n # pylint: disable=protected-access\n self.graph._variable_creator_stack = self._variable_creator_stack\n self.should_run.wait()\n self.should_run.clear()\n try:\n if self.coord.should_stop():\n return\n with self.coord.stop_on_exception(), \\\n context.context()._mode(self.context_mode), \\\n context.context().device_policy(self.context_device_policy), \\\n _enter_graph(self.graph), \\\n MirroredReplicaContext(self.distribution, constant_op.constant(\n self.replica_id, dtypes.int32)), \\\n ops.device(self.device), \\\n ops.name_scope(self._name_scope), \\\n variable_scope.variable_scope(\n self._captured_var_scope, reuse=self.replica_id > 0), \\\n 
variable_scope.variable_creator_scope(self.variable_creator_fn):\n self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)\n self.done = True\n finally:\n self.has_paused.set()\n\n\nclass MirroredStrategy(distribute_lib.DistributionStrategy):\n \"\"\"Mirrors vars to distribute across multiple devices and machines.\n\n *** contrib version ***\n\n This strategy uses one replica per device and sync replication for its\n multi-GPU version.\n\n When `cluster_spec` is given by the `configure` method., it turns into the\n mulit-worker version that works on multiple workers with in-graph replication.\n Note: `configure` will be called by higher-level APIs if running in\n distributed environment.\n\n There are several important concepts for distributed TensorFlow, e.g.\n `client`, `job`, 'task', `cluster`, `in-graph replication` and\n 'synchronous training' and they have already been defined in the\n [TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).\n The distribution strategy inherits these concepts as well and in addition to\n that we also clarify several more concepts:\n\n * **In-graph replication**: the `client` creates a single `tf.Graph` that\n specifies tasks for devices on all workers. The `client` then creates a\n client session which will talk to the `master` service of a `worker`. Then\n the `master` will partition the graph and distribute the work to all\n participating workers.\n * **Worker**: A `worker` is a TensorFlow `task` that usually maps to one\n physical machine. We will have multiple `worker`s with different `task`\n index. They all do similar things except for one worker checkpointing model\n variables, writing summaries, etc. in addition to its ordinary work.\n\n The multi-worker version of this class maps one replica to one device on a\n worker. It mirrors all model variables on all replicas. For example, if you\n have two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of\n the model variables on these 8 GPUs. Then like in MirroredStrategy, each\n replica performs their computation with their own copy of variables unless in\n cross-replica model where variable or tensor reduction happens.\n\n Args:\n devices: a list of device strings.\n num_gpus: number of GPUs. For local training, either specify `devices` or\n `num_gpus`. In distributed training, this must be specified as number of\n GPUs on each worker.\n num_gpus_per_worker: number of GPUs per worker. This is the same as\n `num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be\n specified.\n cross_device_ops: optional, a descedant of `CrossDeviceOps`. 
If this is not\n set, the `configure` method will try to find the best one.\n auto_shard_dataset: whether to auto-shard the dataset when there are\n multiple workers.\n cross_tower_ops: Deprecated alias for `cross_device_ops`.\n \"\"\"\n\n def __init__(self,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False,\n cross_tower_ops=None):\n assert not (cross_device_ops and cross_tower_ops)\n extended = MirroredExtended(\n self, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops or cross_tower_ops, auto_shard_dataset)\n super(MirroredStrategy, self).__init__(extended)\n\n\nclass MirroredExtended(CoreMirroredExtended):\n \"\"\"Implementation of (contrib) MirroredStrategy.\"\"\"\n\n # pylint: disable=useless-super-delegation\n def __init__(self,\n container_strategy,\n devices=None,\n num_gpus=None,\n num_gpus_per_worker=None,\n cross_device_ops=None,\n auto_shard_dataset=False):\n super(MirroredExtended, self).__init__(\n container_strategy, devices, num_gpus, num_gpus_per_worker,\n cross_device_ops, auto_shard_dataset)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Make iterator from dataset without splitting the batch.\n\n This implementation is different than the one in\n `tf.distribute.MirroredStrategy` for purposes of backward compatibility.\n We treat the incoming dataset's batch size as per replica batch size.\n\n Args:\n dataset: `tf.data.Dataset` for input.\n Returns:\n An `InputIterator` which returns inputs for each step of the computation.\n \"\"\"\n if self._cluster_spec:\n worker_device_pairs = self._worker_devices\n else:\n worker_device_pairs = [(\"/job:localhost\", self._devices)]\n return values.DatasetIterator(dataset, worker_device_pairs)\n\n\nclass MirroredReplicaContext(distribute_lib.ReplicaContext):\n \"\"\"ReplicaContext used in MirroredStrategy.call_for_each_replica().\n\n Opened in `_MirroredReplicaThread`, to allow the user to invoke\n `MirroredStrategy`'s specific implementation of `merge_call()`,\n which works by delegating the function and its arguments to\n the main thread (the one that invoked\n `MirroredStrategy.call_for_each_replica()`).\n \"\"\"\n\n def _merge_call(self, fn, args, kwargs):\n \"\"\"Delegate to the main thread to actually perform merge_call().\"\"\"\n t = threading.current_thread() # a _MirroredReplicaThread\n t.merge_fn = fn\n t.merge_args = args\n t.merge_kwargs = kwargs\n t.captured_name_scope = t.graph.get_name_scope()\n # Adding a \"/\" at end lets us re-enter this scope later.\n if t.captured_name_scope:\n t.captured_name_scope += \"/\"\n t.has_paused.set()\n t.should_run.wait()\n t.should_run.clear()\n if t.coord.should_stop():\n raise _RequestedStop()\n return t.merge_result\n\n @property\n def devices(self):\n distribute_lib.require_replica_context(self)\n replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)\n return [self._distribution_strategy.worker_devices[replica_id]]\n",
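The contrib `MirroredStrategy` defined above is normally driven through the `tf.estimator` API rather than instantiated directly by user code. The snippet below is a minimal sketch of that wiring, assuming a TensorFlow 1.x build in which `tf.contrib.distribute.MirroredStrategy` and the `train_distribute` field of `tf.estimator.RunConfig` are available, plus a machine with at least two GPUs; the `num_gpus=2` value, the toy `model_fn`/`input_fn`, and the model directory are illustrative assumptions, not part of the source above.

```python
import tensorflow as tf


def input_fn():
  # Toy dataset: a single feature vector and a scalar label, repeated.
  features = tf.data.Dataset.from_tensors([[1.0]]).repeat(100)
  labels = tf.data.Dataset.from_tensors(1.0).repeat(100)
  return tf.data.Dataset.zip((features, labels))


def model_fn(features, labels, mode):
  # One dense unit; the variable it creates is mirrored on every device.
  logits = tf.layers.Dense(1)(features)
  loss = tf.losses.mean_squared_error(
      labels=labels, predictions=tf.reshape(logits, []))
  if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = tf.train.GradientDescentOptimizer(0.2).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
  return tf.estimator.EstimatorSpec(mode, loss=loss)


# One replica per GPU; gradients are all-reduced across replicas each step.
strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=2)
config = tf.estimator.RunConfig(train_distribute=strategy)
estimator = tf.estimator.Estimator(
    model_fn=model_fn, model_dir="/tmp/mirrored_sketch", config=config)
estimator.train(input_fn=input_fn, steps=10)
```

Under this setup the strategy calls `model_fn` once per device via `_call_for_each_replica`, creates `MirroredVariable`s through the variable-creator scope shown above, and reduces gradients with the configured cross-device ops before each update.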
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ops to convert between RaggedTensors and other tensor types.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_ragged_conversion_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_util\n\n\n#===============================================================================\n# RaggedTensor <-> Tensor conversion\n#===============================================================================\ndef from_tensor(tensor, lengths=None, padding=None, ragged_rank=1, name=None):\n \"\"\"Converts a `Tensor` into a `RaggedTensor`.\n\n The set of absent/default values may be specified using a vector of lengths\n or a padding value (but not both). If `lengths` is specified, then the\n output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`.\n If `padding` is specified, then any row *suffix* consisting entirely of\n `padding` will be excluded from the returned `RaggedTensor`. If neither\n `lengths` nor `padding` is specified, then the returned `RaggedTensor` will\n have no absent/default values.\n\n Examples:\n\n ```python\n >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])\n >>> ragged.from_tensor(dt).eval().tolist()\n [[5, 7, 0], [0, 3, 0], [6, 0, 0]]\n >>> ragged.from_tensor(dt, lengths=[2, 0, 3]).eval().tolist()\n [[5, 7], [], [6, 0, 0]]\n >>> ragged.from_tensor(dt, padding=0).eval().tolist()\n [[5, 7], [0, 3], [6]]\n ```\n\n Args:\n tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or\n higher.\n lengths: An optional set of row lengths, specified using a 1-D integer\n `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows in\n `tensor`). If specified, then `output[row]` will contain\n `tensor[row][:lengths[row]]`. Negative lengths are treated as zero.\n padding: An optional padding value. If specified, then any row suffix\n consisting entirely of `padding` will be excluded from the returned\n RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`\n and with `shape=tensor.shape[ragged_rank + 1:]`.\n ragged_rank: Integer specifying the ragged rank for the returned\n `RaggedTensor`. 
Must be greater than zero.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `RaggedTensor` with the specified `ragged_rank`. The shape of the\n returned ragged tensor is compatible with the shape of `tensor`.\n Raises:\n ValueError: If both `lengths` and `padding` are specified.\n \"\"\"\n if lengths is not None and padding is not None:\n raise ValueError('Specify lengths or padding, but not both')\n if not isinstance(ragged_rank, int):\n raise TypeError('ragged_rank expected int, got %r' % ragged_rank)\n if ragged_rank <= 0:\n raise ValueError('ragged_rank must be greater than 0; got %s' % ragged_rank)\n\n with ops.name_scope(name, 'RaggedFromTensor', [tensor, lengths, padding]):\n tensor = ops.convert_to_tensor(tensor, name='tensor')\n tensor.shape.with_rank_at_least(ragged_rank + 1)\n input_shape = array_ops.shape(tensor, out_type=dtypes.int64)\n ncols = input_shape[1]\n\n # Handle ragged_rank>1 via recursion:\n # If the output should have multiple ragged dimensions, then first\n # flatten the tensor to eliminate all but the last ragged dimension,\n # and recursively convert that flattened tensor. Then add on the splits\n # for the dimensions that we flattened out.\n if ragged_rank > 1:\n # Flatten `tensor` to eliminate all but the last ragged dimension.\n new_shape = array_ops.concat(\n [constant_op.constant([-1], dtypes.int64), input_shape[ragged_rank:]],\n axis=0)\n flattened = array_ops.reshape(tensor, new_shape)\n # Recursively convert the flattened tensor.\n values = from_tensor(flattened, lengths, padding)\n # The total number of elements in each dimension. E.g., if\n # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.\n dim_size = math_ops.cumprod(input_shape)\n # Construct splits tensors for the dimensions that were flattened.\n new_splits = [\n math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]\n for dim in range(1, ragged_rank)\n ]\n return ragged_factory_ops.from_nested_row_splits(values, new_splits)\n\n # If padding was specified, then use it to find row lengths.\n if padding is not None:\n padding = ops.convert_to_tensor(\n padding, name='padding', dtype=tensor.dtype)\n padding.shape.assert_is_compatible_with(tensor.shape[2:])\n\n # Find places where the padding is equal to the tensor. (This will\n # broadcast `padding` across the outermost 2 dimensions of `tensor`,\n # so `has_default_value.shape = tensor.shape`.)\n has_default_value = math_ops.equal(padding, tensor)\n\n # If the padding isn't a scalar, then require that all values in the\n # padding match each item in the tensor. After this block of code,\n # `has_default.shape = tensor.shape[:2]`. 
(Unfortunately, we can't just\n # use reduce_all for both cases, becaue when you pass an empty `axis`\n # list to reduce_all, it reduces all axes; but we want it to reduce no\n # axes -- i.e., to be a no-op.)\n tensor_rank = array_ops.rank(tensor)\n reduce_axis = math_ops.range(2, tensor_rank)\n has_default = control_flow_ops.cond(\n tensor_rank > 2,\n lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),\n lambda: has_default_value)\n has_default.set_shape(tensor_shape.TensorShape([None, None]))\n has_default.set_shape(tensor.shape[:2])\n\n # Use has_default it to find the length of each row: for each non-default\n # item in a row, calculate the length that the row needs to have to\n # include that item; and then take the max of those values (across each\n # row).\n has_nondefault = math_ops.logical_not(has_default)\n has_nondefault = math_ops.cast(has_nondefault, dtypes.int64)\n length_for_nondefault_value = (\n has_nondefault * array_ops.expand_dims(\n math_ops.range(1, ncols + 1), 0))\n lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)\n\n # If we have lengths (either directly supplied, or computed from paddings),\n # then use those to construct splits; and then use masking to get the\n # corresponding values.\n if lengths is not None:\n lengths = ragged_util.convert_to_int_tensor(lengths, 'lengths',\n dtypes.int64)\n lengths.shape.assert_has_rank(1)\n lengths = math_ops.minimum(lengths, ncols)\n lengths = math_ops.maximum(lengths, 0)\n limits = math_ops.cumsum(lengths)\n splits = array_ops.concat(\n [array_ops.zeros([1], dtypes.int64), limits], axis=0)\n mask = array_ops.sequence_mask(lengths, maxlen=ncols)\n values = array_ops.boolean_mask(tensor, mask)\n return ragged_factory_ops.from_row_splits(values, splits)\n\n # If neither padding nor lengths were specified, then create a splits\n # vector that contains no default values, and reshape the input tensor\n # to form the values for the RaggedTensor.\n nrows = input_shape[0]\n nvals = nrows * ncols\n splits = math_ops.range(nrows + 1) * ncols\n values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0)\n values = array_ops.reshape(tensor, values_shape)\n return ragged_factory_ops.from_row_splits(values, splits)\n\n\ndef to_tensor(rt_input, default_value=None, name=None):\n \"\"\"Converts a `RaggedTensor` into a `Tensor`.\n\n Example:\n\n ```python\n >>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]])\n >>> print ragged.to_tensor(rt).eval()\n [[9 8 7]\n [0 0 0]\n [6 5 0]\n [4 0 0]]\n ```\n\n Args:\n rt_input: The input `RaggedTensor`.\n default_value: Value to set for indices not specified in `rt_input`.\n Defaults to zero. `default_value` must be broadcastable to\n `rt_input.shape[rt_input.ragged_rank + 1:]`.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `Tensor` with shape `ragged.bounding_shape(rt_input)` and the\n values specified by the non-empty values in `rt_input`. 
Empty values are\n assigned `default_value`.\n \"\"\"\n with ops.name_scope(name, 'RaggedToTensor', [rt_input, default_value]):\n rt_input = ragged_factory_ops.convert_to_tensor_or_ragged_tensor(\n rt_input, name='rt_input')\n if not ragged_tensor.is_ragged(rt_input):\n return rt_input # already dense\n if default_value is not None:\n default_value = ops.convert_to_tensor(\n default_value, name='default_value', dtype=rt_input.dtype)\n\n # If ragged_rank > 1, then recursively convert the ragged values into a\n # `Tensor` before we proceed.\n values = rt_input.values\n if ragged_tensor.is_ragged(values):\n values = to_tensor(values, default_value)\n\n # Tile the default value, if necessary.\n if default_value is not None:\n if values.shape.ndims is not None:\n default_value.shape.with_rank_at_most(values.shape.ndims - 1)\n if (values.shape.ndims is None or default_value.shape.ndims is None or\n values.shape.ndims != default_value.shape.ndims + 1):\n value_shape = array_ops.shape(values)[1:]\n default_value = array_ops.broadcast_to(default_value, value_shape)\n default_value.shape.assert_is_compatible_with(values.shape[1:])\n\n # Get the expected dense shape ([nrows, ncols] + value_shape).\n rt_row_lengths = [rt_input.row_splits[1:] - rt_input.row_splits[:-1]]\n nrows = array_ops.shape(rt_input.row_splits, out_type=dtypes.int64)[0] - 1\n ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0)\n values_shape = array_ops.shape(values, out_type=dtypes.int64)\n value_shape = values_shape[1:]\n nvals = values_shape[0]\n\n # Build a default value if none was supplied.\n if default_value is None:\n default_value = array_ops.zeros(value_shape, dtype=values.dtype)\n default_value.shape.assert_is_compatible_with(values.shape[1:])\n default_value.set_shape(values.shape[1:])\n\n # Get the row start indices, and expand to shape=[nrows, 1].\n starts = array_ops.expand_dims(rt_input.row_splits[:-1], 1)\n\n # Get the row limit indices, and expand to shape=[nrows, 1].\n limits = array_ops.expand_dims(rt_input.row_splits[1:], 1)\n\n # Get the column indices, and expand to shape=[1, ncols].\n columns = array_ops.expand_dims(math_ops.range(0, ncols), 0)\n\n # Build a list containing the values plus the default value. We will use\n # tf.gather to collect values from this list for the `Tensor` (using\n # nvals as the index for the default value).\n values_and_default = array_ops.concat(\n [values, array_ops.stack([default_value])], axis=0)\n\n # Construct a matrix \"indices\" pointing into values_and_default. 
I.e.,\n # output[r, c] = values_and_default[indices[r, c].\n nondefault_index = starts + columns\n has_value = nondefault_index < limits\n default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals)\n indices = array_ops.where(has_value, nondefault_index, default_index)\n\n # Gather the results into a `Tensor`.\n return array_ops.gather(values_and_default, indices)\n\n\n#===============================================================================\n# RaggedTensor <-> SparseTensor conversion\n#===============================================================================\ndef to_sparse(rt_input, name=None):\n \"\"\"Converts a `RaggedTensor` into a sparse tensor.\n\n Example:\n\n ```python\n >>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]])\n >>> ragged.to_sparse(rt).eval()\n SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]],\n values=[1, 2, 3, 4, 5, 6],\n dense_shape=[4, 3])\n ```\n\n Args:\n rt_input: The input `RaggedTensor`.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A SparseTensor with the same values as `rt_input`.\n \"\"\"\n if not ragged_tensor.is_ragged(rt_input):\n raise TypeError('Expected RaggedTensor, got %s' % type(rt_input).__name__)\n with ops.name_scope(name, 'RaggedToSparse', [rt_input]):\n rt_input = ragged_factory_ops.convert_to_tensor_or_ragged_tensor(\n rt_input, name='rt_input')\n result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(\n rt_input.nested_row_splits, rt_input.inner_values, name=name)\n return sparse_tensor.SparseTensor(\n result.sparse_indices, result.sparse_values, result.sparse_dense_shape)\n\n\[email protected]('RaggedTensorToSparse')\ndef _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,\n sparse_values_grad,\n unused_sparse_shape_grad):\n \"\"\"Gradient for ragged.to_sparse.\"\"\"\n op_inputs_nested_row_splits = op.inputs[:-1]\n op_inputs_inner_values = op.inputs[-1]\n\n # No gradient for the RaggedTensor's nested_row_splits.\n nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)\n\n # Gradient for the RaggedTensor's inner_values is formed by reshaping\n # the gradient for the SparseTensor's values.\n inner_values_shape = array_ops.shape(op_inputs_inner_values)\n inner_values_gradient = array_ops.reshape(sparse_values_grad,\n inner_values_shape)\n\n return nested_row_splits_gradient + [inner_values_gradient]\n\n\ndef from_sparse(st_input, name=None):\n \"\"\"Converts a 2D `SparseTensor` to a `RaggedTensor`.\n\n Each row of the `output` `RaggedTensor` will contain the explicit values from\n the same row in `st_input`. `st_input` must be ragged-right. If not it is\n not ragged-right, then an error will be generated.\n\n Example:\n\n ```python\n >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],\n ... values=[1, 2, 3, 4, 5],\n ... dense_shape=[4, 3])\n >>> ragged.from_sparse(st).eval().tolist()\n [[1, 2, 3], [4], [], [5]]\n ```\n\n Currently, only two-dimensional `SparseTensors` are supported.\n\n Args:\n st_input: The sparse tensor to convert. 
Must have rank 2.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `RaggedTensor` with the same values as `st_input`.\n `output.ragged_rank = rank(st_input) - 1`.\n `output.shape = [st_input.dense_shape[0], None]`.\n Raises:\n ValueError: If the number of dimensions in `st_input` is not known\n statically, or is not two.\n \"\"\"\n if not sparse_tensor.is_sparse(st_input):\n raise TypeError('Expected SparseTensor, got %s' % type(st_input).__name__)\n with ops.name_scope(name, 'RaggedFromSparse', [st_input]):\n st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(\n st_input, name='rt_input')\n\n if (st_input.dense_shape.shape.ndims != 2 and\n st_input.indices.shape.ndims is None or\n st_input.indices.shape.dims[1].value != 2):\n raise ValueError('rank(st_input) must be 2')\n\n with ops.control_dependencies(\n _assert_sparse_indices_are_ragged_right(st_input.indices)):\n # Treat sparse row indices as segment ids to generate a splits tensor that\n # we can pair with the sparse tensor values. (Ignore sparse column\n # indices.)\n segment_ids = st_input.indices[:, 0]\n num_segments = st_input.dense_shape[0]\n return ragged_factory_ops.from_value_rowids(st_input.values, segment_ids,\n num_segments)\n\n\ndef _assert_sparse_indices_are_ragged_right(indices):\n \"\"\"Checks that the given SparseTensor.indices tensor is ragged-right.\n\n Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right\n because the entry `[3, 1]` skips a cell.\n\n Args:\n indices: The SparseTensor indices to check.\n\n Returns:\n A list of control dependency op tensors.\n \"\"\"\n index_prefix = indices[:, :-1]\n index_suffix = indices[:, -1]\n\n # Check whether each index is starting a new row in the innermost dimension\n # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).\n # (Note: this skips the first index; we will check that separately below.)\n index_prefix_changed = math_ops.reduce_any(\n math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)\n\n # Check two cases:\n # * For indices that start a new row: index_suffix[i] must be zero.\n # * For indices that continue a row: index_suffix[i] must be equal to\n # index_suffix[i-1]+1.\n index_ok = array_ops.where(\n index_prefix_changed, math_ops.equal(index_suffix[1:], 0),\n math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))\n\n # Also check that the very first index didn't skip any cells. The first\n # index starts a new row (by definition), so its suffix should be zero.\n sparse_indices_are_ragged_right = math_ops.logical_and(\n math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),\n math_ops.reduce_all(index_ok))\n\n message = [\n 'SparseTensor is not right-ragged',\n 'SparseTensor.indices =', indices\n ]\n return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]\n",
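Taken together, the conversion ops above let ragged data round-trip between dense, ragged, and sparse encodings. Below is a minimal sketch of that round trip, assuming the 1.x-era module layout used in this file (`from tensorflow.python.ops import ragged`) and graph-mode execution; in later releases these functions became methods on `tf.RaggedTensor` itself.

```python
import tensorflow as tf
from tensorflow.python.ops import ragged

# Dense input whose trailing zeros should be treated as padding.
dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])

rt = ragged.from_tensor(dt, padding=0)   # rows: [5, 7], [0, 3], [6]
st = ragged.to_sparse(rt)                # SparseTensor with the same values
dense_again = ragged.to_tensor(rt)       # shape [3, 2]; missing cells -> 0

with tf.Session() as sess:
  print(sess.run(st).indices)
  print(sess.run(dense_again))
```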
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ragged.from_sparse.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.platform import googletest\n\n\nclass RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):\n\n def testDocStringExample(self):\n st = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],\n values=[1, 2, 3, 4, 5],\n dense_shape=[4, 3])\n rt = ragged.from_sparse(st)\n\n with self.test_session():\n self.assertEqual(rt.eval().tolist(), [[1, 2, 3], [4], [], [5]])\n\n def testEmpty(self):\n st = sparse_tensor.SparseTensor(\n indices=array_ops.zeros([0, 2], dtype=dtypes.int64),\n values=[],\n dense_shape=[4, 3])\n rt = ragged.from_sparse(st)\n\n with self.test_session():\n self.assertEqual(rt.eval().tolist(), [[], [], [], []])\n\n def testBadSparseTensorRank(self):\n st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3])\n st2 = sparse_tensor.SparseTensor(\n indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3])\n st3 = sparse_tensor.SparseTensor(\n indices=array_ops.placeholder(dtypes.int64),\n values=[0],\n dense_shape=array_ops.placeholder(dtypes.int64))\n self.assertRaisesRegexp(ValueError, r'rank\\(st_input\\) must be 2',\n ragged.from_sparse, st1)\n self.assertRaisesRegexp(ValueError, r'rank\\(st_input\\) must be 2',\n ragged.from_sparse, st2)\n self.assertRaisesRegexp(ValueError, r'rank\\(st_input\\) must be 2',\n ragged.from_sparse, st3)\n\n def testNonRaggedSparseTensor(self):\n # \"index_suffix\" means the value of the innermost dimension of the index\n # (i.e., indices[i][-1]).\n # See comments in _assert_sparse_indices_are_ragged_right() for more\n # details/background.\n\n # index_suffix of first index is not zero.\n st1 = sparse_tensor.SparseTensor(\n indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])\n # index_suffix of an index that starts a new row is not zero.\n st2 = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3])\n # index_suffix of an index that continues a row skips a cell.\n st3 = sparse_tensor.SparseTensor(\n indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3])\n rt1 = ragged.from_sparse(st1)\n rt2 = ragged.from_sparse(st2)\n rt3 = ragged.from_sparse(st3)\n with self.test_session():\n self.assertRaisesRegexp(errors.InvalidArgumentError,\n r'.*SparseTensor is not right-ragged', rt1.eval)\n self.assertRaisesRegexp(errors.InvalidArgumentError,\n r'.*SparseTensor is not 
right-ragged', rt2.eval)\n self.assertRaisesRegexp(errors.InvalidArgumentError,\n r'.*SparseTensor is not right-ragged', rt3.eval)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
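The "ragged-right" property these tests exercise can be summarized with a small sketch (again assuming the same 1.x-era `ragged` module and graph mode; the example tensors are illustrative): within each row of the `SparseTensor`, column indices must start at 0 and be contiguous, otherwise the runtime assert added by `_assert_sparse_indices_are_ragged_right` fires.

```python
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import ragged

# Ragged-right: every row's column indices start at 0 and have no gaps.
ok = sparse_tensor.SparseTensor(
    indices=[[0, 0], [0, 1], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])
# Not ragged-right: row 0 starts at column 1, so a cell is skipped.
bad = sparse_tensor.SparseTensor(
    indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])

with tf.Session():
  print(ragged.from_sparse(ok).eval().tolist())   # [[1, 2], [], [3]]
  try:
    ragged.from_sparse(bad).eval()
  except tf.errors.InvalidArgumentError:
    print("rejected: SparseTensor is not right-ragged")
```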
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of Loss operations for use in neural networks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import confusion_matrix\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.ops.losses import util\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.deprecation import deprecated_argument_lookup\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"losses.Reduction\", v1=[])\nclass ReductionV2(object):\n \"\"\"Types of loss reduction.\n\n Contains the following values:\n `NONE`: Un-reduced weighted losses with the same shape as input.\n `SUM`: Scalar sum of weighted losses.\n `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.\n \"\"\"\n\n NONE = \"none\"\n SUM = \"weighted_sum\"\n SUM_OVER_BATCH_SIZE = \"weighted_sum_over_batch_size\"\n\n @classmethod\n def all(cls):\n return (cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)\n\n @classmethod\n def validate(cls, key):\n if key not in cls.all():\n raise ValueError(\"Invalid Reduction Key %s.\" % key)\n\n\n@tf_export(v1=[\"losses.Reduction\"])\nclass Reduction(ReductionV2):\n \"\"\"Types of loss reduction.\n\n Contains the following values:\n `NONE`: Un-reduced weighted losses with the same shape as input.\n `SUM`: Scalar sum of weighted losses.\n `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED.\n `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.\n `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero\n weights. 
DEPRECATED.\n `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`.\n \"\"\"\n\n MEAN = \"weighted_mean\"\n SUM_BY_NONZERO_WEIGHTS = \"weighted_sum_by_nonzero_weights\"\n SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS\n\n @classmethod\n def all(cls):\n return (\n cls.NONE,\n cls.SUM,\n cls.MEAN,\n cls.SUM_OVER_BATCH_SIZE,\n cls.SUM_OVER_NONZERO_WEIGHTS,\n cls.SUM_BY_NONZERO_WEIGHTS)\n\n @classmethod\n def validate(cls, key):\n if key not in cls.all():\n raise ValueError(\"Invalid Reduction Key %s.\" % key)\n\n\ndef _safe_div(numerator, denominator, name=\"value\"):\n \"\"\"Computes a safe divide which returns 0 if the denominator is zero.\n\n Note that the function contains an additional conditional check that is\n necessary for avoiding situations where the loss is zero causing NaNs to\n creep into the gradient computation.\n\n Args:\n numerator: An arbitrary `Tensor`.\n denominator: A `Tensor` whose shape matches `numerator` and whose values are\n assumed to be non-negative.\n name: An optional name for the returned op.\n\n Returns:\n The element-wise value of the numerator divided by the denominator.\n \"\"\"\n if compat.forward_compatible(2018, 11, 1):\n return math_ops.div_no_nan(numerator, denominator, name=name)\n return array_ops.where(\n math_ops.greater(denominator, 0),\n math_ops.div(numerator,\n array_ops.where(\n math_ops.equal(denominator, 0),\n array_ops.ones_like(denominator), denominator)),\n array_ops.zeros_like(numerator),\n name=name)\n\n\ndef _safe_mean(losses, num_present):\n \"\"\"Computes a safe mean of the losses.\n\n Args:\n losses: `Tensor` whose elements contain individual loss measurements.\n num_present: The number of measurable elements in `losses`.\n\n Returns:\n A scalar representing the mean of `losses`. If `num_present` is zero,\n then zero is returned.\n \"\"\"\n total_loss = math_ops.reduce_sum(losses)\n return _safe_div(total_loss, num_present)\n\n\ndef _num_present(losses, weights, per_batch=False):\n \"\"\"Computes the number of elements in the loss function induced by `weights`.\n\n A given weights tensor induces different numbers of usable elements in the\n `losses` tensor. The `weights` tensor is broadcast across `losses` for all\n possible dimensions. For example, if `losses` is a tensor of dimension\n `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,\n in effect, tiled to match the shape of `losses`. Following this effective\n tile, the total number of present elements is the number of non-zero weights.\n\n Args:\n losses: `Tensor` of shape `[batch_size, d1, ... dN]`.\n weights: `Tensor` of shape `[]`, `[batch_size]` or\n `[batch_size, d1, ... dK]`, where K < N.\n per_batch: Whether to return the number of elements per batch or as a sum\n total.\n\n Returns:\n The number of present (non-zero) elements in the losses tensor. If\n `per_batch` is `True`, the value is returned as a tensor of size\n `[batch_size]`. 
Otherwise, a single scalar tensor is returned.\n \"\"\"\n if ((isinstance(weights, float) and weights != 0.0) or\n (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access\n and not math_ops.equal(weights, 0.0))):\n return _num_elements(losses)\n with ops.name_scope(None, \"num_present\", (losses, weights)) as scope:\n weights = math_ops.to_float(weights)\n present = array_ops.where(\n math_ops.equal(weights, 0.0),\n array_ops.zeros_like(weights),\n array_ops.ones_like(weights))\n present = weights_broadcast_ops.broadcast_weights(present, losses)\n if per_batch:\n return math_ops.reduce_sum(\n present,\n axis=math_ops.range(1, array_ops.rank(present)),\n keepdims=True,\n name=scope)\n return math_ops.reduce_sum(present, name=scope)\n\n\ndef _num_elements(losses):\n \"\"\"Computes the number of elements in `losses` tensor.\"\"\"\n with ops.name_scope(None, \"num_elements\", values=[losses]) as scope:\n return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)\n\n\n@tf_export(\"losses.compute_weighted_loss\")\ndef compute_weighted_loss(\n losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Computes the weighted loss.\n\n Args:\n losses: `Tensor` of shape `[batch_size, d1, ... dN]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `losses`, and must be broadcastable to `losses` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n scope: the scope for the operations performed in computing the loss.\n loss_collection: the loss will be added to these collections.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss `Tensor` of the same type as `losses`. If `reduction` is\n `NONE`, this has the same shape as `losses`; otherwise, it is scalar.\n\n Raises:\n ValueError: If `weights` is `None` or the shape is not compatible with\n `losses`, or if the number of dimensions (rank) of either `losses` or\n `weights` is missing.\n\n Note:\n When calculating the gradient of a weighted loss contributions from\n both `losses` and `weights` are considered. If your `weights` depend\n on some model parameters but you do not want this to affect the loss\n gradient, you need to apply `tf.stop_gradient` to `weights` before\n passing them to `compute_weighted_loss`.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n Reduction.validate(reduction)\n with ops.name_scope(scope, \"weighted_loss\", (losses, weights)):\n # Save the `reduction` argument for loss normalization when distributing\n # to multiple replicas.\n # TODO(josh11b): Associate it with the returned op for more precision.\n ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access\n\n with ops.control_dependencies((\n weights_broadcast_ops.assert_broadcastable(weights, losses),)):\n losses = ops.convert_to_tensor(losses)\n input_dtype = losses.dtype\n losses = math_ops.to_float(losses)\n weights = math_ops.to_float(weights)\n weighted_losses = math_ops.multiply(losses, weights)\n if reduction == Reduction.NONE:\n loss = weighted_losses\n else:\n loss = math_ops.reduce_sum(weighted_losses)\n if reduction == Reduction.MEAN:\n loss = _safe_mean(\n loss,\n math_ops.reduce_sum(array_ops.ones_like(losses) * weights))\n elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or\n reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS):\n loss = _safe_mean(loss, _num_present(losses, weights))\n elif reduction == Reduction.SUM_OVER_BATCH_SIZE:\n loss = _safe_mean(loss, _num_elements(losses))\n\n # Convert the result back to the input type.\n loss = math_ops.cast(loss, input_dtype)\n util.add_loss(loss, loss_collection)\n return loss\n\n\n@tf_export(\"losses.absolute_difference\")\ndef absolute_difference(\n labels, predictions, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Adds an Absolute Difference loss to the training procedure.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided, then\n the loss is simply scaled by the given value. If `weights` is a `Tensor` of\n shape `[batch_size]`, then the total loss for each sample of the batch is\n rescaled by the corresponding element in the `weights` vector. If the shape of\n `weights` matches the shape of `predictions`, then the loss of each\n measurable element of `predictions` is scaled by the corresponding value of\n `weights`.\n\n Args:\n labels: The ground truth output tensor, same dimensions as 'predictions'.\n predictions: The predicted outputs.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `predictions` doesn't match that of\n `labels` or if the shape of `weights` is invalid or if `labels`\n or `predictions` is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"absolute_difference\",\n (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions)\n labels = math_ops.to_float(labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n losses = math_ops.abs(math_ops.subtract(predictions, labels))\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.cosine_distance\")\n@deprecated_args(None, \"dim is deprecated, use axis instead\", \"dim\")\ndef cosine_distance(\n labels, predictions, axis=None, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,\n dim=None):\n \"\"\"Adds a cosine-distance loss to the training procedure.\n\n Note that the function assumes that `predictions` and `labels` are already\n unit-normalized.\n\n Args:\n labels: `Tensor` whose shape matches 'predictions'\n predictions: An arbitrary matrix.\n axis: The dimension along which the cosine distance is computed.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: Type of reduction to apply to loss.\n dim: The old (deprecated) name for `axis`.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If `predictions` shape doesn't match `labels` shape, or\n `axis`, `labels`, `predictions` or `weights` is `None`.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n axis = deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n raise ValueError(\"You must specify 'axis'.\")\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"cosine_distance_loss\",\n (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions)\n labels = math_ops.to_float(labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n radial_diffs = math_ops.multiply(predictions, labels)\n losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.hinge_loss\")\ndef hinge_loss(labels, logits, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Adds a hinge loss to the training procedure.\n\n Args:\n labels: The ground truth output tensor. Its shape should match the shape of\n logits. The values of the tensor are expected to be 0.0 or 1.0. Internally\n the {0,1} labels are converted to {-1,1} when calculating the hinge loss.\n logits: The logits, a float tensor. 
Note that logits are assumed to be\n unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive\n (resp. negative) binary prediction.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shapes of `logits` and `labels` don't match or\n if `labels` or `logits` is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if logits is None:\n raise ValueError(\"logits must not be None.\")\n with ops.name_scope(scope, \"hinge_loss\", (logits, labels, weights)) as scope:\n logits = math_ops.to_float(logits)\n labels = math_ops.to_float(labels)\n logits.get_shape().assert_is_compatible_with(labels.get_shape())\n # We first need to convert binary labels to -1/1 labels (as floats).\n all_ones = array_ops.ones_like(labels)\n labels = math_ops.subtract(2 * labels, all_ones)\n losses = nn_ops.relu(\n math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.huber_loss\")\ndef huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Adds a Huber Loss term to the training procedure.\n\n For each value x in `error=labels-predictions`, the following is calculated:\n\n ```\n 0.5 * x^2 if |x| <= d\n 0.5 * d^2 + d * (|x| - d) if |x| > d\n ```\n\n where d is `delta`.\n\n See: https://en.wikipedia.org/wiki/Huber_loss\n\n `weights` acts as a coefficient for the loss. If a scalar is provided, then\n the loss is simply scaled by the given value. If `weights` is a tensor of size\n `[batch_size]`, then the total loss for each sample of the batch is rescaled\n by the corresponding element in the `weights` vector. If the shape of\n `weights` matches the shape of `predictions`, then the loss of each\n measurable element of `predictions` is scaled by the corresponding value of\n `weights`.\n\n Args:\n labels: The ground truth output tensor, same dimensions as 'predictions'.\n predictions: The predicted outputs.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n delta: `float`, the point where the huber loss function\n changes from a quadratic to linear.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss float `Tensor`. 
If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or\n `predictions` is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"huber_loss\",\n (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions)\n labels = math_ops.to_float(labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n error = math_ops.subtract(predictions, labels)\n abs_error = math_ops.abs(error)\n quadratic = math_ops.minimum(abs_error, delta)\n # The following expression is the same in value as\n # tf.maximum(abs_error - delta, 0), but importantly the gradient for the\n # expression when abs_error == delta is 0 (for tf.maximum it would be 1).\n # This is necessary to avoid doubling the gradient, since there is already a\n # nonzero contribution to the gradient from the quadratic term.\n linear = math_ops.subtract(abs_error, quadratic)\n losses = math_ops.add(\n math_ops.multiply(\n ops.convert_to_tensor(0.5, dtype=quadratic.dtype),\n math_ops.multiply(quadratic, quadratic)),\n math_ops.multiply(delta, linear))\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.log_loss\")\ndef log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Adds a Log Loss term to the training procedure.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided, then\n the loss is simply scaled by the given value. If `weights` is a tensor of size\n `[batch_size]`, then the total loss for each sample of the batch is rescaled\n by the corresponding element in the `weights` vector. If the shape of\n `weights` matches the shape of `predictions`, then the loss of each\n measurable element of `predictions` is scaled by the corresponding value of\n `weights`.\n\n Args:\n labels: The ground truth output tensor, same dimensions as 'predictions'.\n predictions: The predicted outputs.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n epsilon: A small increment to add to avoid taking a log of zero.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or `predictions`\n is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"log_loss\",\n (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions)\n labels = math_ops.to_float(labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n losses = -math_ops.multiply(\n labels,\n math_ops.log(predictions + epsilon)) - math_ops.multiply(\n (1 - labels), math_ops.log(1 - predictions + epsilon))\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n# TODO(b/37208492): Add reduction arg.\n@tf_export(\"losses.mean_pairwise_squared_error\")\ndef mean_pairwise_squared_error(\n labels, predictions, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES):\n \"\"\"Adds a pairwise-errors-squared loss to the training procedure.\n\n Unlike `mean_squared_error`, which is a measure of the differences between\n corresponding elements of `predictions` and `labels`,\n `mean_pairwise_squared_error` is a measure of the differences between pairs of\n corresponding elements of `predictions` and `labels`.\n\n For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are\n three pairs of differences are summed to compute the loss:\n loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3\n\n Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the\n corresponding pairs are computed within each batch sample but not across\n samples within a batch. For example, if `predictions` represents a batch of\n 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs\n is drawn from each image, but not across images.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided, then\n the loss is simply scaled by the given value. If `weights` is a tensor of size\n `[batch_size]`, then the total loss for each sample of the batch is rescaled\n by the corresponding element in the `weights` vector.\n\n Args:\n labels: The ground truth output tensor, whose shape must match the shape of\n `predictions`.\n predictions: The predicted outputs, a tensor of size\n `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in\n `predictions`.\n weights: Coefficients for the loss a scalar, a tensor of shape\n `[batch_size]` or a tensor whose shape matches `predictions`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n\n Returns:\n A scalar `Tensor` that returns the weighted loss.\n\n Raises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or `predictions`\n is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"mean_pairwise_squared_error\",\n (predictions, labels, weights)) as scope:\n weights = math_ops.to_float(weights)\n labels = math_ops.to_float(labels)\n with ops.control_dependencies((\n weights_broadcast_ops.assert_broadcastable(weights, labels),)):\n predictions = math_ops.to_float(predictions)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n diffs = math_ops.subtract(predictions, labels)\n\n reduction_indices = math_ops.range(1, array_ops.rank(diffs))\n\n sum_squares_diff_per_batch = math_ops.reduce_sum(\n math_ops.square(diffs),\n reduction_indices=reduction_indices,\n keepdims=True)\n num_present_per_batch = _num_present(diffs, weights, per_batch=True)\n\n term1 = 2.0 * _safe_div(\n sum_squares_diff_per_batch,\n math_ops.maximum(num_present_per_batch - 1, 0))\n\n sum_diff = math_ops.reduce_sum(\n diffs, reduction_indices=reduction_indices, keepdims=True)\n term2 = 2.0 * _safe_div(\n math_ops.square(sum_diff),\n math_ops.maximum(\n math_ops.multiply(num_present_per_batch,\n num_present_per_batch - 1),\n 0))\n\n weighted_losses = math_ops.multiply(term1 - term2, weights)\n loss = math_ops.reduce_sum(weighted_losses)\n\n mean_loss = array_ops.where(\n math_ops.reduce_sum(num_present_per_batch) > 0,\n loss,\n array_ops.zeros_like(loss),\n name=\"value\")\n util.add_loss(mean_loss, loss_collection)\n return mean_loss\n\n\n@tf_export(\"losses.mean_squared_error\")\ndef mean_squared_error(\n labels, predictions, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Adds a Sum-of-Squares loss to the training procedure.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided, then\n the loss is simply scaled by the given value. If `weights` is a tensor of size\n `[batch_size]`, then the total loss for each sample of the batch is rescaled\n by the corresponding element in the `weights` vector. If the shape of\n `weights` matches the shape of `predictions`, then the loss of each\n measurable element of `predictions` is scaled by the corresponding value of\n `weights`.\n\n Args:\n labels: The ground truth output tensor, same dimensions as 'predictions'.\n predictions: The predicted outputs.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or `predictions`\n is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if predictions is None:\n raise ValueError(\"predictions must not be None.\")\n with ops.name_scope(scope, \"mean_squared_error\",\n (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions)\n labels = math_ops.to_float(labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n losses = math_ops.squared_difference(predictions, labels)\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.sigmoid_cross_entropy\")\ndef sigmoid_cross_entropy(\n multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided,\n then the loss is simply scaled by the given value. If `weights` is a\n tensor of shape `[batch_size]`, then the loss weights apply to each\n corresponding sample.\n\n If `label_smoothing` is nonzero, smooth the labels towards 1/2:\n\n new_multiclass_labels = multiclass_labels * (1 - label_smoothing)\n + 0.5 * label_smoothing\n\n Args:\n multi_class_labels: `[batch_size, num_classes]` target integer labels in\n `{0, 1}`.\n logits: Float `[batch_size, num_classes]` logits outputs of the network.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n label_smoothing: If greater than `0` then smooth the labels.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss `Tensor` of the same type as `logits`. If `reduction` is\n `NONE`, this has the same shape as `logits`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `logits` doesn't match that of\n `multi_class_labels` or if the shape of `weights` is invalid, or if\n `weights` is None. Also if `multi_class_labels` or `logits` is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if multi_class_labels is None:\n raise ValueError(\"multi_class_labels must not be None.\")\n if logits is None:\n raise ValueError(\"logits must not be None.\")\n with ops.name_scope(scope, \"sigmoid_cross_entropy_loss\",\n (logits, multi_class_labels, weights)) as scope:\n logits = ops.convert_to_tensor(logits)\n multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)\n logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())\n\n if label_smoothing > 0:\n multi_class_labels = (multi_class_labels * (1 - label_smoothing) +\n 0.5 * label_smoothing)\n\n losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,\n logits=logits,\n name=\"xentropy\")\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n@tf_export(\"losses.softmax_cross_entropy\")\ndef softmax_cross_entropy(\n onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided,\n then the loss is simply scaled by the given value. If `weights` is a\n tensor of shape `[batch_size]`, then the loss weights apply to each\n corresponding sample.\n\n If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:\n new_onehot_labels = onehot_labels * (1 - label_smoothing)\n + label_smoothing / num_classes\n\n Note that `onehot_labels` and `logits` must have the same shape,\n e.g. `[batch_size, num_classes]`. The shape of `weights` must be\n broadcastable to loss, whose shape is decided by the shape of `logits`.\n In case the shape of `logits` is `[batch_size, num_classes]`, loss is\n a `Tensor` of shape `[batch_size]`.\n\n Args:\n onehot_labels: One-hot-encoded labels.\n logits: Logits outputs of the network.\n weights: Optional `Tensor` that is broadcastable to loss.\n label_smoothing: If greater than 0 then smooth the labels.\n scope: the scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss `Tensor` of the same type as `logits`. If `reduction` is\n `NONE`, this has shape `[batch_size]`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `logits` doesn't match that of `onehot_labels`\n or if the shape of `weights` is invalid or if `weights` is None. Also if\n `onehot_labels` or `logits` is None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. 
Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if onehot_labels is None:\n raise ValueError(\"onehot_labels must not be None.\")\n if logits is None:\n raise ValueError(\"logits must not be None.\")\n with ops.name_scope(scope, \"softmax_cross_entropy_loss\",\n (logits, onehot_labels, weights)) as scope:\n logits = ops.convert_to_tensor(logits)\n onehot_labels = math_ops.cast(onehot_labels, logits.dtype)\n logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())\n\n if label_smoothing > 0:\n num_classes = math_ops.cast(\n array_ops.shape(onehot_labels)[1], logits.dtype)\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n onehot_labels = onehot_labels * smooth_positives + smooth_negatives\n\n onehot_labels = array_ops.stop_gradient(\n onehot_labels, name=\"labels_stop_gradient\")\n losses = nn.softmax_cross_entropy_with_logits_v2(\n labels=onehot_labels, logits=logits, name=\"xentropy\")\n\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n\n\n# TODO(ptucker): Merge this with similar method in metrics_impl.\ndef _remove_squeezable_dimensions(\n labels, predictions, weights=None, expected_rank_diff=0):\n \"\"\"Internal version of _remove_squeezable_dimensions which handles weights.\n\n Squeezes `predictions` and `labels` if their ranks differ from expected by\n exactly 1.\n Squeezes `weights` if its rank is 1 more than the new rank of `predictions`\n\n This will use static shape if available. Otherwise, it will add graph\n operations, which could result in a performance hit.\n\n Args:\n labels: Label values, a `Tensor` whose dimensions match `predictions`.\n predictions: Predicted values, a `Tensor` of arbitrary dimensions.\n weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,\n and its rank is 1 more than the new rank of `labels`.\n expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.\n\n Returns:\n Tuple of `predictions`, `labels` and `weights`, possibly with the last\n dimension squeezed.\n \"\"\"\n labels, predictions = confusion_matrix.remove_squeezable_dimensions(\n labels, predictions, expected_rank_diff=expected_rank_diff)\n\n if weights is not None:\n weights = ops.convert_to_tensor(weights)\n labels_rank = labels.get_shape().ndims\n weights_shape = weights.get_shape()\n weights_rank = weights_shape.ndims\n\n if (labels_rank is not None) and (weights_rank is not None):\n # Use static rank.\n rank_diff = weights_rank - labels_rank\n if rank_diff == 1:\n weights = array_ops.squeeze(weights, [-1])\n return labels, predictions, weights\n\n # Use dynamic rank.\n rank_diff = array_ops.rank(weights) - array_ops.rank(labels)\n if (weights_rank is None) or (\n weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):\n weights = control_flow_ops.cond(\n math_ops.equal(1, rank_diff),\n lambda: array_ops.squeeze(weights, [-1]),\n lambda: weights)\n\n return labels, predictions, weights\n\n\n@tf_export(\"losses.sparse_softmax_cross_entropy\")\ndef sparse_softmax_cross_entropy(\n labels, logits, weights=1.0, scope=None,\n loss_collection=ops.GraphKeys.LOSSES,\n reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n \"\"\"Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.\n\n `weights` acts as a coefficient for the loss. If a scalar is provided,\n then the loss is simply scaled by the given value. 
If `weights` is a\n tensor of shape `[batch_size]`, then the loss weights apply to each\n corresponding sample.\n\n Args:\n labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of\n `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`\n must be an index in `[0, num_classes)`. Other values will raise an\n exception when this op is run on CPU, and return `NaN` for corresponding\n loss and gradient rows on GPU.\n logits: Unscaled log probabilities of shape\n `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or\n `float64`.\n weights: Coefficients for the loss. This must be scalar or broadcastable to\n `labels` (i.e. same rank and each dimension is either 1 or the same).\n scope: the scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\n Returns:\n Weighted loss `Tensor` of the same type as `logits`. If `reduction` is\n `NONE`, this has the same shape as `labels`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shapes of `logits`, `labels`, and `weights` are\n incompatible, or if any of them are None.\n\n @compatibility(eager)\n The `loss_collection` argument is ignored when executing eagerly. Consider\n holding on to the return value or collecting losses via a `tf.keras.Model`.\n @end_compatibility\n \"\"\"\n if labels is None:\n raise ValueError(\"labels must not be None.\")\n if logits is None:\n raise ValueError(\"logits must not be None.\")\n with ops.name_scope(scope, \"sparse_softmax_cross_entropy_loss\",\n (logits, labels, weights)) as scope:\n # As documented above in Args, labels contain class IDs and logits contains\n # 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1;\n # therefore, expected_rank_diff=1.\n labels, logits, weights = _remove_squeezable_dimensions(\n labels, logits, weights, expected_rank_diff=1)\n losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,\n logits=logits,\n name=\"xentropy\")\n return compute_weighted_loss(\n losses, weights, scope, loss_collection, reduction=reduction)\n",
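For intuition, the reductions defined in this module differ only in what the weighted sum is divided by: nothing (SUM), the number of elements (SUM_OVER_BATCH_SIZE), the sum of weights (MEAN), or the number of non-zero weights (SUM_BY_NONZERO_WEIGHTS), with a zero denominator mapped to a zero loss as in _safe_div. A rough NumPy sketch of those semantics (not the TensorFlow implementation itself; the string keys are the ones declared on the Reduction classes above):

import numpy as np

def weighted_loss(losses, weights=1.0,
                  reduction="weighted_sum_by_nonzero_weights"):
    """Approximate the reduction semantics of losses.compute_weighted_loss."""
    losses = np.asarray(losses, dtype=np.float64)
    weights = np.broadcast_to(np.asarray(weights, dtype=np.float64),
                              losses.shape)
    weighted = losses * weights

    def safe_div(num, den):            # zero denominator -> zero loss
        return num / den if den > 0 else 0.0

    if reduction == "none":
        return weighted
    total = weighted.sum()
    if reduction == "weighted_sum":
        return total
    if reduction == "weighted_mean":                     # SUM / sum of weights
        return safe_div(total, weights.sum())
    if reduction == "weighted_sum_over_batch_size":      # SUM / number of elements
        return safe_div(total, losses.size)
    if reduction == "weighted_sum_by_nonzero_weights":   # SUM / non-zero weights
        return safe_div(total, np.count_nonzero(weights))
    raise ValueError("Invalid Reduction Key %s." % reduction)


losses = [1.0, 2.0, 3.0, 4.0]
print(weighted_loss(losses, weights=[1.0, 1.0, 0.0, 0.0]))               # (1+2)/2 = 1.5
print(weighted_loss(losses, reduction="weighted_sum_over_batch_size"))   # 10/4 = 2.5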
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tf.scatter.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef _AsType(v, vtype):\n return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)\n\n\ndef _NumpyAdd(ref, indices, updates):\n # Since numpy advanced assignment does not support repeated indices,\n # we run a simple loop to perform scatter_add.\n for i, indx in np.ndenumerate(indices):\n ref[indx] += updates[i]\n\n\ndef _NumpyAddScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] += update\n\n\ndef _NumpySub(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] -= updates[i]\n\n\ndef _NumpySubScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] -= update\n\n\ndef _NumpyMul(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] *= updates[i]\n\n\ndef _NumpyMulScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] *= update\n\n\ndef _NumpyDiv(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] /= updates[i]\n\n\ndef _NumpyDivScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] /= update\n\n\ndef _NumpyMin(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] = np.minimum(ref[indx], updates[i])\n\n\ndef _NumpyMinScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] = np.minimum(ref[indx], update)\n\n\ndef _NumpyMax(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] = np.maximum(ref[indx], updates[i])\n\n\ndef _NumpyMaxScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] = np.maximum(ref[indx], update)\n\n\ndef _NumpyUpdate(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n ref[indx] = updates[i]\n\n\ndef _NumpyUpdateScalar(ref, indices, update):\n for _, indx in np.ndenumerate(indices):\n ref[indx] = update\n\n\n_TF_OPS_TO_NUMPY = {\n state_ops.scatter_update: _NumpyUpdate,\n state_ops.scatter_add: _NumpyAdd,\n state_ops.scatter_sub: _NumpySub,\n state_ops.scatter_mul: _NumpyMul,\n state_ops.scatter_div: _NumpyDiv,\n state_ops.scatter_min: _NumpyMin,\n state_ops.scatter_max: _NumpyMax,\n}\n\n_TF_OPS_TO_NUMPY_SCALAR = {\n state_ops.scatter_update: _NumpyUpdateScalar,\n state_ops.scatter_add: _NumpyAddScalar,\n state_ops.scatter_sub: _NumpySubScalar,\n state_ops.scatter_mul: _NumpyMulScalar,\n state_ops.scatter_div: _NumpyDivScalar,\n state_ops.scatter_min: 
_NumpyMinScalar,\n state_ops.scatter_max: _NumpyMaxScalar,\n}\n\n\nclass ScatterTest(test.TestCase):\n\n def _VariableRankTest(self,\n tf_scatter,\n vtype,\n itype,\n repeat_indices=False,\n updates_are_scalar=False):\n np.random.seed(8)\n with self.cached_session(use_gpu=True):\n for indices_shape in (), (2,), (3, 7), (3, 4, 7):\n for extra_shape in (), (5,), (5, 9):\n # Generate random indices with no duplicates for easy numpy comparison\n size = np.prod(indices_shape, dtype=itype)\n first_dim = 3 * size\n indices = np.arange(first_dim)\n np.random.shuffle(indices)\n indices = indices[:size]\n if size > 1 and repeat_indices:\n # Add some random repeats.\n indices = indices[:size // 2]\n for _ in range(size - size // 2):\n # Randomly append some repeats.\n indices = np.append(indices,\n indices[np.random.randint(size // 2)])\n np.random.shuffle(indices)\n indices = indices.reshape(indices_shape)\n if updates_are_scalar:\n updates = _AsType(np.random.randn(), vtype)\n else:\n updates = _AsType(\n np.random.randn(*(indices_shape + extra_shape)), vtype)\n\n # Clips small values to avoid division by zero.\n def clip_small_values(x):\n threshold = 1e-4\n sign = np.sign(x)\n\n if isinstance(x, np.int32):\n threshold = 1\n sign = np.random.choice([-1, 1])\n return threshold * sign if np.abs(x) < threshold else x\n\n updates = np.vectorize(clip_small_values)(updates)\n old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)\n\n # Scatter via numpy\n new = old.copy()\n if updates_are_scalar:\n np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]\n else:\n np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]\n np_scatter(new, indices, updates)\n # Scatter via tensorflow\n ref = variables.VariableV1(old)\n ref.initializer.run()\n tf_scatter(ref, indices, updates).eval()\n self.assertAllClose(ref.eval(), new)\n\n def _VariableRankTests(self,\n tf_scatter,\n repeat_indices=False,\n updates_are_scalar=False):\n vtypes = [np.float32, np.float64]\n if tf_scatter != state_ops.scatter_div:\n vtypes.append(np.int32)\n\n for vtype in vtypes:\n for itype in (np.int32, np.int64):\n self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,\n updates_are_scalar)\n\n def testVariableRankUpdate(self):\n self._VariableRankTests(state_ops.scatter_update, False)\n\n def testVariableRankAdd(self):\n self._VariableRankTests(state_ops.scatter_add, False)\n\n def testVariableRankSub(self):\n self._VariableRankTests(state_ops.scatter_sub, False)\n\n def testVariableRankMul(self):\n self._VariableRankTests(state_ops.scatter_mul, False)\n\n def testVariableRankDiv(self):\n self._VariableRankTests(state_ops.scatter_div, False)\n\n def testVariableRankMin(self):\n self._VariableRankTests(state_ops.scatter_min, False)\n\n def testVariableRankMax(self):\n self._VariableRankTests(state_ops.scatter_max, False)\n\n def testRepeatIndicesAdd(self):\n self._VariableRankTests(state_ops.scatter_add, True)\n\n def testRepeatIndicesSub(self):\n self._VariableRankTests(state_ops.scatter_sub, True)\n\n def testRepeatIndicesMul(self):\n self._VariableRankTests(state_ops.scatter_mul, True)\n\n def testRepeatIndicesDiv(self):\n self._VariableRankTests(state_ops.scatter_div, True)\n\n def testRepeatIndicesMin(self):\n self._VariableRankTests(state_ops.scatter_min, True)\n\n def testRepeatIndicesMax(self):\n self._VariableRankTests(state_ops.scatter_max, True)\n\n def testVariableRankUpdateScalar(self):\n self._VariableRankTests(state_ops.scatter_update, False, True)\n\n def testVariableRankAddScalar(self):\n 
self._VariableRankTests(state_ops.scatter_add, False, True)\n\n def testVariableRankSubScalar(self):\n self._VariableRankTests(state_ops.scatter_sub, False, True)\n\n def testVariableRankMulScalar(self):\n self._VariableRankTests(state_ops.scatter_mul, False, True)\n\n def testVariableRankDivScalar(self):\n self._VariableRankTests(state_ops.scatter_div, False, True)\n\n def testVariableRankMinScalar(self):\n self._VariableRankTests(state_ops.scatter_min, False, True)\n\n def testVariableRankMaxScalar(self):\n self._VariableRankTests(state_ops.scatter_max, False, True)\n\n def testRepeatIndicesAddScalar(self):\n self._VariableRankTests(state_ops.scatter_add, True, True)\n\n def testRepeatIndicesSubScalar(self):\n self._VariableRankTests(state_ops.scatter_sub, True, True)\n\n def testRepeatIndicesMulScalar(self):\n self._VariableRankTests(state_ops.scatter_mul, True, True)\n\n def testRepeatIndicesDivScalar(self):\n self._VariableRankTests(state_ops.scatter_div, True, True)\n\n def testRepeatIndicesMinScalar(self):\n self._VariableRankTests(state_ops.scatter_min, True, True)\n\n def testRepeatIndicesMaxScalar(self):\n self._VariableRankTests(state_ops.scatter_max, True, True)\n\n def testBooleanScatterUpdate(self):\n if not test.is_gpu_available():\n with self.session(use_gpu=False) as session:\n var = variables.Variable([True, False])\n update0 = state_ops.scatter_update(var, 1, True)\n update1 = state_ops.scatter_update(\n var, constant_op.constant(\n 0, dtype=dtypes.int64), False)\n var.initializer.run()\n\n session.run([update0, update1])\n\n self.assertAllEqual([False, True], self.evaluate(var))\n\n def testScatterOutOfRangeCpu(self):\n for op, _ in _TF_OPS_TO_NUMPY.items():\n params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n updates = np.array([-3, -4, -5]).astype(np.float32)\n if not test.is_gpu_available():\n with self.session(use_gpu=False):\n ref = variables.VariableV1(params)\n ref.initializer.run()\n\n # Indices all in range, no problem.\n indices = np.array([2, 0, 5])\n op(ref, indices, updates).eval()\n\n # Test some out of range errors.\n indices = np.array([-1, 0, 5])\n with self.assertRaisesOpError(\n r'indices\\[0\\] = -1 is not in \\[0, 6\\)'):\n op(ref, indices, updates).eval()\n\n indices = np.array([2, 0, 6])\n with self.assertRaisesOpError(r'indices\\[2\\] = 6 is not in \\[0, 6\\)'):\n op(ref, indices, updates).eval()\n\n # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.\n def _disabledTestScatterOutOfRangeGpu(self):\n if test.is_gpu_available():\n return\n for op, _ in _TF_OPS_TO_NUMPY.items():\n params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n updates = np.array([-3, -4, -5]).astype(np.float32)\n # With GPU, the code ignores indices that are out of range.\n # We don't test the implementation; just test there's no failures.\n with self.cached_session(force_gpu=True):\n ref = variables.Variable(params)\n ref.initializer.run()\n\n # Indices all in range, no problem.\n indices = np.array([2, 0, 5])\n op(ref, indices, updates).eval()\n\n # Indicies out of range should not fail.\n indices = np.array([-1, 0, 5])\n op(ref, indices, updates).eval()\n indices = np.array([2, 0, 6])\n op(ref, indices, updates).eval()\n\n\nif __name__ == '__main__':\n test.main()\n",
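The _Numpy* helpers in the test above loop one index at a time precisely because NumPy fancy-index assignment does not accumulate repeated indices the way scatter_add does. A small standalone illustration of that difference (plain NumPy, separate from the test itself):

import numpy as np

ref = np.zeros(3)
indices = np.array([0, 0, 2])      # index 0 repeats
updates = np.array([1.0, 2.0, 5.0])

# Fancy-index += keeps only the last update for a repeated index.
a = ref.copy()
a[indices] += updates              # -> [2., 0., 5.]

# Looping (as in _NumpyAdd above) or np.add.at accumulates, matching scatter_add.
b = ref.copy()
for i, idx in np.ndenumerate(indices):
    b[idx] += updates[i]           # -> [3., 0., 5.]

c = ref.copy()
np.add.at(c, indices, updates)     # -> [3., 0., 5.]

print(a, b, c)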
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Adam for TensorFlow.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.training import training_ops\n\n\nclass Adam(optimizer_v2.OptimizerV2):\n \"\"\"Optimizer that implements the Adam algorithm.\n\n Adam optimization is a stochastic gradient descent method that is based on\n adaptive estimation of first-order and second-order moments. According to the\n reference, the method is 'computationally efficient, has little memory\n requirement, invariant to diagonal rescaling of gradients, and is well suited\n for problems that are large in terms of data/parameters'.\n\n # References\n See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)\n ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).\n \"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-8,\n name='Adam'):\n r\"\"\"Construct a new Adam optimizer.\n\n Initialization:\n\n $$m_0 := 0 \\text{(Initialize initial 1st moment vector)}$$\n $$v_0 := 0 \\text{(Initialize initial 2nd moment vector)}$$\n $$t := 0 \\text{(Initialize timestep)}$$\n\n The update rule for `variable` with gradient `g` uses an optimization\n described at the end of section2 of the paper:\n\n $$t := t + 1$$\n $$lr_t := \\text{learning\\_rate} * \\sqrt{1 - beta_2^t} / (1 - beta_1^t)$$\n\n $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$\n $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$\n $$variable := variable - lr_t * m_t / (\\sqrt{v_t} + \\epsilon)$$\n\n The default value of 1e-8 for epsilon might not be a good default in\n general. For example, when training an Inception network on ImageNet a\n current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the\n formulation just before Section 2.1 of the Kingma and Ba paper rather than\n the formulation in Algorithm 1, the \"epsilon\" referred to here is \"epsilon\n hat\" in the paper.\n\n The sparse implementation of this algorithm (used when the gradient is an\n IndexedSlices object, typically because of `tf.gather` or an embedding\n lookup in the forward pass) does apply momentum to variable slices even if\n they were not used in the forward pass (meaning they have a gradient equal\n to zero). Momentum decay (beta1) is also applied to the entire momentum\n accumulator. 
This means that the sparse behavior is equivalent to the dense\n behavior (in contrast to some momentum implementations which ignore momentum\n unless a variable slice was actually used).\n\n Args:\n learning_rate: A Tensor or a floating point value. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability. This epsilon is\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\n name: Optional name for the operations created when applying gradients.\n Defaults to \"Adam\". @compatibility(eager) When eager execution is\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\n a callable that takes no arguments and returns the actual value to use.\n This can be useful for changing these values across different\n invocations of optimizer functions. @end_compatibility\n \"\"\"\n\n super(Adam, self).__init__(name)\n self._set_hyper('learning_rate', learning_rate)\n self._set_hyper('beta_1', beta_1)\n self._set_hyper('beta_2', beta_2)\n self._set_hyper('epsilon', epsilon)\n\n def _create_slots(self, var_list):\n # Create slots for the first and second moments.\n for var in var_list:\n self.add_slot(var, 'm')\n self.add_slot(var, 'v')\n\n def _resource_apply_dense(self, grad, var):\n grad_dtype = grad.dtype.base_dtype\n m = self.get_slot(var, 'm')\n v = self.get_slot(var, 'v')\n local_step = math_ops.cast(self.iterations + 1, grad_dtype)\n beta_1_t = math_ops.cast(self._get_hyper('beta_1'), grad_dtype)\n beta_2_t = math_ops.cast(self._get_hyper('beta_2'), grad_dtype)\n beta_1_power = math_ops.pow(beta_1_t, local_step)\n beta_2_power = math_ops.pow(beta_2_t, local_step)\n return training_ops.resource_apply_adam(\n var.handle,\n m.handle,\n v.handle,\n beta_1_power,\n beta_2_power,\n math_ops.cast(self._get_hyper('learning_rate'), grad_dtype),\n beta_1_t,\n beta_2_t,\n math_ops.cast(self._get_hyper('epsilon'), grad_dtype),\n grad,\n use_locking=self._use_locking)\n\n def _resource_apply_sparse(self, grad, var, indices):\n var_dtype = var.dtype.base_dtype\n local_step = math_ops.cast(self.iterations + 1, var_dtype)\n beta_1_t = math_ops.cast(self._get_hyper('beta_1'), var_dtype)\n beta_2_t = math_ops.cast(self._get_hyper('beta_2'), var_dtype)\n beta_1_power = math_ops.pow(beta_1_t, local_step)\n beta_2_power = math_ops.pow(beta_2_t, local_step)\n lr_t = math_ops.cast(self._get_hyper('learning_rate'), var_dtype)\n epsilon_t = math_ops.cast(self._get_hyper('epsilon'), var_dtype)\n lr = (lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power))\n\n # m_t = beta1 * m + (1 - beta1) * g_t\n m = self.get_slot(var, 'm')\n m_scaled_g_values = grad * (1 - beta_1_t)\n m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking)\n with ops.control_dependencies([m_t]):\n m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)\n\n # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)\n v = self.get_slot(var, 'v')\n v_scaled_g_values = (grad * grad) * (1 - beta_2_t)\n v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)\n with ops.control_dependencies([v_t]):\n v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)\n\n v_sqrt = math_ops.sqrt(v_t)\n var_update = state_ops.assign_sub(\n var, lr * m_t / (v_sqrt + epsilon_t), 
use_locking=self._use_locking)\n return control_flow_ops.group(*[var_update, m_t, v_t])\n\n def _resource_scatter_add(self, x, i, v):\n with ops.control_dependencies(\n [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\n return x.value()\n\n def get_config(self):\n config = super(Adam, self).get_config()\n config.update({\n 'learning_rate': self._serialize_hyperparameter('learning_rate'),\n 'beta_1': self._serialize_hyperparameter('beta_1'),\n 'beta_2': self._serialize_hyperparameter('beta_2'),\n 'epsilon': self._serialize_hyperparameter('epsilon'),\n })\n return config\n"
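To make the update rule in the Adam docstring concrete, here is a rough NumPy transcription of one dense step using the same bias-corrected learning rate (a sketch of the math only, not of training_ops.resource_apply_adam; the function name adam_step is invented for this example):

import numpy as np

def adam_step(var, grad, m, v, t,
              lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    """One dense Adam update, following the equations in the docstring above.

    `t` is the 1-based timestep; returns the updated (var, m, v).
    """
    lr_t = lr * np.sqrt(1.0 - beta_2 ** t) / (1.0 - beta_1 ** t)
    m = beta_1 * m + (1.0 - beta_1) * grad
    v = beta_2 * v + (1.0 - beta_2) * grad * grad
    var = var - lr_t * m / (np.sqrt(v) + epsilon)
    return var, m, v


var = np.array([1.0, -2.0])
m = np.zeros_like(var)
v = np.zeros_like(var)
for t in range(1, 4):                      # three steps on a fixed gradient
    var, m, v = adam_step(var, np.array([0.5, -0.5]), m, v, t)
print(var)  # each component moves roughly 0.001 per step against its gradient sign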
] | [
[
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.distribute.cross_device_ops.MultiWorkerAllReduce",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.distribute.cross_device_ops.choose_the_best",
"tensorflow.python.framework.device.DeviceSpec",
"tensorflow.python.eager.tape.stop_recording",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.values.update_regroup",
"tensorflow.python.distribute.cross_device_ops.check_destinations",
"tensorflow.python.training.distribute.UpdateContext",
"tensorflow.python.distribute.cross_device_ops.get_devices_from",
"tensorflow.python.distribute.shared_variable_creator.make_fn",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.distribute.values.InputFunctionIterator",
"tensorflow.python.distribute.cross_device_ops.ReductionToOneDeviceCrossDeviceOps",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.distribute.values.DatasetIterator",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.training.device_util.canonicalize",
"tensorflow.python.distribute.values.MultiStepContext",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.distribute.values.MirroredVariable",
"tensorflow.python.distribute.values.regroup",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.distribute.values.Mirrored",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.distribute.values.select_device_mirrored",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.cross_device_ops.AllReduceCrossDeviceOps",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.distribute.values.value_container",
"tensorflow.python.training.distribute.InputContext",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.training.distribute.require_replica_context",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.training.device_util.resolve",
"tensorflow.python.distribute.values.ReplicaLocalVariable",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.distribute.values.select_device",
"tensorflow.python.distribute.cross_device_ops.validate_destinations",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.sequence_mask",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.ops.control_flow_ops.Assert",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.cumprod",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.ops.ragged.ragged_factory_ops.from_nested_row_splits",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.broadcast_to",
"tensorflow.python.ops.gen_ragged_conversion_ops.ragged_tensor_to_sparse",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.ragged.ragged_util.convert_to_int_tensor",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.ops.ragged.ragged_factory_ops.convert_to_tensor_or_ragged_tensor",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.ragged.ragged_factory_ops.from_row_splits",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.ragged.ragged_factory_ops.from_value_rowids",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.ragged.from_sparse",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.losses.util.add_loss",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.div_no_nan",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.weights_broadcast_ops.broadcast_weights",
"tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.math_ops.squared_difference",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.weights_broadcast_ops.assert_broadcastable",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.confusion_matrix.remove_squeezable_dimensions",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.deprecation.deprecated_argument_lookup"
],
[
"numpy.minimum",
"tensorflow.python.ops.variables.Variable",
"numpy.random.randn",
"numpy.random.randint",
"numpy.arange",
"tensorflow.python.platform.test.main",
"numpy.random.choice",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.variables.VariableV1",
"numpy.ndenumerate",
"numpy.array",
"numpy.maximum",
"numpy.abs",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.sign",
"numpy.vectorize",
"numpy.prod",
"tensorflow.python.ops.state_ops.scatter_update",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_add",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
joeybose/Adversarial-Example-Games | [
"4219137e5263cd7de86687ed74cc1cef7497bb78",
"4219137e5263cd7de86687ed74cc1cef7497bb78",
"4219137e5263cd7de86687ed74cc1cef7497bb78",
"4219137e5263cd7de86687ed74cc1cef7497bb78",
"4219137e5263cd7de86687ed74cc1cef7497bb78"
] | [
"attacks/wilcoxon.py",
"flows/flows.py",
"attacks/momentum_iterative_attack.py",
"cnn_models/lenet.py",
"attacks/diverse_input_attack.py"
] | [
"import numpy as np\nimport ipdb\nfrom scipy.stats import wilcoxon, ttest_rel\n\n# MNIST\nmi_attack = [90.000000, 87.575768, 81.515160, 90.909088, 84.848480, 88.787872,\n 89.090904]\ndi_attack = [90.606056, 90.000000, 85.454552, 91.818176, 88.484856, 89.696968,\n 0.606071]\ntid_attack = [90.000000, 83.939400, 84.545456, 86.666664, 83.333336, 83.333336,\n 86.060608]\naeg_mnist = [88.095, 91.071, 88.690, 89.881, 85.714, 91.071, 91.667]\n\nw_mi, p_mi = wilcoxon(mi_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- MI-Attack vs. AEG: W: %f , P: %f\" %(w_mi, p_mi))\n\nw_di, p_di = wilcoxon(di_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- DI-Attack vs. AEG: W: %f , P: %f\" %(w_di, p_di))\n\nw_tid, p_tid = wilcoxon(tid_attack, aeg_mnist, alternative='less', zero_method='zsplit')\nprint(\"MNIST-- TID-Attack vs. AEG: W: %f , P: %f\" %(w_tid, p_tid))\n\n# CIFAR\nc_mi_attack = [48.176666, 60.848335, 57.434998, 49.005005, 64.980003,\n 60.071667]\nc_di_attack = [83.571671, 85.126671, 84.953331, 79.344994, 83.279999, 87.748329]\nc_tid_attack = [8.991667, 8.716668, 9.298335, 9.150001, 9.185000, 9.225000]\nc_sgm_attack = [55.240002, 63.230000, 58.849995, 49.519997, 66.979996,\n 68.919998]\naeg_cifar = [87.51, 87.353, 87.197, 86.761, 86.683, 86.529]\n\nc_w_mi, c_p_mi = wilcoxon(c_mi_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- MI-Attack vs. AEG: W: %f , P: %f\" %(c_w_mi, c_p_mi))\n\nc_w_di, c_p_di = wilcoxon(c_di_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- DI-Attack vs. AEG: W: %f , P: %f\" %(c_w_di, c_p_di))\n\nc_w_tid, c_p_tid = wilcoxon(c_tid_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- TID-Attack vs. AEG: W: %f , P: %f\" %(c_w_tid, c_p_tid))\n\nc_w_sgm, c_p_sgm = wilcoxon(c_sgm_attack, aeg_cifar, alternative='less', zero_method='zsplit')\nprint(\"CIFAR-- SGM-Attack vs. AEG: W: %f , P: %f\" %(c_w_sgm, c_p_sgm))\n\n# T Test- MNIST\nw_mi, p_mi = ttest_rel(mi_attack, aeg_mnist)\nprint(\"T-Test MNIST-- MI-Attack vs. AEG: W: %f , P: %f\" %(w_mi, p_mi))\n\nw_di, p_di = ttest_rel(di_attack, aeg_mnist)\nprint(\"T-Test MNIST-- DI-Attack vs. AEG: W: %f , P: %f\" %(w_di, p_di))\n\nw_tid, p_tid = ttest_rel(tid_attack, aeg_mnist)\nprint(\"T-Test MNIST-- TID-Attack vs. AEG: W: %f , P: %f\" %(w_tid, p_tid))\n\n# T Test- CIFAR\nc_w_mi, c_p_mi = ttest_rel(c_mi_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- MI-Attack vs. AEG: W: %f , P: %f\" %(c_w_mi, c_p_mi))\n\nc_w_di, c_p_di = ttest_rel(c_di_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- DI-Attack vs. AEG: W: %f , P: %f\" %(c_w_di, c_p_di))\n\nc_w_tid, c_p_tid = ttest_rel(c_tid_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- TID-Attack vs. AEG: W: %f , P: %f\" %(c_w_tid, c_p_tid))\n\nc_w_sgm, c_p_sgm = ttest_rel(c_sgm_attack, aeg_cifar)\nprint(\"T-Test CIFAR-- SGM-Attack vs. AEG: W: %f , P: %f\" %(c_w_sgm, c_p_sgm))\n",
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nfrom torch import distributions\nfrom torch.nn.parameter import Parameter\nfrom torch_geometric.nn import GCNConv, GATConv\n\nfrom sklearn import cluster, datasets, mixture\nfrom sklearn.preprocessing import StandardScaler\nfrom flows.flow_helpers import *\nfrom utils.math_ops import clamp, expand_proj_dims, logsinh, e_i\nfrom utils.hyperbolics import inverse_parallel_transport_mu0, parallel_transport_mu0,inverse_sample_projection_mu0,exp_map, inverse_exp_map\nfrom utils.hyperbolics import proj_vec_to_tang, proj_vec, lorentz_norm,inverse_exp_map_mu0, exp_map_mu0, _logdet, logmap_logdet\nfrom distributions.normal import EuclideanNormal\nfrom distributions.wrapped_normal import HyperboloidWrappedNormal\n\n\n#Reference: https://github.com/ritheshkumar95/pytorch-normalizing-flows/blob/master/modules.py\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\nepsilon = 1e-6\nmax_clamp_norm = 40\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef weights_init_(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n torch.nn.init.xavier_uniform_(m.weight, gain=1)\n torch.nn.init.constant_(m.bias, 0)\n\n# All code below this line is taken from\n# https://github.com/kamenbliznashki/normalizing_flows/blob/master/maf.py\n\nclass FlowSequential(nn.Sequential):\n \"\"\" Container for layers of a normalizing flow \"\"\"\n def forward(self, x, y):\n sum_log_abs_det_jacobians = 0\n i = len(self)\n for module in self:\n x, log_abs_det_jacobian = module(x, y)\n sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian\n i -= 1\n return x, sum_log_abs_det_jacobians\n\n def inverse(self, u, y):\n i = 0\n sum_log_abs_det_jacobians = 0\n for module in reversed(self):\n u, log_abs_det_jacobian = module.inverse(u, y)\n sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian\n i += 1\n return u, sum_log_abs_det_jacobians\n\n# --------------------\n# Models\n# --------------------\n\nclass MAFRealNVP(nn.Module):\n def __init__(self, n_blocks, input_size, hidden_size, n_hidden,\n radius=torch.Tensor([0]), cond_label_size=None, batch_norm=False):\n super().__init__()\n\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size))\n self.register_buffer('base_dist_var', torch.ones(input_size))\n self.p_z = EuclideanNormal\n self.radius = radius\n\n # construct model\n modules = []\n mask = torch.arange(input_size).float() % 2\n for i in range(n_blocks):\n modules += [LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size)]\n mask = 1 - mask\n # modules += batch_norm * [BatchNorm(input_size)]\n\n self.net = FlowSequential(*modules)\n\n @property\n def base_dist(self):\n return D.Normal(self.base_dist_mean, self.base_dist_var)\n\n def forward(self, x, y=None):\n return self.net(x, y)\n\n def inverse(self, u, y=None):\n return self.net.inverse(u, y)\n\n def log_prob(self, x, y=None):\n u, sum_log_abs_det_jacobians = self.forward(x, y)\n return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)\n\n## Taken from: https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb\nclass RealNVP(nn.Module):\n def __init__(self, n_blocks, input_size, hidden_size, n_hidden,\n layer_type='Linear', radius=torch.Tensor([0])):\n super(RealNVP, self).__init__()\n mask = torch.arange(input_size).float() 
% 2\n self.n_blocks = n_blocks\n self.n_hidden = n_hidden\n self.radius = radius\n self.layer_type = layer_type\n self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n i_mask = 1 - mask\n mask = torch.stack([mask,i_mask]).repeat(int(n_blocks/2),1)\n self.p_z = EuclideanNormal\n self.s, self.t = create_real_nvp_blocks(input_size, hidden_size,\n n_blocks, n_hidden, layer_type)\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size))\n self.register_buffer('base_dist_var', torch.ones(input_size))\n self.mask = nn.Parameter(mask, requires_grad=False)\n\n def inverse(self, z, edge_index=None):\n log_det_J, x = z.new_zeros(z.shape[0]), z\n for i in range(0,self.n_blocks):\n x_ = x*self.mask[i]\n if self.layer_type != 'Linear':\n s = self.s[i](x_, edge_index)\n t = self.t[i](x_, edge_index)\n else:\n s = self.s[i](x_)\n t = self.t[i](x_)\n x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)\n log_det_J += ((1-self.mask[i])*s).sum(dim=1) # log det dx/du\n return x, log_det_J\n\n def forward(self, x, edge_index=None):\n log_det_J, z = x.new_zeros(x.shape[0]), x\n for i in reversed(range(0,self.n_blocks)):\n z_ = self.mask[i] * z\n if self.layer_type != 'Linear':\n s = self.s[i](z_, edge_index)\n t = self.t[i](z_, edge_index)\n else:\n s = self.s[i](z_)\n t = self.t[i](z_)\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_det_J -= ((1-self.mask[i])*s).sum(dim=1)\n return z, log_det_J\n\n def log_prob(self, x, edge_index=None):\n z, logp = self.forward(x, edge_index)\n p_z = self.p_z(torch.zeros_like(x, device=self.dev),\n torch.ones_like(x))\n return p_z.log_prob(z) + logp\n\n def sample(self, batchSize):\n # TODO: Update this method for edge_index\n z = self.prior.sample((batchSize, 1))\n logp = self.prior.log_prob(z)\n x = self.inverse(z)\n return x\n\nclass WrappedRealNVP(nn.Module):\n def __init__(self, n_blocks, input_size, hidden_size, n_hidden, radius,\n layer_type='Linear'):\n super(WrappedRealNVP, self).__init__()\n self.radius = radius\n self.n_blocks = n_blocks\n self.n_hidden = n_hidden\n self.preclamp_norm = torch.Tensor([0])\n self.input_size = input_size\n self.layer_type = layer_type\n mask = torch.arange(input_size).float() % 2\n self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.p_z = HyperboloidWrappedNormal\n i_mask = 1 - mask\n mask = torch.stack([mask,i_mask]).repeat(int(n_blocks/2),1)\n self.s, self.t = create_wrapped_real_nvp_blocks(input_size, hidden_size,\n n_blocks, n_hidden, layer_type)\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size+1))\n self.register_buffer('base_dist_var', torch.ones(input_size+1))\n self.mask = nn.Parameter(mask, requires_grad=False)\n\n def create_masked_t(self, mask, t1, t_rest):\n count = 0\n zero_vector = torch.zeros(len(t1), 1).to(t1.device)\n for i in range(0,len(mask)):\n if mask[i].item() == 0:\n t1 = torch.cat((t1, zero_vector), dim=1)\n else:\n column = t_rest[:,count].view(-1,1)\n t1 = torch.cat((t1, column), dim=1)\n count += 1\n return t1\n\n def inverse(self, z_hyper, edge_index=None):\n z = inverse_exp_map_mu0(z_hyper, self.radius)\n z_mu0 = z[..., 1:]\n log_det_J, x = z_mu0.new_zeros(z_mu0.shape[0]), z_mu0\n log_det_J = logmap_logdet(z, self.radius)\n preclamp_norm_list = []\n for i in range(0,self.n_blocks):\n x_ = x*self.mask[i]\n if self.layer_type != 'Linear':\n s = self.s[i](x_, edge_index)\n t_out = self.t[i](x_, 
edge_index)\n else:\n s = self.s[i](x_)\n t_out = self.t[i](x_)\n t_proj = proj_vec(t_out, self.radius)\n t1, t_rest = t_proj[:,0].unsqueeze(1), t_proj[:,1:]\n t = self.create_masked_t((1-self.mask[i]), t1, t_rest)\n # (1-b) \\odot \\tilde{x} \\odot exp(s(b \\odot \\tilde{x}))\n x_pt_arg = expand_proj_dims((1 - self.mask[i]) * x * torch.exp(s))\n\n # (1-b) \\odot \\textnormal{PT}_{\\textbf{o}\\to t(b \\odot \\tilde{x})\n pt = parallel_transport_mu0(x_pt_arg, dst=t, radius=self.radius)\n preclamp_norm = pt.max()\n pt = clamp(pt, min=-max_clamp_norm, max=max_clamp_norm)\n if pt.max() == max_clamp_norm:\n preclamp_norm_list.append(preclamp_norm)\n x_t = exp_map(x=pt, at_point=t, radius=self.radius)\n log_det_J += _logdet(pt, self.radius, subdim=(self.mask[i]).sum())\n preclamp_norm = x_t.max()\n x_t = clamp(x_t, min=-max_clamp_norm, max=max_clamp_norm)\n if x_t.max() == max_clamp_norm:\n preclamp_norm_list.append(preclamp_norm)\n\n #\\log_{\\textbf{o}}(\\textnormal{exp}_{t()}(\\textnormal{PT}_{\\textbf{o}\\to t()))\n x_0 = inverse_exp_map_mu0(x_t, self.radius)[...,1:]\n log_det_J += logmap_logdet(x_0, self.radius, subdim=(self.mask[i]).sum())\n x = x_ + (1 - self.mask[i]) * x_0\n log_det_J += ((1-self.mask[i])*s).sum(dim=1) # log det dx/du\n\n preclamp_norm = x.max()\n x = clamp(x, min=-max_clamp_norm, max=max_clamp_norm)\n if x.max() == max_clamp_norm:\n preclamp_norm_list.append(preclamp_norm)\n\n x_mu0 = expand_proj_dims(x)\n # Project back to Manifold\n x = exp_map_mu0(x_mu0, self.radius)\n log_det_J += _logdet(x_mu0, self.radius)\n\n self.preclamp_norm = torch.Tensor([sum(preclamp_norm_list)\n /len(preclamp_norm_list)]) if preclamp_norm_list else self.preclamp_norm\n return x, log_det_J\n\n def forward(self, x_hyper, edge_index=None):\n x = inverse_exp_map_mu0(x_hyper, self.radius)\n x_mu0 = x[..., 1:]\n log_det_J, z = x.new_zeros(x_mu0.shape[0]), x_mu0\n log_det_J = -1*logmap_logdet(x, self.radius)\n for i in reversed(range(0,self.n_blocks)):\n z_ = self.mask[i] * z\n if self.layer_type != 'Linear':\n s = self.s[i](z_, edge_index)\n t_out = self.t[i](z_, edge_index)\n else:\n s = self.s[i](z_)\n t_out = self.t[i](z_)\n t_proj = proj_vec(t_out, self.radius)\n\n t1, t_rest = t_proj[:,0].unsqueeze(1), t_proj[:,1:]\n t = self.create_masked_t((1-self.mask[i]), t1, t_rest)\n\n z_2 = expand_proj_dims((1 - self.mask[i]) * z)\n z_2 = clamp(z_2, min=-max_clamp_norm, max=max_clamp_norm)\n z_exp_2 = exp_map_mu0(z_2, self.radius)\n log_det_J -= _logdet(z_2, self.radius, subdim=(self.mask[i]).sum())\n\n z_exp_2 = clamp(z_exp_2, min=-max_clamp_norm, max=max_clamp_norm)\n z_inv_pt_arg = inverse_exp_map(x=z_exp_2, at_point=t, radius=self.radius)\n log_det_J -= logmap_logdet(z_inv_pt_arg, self.radius, subdim=(self.mask[i]).sum())\n\n z_inv_pt_arg = clamp(z_inv_pt_arg, min=-max_clamp_norm, max=max_clamp_norm)\n pt = inverse_parallel_transport_mu0(z_inv_pt_arg, src=t, radius=self.radius)\n pt = pt[..., 1:]\n\n z = (1 - self.mask[i]) * pt * torch.exp(-s) + z_\n log_det_J -= ((1-self.mask[i])*s).sum(dim=1)\n\n z_mu0 = expand_proj_dims(z)\n z = exp_map_mu0(z_mu0, self.radius)\n log_det_J -= _logdet(z_mu0, self.radius)\n return z, log_det_J\n\n def log_prob(self,x, edge_index=None):\n z, logp = self.forward(x, edge_index)\n mu_0 = e_i(i=0, shape=self.base_dist_mean.shape,\n device=self.base_dist_mean.device) * self.radius\n p_z = self.p_z(self.radius, torch.zeros_like(mu_0, device=self.dev),\n torch.ones_like(self.base_dist_var))\n return p_z.log_prob(z) + logp\n\n def sample(self, batchSize):\n z = 
self.prior.sample((batchSize, 1))\n logp = self.prior.log_prob(z)\n x = self.inverse(z)\n return x\n\nclass AllTangentRealNVP(nn.Module):\n def __init__(self, n_blocks, input_size, hidden_size, n_hidden, radius,\n layer_type='Linear'):\n super(AllTangentRealNVP, self).__init__()\n self.radius = radius\n mask = torch.arange(input_size).float() % 2\n self.n_blocks = n_blocks\n self.n_hidden = n_hidden\n self.layer_type = layer_type\n self.preclamp_norm = torch.Tensor([0])\n self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.p_z = HyperboloidWrappedNormal\n i_mask = 1 - mask\n mask = torch.stack([mask,i_mask]).repeat(int(n_blocks/2),1)\n self.s, self.t = create_real_nvp_blocks(input_size, hidden_size,\n n_blocks, n_hidden, layer_type)\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size+1))\n self.register_buffer('base_dist_var', torch.ones(input_size+1))\n self.mask = nn.Parameter(mask, requires_grad=False)\n\n def inverse(self, z_hyper, edge_index=None):\n z = inverse_exp_map_mu0(z_hyper, self.radius)\n z_mu0 = z[..., 1:]\n log_det_J, x = z_mu0.new_zeros(z_mu0.shape[0]), z_mu0\n log_det_J = logmap_logdet(z, self.radius)\n for i in range(0,self.n_blocks):\n x_ = x*self.mask[i]\n if self.layer_type != 'Linear':\n s = self.s[i](x_, edge_index)\n t = self.t[i](x_, edge_index)\n else:\n s = self.s[i](x_)\n t = self.t[i](x_)\n x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)\n self.preclamp_norm = x.max()\n x = clamp(x, min=-max_clamp_norm, max=max_clamp_norm)\n log_det_J += ((1-self.mask[i])*s).sum(dim=1) # log det dx/du\n x_mu0 = expand_proj_dims(x)\n x = exp_map_mu0(x_mu0, self.radius)\n log_det_J += _logdet(x_mu0, self.radius)\n return x, log_det_J\n\n def forward(self, x_hyper, edge_index=None):\n x = inverse_exp_map_mu0(x_hyper, self.radius)\n x_mu0 = x[..., 1:]\n log_det_J, z = x.new_zeros(x_mu0.shape[0]), x_mu0\n log_det_J = -1*logmap_logdet(x, self.radius)\n for i in reversed(range(0,self.n_blocks)):\n z_ = self.mask[i] * z\n if self.layer_type != 'Linear':\n s = self.s[i](z_, edge_index)\n t = self.t[i](z_, edge_index)\n else:\n s = self.s[i](z_)\n t = self.t[i](z_)\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_det_J -= ((1-self.mask[i])*s).sum(dim=1)\n z_mu0 = expand_proj_dims(z)\n z = exp_map_mu0(z_mu0, self.radius)\n log_det_J -= _logdet(z_mu0, self.radius)\n return z, log_det_J\n\n def log_prob(self, x, edge_index=None):\n z, logp = self.forward(x, edge_index)\n mu_0 = e_i(i=0, shape=self.base_dist_mean.shape,\n device=self.base_dist_mean.device) * self.radius\n p_z = self.p_z(self.radius, torch.zeros_like(mu_0, device=self.dev),\n torch.ones_like(self.base_dist_var))\n return p_z.log_prob(z) + logp\n\n def sample(self, batchSize):\n z = self.prior.sample((batchSize, 1))\n logp = self.prior.log_prob(z)\n x = self.inverse(z)\n return x\n\nclass TangentRealNVP(nn.Module):\n def __init__(self, n_blocks, input_size, hidden_size, n_hidden, radius,\n layer_type='Linear'):\n super(TangentRealNVP, self).__init__()\n self.radius = radius\n self.n_blocks = n_blocks\n self.n_hidden = n_hidden\n mask = torch.arange(input_size).float() % 2\n self.preclamp_norm = torch.Tensor([0])\n self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.p_z = HyperboloidWrappedNormal\n i_mask = 1 - mask\n mask = torch.stack([mask,i_mask]).repeat(int(n_blocks/2),1)\n nets, nett = [], []\n self.s, self.t = create_real_nvp_blocks(input_size, hidden_size,\n n_blocks, 
n_hidden, layer_type)\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size+1))\n self.register_buffer('base_dist_var', torch.ones(input_size+1))\n self.mask = nn.Parameter(mask, requires_grad=False)\n\n def inverse(self, z_hyper):\n z = inverse_exp_map_mu0(z_hyper, self.radius)\n z_mu0 = z[..., 1:]\n log_det_J, x = z_mu0.new_zeros(z_mu0.shape[0]), z_mu0\n log_det_J = logmap_logdet(z, self.radius)\n for i in range(0,self.n_blocks):\n if i > 0:\n # Project between Flow Layers\n x_proj_mu0 = inverse_exp_map_mu0(x, self.radius)\n x = x_proj_mu0[..., 1:]\n log_det_J += logmap_logdet(x_proj_mu0, self.radius)\n x_ = x*self.mask[i]\n if self.layer_type != 'Linear':\n s = self.s[i](x_, edge_index)\n t = self.t[i](x_, edge_index)\n else:\n s = self.s[i](x_)\n t = self.t[i](x_)\n x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)\n self.preclamp_norm = x.max()\n x = clamp(x, min=-max_clamp_norm, max=max_clamp_norm)\n log_det_J += ((1-self.mask[i])*s).sum(dim=1) # log det dx/du\n x_mu0 = expand_proj_dims(x)\n # Project back to Manifold\n x = exp_map_mu0(x_mu0, self.radius)\n log_det_J += _logdet(x_mu0, self.radius)\n return x, log_det_J\n\n def forward(self, x_hyper):\n x = inverse_exp_map_mu0(x_hyper, self.radius)\n x_mu0 = x[..., 1:]\n log_det_J, z = x.new_zeros(x_mu0.shape[0]), x_mu0\n log_det_J = -1*logmap_logdet(x, self.radius)\n for i in reversed(range(0,self.n_blocks)):\n if i > 0:\n # Project between Flow Layers\n z_proj_mu0 = inverse_exp_map_mu0(z, self.radius)\n z = z_proj_mu0[..., 1:]\n log_det_J -= logmap_logdet(z_proj_mu0, self.radius)\n z_ = self.mask[i] * z\n if self.layer_type != 'Linear':\n s = self.s[i](z_, edge_index)\n t = self.t[i](z_, edge_index)\n else:\n s = self.s[i](z_)\n t = self.t[i](z_)\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_det_J -= ((1-self.mask[i])*s).sum(dim=1)\n z_mu0 = expand_proj_dims(z)\n # Project back to Manifold\n z = exp_map_mu0(z_mu0, self.radius)\n log_det_J -= _logdet(z_mu0, self.radius)\n return z, log_det_J\n\n def log_prob(self, x, edge_index=None):\n z, logp = self.forward(x, edge_index)\n mu_0 = e_i(i=0, shape=self.base_dist_mean.shape,\n device=self.base_dist_mean.device) * self.radius\n p_z = self.p_z(self.radius, torch.zeros_like(mu_0, device=self.dev),\n torch.ones_like(self.base_dist_var))\n return p_z.log_prob(z) + logp\n\n def sample(self, batchSize):\n z = self.p_z.sample((batchSize, 1))\n logp = self.p_z.log_prob(z)\n x = self.inverse(z)\n return x\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom advertorch.context import ctx_noparamgrad_and_eval\nfrom advertorch.attacks import LinfMomentumIterativeAttack, L2MomentumIterativeAttack\n\nimport argparse\nimport os\nimport ipdb\nimport json\nimport sys\nfrom __init__ import data_and_model_setup, load_data, eval\nsys.path.append(\"..\") # Adds higher directory to python modules path.\nfrom utils.utils import create_loaders, load_unk_model, test_classifier\nfrom classifiers import load_all_classifiers, load_list_classifiers, load_dict_classifiers\nfrom cnn_models import LeNet as Net\nfrom cnn_models import ResNet18\nfrom cnn_models.mnist_ensemble_adv_train_models import *\nfrom eval import baseline_transfer, baseline_eval_classifier\nfrom defenses.ensemble_adver_train_mnist import *\n\ndef batch(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, default='cifar')\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--end', type=int, default=100)\n parser.add_argument('--n_iter', type=int, default=1000)\n parser.add_argument('--query_step', type=int, default=1)\n parser.add_argument('--transfer', action='store_true')\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--sweep', action='store_true')\n parser.add_argument(\"--wandb\", action=\"store_true\", default=False, help='Use wandb for logging')\n parser.add_argument('--ensemble_adv_trained', action='store_true')\n parser.add_argument('--noise', type=float, default=0.3)\n parser.add_argument('--batch_size', type=int, default=256, metavar='S')\n parser.add_argument('--test_batch_size', type=int, default=32, metavar='S')\n parser.add_argument('--train_set', default='test',\n choices=['train_and_test','test','train'],\n help='add the test set in the training set')\n parser.add_argument('--modelIn', type=str,\n default='../pretrained_classifiers/cifar/res18/model_0.pt')\n parser.add_argument('--robust_model_path', type=str,\n default=\"../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt\")\n parser.add_argument('--dir_test_models', type=str,\n default=\"../\",\n help=\"The path to the directory containing the classifier models for evaluation.\")\n parser.add_argument(\"--max_test_model\", type=int, default=2,\n help=\"The maximum number of pretrained classifiers to use for testing.\")\n parser.add_argument('--train_on_madry', default=False, action='store_true',\n help='Train using Madry tf grad')\n parser.add_argument('--train_on_list', default=False, action='store_true',\n help='train on a list of classifiers')\n parser.add_argument('--attack_ball', type=str, default=\"Linf\",\n choices= ['L2','Linf'])\n parser.add_argument('--source_arch', default=\"res18\",\n help=\"The architecture we want to attack on CIFAR.\")\n parser.add_argument('--target_arch', default=None,\n help=\"The architecture we want to blackbox transfer to on CIFAR.\")\n parser.add_argument('--split', type=int, default=None,\n help=\"Which subsplit to use.\")\n parser.add_argument('--epsilon', type=float, default=0.1, metavar='M',\n help='Epsilon for Delta (default: 0.1)')\n parser.add_argument('--num_test_samples', 
default=None, type=int,\n help=\"The number of samples used to train and test the attacker.\")\n parser.add_argument('--train_with_critic_path', type=str, default=None,\n help='Train generator with saved critic model')\n parser.add_argument('--model', help='path to model')\n parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')\n parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')\n parser.add_argument('--namestr', type=str, default='NoBox', \\\n help='additional info in output filename to describe experiments')\n args = parser.parse_args()\n args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_loader, test_loader, split_train_loader, split_test_loader = create_loaders(args,\n root='../data', split=args.split)\n\n if args.split is not None:\n train_loader = split_train_loader\n test_loader = split_test_loader\n\n if os.path.isfile(\"../settings.json\"):\n with open('../settings.json') as f:\n data = json.load(f)\n args.wandb_apikey = data.get(\"wandbapikey\")\n\n if args.wandb:\n os.environ['WANDB_API_KEY'] = args.wandb_apikey\n wandb.init(project='NoBox-sweeps', name='MI-Attack-{}'.format(args.dataset))\n\n model, adv_models, l_test_classif_paths, model_type = data_and_model_setup(args)\n model.to(args.dev)\n model.eval()\n\n print(\"Testing on %d Test Classifiers with Source Model %s\" %(len(l_test_classif_paths), args.source_arch))\n\n if args.attack_ball == 'Linf':\n attacker = LinfMomentumIterativeAttack(model,\n loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"),\n eps=args.epsilon,\n nb_iter=args.n_iter,\n decay_factor=1., eps_iter=0.01,\n clip_min=0., clip_max=1.,\n targeted=False)\n elif args.attack_ball == 'L2':\n attacker = L2MomentumIterativeAttack(model,\n loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"),\n eps=args.epsilon,\n nb_iter=args.n_iter,\n decay_factor=1., eps_iter=0.01,\n clip_min=0., clip_max=1.,\n targeted=False)\n else:\n raise NotImplementedError\n\n eval_helpers = [model, model_type, adv_models, l_test_classif_paths, test_loader]\n total_fool_rate = eval(args, attacker, \"MI-Attack\", eval_helpers)\n\nif __name__ == '__main__':\n main()\n",
"'''LeNet in PyTorch.'''\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LeNet(nn.Module):\n def __init__(self, nc=3, h=32, w=32):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(nc, 6, 5)\n h, w = round((h-4)/2), round((w-4)/2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n h, w = round((h-4)/2), round((w-4)/2)\n self.fc1 = nn.Linear(16*h*w, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = F.max_pool2d(out, 2)\n out = F.relu(self.conv2(out))\n out = F.max_pool2d(out, 2)\n out = out.view(out.size(0), -1)\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n out = self.fc3(out)\n return out\n\n\nclass MadryLeNet(nn.Module):\n def __init__(self, nc=1, h=28, w=28):\n super(MadryLeNet, self).__init__()\n self.conv1 = nn.Conv2d(nc, 32, 5, padding=1)\n self.conv2 = nn.Conv2d(32, 64, 5, padding=1)\n self.fc1 = nn.Linear(7*7*64, 1024)\n self.fc2 = nn.Linear(1024, 10)\n\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = F.max_pool2d(out, 2, padding=1)\n out = F.relu(self.conv2(out))\n out = F.max_pool2d(out, 2, padding=1)\n out = out.view(out.size(0), -1)\n out = F.relu(self.fc1(out))\n out = self.fc2(out)\n return out\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.distributions.bernoulli import Bernoulli\nimport torchvision.transforms as transforms\nfrom advertorch.attacks import LinfMomentumIterativeAttack, L2MomentumIterativeAttack\nfrom advertorch.context import ctx_noparamgrad_and_eval\nfrom advertorch.utils import normalize_by_pnorm\nfrom advertorch.utils import clamp_by_pnorm\nfrom advertorch.utils import clamp\nfrom advertorch.utils import batch_clamp\nfrom advertorch.utils import batch_multiply\n\nimport argparse\nimport os\nimport ipdb\nimport json\nimport sys\nsys.path.insert(0, \"..\") # Adds higher directory to python modules path.\nfrom attacks import data_and_model_setup, load_data, eval\nfrom attacks.iterative_attacks import BIM\nfrom utils.utils import create_loaders, load_unk_model\nfrom classifiers import load_all_classifiers, load_list_classifiers, load_dict_classifiers\nfrom cnn_models import LeNet as Net\nfrom cnn_models import ResNet18\nfrom cnn_models.mnist_ensemble_adv_train_models import *\nfrom eval import baseline_transfer, baseline_eval_classifier\nfrom defenses.ensemble_adver_train_mnist import *\n\nclass DIM(LinfMomentumIterativeAttack):\n def __init__(self, args, model,\n loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), decay_factor=1.,\n attack_ball='Linf', eps=0.3, eps_iter=0.01, n_iter=50,\n clip_max=1., clip_min=-0.):\n super(DIM, self).__init__(model, loss_fn=loss_fn, eps=eps,\n nb_iter=n_iter, decay_factor=decay_factor,\n eps_iter=eps_iter, clip_min=clip_min,\n clip_max=clip_max)\n self.model = model\n self.eps = eps\n self.eps_iter = eps_iter\n self.n_iter = n_iter\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.attack_ball = attack_ball\n self.momentum = args.momentum\n self.transform_prob = args.transform_prob\n self.apply_transform = Bernoulli(torch.tensor([self.transform_prob]))\n self.resize_factor = args.resize_factor\n self.args = args\n\n def input_diversity(self, input_tensor):\n _, c, h, w = input_tensor.size()\n image_resize = int(self.resize_factor* w)\n rnd = torch.randint(h, image_resize, [1])\n h_rem = image_resize - rnd\n w_rem = image_resize - rnd\n self.pad_top = torch.randint(low=0, high=h_rem.item(), size=[1])\n self.pad_bottom = h_rem - self.pad_top\n self.pad_left = torch.randint(0, w_rem.item(), [1])\n self.pad_right = w_rem - self.pad_left\n device = input_tensor[0].device\n apply_prob = self.apply_transform.sample()\n if apply_prob:\n inp = F.interpolate(input_tensor, size=(rnd.item(), rnd.item()),\n mode='bilinear')\n out = F.pad(inp, pad=(self.pad_left, self.pad_right,\n self.pad_top, self.pad_bottom))\n else:\n out = input_tensor\n return out.to(device)\n\n def perturb(self, x, y):\n x, y = self._verify_and_process_inputs(x, y)\n delta = torch.zeros_like(x)\n g = torch.zeros_like(x)\n delta = nn.Parameter(delta)\n\n for i in range(self.nb_iter):\n if delta.grad is not None:\n delta.grad.detach_()\n delta.grad.zero_()\n\n imgadv = x + delta\n diverse_x = self.input_diversity(imgadv)\n outputs = self.predict(diverse_x)\n loss = self.loss_fn(outputs, y)\n if self.targeted:\n loss = -loss\n loss.backward()\n\n g = self.decay_factor * g + normalize_by_pnorm(\n delta.grad.data, p=1)\n # according to the paper it should be .sum(), but in their\n # implementations (both 
cleverhans and the link from the paper)\n # it is .mean(), but actually it shouldn't matter\n if self.attack_ball == 'Linf':\n delta.data += self.eps_iter * torch.sign(g)\n delta.data = clamp(\n delta.data, min=-self.eps, max=self.eps)\n delta.data = clamp(\n x + delta.data, min=self.clip_min, max=self.clip_max) - x\n elif self.attack_ball == 'L2':\n delta.data += self.eps_iter * normalize_by_pnorm(g, p=2)\n delta.data *= clamp(\n (self.eps * normalize_by_pnorm(delta.data, p=2) /\n delta.data),\n max=1.)\n delta.data = clamp(\n x + delta.data, min=self.clip_min, max=self.clip_max) - x\n else:\n error = \"Only ord = inf and ord = 2 have been implemented\"\n raise NotImplementedError(error)\n\n rval = x + delta.data\n return rval\n\ndef main(args):\n args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if os.path.isfile(\"../settings.json\"):\n with open('../settings.json') as f:\n data = json.load(f)\n args.wandb_apikey = data.get(\"wandbapikey\")\n\n if args.wandb:\n os.environ['WANDB_API_KEY'] = args.wandb_apikey\n wandb.init(project='NoBox-sweeps', name='AutoAttack-{}'.format(args.dataset))\n\n train_loader, test_loader, split_train_loader, split_test_loader = create_loaders(args,\n root='../data', split=args.split)\n\n if args.split is not None:\n train_loader = split_train_loader\n test_loader = split_test_loader\n\n model, adv_models, l_test_classif_paths, model_type = data_and_model_setup(args, di_attack=True)\n model.to(args.dev)\n model.eval()\n\n print(\"Testing on %d Test Classifiers with Source Model %s\" %(len(l_test_classif_paths), args.source_arch))\n\n attacker = DIM(args, model, attack_ball=args.attack_ball, eps=args.epsilon,\n n_iter=args.n_iter, decay_factor=args.momentum)\n\n\n eval_helpers = [model, model_type, adv_models, l_test_classif_paths, test_loader]\n total_fool_rate = eval(args, attacker, \"DI-Attack\", eval_helpers)\n return total_fool_rate\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='cifar')\nparser.add_argument('--start', type=int, default=0)\nparser.add_argument('--end', type=int, default=100)\nparser.add_argument('--n_iter', type=int, default=1000)\nparser.add_argument('--transfer', action='store_true')\nparser.add_argument('--debug', action='store_true')\nparser.add_argument('--sweep', action='store_true')\nparser.add_argument(\"--wandb\", action=\"store_true\", default=False, help='Use wandb for logging')\nparser.add_argument('--ensemble_adv_trained', action='store_true')\nparser.add_argument('--batch_size', type=int, default=256, metavar='S')\nparser.add_argument('--test_batch_size', type=int, default=32, metavar='S')\nparser.add_argument('--train_set', default='test',\n choices=['train_and_test','test','train'],\n help='add the test set in the training set')\nparser.add_argument('--modelIn', type=str,\n default='../pretrained_classifiers/cifar/res18/model_0.pt')\nparser.add_argument('--robust_model_path', type=str,\n default=\"../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt\")\nparser.add_argument('--dir_test_models', type=str,\n default=\"../\",\n help=\"The path to the directory containing the classifier models for evaluation.\")\nparser.add_argument(\"--max_test_model\", type=int, default=2,\n help=\"The maximum number of pretrained classifiers to use for testing.\")\nparser.add_argument('--train_on_madry', default=False, action='store_true',\n help='Train using Madry tf grad')\nparser.add_argument('--train_on_list', default=False, action='store_true',\n 
help='train on a list of classifiers')\nparser.add_argument('--attack_ball', type=str, default=\"Linf\",\n choices= ['L2','Linf'])\nparser.add_argument('--source_arch', default=\"res18\",\n help=\"The architecture we want to attack on CIFAR.\")\nparser.add_argument('--target_arch', default=None,\n help=\"The architecture we want to blackbox transfer to on CIFAR.\")\nparser.add_argument('--momentum', type=float, default=0.0, metavar='M',\n help='Randomly apply input Transformation')\nparser.add_argument('--transform_prob', type=float, default=0.5, metavar='M',\n help='Randomly apply input Transformation')\nparser.add_argument('--resize_factor', type=float, default=1.1, metavar='M',\n help='Resize Factor for Random Resizing')\nparser.add_argument('--split', type=int, default=None,\n help=\"Which subsplit to use.\")\nparser.add_argument('--epsilon', type=float, default=0.1, metavar='M',\n help='Epsilon for Delta (default: 0.1)')\nparser.add_argument('--train_with_critic_path', type=str, default=None,\n help='Train generator with saved critic model')\nparser.add_argument('--num_test_samples', default=None, type=int,\n help=\"The number of samples used to train and test the attacker.\")\nparser.add_argument('--model', help='path to model')\nparser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')\nparser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')\nparser.add_argument('--namestr', type=str, default='NoBox', \\\n help='additional info in output filename to describe experiments')\nif __name__ == '__main__':\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"scipy.stats.ttest_rel",
"scipy.stats.wilcoxon"
],
[
"torch.nn.Parameter",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.nn.init.constant_",
"torch.cat",
"torch.zeros_like",
"torch.arange",
"torch.exp",
"torch.cuda.is_available",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"torch.ones_like"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available"
],
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d"
],
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Parameter",
"torch.randint",
"torch.sign",
"torch.zeros_like",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
patozavala/spectrareader | [
"ebd77ca568726936832e909c2f38c7b35fb35134"
] | [
"readers/readers.py"
] | [
"import os\nimport glob\nimport pandas as pd\n\nclass BaseReader():\n \"\"\"\n Implements several verifications and utilities for handling spectral files.\n \"\"\"\n def __init__(self):\n pass\n\n def check_file_if_exists(self,filepath):\n \"\"\"\n Verifies that a required file exists.\n \"\"\"\n try:\n f = open(filepath)\n f.close()\n except:\n raise Exception (filepath + ' does not exists.') \n\n def check_file_is_readable(self,filepath):\n \"\"\"\n Verifies that a required file is readable.\n \"\"\"\n try:\n f = open(filepath)\n f.readable()\n f.close()\n except:\n raise Exception (filepath + ' is not readable.')\n \n def check_dir_if_exist(self,dirpath):\n \"\"\"\n Verifies that a directory exists.\n \"\"\"\n if os.path.isdir(dirpath):\n return True\n else:\n raise Exception (dirpath + 'does not exists.')\n\nclass SpectraReader(BaseReader):\n \"\"\"\n SpectraReader reads .csv file with spectral information from objects. The spectrum is measured with laboratory and field spectrometers.\n SpectraReader allows handling the spectral information into a pandas dataframe. Each spectral measurement must follow the current protocols of the company.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n \n def read_spectrum(self, filepath: str) -> dict:\n \"\"\"\n Reads a .csv file with an spectroradiometer measurement. \n \"\"\"\n\n self.check_file_if_exists()\n self.check_file_is_readable()\n\n data = pd.read_csv(filepath)\n label = filepath.split(sep='_')[0]\n\n spectrum = {\n 'label': label,\n 'data': data,\n }\n return spectrum\n\n def read_multiple_spectra(self, dirpath: str) -> list:\n \"\"\"\n Reads multiple files from a directory an store each measurement into a Spectrum object.\n \"\"\"\n\n self.check_dir_if_exist()\n filepaths = glob.glob(dirpath + '/*.txt')\n spectra = []\n for file in filepaths:\n spectrum = self.read_single_file(file)\n spectra.append(spectrum)\n\n return spectra\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mandubian/codenets | [
"63be72b706d57dbfb2ecec94adc203fc7bdfa3cf"
] | [
"codenets/codesearchnet/query_code_ast/dataset.py"
] | [
"import os\nimport sys\nfrom typing import Iterable, Union, Dict, Tuple, List, Callable, TypeVar, Optional, Any, cast\nimport numpy as np\nfrom pathlib import Path\nfrom loguru import logger\nfrom pathos.pools import ProcessPool\nimport itertools\nimport pickle\nimport random\nfrom dpu_utils.codeutils import split_identifier_into_parts\n\nfrom codenets.utils import _to_subtoken_stream, get_data_files_from_directory\nfrom codenets.codesearchnet.data import DatasetParams\nfrom codenets.codesearchnet.tokenizer_recs import TokenizerRecordable\nfrom codenets.codesearchnet.copied_code.utils import read_file_samples\nfrom codenets.codesearchnet.dataset_utils import (\n Samples,\n LangDataset,\n Compose,\n InputFeaturesToNpArray_RandomReplace,\n Tensorize,\n compute_language_weightings,\n)\nfrom codenets.codesearchnet.copied_code.metadata import QueryType\nfrom codenets.codesearchnet.data import InputFeatures\n\n\ndef convert_and_pad_token_sequence(\n tokenizer: TokenizerRecordable,\n token_sequence: List[str],\n output_tensor_size: int,\n token: str,\n prefix: Optional[str],\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Tensorise token sequence with padding; returning a mask for used elements as well.\n\n Args:\n tokenizer: Tokenizer.\n token_sequence: List of tokens in string form\n output_tensor_size: Size of the resulting tensor (i.e., length up which we pad / down to which we truncate.\n pad_from_left: Indicate if we are padding/truncating on the left side of string. [Default: False]\n\n Returns:\n Pair of numpy arrays. First is the actual tensorised token sequence, the second is a masking tensor\n that is 1.0 for those token indices that are actually used.\n \"\"\"\n if prefix is not None:\n token_sequence = [prefix, token] + token_sequence\n else:\n token_sequence = [token] + token_sequence\n token_ids, token_mask = tokenizer.encode_tokens([token_sequence], max_length=output_tensor_size)\n return token_ids[0], token_mask[0]\n\n\ndef load_data_from_sample_siamese(\n language: str,\n encoder_label: str,\n data_to_load: Any,\n function_name: Optional[str],\n tokenizer: TokenizerRecordable,\n fraction_using_func_name: float,\n min_len_func_name_for_query: int,\n use_subtokens: bool,\n mark_subtoken_end: bool,\n max_num_tokens: int,\n lang_token: str,\n query_token: str,\n) -> Optional[Dict[str, np.ndarray]]:\n \"\"\"\n Save two versions of both the code and the query: one using the docstring as the query and the other using the\n function-name as the query, and replacing the function name in the code with an out-of-vocab token.\n Sub-tokenizes, converts, and pads both versions, and rejects empty samples.\n \"\"\"\n result_holder: Dict[str, Any] = {}\n # Save the two versions of the code and query:\n data_holder = {QueryType.DOCSTRING.value: data_to_load, QueryType.FUNCTION_NAME.value: None}\n # Skip samples where the function name is very short, because it probably has too little information\n # to be a good search query.\n if fraction_using_func_name > 0.0 and function_name and len(function_name) >= min_len_func_name_for_query:\n if encoder_label == \"query\":\n # Set the query tokens to the function name, broken up into its sub-tokens:\n data_holder[QueryType.FUNCTION_NAME.value] = split_identifier_into_parts(function_name)\n elif encoder_label == \"code\":\n # In the code, replace the function name with the out-of-vocab token everywhere it appears:\n data_holder[QueryType.FUNCTION_NAME.value] = [\n tokenizer.unk_token() if token == function_name else token for token in data_to_load\n ]\n 
else:\n return None\n\n # Sub-tokenize, convert, and pad both versions:\n for key, data in data_holder.items():\n # if hyperparameters[f\"{encoder_label}_use_subtokens\"]:\n if use_subtokens:\n data = _to_subtoken_stream(data, mark_subtoken_end=mark_subtoken_end)\n\n logger.debug(\"\")\n if encoder_label == \"code\":\n tokens, tokens_mask = convert_and_pad_token_sequence(\n tokenizer=tokenizer,\n token_sequence=list(data),\n output_tensor_size=max_num_tokens,\n token=lang_token,\n prefix=language,\n )\n elif encoder_label == \"query\":\n tokens, tokens_mask = convert_and_pad_token_sequence(\n tokenizer=tokenizer,\n token_sequence=list(data),\n output_tensor_size=max_num_tokens,\n token=query_token,\n prefix=None,\n )\n # Note that we share the result_holder with different encoders, and so we need to make our identifiers\n # unique-ish\n result_holder[f\"{encoder_label}_tokens_{key}\"] = tokens\n result_holder[f\"{encoder_label}_tokens_mask_{key}\"] = tokens_mask\n\n if (\n result_holder[f\"{encoder_label}_tokens_mask_{QueryType.DOCSTRING.value}\"] is None\n or int(np.sum(result_holder[f\"{encoder_label}_tokens_mask_{QueryType.DOCSTRING.value}\"])) == 0\n ):\n return None\n\n return result_holder\n\n\ndef parse_data_file_siamese_tokenizer(\n data_file: Path, data_params: DatasetParams, tokenizer: TokenizerRecordable, lang_token: str, query_token: str\n) -> Tuple[str, int, Samples]:\n logger.info(f\"Reading samples from {data_file}\")\n filename = os.path.basename(data_file)\n file_language = filename.split(\"_\")[0]\n\n samples = list(read_file_samples(data_file))\n\n ds: List[Dict[str, Union[str, int]]] = []\n for raw_sample in samples:\n language = raw_sample[\"language\"]\n if language.startswith(\"python\"): # In some datasets, we use 'python-2.7' and 'python-3'\n language = \"python\"\n\n if language != file_language:\n logger.error(f\"file with different language {language} from filename {file_language}\")\n sys.exit(f\"file with multiple language {language} from filename {file_language}\")\n\n # the load_data_from_sample method call places processed data into sample, and\n # returns a boolean flag indicating if sample should be used\n function_name = raw_sample.get(\"func_name\")\n data_code = load_data_from_sample_siamese(\n language=language,\n encoder_label=\"code\",\n data_to_load=raw_sample[\"code_tokens\"],\n function_name=function_name,\n tokenizer=tokenizer,\n fraction_using_func_name=data_params.fraction_using_func_name,\n min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.code_max_num_tokens,\n lang_token=lang_token,\n query_token=query_token,\n )\n\n # query doesn't use the language\n data_query = load_data_from_sample_siamese(\n language=language,\n encoder_label=\"query\",\n data_to_load=[d.lower() for d in raw_sample[\"docstring_tokens\"]],\n function_name=function_name,\n tokenizer=tokenizer,\n fraction_using_func_name=data_params.fraction_using_func_name,\n min_len_func_name_for_query=data_params.min_len_func_name_for_query,\n use_subtokens=data_params.use_subtokens,\n mark_subtoken_end=data_params.mark_subtoken_end,\n max_num_tokens=data_params.query_max_num_tokens,\n lang_token=lang_token,\n query_token=query_token,\n )\n\n if data_code is not None and data_query is not None:\n d = {\"language\": language, \"similarity\": 1, **data_code, **data_query}\n ds.append(d)\n\n logger.debug(f\"Parsed file {data_file}: language 
{file_language} [{len(ds)} samples]\")\n\n return (file_language, len(ds), ds)\n\n\nT_Single = TypeVar(\"T_Single\")\n\n\ndef load_data_from_files(\n data_files: Iterable[Path],\n data_params: DatasetParams,\n tokenizer: TokenizerRecordable,\n # humm that is not very nice type signature... need to create interface for that\n parse_callback: Callable[[Path, DatasetParams, TokenizerRecordable], Tuple[str, int, Iterable[T_Single]]],\n parallelize: bool = True,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n tasks_as_args = [[data_file, data_params, tokenizer] for data_file in data_files]\n\n if parallelize:\n pool = ProcessPool()\n\n # needed that hack to work... issues with serialization of classes\n # doesn't work with basic multiprocessing so needed pathos\n def cb(x):\n return parse_callback(*x)\n\n per_file_results = list(pool.map(cb, tasks_as_args))\n else:\n per_file_results = [parse_callback(*task_args) for task_args in tasks_as_args] # type: ignore\n\n lang_samples_iter: Dict[str, Tuple[int, List[Iterable[T_Single]]]] = {}\n for (lang, lg, samples_iter) in per_file_results:\n if lang not in lang_samples_iter:\n lang_samples_iter[lang] = (0, [])\n (lg0, iters) = lang_samples_iter[lang]\n iters.append(samples_iter)\n lang_samples_iter[lang] = (lg0 + lg, iters)\n\n lang_samples: Dict[str, Tuple[int, Iterable[T_Single]]] = {}\n for (lang, (lg, iters)) in lang_samples_iter.items():\n lang_samples[lang] = (lg, itertools.chain(*iters))\n\n return lang_samples\n\n\ndef load_data_from_files_raw(\n data_files: Iterable[Path],\n # humm that is not very nice type signature... need to create interface for that\n parse_callback: Callable[..., Tuple[str, int, Iterable[T_Single]]], # type: ignore\n parallelize: bool,\n *args,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n tasks_as_args = [[data_file, *args] for data_file in data_files]\n\n if parallelize:\n pool = ProcessPool()\n\n # needed that hack to work... 
issues with serialization of classes\n # doesn't work with basic multiprocessing so needed pathos\n def cb(x):\n return parse_callback(*x)\n\n per_file_results = list(pool.map(cb, tasks_as_args))\n else:\n per_file_results = [parse_callback(*task_args) for task_args in tasks_as_args] # type: ignore\n\n lang_samples_iter: Dict[str, Tuple[int, List[Iterable[T_Single]]]] = {}\n for (lang, lg, samples_iter) in per_file_results:\n if lang not in lang_samples_iter:\n lang_samples_iter[lang] = (0, [])\n (lg0, iters) = lang_samples_iter[lang]\n iters.append(samples_iter)\n lang_samples_iter[lang] = (lg0 + lg, iters)\n\n lang_samples: Dict[str, Tuple[int, Iterable[T_Single]]] = {}\n for (lang, (lg, iters)) in lang_samples_iter.items():\n lang_samples[lang] = (lg, itertools.chain(*iters))\n\n return lang_samples\n\n\ndef load_data_from_dirs_siamese_tokenizer(\n data_dirs: List[Path],\n tokenizer: TokenizerRecordable,\n data_params: DatasetParams,\n parse_callback: Callable[[Path, DatasetParams, TokenizerRecordable], Tuple[str, int, Iterable[T_Single]]],\n max_files_per_dir: Optional[int] = None,\n parallelize: bool = True,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n return load_data_from_files(\n data_files=list(get_data_files_from_directory(data_dirs, max_files_per_dir)),\n data_params=data_params,\n tokenizer=tokenizer,\n parse_callback=parse_callback,\n parallelize=parallelize,\n )\n\n\ndef load_data_from_dirs(\n data_dirs: List[Path],\n parse_callback: Callable[..., Tuple[str, int, Iterable[T_Single]]], # type: ignore\n max_files_per_dir: Optional[int],\n parallelize: bool,\n *args,\n) -> Dict[str, Tuple[int, Iterable[T_Single]]]:\n return load_data_from_files_raw(\n list(get_data_files_from_directory(data_dirs, max_files_per_dir)), parse_callback, parallelize, *args\n )\n\n\ndef build_lang_dataset_siamese_tokenizer(\n dirs: List[Path],\n name: str,\n data_params: DatasetParams,\n tokenizer: TokenizerRecordable,\n lang_token: str,\n query_token: str,\n fraction_using_func_name: float,\n query_random_token_frequency: float,\n common_tokens: Dict[int, List[int]], # list of token ID\n use_lang_weights: bool,\n lang_ids: Dict[str, int],\n pickle_path=\".\",\n parallelize: bool = False,\n embedding_model=None,\n) -> LangDataset:\n def build_input_features_from_dict(sample: Dict[str, Union[str, int, np.ndarray]]) -> InputFeatures:\n \"\"\"Build InputFeature from Dict by randomizing between using docstring or function name for query\"\"\"\n return InputFeatures(\n language=data_params.lang_ids[cast(str, sample[\"language\"])],\n similarity=cast(int, sample[\"similarity\"]),\n query_tokens=sample[\"query_tokens_func_name_as_query\"],\n query_tokens_mask=sample[\"query_tokens_mask_func_name_as_query\"],\n query_docstring_tokens=sample[\"query_tokens_docstring_as_query\"],\n query_docstring_tokens_mask=sample[\"query_tokens_mask_docstring_as_query\"],\n code_tokens=sample[\"code_tokens_func_name_as_query\"],\n code_tokens_mask=sample[\"code_tokens_mask_func_name_as_query\"],\n )\n\n def parser(\n data_file: Path, data_params: DatasetParams, tokenizer: TokenizerRecordable\n ) -> Tuple[str, int, Iterable[InputFeatures]]:\n (lang, lg, feats) = parse_data_file_siamese_tokenizer(\n data_file, data_params, tokenizer, lang_token, query_token\n )\n return (lang, lg, list(map(build_input_features_from_dict, feats)))\n\n # Train Data\n if not os.path.exists(pickle_path):\n os.makedirs(pickle_path)\n\n pickle_file = Path(pickle_path) / f\"{name}_samples.p\"\n loaded_samples: Dict[str, Tuple[int, 
Iterable[InputFeatures]]]\n\n if os.path.exists(pickle_file):\n logger.debug(f\"Loading dataset {name} raw samples from pickled {pickle_file}\")\n loaded_samples = pickle.load(open(pickle_file, \"rb\"))\n else:\n logger.debug(f\"Building dataset {name} from {dirs}\")\n loaded_samples = load_data_from_dirs_siamese_tokenizer(\n data_dirs=dirs, tokenizer=tokenizer, data_params=data_params, parse_callback=parser, parallelize=parallelize\n )\n nb = 0\n for lang, (lg, ss) in loaded_samples.items():\n ll = list(ss)\n loaded_samples[lang] = (lg, ll)\n nb += len(ll)\n pickle.dump(loaded_samples, open(pickle_file, \"wb\"))\n logger.debug(f\"Pickled dataset {name} [{nb} raw samples] to {pickle_file}\")\n\n lang_weights = compute_language_weightings(loaded_samples, lang_ids)\n logger.debug(f\"lang_weights {lang_weights}\")\n\n transform = Compose(\n [\n InputFeaturesToNpArray_RandomReplace(\n lang_weights=lang_weights,\n fraction_using_func_name=fraction_using_func_name,\n query_random_token_frequency=query_random_token_frequency,\n common_tokens=common_tokens,\n ),\n Tensorize(),\n ]\n )\n dataset = LangDataset(\n loaded_samples,\n lang_ids=data_params.lang_ids,\n transform=transform,\n use_lang_weights=use_lang_weights,\n embedding_model=embedding_model,\n tokenizer=tokenizer,\n emb_annoy_path=Path(pickle_path) / f\"{name}_embeddings.ann\",\n )\n logger.debug(f\"Loaded {name} lang dataset [{len(dataset)} samples]\")\n return dataset\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
will-duncan/ramp_systems | [
"7db1964af6bdb26ee4fed25131a12f9294c4cc1d"
] | [
"src/ramp_to_hill/hill_system.py"
] | [
"import numpy as np\nfrom scipy.integrate import solve_ivp\n\n\ndef HS_ode(t,y,HS):\n rhs = -HS.gamma*y + HS.lambda_value(y)\n return rhs\n\ndef at_HS_equilibrium(t,y,HS,tol = 1e-3):\n val = np.linalg.norm(HS_ode(t,y,HS)) - tol\n if val < 0:\n return 0\n else:\n return val\n\ndef simulate_HS(x0,HS,max_time,tol = 1e-3):\n \"\"\"\n Simulate the hill system ODE. Terminate simulation if an equilibrium is found. \n Input:\n x0 - initial condition\n HS - HillSystemParameter object\n max_time - time at which to terminate the simulation if an equilibrium hasn't been found\n Output:\n sol - output of solve_ivp\n \"\"\"\n ode = lambda t,y: HS_ode(t,y,HS)\n at_equilibrium = lambda t,y: at_HS_equilibrium(t,y,HS,tol)\n at_equilibrium.terminal = True\n integration_interval = (0,max_time)\n sol = solve_ivp(ode,integration_interval,x0,method = 'BDF',events = at_equilibrium)\n return sol\n\n\ndef find_equilibrium(x0,HS,max_time,tol = 1e-3):\n \"\"\"\n Simulate the ODE to equilibrium starting from x0\n Input: \n x0 - initial condition\n HS - HillSystemParameter object\n max_time - run the ode from time points [0,max_time]. If the solver reaches\n max_time before finding an equilibrium, then report that an equilibrium \n was not found\n Output:\n x - value of the equilibrium, if found within max_time. If not found, returns -1\n \"\"\"\n ode = lambda t,y: HS_ode(t,y,HS)\n at_equilibrium = lambda t,y: at_HS_equilibrium(t,y,HS,tol)\n # def ode(t,y,HS = HS):\n # rhs = -HS.gamma*y + HS.lambda_value(y)\n # return rhs\n # def at_equilibrium(t,y,HS = HS,tol = tol):\n # val = np.linalg.norm(ode(t,y)) - tol\n # if val < 0:\n # return 0\n # else:\n # return val\n at_equilibrium.terminal = True\n integration_interval = (0,max_time)\n sol = solve_ivp(ode,integration_interval,x0,method = 'BDF',events = at_equilibrium)\n if sol.status == 1: #at_equilibrium triggered stopping integration\n return sol.y[:,-1]\n else: \n return -1\n\ndef find_hill_equilibria_from_FPs(FPs,HS,RS,max_time,tol = 1e-3):\n \"\"\"\n Use DSGRN equilibria as initial conditions for finding Hill equilibria. \n Input: \n FPs - list of fixed point coordinates computed by DSGRN\n HS - HillSystemParameter object\n RS - RampSystem object\n max_time - maximum time to run the ODE for each equilibrium search attempt. \n Output:\n eq - list of Nx1 numpy arrays. An entry is -1 if find_equilibrium didn't find an\n equilibrium within max_time. 
len(eq) == len(FPs)\n \"\"\"\n reg_DSGRN_equilibria = RS.reg_equilibria_from_FPs(FPs)\n hill_eq = [find_equilibrium(x0.reshape([x0.shape[0]]),HS,max_time,tol = tol) for x0 in reg_DSGRN_equilibria]\n return hill_eq\n\ndef num_unique_vectors(vectors,tol = 1e-3):\n \"\"\"\n Given a list of vectors, count the number which are unique up to some tolerance\n \"\"\"\n repeat_indices = []\n num_unique = 0\n for j, vec0 in enumerate(vectors):\n if j in repeat_indices:\n continue\n num_unique += 1\n for i, vec1 in enumerate(vectors[j+1:]):\n i = i+j+1\n if i in repeat_indices:\n continue\n if np.allclose(vec0,vec1,rtol = tol):\n repeat_indices.append(i)\n return num_unique\n\ndef hill_value(x,hill_parameter):\n sign = hill_parameter.sign\n theta = hill_parameter.theta\n Delta = hill_parameter.Delta\n L = hill_parameter.L\n n = hill_parameter.n\n if sign == 1:\n return L + Delta/((theta/x)**n + 1)\n if sign == -1:\n return L + Delta/((x/theta)**n + 1)\n\ndef hill_second_derivative_root(*args):\n if len(args) == 1:\n hill_parameter = args[0]\n theta = hill_parameter.theta\n n = hill_parameter.n\n elif len(args) == 2:\n theta = args[0]\n n = args[1]\n else:\n raise TypeError('hill_second_derivative_root() takes 1 or 2 position arguments\\\n but {} were given.'.format(len(args)))\n return theta*((n-1)/(n+1))**(1/n)\n\ndef hill_derivative_magnitude(x,*args):\n if len(args) == 1:\n hill_parameter = args[0]\n sign = hill_parameter.sign\n Delta = hill_parameter.Delta\n theta = hill_parameter.theta\n n = hill_parameter.n\n elif len(args) == 4:\n sign = args[0]\n Delta = args[1]\n theta = args[2]\n n = args[3]\n else: \n raise TypeError('hill_derivative() takes 1 or 4 positiional arguments\\\n but {} were given.'.format(len(args)))\n if n == np.inf:\n if theta == x:\n return np.inf\n else: \n return 0\n return Delta*n/(theta*(theta/x)**(n-1) + 2*x + x*(x/theta)**n)\n\ndef make_hill_coefficient_array(Network,n):\n \"\"\"\n Make a hill coefficient array consistent with the network topology with each\n hill coefficient equal to n\n Input:\n Network - DSGRN network object\n n - float or integer greater than 1\n Output:\n numpy array with entry [i,j] equal to n if j->i is an edge and 0 otherwise\n \"\"\"\n N = Network.size()\n hill_coefficients = np.zeros([N,N])\n for j in range(N):\n for i in Network.outputs(j):\n hill_coefficients[i,j] = n\n return hill_coefficients\n\ndef make_sign_from_network(Network):\n \"\"\"\n Make an NxN numpy array describing the interaction sign between edges\n Input:\n Network - DSGRN network object\n Output:\n numpy array with 1 if j->i, -1 if j-|i, and 0 otherwise. 
\n \"\"\"\n N = Network.size()\n sign = np.zeros([N,N])\n for j in range(N):\n for i in Network.outputs(j):\n sign[i,j] = 1 if Network.interaction(j,i) else -1\n return sign\n\nclass HillParameter:\n\n def __init__(self,sign,L,Delta,theta,n):\n \"\"\"\n Input:\n sign - either 1 or -1\n L,Delta,theta,n - parameters for a hill function\n \"\"\"\n self.sign = sign\n self.L = L\n self.Delta = Delta\n self.theta = theta\n self.n = n\n \n def __repr__(self):\n sign = self.sign\n L = self.L\n Delta = self.Delta\n theta = self.theta\n n = self.n\n return 'HillParameter({},{},{},{},{})'.format(sign,L,Delta,theta,n)\n\n def func_value(self,x):\n return hill_value(x,self)\n\n def dx_value(self,x):\n return self.sign*hill_derivative_magnitude(x,self)\n\n\nclass HillSystemParameter:\n\n def __init__(self,Network,sign,L,Delta,theta,n,gamma):\n \"\"\"\n Input:\n gamma - length N lists\n sign,L,Delta,theta,n - NxN arrays\n \"\"\"\n self.Network = Network\n N = Network.size()\n self.sign = np.array(sign)\n self.L = np.array(L)\n self.Delta = np.array(Delta)\n self.theta = np.array(theta)\n self.n = np.array(n)\n self.gamma = np.array(gamma).reshape([N])\n \n def __eq__(self,other):\n if isinstance(other,HillSystemParameter):\n return np.array_equal(self.sign,other.sign) and np.array_equal(self.L, other.L) \\\n and np.array_equal(self.Delta, other.Delta) and np.array_equal(self.theta,other.theta) \\\n and np.array_equal(self.n, other.n) and np.array_equal(self.gamma,other.gamma)\n else: \n return False\n \n\n def hill_parameter(self,i,j):\n return HillParameter(self.sign[i,j],self.L[i,j],self.Delta[i,j],self.theta[i,j],self.n[i,j])\n\n\n def lambda_value(self,x):\n Network = self.Network\n N = Network.size()\n val = np.zeros([N])\n for i in range(N):\n cur_prod = 1\n for source_set in Network.logic(i):\n cur_sum = 0\n for j in source_set:\n cur_param = self.hill_parameter(i,j)\n cur_sum += cur_param.func_value(x[j])\n cur_prod *= cur_sum\n val[i] = cur_prod\n return val\n\n def is_equilibrium(self,x,tol = 1e-4):\n N = self.Network.size()\n x = np.array(x).reshape([N])\n return np.allclose(self.lambda_value(x)-self.gamma*x,np.zeros([N]),atol=tol)\n\n def Jacobian(self,x):\n N = self.Network.size()\n J = np.diag(-self.gamma)\n for i in range(N):\n for j in self.Network.inputs(i):\n cur_prod = 1\n for source_set in self.Network.logic(i):\n cur_sum = 0\n if j in source_set:\n cur_sum = self.hill_parameter(i,j).dx_value(x[j])\n else:\n for k in source_set:\n cur_sum += self.hill_parameter(i,k).func_value(x[k])\n cur_prod *= cur_sum\n J[i,j] += cur_prod\n return j\n\n\n def is_saddle(self,x,tol = 1e-4):\n N = self.Network.size()\n x = np.array(x).reshape([N,1])\n if not self.is_equilibrium(x,tol=tol):\n return False\n J = self.Jacobian(x)\n if np.linalg.matrix_rank(J) == N:\n return False\n return True\n\n\n"
] | [
[
"numpy.diag",
"numpy.allclose",
"numpy.linalg.matrix_rank",
"numpy.array_equal",
"scipy.integrate.solve_ivp",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
taureandyernv/cuml | [
"c92b594d3bda342c64d88a9c44b5d6e507b13f6c",
"c92b594d3bda342c64d88a9c44b5d6e507b13f6c"
] | [
"python/cuml/test/test_tsne.py",
"python/cuml/test/test_metrics.py"
] | [
"\nfrom cuml.manifold import TSNE\n\nfrom sklearn.manifold.t_sne import trustworthiness\nfrom sklearn import datasets\nimport pandas as pd\nimport numpy as np\nimport cudf\nimport pytest\n\ndataset_names = ['digits', 'boston', 'iris', 'breast_cancer',\n 'diabetes']\n\n\[email protected]('name', dataset_names)\ndef test_tsne(name):\n \"\"\"\n This tests how TSNE handles a lot of input data across time.\n (1) cuDF DataFrames are passed input\n (2) Numpy arrays are passed in\n (3) Params are changed in the TSNE class\n (4) The class gets re-used across time\n (5) Trustworthiness is checked\n (6) Tests NAN in TSNE output for learning rate explosions\n (7) Tests verbosity\n \"\"\"\n datasets\n X = eval(\"datasets.load_{}\".format(name))().data\n X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X))\n\n for i in range(3):\n print(\"iteration = \", i)\n\n tsne = TSNE(2, random_state=i, verbose=0, learning_rate=2+i)\n\n Y = tsne.fit_transform(X_cudf).to_pandas().values\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Reuse\n Y = tsne.fit_transform(X)\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Again\n tsne = TSNE(2, random_state=i+2, verbose=1, learning_rate=2+i+2)\n\n Y = tsne.fit_transform(X_cudf).to_pandas().values\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n\n # Reuse\n Y = tsne.fit_transform(X)\n nans = np.sum(np.isnan(Y))\n trust = trustworthiness(X, Y)\n print(\"Trust = \", trust)\n assert trust > 0.76\n assert nans == 0\n del Y\n",
"# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cuml\nimport numpy as np\nimport pytest\n\nfrom cuml.ensemble import RandomForestClassifier as curfc\nfrom cuml.metrics.cluster import adjusted_rand_score as cu_ars\nfrom cuml.metrics import accuracy_score as cu_acc_score\nfrom cuml.test.utils import get_handle, \\\n fit_predict, get_pattern, array_equal\n\nfrom numba import cuda\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import accuracy_score as sk_acc_score\nfrom sklearn.metrics.cluster import adjusted_rand_score as sk_ars\nfrom sklearn.preprocessing import StandardScaler\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('use_handle', [True, False])\ndef test_r2_score(datatype, use_handle):\n a = np.array([0.1, 0.2, 0.3, 0.4, 0.5], dtype=datatype)\n b = np.array([0.12, 0.22, 0.32, 0.42, 0.52], dtype=datatype)\n\n a_dev = cuda.to_device(a)\n b_dev = cuda.to_device(b)\n\n handle, stream = get_handle(use_handle)\n\n score = cuml.metrics.r2_score(a_dev, b_dev, handle=handle)\n\n np.testing.assert_almost_equal(score, 0.98, decimal=7)\n\n\ndef test_sklearn_search():\n \"\"\"Test ensures scoring function works with sklearn machinery\n \"\"\"\n import numpy as np\n from cuml import Ridge as cumlRidge\n import cudf\n from sklearn import datasets\n from sklearn.model_selection import train_test_split, GridSearchCV\n diabetes = datasets.load_diabetes()\n X_train, X_test, y_train, y_test = train_test_split(diabetes.data,\n diabetes.target,\n test_size=0.2,\n shuffle=False,\n random_state=1)\n\n alpha = np.array([1.0])\n fit_intercept = True\n normalize = False\n\n params = {'alpha': np.logspace(-3, -1, 10)}\n cu_clf = cumlRidge(alpha=alpha, fit_intercept=fit_intercept,\n normalize=normalize, solver=\"eig\")\n\n assert getattr(cu_clf, 'score', False)\n sk_cu_grid = GridSearchCV(cu_clf, params, cv=5, iid=False)\n\n record_data = (('fea%d' % i, X_train[:, i]) for i in\n range(X_train.shape[1]))\n gdf_data = cudf.DataFrame(record_data)\n gdf_train = cudf.DataFrame(dict(train=y_train))\n\n sk_cu_grid.fit(gdf_data, gdf_train.train)\n assert sk_cu_grid.best_params_ == {'alpha': 0.1}\n\n\ndef unit_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.unit)\n\n\ndef quality_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.quality)\n\n\ndef stress_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.stress)\n\n\[email protected]('nrows', [unit_param(30), quality_param(5000),\n stress_param(500000)])\[email protected]('ncols', [unit_param(10), quality_param(100),\n stress_param(200)])\[email protected]('n_info', [unit_param(7), quality_param(50),\n stress_param(100)])\[email protected]('datatype', [np.float32])\ndef test_accuracy(nrows, ncols, n_info, datatype):\n\n use_handle = True\n train_rows = np.int32(nrows*0.8)\n X, y = make_classification(n_samples=nrows, n_features=ncols,\n n_clusters_per_class=1, 
n_informative=n_info,\n random_state=123, n_classes=5)\n\n X_test = np.asarray(X[train_rows:, 0:]).astype(datatype)\n y_test = np.asarray(y[train_rows:, ]).astype(np.int32)\n X_train = np.asarray(X[0:train_rows, :]).astype(datatype)\n y_train = np.asarray(y[0:train_rows, ]).astype(np.int32)\n # Create a handle for the cuml model\n handle, stream = get_handle(use_handle, n_streams=8)\n\n # Initialize, fit and predict using cuML's\n # random forest classification model\n cuml_model = curfc(max_features=1.0,\n n_bins=8, split_algo=0, split_criterion=0,\n min_rows_per_node=2,\n n_estimators=40, handle=handle, max_leaves=-1,\n max_depth=16)\n\n cuml_model.fit(X_train, y_train)\n cu_predict = cuml_model.predict(X_test)\n cu_acc = cu_acc_score(y_test, cu_predict)\n cu_acc_using_sk = sk_acc_score(y_test, cu_predict)\n # compare the accuracy of the two models\n assert array_equal(cu_acc, cu_acc_using_sk)\n\n\ndataset_names = ['noisy_circles', 'noisy_moons', 'aniso'] + \\\n [pytest.param(ds, marks=pytest.mark.xfail)\n for ds in ['blobs', 'varied']]\n\n\[email protected]('name', dataset_names)\[email protected]('nrows', [unit_param(20), quality_param(5000),\n stress_param(500000)])\ndef test_rand_index_score(name, nrows):\n\n default_base = {'quantile': .3,\n 'eps': .3,\n 'damping': .9,\n 'preference': -200,\n 'n_neighbors': 10,\n 'n_clusters': 3}\n\n pat = get_pattern(name, nrows)\n\n params = default_base.copy()\n params.update(pat[1])\n\n cuml_kmeans = cuml.KMeans(n_clusters=params['n_clusters'])\n\n X, y = pat[0]\n\n X = StandardScaler().fit_transform(X)\n\n cu_y_pred, _ = fit_predict(cuml_kmeans,\n 'cuml_Kmeans', X)\n\n cu_score = cu_ars(y, cu_y_pred)\n cu_score_using_sk = sk_ars(y, cu_y_pred)\n\n assert array_equal(cu_score, cu_score_using_sk)\n"
] | [
[
"numpy.isnan",
"sklearn.manifold.t_sne.trustworthiness",
"pandas.DataFrame"
],
[
"sklearn.model_selection.GridSearchCV",
"sklearn.datasets.make_classification",
"numpy.logspace",
"numpy.asarray",
"numpy.int32",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_diabetes",
"numpy.testing.assert_almost_equal",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"sklearn.metrics.cluster.adjusted_rand_score",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KiriLev/albumentations | [
"c91b67c710d20755d04166b7b5e41d430aef9662"
] | [
"tests/test_serialization.py"
] | [
"import random\nfrom unittest.mock import patch\n\nimport cv2\nimport pytest\nimport numpy as np\nimport imgaug as ia\n\nimport albumentations as A\nimport albumentations.augmentations.functional as F\nfrom .utils import OpenMock\n\nTEST_SEEDS = (0, 1, 42, 111, 9999)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.CoarseDropout, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.OpticalDistortion, {}],\n [A.GridDistortion, {}],\n [A.ElasticTransform, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.SmallestMaxSize, {}],\n [A.LongestMaxSize, {}],\n [A.RandomGridShuffle, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.Downscale, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\nAUGMENTATION_CLS_PARAMS = (\n [\n [\n A.ImageCompression,\n {\n \"quality_lower\": 10,\n \"quality_upper\": 80,\n \"compression_type\": A.ImageCompression.ImageCompressionType.WEBP,\n },\n ],\n [A.JpegCompression, {\"quality_lower\": 10, \"quality_upper\": 80}],\n [A.HueSaturationValue, {\"hue_shift_limit\": 70, \"sat_shift_limit\": 95, \"val_shift_limit\": 55}],\n [A.RGBShift, {\"r_shift_limit\": 70, \"g_shift_limit\": 80, \"b_shift_limit\": 40}],\n [A.RandomBrightnessContrast, {\"brightness_limit\": 0.5, \"contrast_limit\": 0.8}],\n [A.Blur, {\"blur_limit\": 3}],\n [A.MotionBlur, {\"blur_limit\": 3}],\n [A.MedianBlur, {\"blur_limit\": 3}],\n [A.GaussianBlur, {\"blur_limit\": 3}],\n [A.GaussNoise, {\"var_limit\": (20, 90)}],\n [A.CLAHE, {\"clip_limit\": 2, \"tile_grid_size\": (12, 12)}],\n [A.RandomGamma, {\"gamma_limit\": (10, 90)}],\n [A.Cutout, {\"num_holes\": 4, \"max_h_size\": 4, \"max_w_size\": 4}],\n [A.CoarseDropout, {\"max_holes\": 4, \"max_height\": 4, \"max_width\": 4}],\n [A.RandomSnow, {\"snow_point_lower\": 0.2, \"snow_point_upper\": 0.4, \"brightness_coeff\": 4}],\n [\n A.RandomRain,\n {\n \"slant_lower\": -5,\n \"slant_upper\": 5,\n \"drop_length\": 15,\n \"drop_width\": 2,\n \"drop_color\": (100, 100, 100),\n \"blur_value\": 3,\n \"brightness_coefficient\": 0.5,\n 
\"rain_type\": \"heavy\",\n },\n ],\n [A.RandomFog, {\"fog_coef_lower\": 0.2, \"fog_coef_upper\": 0.8, \"alpha_coef\": 0.11}],\n [\n A.RandomSunFlare,\n {\n \"flare_roi\": (0.1, 0.1, 0.9, 0.6),\n \"angle_lower\": 0.1,\n \"angle_upper\": 0.95,\n \"num_flare_circles_lower\": 7,\n \"num_flare_circles_upper\": 11,\n \"src_radius\": 300,\n \"src_color\": (200, 200, 200),\n },\n ],\n [\n A.RandomShadow,\n {\n \"shadow_roi\": (0.1, 0.4, 0.9, 0.9),\n \"num_shadows_lower\": 2,\n \"num_shadows_upper\": 4,\n \"shadow_dimension\": 8,\n },\n ],\n [\n A.PadIfNeeded,\n {\"min_height\": 512, \"min_width\": 512, \"border_mode\": cv2.BORDER_CONSTANT, \"value\": (10, 10, 10)},\n ],\n [\n A.Rotate,\n {\n \"limit\": 120,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ShiftScaleRotate,\n {\n \"shift_limit\": 0.2,\n \"scale_limit\": 0.2,\n \"rotate_limit\": 70,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ShiftScaleRotate,\n {\n \"shift_limit_x\": 0.3,\n \"shift_limit_y\": 0.4,\n \"scale_limit\": 0.2,\n \"rotate_limit\": 70,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.OpticalDistortion,\n {\n \"distort_limit\": 0.2,\n \"shift_limit\": 0.2,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.GridDistortion,\n {\n \"num_steps\": 10,\n \"distort_limit\": 0.5,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [\n A.ElasticTransform,\n {\n \"alpha\": 2,\n \"sigma\": 25,\n \"alpha_affine\": 40,\n \"interpolation\": cv2.INTER_CUBIC,\n \"border_mode\": cv2.BORDER_CONSTANT,\n \"value\": (10, 10, 10),\n },\n ],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.CropNonEmptyMaskIfExists, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.Crop, {\"x_max\": 64, \"y_max\": 64}],\n [A.ToFloat, {\"max_value\": 16536}],\n [A.Normalize, {\"mean\": (0.385, 0.356, 0.306), \"std\": (0.129, 0.124, 0.125), \"max_pixel_value\": 100.0}],\n [A.RandomBrightness, {\"limit\": 0.4}],\n [A.RandomContrast, {\"limit\": 0.4}],\n [A.RandomScale, {\"scale_limit\": 0.2, \"interpolation\": cv2.INTER_CUBIC}],\n [A.Resize, {\"height\": 64, \"width\": 64}],\n [A.SmallestMaxSize, {\"max_size\": 64, \"interpolation\": cv2.INTER_CUBIC}],\n [A.LongestMaxSize, {\"max_size\": 128, \"interpolation\": cv2.INTER_CUBIC}],\n [A.RandomGridShuffle, {\"grid\": (5, 5)}],\n [A.Solarize, {\"threshold\": 32}],\n [A.Posterize, {\"num_bits\": 1}],\n [A.Equalize, {\"mode\": \"pil\", \"by_channels\": False}],\n [A.MultiplicativeNoise, {\"multiplier\": (0.7, 2.3), \"per_channel\": True, \"elementwise\": True}],\n [\n A.ColorJitter,\n {\"brightness\": [0.2, 0.3], \"contrast\": [0.7, 0.9], \"saturation\": [1.2, 1.7], \"hue\": [-0.2, 0.1]},\n ],\n [\n A.Perspective,\n {\n \"scale\": 0.5,\n \"keep_size\": False,\n \"pad_mode\": cv2.BORDER_REFLECT_101,\n \"pad_val\": 10,\n \"mask_pad_val\": 100,\n \"fit_output\": True,\n \"interpolation\": cv2.INTER_CUBIC,\n },\n ],\n [A.Sharpen, {\"alpha\": [0.2, 0.5], \"lightness\": [0.5, 1.0]}],\n ],\n)\n\n\[email protected]([\"augmentation_cls\", \"params\"], *AUGMENTATION_CLS_PARAMS)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", 
TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization_with_custom_parameters(\n augmentation_cls, params, p, seed, image, mask, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected]([\"augmentation_cls\", \"params\"], *AUGMENTATION_CLS_PARAMS)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\[email protected](\"data_format\", (\"yaml\",))\ndef test_augmentations_serialization_to_file_with_custom_parameters(\n augmentation_cls, params, p, seed, image, mask, always_apply, data_format\n):\n with patch(\"builtins.open\", OpenMock()):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n filepath = \"serialized.{}\".format(data_format)\n A.save(aug, filepath, data_format=data_format)\n deserialized_aug = A.load(filepath, data_format=data_format)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.GaussNoise, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.Crop, {\"x_max\": 64, \"y_max\": 64}],\n [A.FromFloat, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.Resize, {\"height\": 64, \"width\": 64}],\n [A.SmallestMaxSize, {}],\n [A.LongestMaxSize, {}],\n [A.RandomSizedBBoxSafeCrop, {\"height\": 50, \"width\": 50}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_for_bboxes_serialization(\n augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, 
bboxes=albumentations_bboxes)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.GaussianBlur, {}],\n [A.GaussNoise, {}],\n [A.CLAHE, {}],\n [A.ChannelShuffle, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n [A.ToGray, {}],\n [A.Cutout, {}],\n [A.GaussNoise, {}],\n [A.RandomSnow, {}],\n [A.RandomRain, {}],\n [A.RandomFog, {}],\n [A.RandomSunFlare, {}],\n [A.RandomShadow, {}],\n [A.PadIfNeeded, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.ShiftScaleRotate, {}],\n [A.CenterCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomCrop, {\"height\": 10, \"width\": 10}],\n [A.RandomSizedCrop, {\"min_max_height\": (4, 8), \"height\": 10, \"width\": 10}],\n [A.FromFloat, {}],\n [A.ToFloat, {}],\n [A.Normalize, {}],\n [A.RandomBrightness, {}],\n [A.RandomContrast, {}],\n [A.RandomScale, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_for_keypoints_serialization(augmentation_cls, params, p, seed, image, keypoints, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, keypoints=keypoints)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n 
],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_for_bboxes_serialization(\n augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, bboxes=albumentations_bboxes)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.IAAEmboss, {}],\n [A.IAASuperpixels, {}],\n [A.IAAAdditiveGaussianNoise, {}],\n [A.IAACropAndPad, {}],\n [A.IAAFliplr, {}],\n [A.IAAFlipud, {}],\n [A.IAAAffine, {}],\n [A.IAAPiecewiseAffine, {}],\n [A.IAAPerspective, {}],\n ],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_imgaug_augmentations_for_keypoints_serialization(\n augmentation_cls, params, p, seed, image, keypoints, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n ia.seed(seed)\n aug_data = aug(image=image, keypoints=keypoints)\n set_seed(seed)\n ia.seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\", \"call_params\"],\n [[A.RandomCropNearBBox, {\"max_part_shift\": 0.15}, {\"cropping_bbox\": [-59, 77, 177, 231]}]],\n)\[email protected](\"p\", [0.5, 1])\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"always_apply\", (False, True))\ndef test_augmentations_serialization_with_call_params(\n augmentation_cls, params, call_params, p, seed, image, always_apply\n):\n aug = augmentation_cls(p=p, always_apply=always_apply, **params)\n annotations = {\"image\": image, **call_params}\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(**annotations)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(**annotations)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n\n\ndef test_from_float_serialization(float_image):\n aug = A.FromFloat(p=1, dtype=\"uint8\")\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n aug_data = aug(image=float_image)\n deserialized_aug_data = deserialized_aug(image=float_image)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n\n\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization(seed, image, mask):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose(\n [\n A.Resize(1024, 1024),\n A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),\n A.OneOf(\n [\n A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),\n A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),\n ]\n ),\n ]\n ),\n A.Compose(\n [\n A.Resize(1024, 1024),\n 
A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),\n A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),\n ]\n ),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ]\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, mask=mask)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n\n\[email protected](\n [\"bboxes\", \"bbox_format\", \"labels\"],\n [\n ([(20, 30, 40, 50)], \"coco\", [1]),\n ([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], \"coco\", [1, 2]),\n ([(20, 30, 60, 80)], \"pascal_voc\", [2]),\n ([(20, 30, 60, 80, 99)], \"pascal_voc\", [1]),\n ([(0.2, 0.3, 0.4, 0.5)], \"yolo\", [2]),\n ([(0.2, 0.3, 0.4, 0.5, 99)], \"yolo\", [1]),\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),\n A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ],\n bbox_params={\"format\": bbox_format, \"label_fields\": [\"labels\"]},\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, bboxes=bboxes, labels=labels)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n\n\[email protected](\n [\"keypoints\", \"keypoint_format\", \"labels\"],\n [\n ([(20, 30, 40, 50)], \"xyas\", [1]),\n ([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], \"xy\", [1, 2]),\n ([(20, 30, 60, 80)], \"yx\", [2]),\n ([(20, 30, 60, 80, 99)], \"xys\", [1]),\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):\n aug = A.Compose(\n [\n A.OneOrOther(\n A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),\n A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),\n ),\n A.HorizontalFlip(p=1),\n A.RandomBrightnessContrast(p=0.5),\n ],\n keypoint_params={\"format\": keypoint_format, \"label_fields\": [\"labels\"]},\n )\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, keypoints=keypoints, labels=labels)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n\n\[email protected](\n [\"augmentation_cls\", \"params\"],\n [\n [A.ChannelShuffle, {}],\n [A.GaussNoise, {}],\n [A.Cutout, {}],\n [A.ImageCompression, {}],\n [A.JpegCompression, {}],\n [A.HueSaturationValue, {}],\n [A.RGBShift, {}],\n [A.RandomBrightnessContrast, {}],\n [A.Blur, {}],\n [A.MotionBlur, {}],\n [A.MedianBlur, {}],\n [A.CLAHE, {}],\n [A.InvertImg, {}],\n [A.RandomGamma, {}],\n 
[A.ToGray, {}],\n [A.VerticalFlip, {}],\n [A.HorizontalFlip, {}],\n [A.Flip, {}],\n [A.Transpose, {}],\n [A.RandomRotate90, {}],\n [A.Rotate, {}],\n [A.OpticalDistortion, {}],\n [A.GridDistortion, {}],\n [A.ElasticTransform, {}],\n [A.Normalize, {}],\n [A.ToFloat, {}],\n [A.FromFloat, {}],\n [A.RandomGridShuffle, {}],\n [A.Solarize, {}],\n [A.Posterize, {}],\n [A.Equalize, {}],\n [A.MultiplicativeNoise, {}],\n [A.ColorJitter, {}],\n [A.Perspective, {}],\n [A.Sharpen, {}],\n ],\n)\[email protected](\"seed\", TEST_SEEDS)\ndef test_additional_targets_for_image_only_serialization(augmentation_cls, params, image, seed):\n aug = A.Compose([augmentation_cls(always_apply=True, **params)], additional_targets={\"image2\": \"image\"})\n image2 = image.copy()\n\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug)\n set_seed(seed)\n aug_data = aug(image=image, image2=image2)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, image2=image2)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"image2\"], deserialized_aug_data[\"image2\"])\n\n\[email protected](\"seed\", TEST_SEEDS)\[email protected](\"p\", [1])\ndef test_lambda_serialization(image, mask, albumentations_bboxes, keypoints, seed, p):\n def vflip_image(image, **kwargs):\n return F.vflip(image)\n\n def vflip_mask(mask, **kwargs):\n return F.vflip(mask)\n\n def vflip_bbox(bbox, **kwargs):\n return F.bbox_vflip(bbox, **kwargs)\n\n def vflip_keypoint(keypoint, **kwargs):\n return F.keypoint_vflip(keypoint, **kwargs)\n\n aug = A.Lambda(name=\"vflip\", image=vflip_image, mask=vflip_mask, bbox=vflip_bbox, keypoint=vflip_keypoint, p=p)\n\n serialized_aug = A.to_dict(aug)\n deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={\"vflip\": aug})\n set_seed(seed)\n aug_data = aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)\n set_seed(seed)\n deserialized_aug_data = deserialized_aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)\n assert np.array_equal(aug_data[\"image\"], deserialized_aug_data[\"image\"])\n assert np.array_equal(aug_data[\"mask\"], deserialized_aug_data[\"mask\"])\n assert np.array_equal(aug_data[\"bboxes\"], deserialized_aug_data[\"bboxes\"])\n assert np.array_equal(aug_data[\"keypoints\"], deserialized_aug_data[\"keypoints\"])\n"
] | [
[
"numpy.array_equal",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SarahGuo1999/SiamR-CNN | [
"df9b428aeb90da0c8b2c8076f54f632efb07366c"
] | [
"train.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train.py\n\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport cv2\nimport six\nimport shutil\n\nassert six.PY3, \"FasterRCNN requires Python 3!\"\nimport tensorflow as tf\nimport tqdm\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack import *\nfrom tensorpack.tfutils import optimizer\nfrom tensorpack.tfutils.common import get_tf_version_tuple, get_tensors_by_names\nfrom tensorpack.tfutils.summary import add_moving_summary\nfrom tensorpack.tfutils.varreplace import freeze_variables\n\nimport model_frcnn\nimport model_mrcnn\nfrom basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone, backbone_scope\nfrom dataset import DetectionDataset\nfrom config import finalize_configs, config as cfg\nfrom data import get_all_anchors, get_all_anchors_fpn, get_train_dataflow\nfrom eval_utils import EvalCallback\nfrom model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align\nfrom model_cascade import CascadeRCNNHead, CascadeRCNNHeadWithHardExamples\nfrom model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses\nfrom model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets\nfrom model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head\nfrom model_rpn import generate_rpn_proposals, rpn_head, rpn_losses\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nclass DetectionModel(ModelDesc):\n def preprocess(self, image):\n image = tf.expand_dims(image, 0)\n image = image_preprocess(image, bgr=True)\n return tf.transpose(image, [0, 3, 1, 2])\n\n @property\n def training(self):\n return get_current_tower_context().is_training\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)\n tf.summary.scalar('learning_rate-summary', lr)\n\n # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.\n lr = lr / 8.\n opt = tf.train.MomentumOptimizer(lr, 0.9)\n if cfg.TRAIN.NUM_GPUS < 8:\n opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)\n return opt\n\n def get_inference_tensor_names(self):\n \"\"\"\n Returns two lists of tensor names to be used to create an inference callable.\n\n Returns:\n [str]: input names\n [str]: output names\n \"\"\"\n if cfg.MODE_THIRD_STAGE:\n out = ['output/boxes', 'output/scores', 'third_stage_features_out', 'ff_gt_tracklet_scores',\n 'sparse_tracklet_scores', 'tracklet_score_indices']\n else:\n out = ['output/boxes', 'output/scores', 'output/labels']\n if cfg.MODE_MASK:\n out.append('output/masks')\n if cfg.EXTRACT_GT_FEATURES:\n return ['image', 'roi_boxes'], ['boxes_for_extraction', 'features_for_extraction']\n else:\n return ['image'], out\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n\n image = self.preprocess(inputs['image']) # 1CHW\n\n features = self.backbone(image)\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n if cfg.EXTRACT_GT_FEATURES:\n anchor_inputs[\"roi_boxes\"] = inputs[\"roi_boxes\"]\n proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?\n\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n head_losses = self.roi_heads(image, features, proposals, targets)\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + 
head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n\nclass ResNetC4Model(DetectionModel):\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image'),\n tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),\n tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def backbone(self, image):\n return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]\n\n def rpn(self, image, features, inputs):\n featuremap = features[0]\n rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)\n anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])\n anchors = anchors.narrow_to(featuremap)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox\n proposal_boxes, proposal_scores = generate_rpn_proposals(\n tf.reshape(pred_boxes_decoded, [-1, 4]),\n tf.reshape(rpn_label_logits, [-1]),\n image_shape2d,\n cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,\n cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)\n\n if self.training:\n losses = rpn_losses(\n anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n featuremap = features[0]\n\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n # sample proposal boxes in training\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n # The boxes to be used to crop RoIs.\n # Use all proposal boxes in inference\n\n boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)\n roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)\n\n feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7\n # Keep C5 feature to be shared with mask branch\n feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)\n\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,\n tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n # In training, mask branch shares the same C5 feature.\n fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 14,\n pad_border=False) # nfg x 1x14x14\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = 
clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n\n if cfg.MODE_MASK:\n roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)\n feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNModel(DetectionModel):\n\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image')]\n num_anchors = len(cfg.RPN.ANCHOR_RATIOS)\n for k in range(len(cfg.FPN.ANCHOR_STRIDES)):\n ret.extend([\n tf.placeholder(tf.int32, (None, None, num_anchors),\n 'anchor_labels_lvl{}'.format(k + 2)),\n tf.placeholder(tf.float32, (None, None, num_anchors, 4),\n 'anchor_boxes_lvl{}'.format(k + 2))])\n ret.extend([\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n if cfg.EXTRACT_GT_FEATURES:\n ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))\n return ret\n\n def slice_feature_and_anchors(self, p23456, anchors):\n for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):\n with tf.name_scope('FPN_slice_lvl{}'.format(i)):\n anchors[i] = anchors[i].narrow_to(p23456[i])\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n p23456 = fpn_model('fpn', c2345)\n return p23456\n\n def rpn(self, image, features, inputs):\n if cfg.EXTRACT_GT_FEATURES:\n boxes = inputs['roi_boxes']\n return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)\n\n assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n all_anchors_fpn = get_all_anchors_fpn()\n multilevel_anchors = [RPNAnchors(\n all_anchors_fpn[i],\n inputs['anchor_labels_lvl{}'.format(i + 2)],\n inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]\n self.slice_feature_and_anchors(features, multilevel_anchors)\n\n # Multi-Level RPN Proposals\n rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))\n for pi in features]\n multilevel_label_logits = [k[0] for k in rpn_outputs]\n multilevel_box_logits = [k[1] for k in rpn_outputs]\n multilevel_pred_boxes = [anchor.decode_logits(logits)\n for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]\n\n proposal_boxes, proposal_scores = generate_fpn_proposals(\n multilevel_pred_boxes, multilevel_label_logits, image_shape2d)\n\n if self.training:\n losses = multilevel_rpn_losses(\n multilevel_anchors, multilevel_label_logits, multilevel_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n 
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if not cfg.FPN.CASCADE:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n\n head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(\n 'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,\n gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n else:\n def roi_func(boxes):\n return multilevel_roi_align(features[:4], boxes, 7)\n\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if cfg.EXTRACT_GT_FEATURES:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n tf.identity(roi_feature_fastrcnn, \"rpn/feature\")\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNTrackModel(ResNetFPNModel):\n def inputs(self):\n ret = super().inputs()\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ref_features'))\n else:\n ret.append(tf.placeholder(tf.float32, (None, None, 3), 'ref_image'))\n ret.append(tf.placeholder(tf.float32, (4,), 'ref_box'))\n if cfg.MODE_THIRD_STAGE:\n ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ff_gt_tracklet_feat'))\n ret.append(tf.placeholder(tf.float32, (None, 256, 7, 7), 'active_tracklets_feats'))\n ret.append(tf.placeholder(tf.float32, (None, 4), 'active_tracklets_boxes'))\n ret.append(tf.placeholder(tf.float32, (), 'tracklet_distance_threshold'))\n if cfg.MODE_HARD_MINING:\n ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 'hard_negative_features'))\n if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:\n ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 
'hard_positive_features'))\n ret.append(tf.placeholder(tf.float32, (None, 3), 'hard_positive_ious'))\n ret.append(tf.placeholder(tf.float32, (None, 4), 'hard_positive_gt_boxes'))\n ret.append(tf.placeholder(tf.float32, (None, 3, 4), 'hard_positive_jitter_boxes'))\n if cfg.EXTRACT_GT_FEATURES:\n ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))\n return ret\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n with backbone_scope(freeze=cfg.BACKBONE.FREEZE_AT > 3):\n p23456 = fpn_model('fpn', c2345)\n return p23456, c2345\n\n def rpn(self, image, features, inputs):\n if cfg.EXTRACT_GT_FEATURES:\n boxes = inputs['roi_boxes']\n return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)\n\n if cfg.BACKBONE.FREEZE_AT > 3:\n with freeze_variables(stop_gradient=False, skip_collection=True):\n return super().rpn(image, features, inputs)\n else:\n return super().rpn(image, features, inputs)\n\n def roi_heads(self, image, ref_features, ref_box, features, proposals, targets, hard_negative_features=None,\n hard_positive_features=None, hard_positive_ious=None, hard_positive_gt_boxes=None,\n hard_positive_jitter_boxes=None, precomputed_ref_features=None):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if precomputed_ref_features is None:\n roi_aligned_ref_features = multilevel_roi_align(ref_features[:4], ref_box[tf.newaxis], 7)\n else:\n roi_aligned_ref_features = precomputed_ref_features[tf.newaxis]\n\n if cfg.MODE_SHARED_CONV_REDUCE:\n scope = tf.get_variable_scope()\n else:\n scope = \"\"\n\n assert cfg.FPN.CASCADE\n\n def roi_func(boxes, already_aligned_features=None):\n if already_aligned_features is None:\n aligned_features = multilevel_roi_align(features[:4], boxes, 7)\n else:\n # for hard example mining\n aligned_features = already_aligned_features\n tiled = tf.tile(roi_aligned_ref_features, [tf.shape(aligned_features)[0], 1, 1, 1])\n concat_features = tf.concat((tiled, aligned_features), axis=1)\n\n with argscope(Conv2D, data_format='channels_first',\n kernel_initializer=tf.variance_scaling_initializer(\n scale=2.0, mode='fan_out',\n distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n reduced_features = Conv2D('conv_reduce', concat_features, 256, 1, activation=None)\n return reduced_features\n\n if cfg.MODE_HARD_MINING and self.training:\n fastrcnn_head = CascadeRCNNHeadWithHardExamples(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS, hard_negative_features,\n hard_positive_features, cfg.HARD_NEGATIVE_LOSS_SCALING_FACTOR,\n cfg.HARD_POSITIVE_LOSS_SCALING_FACTOR, hard_positive_ious, hard_positive_gt_boxes,\n hard_positive_jitter_boxes)\n else:\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if cfg.EXTRACT_GT_FEATURES:\n # get boxes and features for each of the three cascade stages!\n b0 = proposals.boxes\n b1, b2, _ = fastrcnn_head._cascade_boxes\n f0 = multilevel_roi_align(features[:4], b0, 7)\n f1 = multilevel_roi_align(features[:4], b1, 7)\n f2 = multilevel_roi_align(features[:4], b2, 7)\n tf.concat([b0, b1, b2], axis=0, 
name=\"boxes_for_extraction\")\n tf.concat([f0, f1, f2], axis=0, name=\"features_for_extraction\")\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n\n if cfg.MEASURE_IOU_DURING_TRAINING:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output_train')\n # if predictions are empty, this might break...\n # to prevent, stack dummy box\n boxes_for_iou = tf.concat([final_boxes[:1], tf.constant([[0.0, 0.0, 1.0, 1.0]],\n dtype=tf.float32)], axis=0)\n from examples.FasterRCNN.utils.box_ops import pairwise_iou\n iou_at_1 = tf.identity(pairwise_iou(gt_boxes[:1], boxes_for_iou)[0, 0], name=\"train_iou_at_1\")\n add_moving_summary(iou_at_1)\n\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n image = self.preprocess(inputs['image']) # 1CHW\n\n fpn_features, backbone_features = self.backbone(image)\n\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n ref_features = None\n ref_box = None\n else:\n ref_image = self.preprocess(inputs['ref_image']) # 1CHW\n ref_box = inputs['ref_box']\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n ref_features, _ = self.backbone(ref_image)\n\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n if cfg.EXTRACT_GT_FEATURES:\n anchor_inputs[\"roi_boxes\"] = inputs[\"roi_boxes\"]\n proposals, rpn_losses = self.rpn(image, fpn_features, anchor_inputs) # inputs?\n\n second_stage_features = fpn_features\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n\n hard_negative_features = None\n hard_positive_features = None\n hard_positive_ious = None\n 
hard_positive_gt_boxes = None\n hard_positive_jitter_boxes = None\n if cfg.MODE_HARD_MINING:\n hard_negative_features = inputs['hard_negative_features']\n if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:\n hard_positive_features = inputs['hard_positive_features']\n hard_positive_ious = inputs['hard_positive_ious']\n hard_positive_gt_boxes = inputs['hard_positive_gt_boxes']\n hard_positive_jitter_boxes = inputs['hard_positive_jitter_boxes']\n\n precomputed_ref_features = None\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n precomputed_ref_features = inputs['ref_features']\n\n # Extend proposals by previous frame detections\n if not self.training and cfg.MODE_THIRD_STAGE and cfg.EXTEND_PROPOSALS_BY_ACTIVE_TRACKLETS:\n proposal_boxes = proposals.boxes\n tracklet_boxes = inputs['active_tracklets_boxes']\n concat_boxes = tf.concat([proposal_boxes, tracklet_boxes], axis=0)\n proposals = BoxProposals(concat_boxes)\n\n head_losses = self.roi_heads(image, ref_features, ref_box, second_stage_features, proposals, targets,\n hard_negative_features, hard_positive_features, hard_positive_ious,\n hard_positive_gt_boxes, hard_positive_jitter_boxes,\n precomputed_ref_features=precomputed_ref_features)\n\n if cfg.MODE_THIRD_STAGE:\n self._run_third_stage(inputs, second_stage_features, tf.shape(image)[2:4])\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n def _run_third_stage(self, inputs, second_stage_features, image_hw):\n boxes, scores = get_tensors_by_names(['output/boxes', 'output/scores'])\n # let's fix (as in finalize) the boxes, so we can roi align only one time\n aligned_features_curr = multilevel_roi_align(second_stage_features[:4], boxes, 7)\n # these also need to be extracted!\n aligned_features_curr = tf.identity(aligned_features_curr, name='third_stage_features_out')\n\n ff_gt_tracklet_scores, _ = self._score_for_third_stage(ref_feats=inputs['ff_gt_tracklet_feat'][tf.newaxis],\n det_feats=aligned_features_curr)\n tf.identity(ff_gt_tracklet_scores, name='ff_gt_tracklet_scores')\n sparse_tracklet_scores, tracklet_score_indices = self._score_for_third_stage(\n ref_feats=inputs['active_tracklets_feats'], det_feats=aligned_features_curr,\n dense=False, ref_boxes=inputs['active_tracklets_boxes'], det_boxes=boxes, image_hw=image_hw,\n tracklet_distance_threshold=inputs['tracklet_distance_threshold'])\n tf.identity(sparse_tracklet_scores, name='sparse_tracklet_scores')\n tf.identity(tracklet_score_indices, name='tracklet_score_indices')\n\n def _score_for_third_stage(self, ref_feats, det_feats, dense=True, ref_boxes=None, det_boxes=None, image_hw=None,\n tracklet_distance_threshold=0.08):\n # build all pairs\n n_refs = tf.shape(ref_feats)[0]\n n_dets = tf.shape(det_feats)[0]\n active_tracklets_tiled = tf.tile(ref_feats[:, tf.newaxis], multiples=[1, n_dets, 1, 1, 1])\n dets_tiled = tf.tile(det_feats[tf.newaxis], multiples=[n_refs, 1, 1, 1, 1])\n concated = tf.concat([active_tracklets_tiled, dets_tiled], axis=2)\n\n if not dense:\n # use boxes to prune the connectivity\n assert ref_boxes is not None\n assert det_boxes is not None\n assert image_hw is not None\n\n def xyxy_to_cxcywh(boxes_xyxy):\n wh = boxes_xyxy[:, 2:] - boxes_xyxy[:, :2]\n c = boxes_xyxy[:, :2] + wh / 2\n boxes_cwh = tf.concat((c, wh), axis=1)\n return boxes_cwh\n\n active_tracklets_boxes_cxcywh = xyxy_to_cxcywh(ref_boxes)\n 
boxes_cxcywh = xyxy_to_cxcywh(det_boxes)\n # normalize by image size\n h = image_hw[0]\n w = image_hw[1]\n norm = tf.cast(tf.stack([w, h, w, h], axis=0), tf.float32)\n diffs = tf.abs(active_tracklets_boxes_cxcywh[:, tf.newaxis] - boxes_cxcywh[tf.newaxis]) / norm[\n tf.newaxis, tf.newaxis]\n\n # use distances of boxes, first frame scores (\"scores\") to prune\n thresholds = tf.stack([tracklet_distance_threshold] * 4, axis=0)\n keep_mask = tf.reduce_all(diffs < thresholds, axis=2)\n\n indices = tf.where(keep_mask)\n flattened = tf.boolean_mask(concated, keep_mask)\n else:\n indices = None\n flattened = tf.reshape(\n concated, [tf.shape(concated)[0] * tf.shape(concated)[1]] + [int(x) for x in concated.shape[2:]])\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if cfg.MODE_SHARED_CONV_REDUCE:\n scope = tf.get_variable_scope()\n else:\n scope = \"\"\n all_posteriors = []\n # do this for every cascade stage\n for idx in range(3):\n with tf.variable_scope('cascade_rcnn_stage{}'.format(idx + 1), reuse=True):\n with argscope(Conv2D, data_format='channels_first'):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n reduced_features = Conv2D('conv_reduce', flattened, 256, 1, activation=None)\n head_feats = fastrcnn_head_func('head', reduced_features)\n with tf.variable_scope('outputs_new', reuse=True):\n classification = FullyConnected('class', head_feats, 2)\n posteriors = tf.nn.softmax(classification)\n all_posteriors.append(posteriors)\n posteriors = (all_posteriors[0] + all_posteriors[1] + all_posteriors[2]) / tf.constant(3.0, dtype=tf.float32)\n scores = posteriors[:, 1]\n return scores, indices\n\n def get_inference_tensor_names(self):\n inp, out = super().get_inference_tensor_names()\n if cfg.USE_PRECOMPUTED_REF_FEATURES:\n inp.append('ref_features')\n else:\n inp.append('ref_image')\n inp.append('ref_box')\n if cfg.MODE_THIRD_STAGE:\n inp.append('ff_gt_tracklet_feat')\n inp.append('active_tracklets_feats')\n inp.append('active_tracklets_boxes')\n inp.append('tracklet_distance_threshold')\n return inp, out\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')\n parser.add_argument('--logdir', help='log directory', default='train_log/siamrcnn')\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n if get_tf_version_tuple() < (1, 6):\n # https://github.com/tensorflow/tensorflow/issues/14657\n logger.warn(\"TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.\")\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n\n MODEL = ResNetFPNTrackModel()\n DetectionDataset() # initialize the config with information from our dataset\n\n is_horovod = cfg.TRAINER == 'horovod'\n if is_horovod:\n hvd.init()\n logger.info(\"Horovod Rank={}, Size={}\".format(hvd.rank(), hvd.size()))\n\n if not is_horovod or hvd.rank() == 0:\n # keep the old log folder if already existing! (before it would just delete it)\n logger.set_logger_dir(args.logdir, 'k')\n # logger.set_logger_dir(args.logdir, 'd')\n\n finalize_configs(is_training=True)\n stepnum = cfg.TRAIN.STEPS_PER_EPOCH\n\n # warmup is step based, lr is epoch based\n init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)\n warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]\n warmup_end_epoch = cfg.TRAIN.WARMUP * 1. 
/ stepnum\n lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]\n\n factor = 8. / cfg.TRAIN.NUM_GPUS\n for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):\n mult = 0.1 ** (idx + 1)\n lr_schedule.append(\n (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))\n logger.info(\"Warm Up Schedule (steps, value): \" + str(warmup_schedule))\n logger.info(\"LR Schedule (epochs, value): \" + str(lr_schedule))\n train_dataflow = get_train_dataflow()\n # This is what's commonly referred to as \"epochs\"\n total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()\n logger.info(\"Total passes of the training set is: {:.5g}\".format(total_passes))\n\n callbacks = [\n PeriodicCallback(\n ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),\n # every_k_epochs=1),\n every_k_epochs=20),\n # linear warmup\n ScheduledHyperParamSetter(\n 'learning_rate', warmup_schedule, interp='linear', step_based=True),\n ScheduledHyperParamSetter('learning_rate', lr_schedule),\n PeakMemoryTracker(),\n EstimatedTimeLeft(median=True),\n SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout\n ] + [\n EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)\n for dataset in cfg.DATA.VAL\n ]\n if not is_horovod:\n callbacks.append(GPUUtilizationTracker())\n\n start_epoch = cfg.TRAIN.STARTING_EPOCH\n if is_horovod and hvd.rank() > 0:\n session_init = None\n else:\n # first try to find existing model\n checkpoint_path = os.path.join(args.logdir, \"checkpoint\")\n if os.path.exists(checkpoint_path):\n session_init = get_model_loader(checkpoint_path)\n start_step = int(session_init.path.split(\"-\")[-1])\n start_epoch = start_step // stepnum\n logger.info(\n \"initializing from existing model, \" + session_init.path + \", starting from epoch \" + str(start_epoch))\n else:\n if args.load:\n session_init = get_model_loader(args.load)\n else:\n session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None\n\n max_epoch = min(cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum, cfg.TRAIN.MAX_NUM_EPOCHS)\n\n traincfg = TrainConfig(\n model=MODEL,\n data=QueueInput(train_dataflow),\n callbacks=callbacks,\n steps_per_epoch=stepnum,\n # max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,\n max_epoch=max_epoch,\n session_init=session_init,\n starting_epoch=start_epoch\n )\n if is_horovod:\n trainer = HorovodTrainer(average=False)\n else:\n # nccl mode appears faster than cpu mode\n trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')\n launch_train_with_config(traincfg, trainer)\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.where",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"tensorflow.boolean_mask",
"tensorflow.squeeze",
"tensorflow.train.MomentumOptimizer",
"tensorflow.tile",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.size",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.reduce_all",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
yxfish13/plan_enumerator | [
"e081b4c6eb3b373c4b8d97fdb88c5c4de9c77ba3"
] | [
"TreeLSTM.py"
] | [
"# Copyright 2018-2021 Xiang Yu(x-yu17(at)mails.tsinghua.edu.cn)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport torch\nfrom torch.nn import init\nimport torchfold\nimport torch.nn as nn\nfrom ImportantConfig import Config\n\nconfig = Config()\n\nclass TreeLSTM(nn.Module):\n def __init__(self, num_units):\n super(TreeLSTM, self).__init__()\n self.num_units = num_units\n self.FC1 = nn.Linear(num_units, 5 * num_units)\n self.FC2 = nn.Linear(num_units, 5 * num_units)\n self.FC0 = nn.Linear(num_units, 5 * num_units)\n self.LNh = nn.LayerNorm(num_units,elementwise_affine = False)\n self.LNc = nn.LayerNorm(num_units,elementwise_affine = False)\n def forward(self, left_in, right_in,inputX):\n lstm_in = self.FC1(left_in[0])\n lstm_in += self.FC2(right_in[0])\n lstm_in += self.FC0(inputX)\n a, i, f1, f2, o = lstm_in.chunk(5, 1)\n c = (a.tanh() * i.sigmoid() + f1.sigmoid() * left_in[1] +\n f2.sigmoid() * right_in[1])\n h = o.sigmoid() * c.tanh()\n h = self.LNh(h)\n return h,c\nclass TreeRoot(nn.Module):\n def __init__(self,num_units):\n super(TreeRoot, self).__init__()\n self.num_units = num_units\n self.FC = nn.Linear(num_units, num_units)\n if config.rootPool == 'meanPool':\n self.sum_pooling = nn.AdaptiveAvgPool2d((1,num_units))\n else:\n self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))\n \n # self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))\n # self.max_pooling = nn.AdaptiveAvgPool2d((1,num_units))\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n def forward(self, tree_list):\n\n return self.relu(self.FC(self.sum_pooling(tree_list)).view(-1,self.num_units))\n\nclass SPINN(nn.Module):\n\n def __init__(self, n_classes, size, n_words, mask_size,device,max_column_in_table = 15):\n super(SPINN, self).__init__()\n self.size = size\n self.tree_lstm = TreeLSTM(size)\n self.tree_root = TreeRoot(size)\n self.FC = nn.Linear(size*2, size)\n self.table_embeddings = nn.Embedding(n_words, size)#2 * max_column_in_table * size)\n self.column_embeddings = nn.Embedding(n_words, (1+2 * max_column_in_table) * size)\n self.out = nn.Linear(size*2, size)\n self.out2 = nn.Linear(size, n_classes)\n self.outFc = nn.Linear(mask_size, size)\n\n if config.rootPool == 'meanPool':\n self.max_pooling = nn.AdaptiveAvgPool2d((1,size))\n else:\n self.max_pooling = nn.AdaptiveMaxPool2d((1,size))\n self.max_pooling = nn.AdaptiveMaxPool2d((1,size))\n self.relu = nn.ReLU()\n self.sigmoid = nn.ReLU()\n self.leafFC = nn.Linear(size, size)\n self.sigmoid = nn.Sigmoid()\n self.LN1 = nn.LayerNorm(size,)\n self.LN2 = nn.LayerNorm(size,)\n self.max_column_in_table = max_column_in_table\n self.leafLn = nn.LayerNorm(size,elementwise_affine = False)\n self.device = device\n self.sigmoid = nn.Sigmoid()\n\n def leaf(self, word_id, table_fea=None):\n # print('tlstm_wi',word_id)\n all_columns = table_fea.view(-1,self.max_column_in_table*2+1,1)*self.column_embeddings(word_id).reshape(-1,2 * self.max_column_in_table+1,self.size)\n all_columns = self.relu(self.leafFC(all_columns))\n table_emb = 
self.max_pooling(all_columns.view(-1,self.max_column_in_table*2+1,self.size)).view(-1,self.size)\n return self.leafLn(table_emb), torch.zeros(word_id.size()[0], self.size,device = self.device,dtype = torch.float32)\n def inputX(self,left_emb,right_emb):\n cat_emb = torch.cat([left_emb,right_emb],dim = 1)\n return self.relu(self.FC(cat_emb))\n def childrenNode(self, left_h, left_c, right_h, right_c,inputX):\n return self.tree_lstm((left_h, left_c), (right_h, right_c),inputX)\n def root(self,tree_list):\n return self.tree_root(tree_list).view(-1,self.size)\n def logits(self, encoding,join_matrix,prt=False):\n encoding = self.root(encoding.view(1,-1,self.size))\n # if prt:\n # print(encoding)\n matrix = self.relu(self.outFc(join_matrix))\n # outencoding = torch.cat([encoding,encoding],dim = 1)\n outencoding = torch.cat([encoding,matrix],dim = 1)\n return self.out2(self.relu(self.out(outencoding)))"
] | [
[
"torch.nn.AdaptiveMaxPool2d",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Sigmoid",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChristianOrr/Real-time-self-adaptive-deep-stereo | [
"29bbfb212ff7a62769d39f0fe15ecb2f408ac535",
"29bbfb212ff7a62769d39f0fe15ecb2f408ac535"
] | [
"custom_models_functional.py",
"losses_and_metrics.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom keras.engine import data_adapter\nfrom matplotlib import cm\n\n\ndef colorize_img(value, vmin=None, vmax=None, cmap='jet'):\n \"\"\"\n A utility function for TensorFlow that maps a grayscale image to a matplotlib colormap for use with TensorBoard image summaries.\n By default it will normalize the input value to the range 0..1 before mapping to a grayscale colormap.\n Arguments:\n - value: 4D Tensor of shape [batch_size,height, width,1]\n - vmin: the minimum value of the range used for normalization. (Default: value minimum)\n - vmax: the maximum value of the range used for normalization. (Default: value maximum)\n - cmap: a valid cmap named for use with matplotlib's 'get_cmap'.(Default: 'gray')\n \n Returns a 3D tensor of shape [batch_size,height, width,3].\n \"\"\"\n # Uncomment the code below if disparity isnt normalised already\n # # normalize\n # vmin = tf.reduce_min(value) if vmin is None else vmin\n # vmax = tf.reduce_max(value) if vmax is None else vmax\n # value = (value - vmin) / (vmax - vmin) # vmin..vmax\n\n # quantize\n indices = tf.cast(tf.round(value[:,:,:,0]*255), dtype=tf.int32)\n\n # gather\n color_map = cm.get_cmap(cmap)\n colors = color_map(np.arange(256))[:,:3]\n colors = tf.constant(colors, dtype=tf.float32)\n value = tf.gather(colors, indices)\n return value\n\n\n# https://github.com/philferriere/tfoptflow/blob/bdc7a72e78008d1cd6db46e4667dffc2bab1fe9e/tfoptflow/core_costvol.py\ndef StereoCostVolume(name=\"cost_volume\", search_range=2):\n \"\"\"Build cost volume for associating a pixel from the left image with its corresponding pixels in the right image.\n Args:\n c1: Level of the feature pyramid of the left image\n warp: Warped level of the feature pyramid of the right image\n search_range: Search range (maximum displacement)\n \"\"\"\n def _block(inputs):\n\n def internal_fn(inputs):\n c1, warp = inputs\n padded_lvl = tf.pad(warp, [[0, 0], [0, 0], [search_range, search_range], [0, 0]])\n width = c1.shape.as_list()[2]\n max_offset = search_range * 2 + 1\n\n cost_vol = []\n for i in range(0, max_offset):\n slice = padded_lvl[:, :, i:width+i, :]\n cost = tf.reduce_mean(c1 * slice, axis=3, keepdims=True)\n cost_vol.append(cost)\n\n cost_vol = tf.concat(cost_vol, axis=3)\n cost_curve = tf.concat([c1, cost_vol], axis=3)\n\n return cost_curve\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(inputs)\n #\n # cost_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # cost_model.shape = keras_output.shape\n #\n # return cost_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef BuildIndices(name=\"build_indices\", batch_size=1):\n \"\"\"\n Given a flow or disparity generate the coordinates\n of source pixels to sample from [batch, height_t, width_t, 2]\n Args:\n coords: Generic optical flow or disparity\n Returns:\n coordinates to sample from.\n\n \"\"\"\n def _block(coords):\n\n def internal_fn(coords):\n _, height, width, _ = coords.get_shape().as_list()\n\n pixel_coords = np.ones((1, height, width, 2), dtype=np.float32)\n batches_coords = np.ones((batch_size, height, width, 1), dtype=np.float32)\n\n for i in range(0, batch_size):\n batches_coords[i][:][:][:] = i\n # build pixel coordinates and their disparity\n for i in range(0, height):\n for j in range(0, width):\n pixel_coords[0][i][j][0] = j\n pixel_coords[0][i][j][1] = i\n\n pixel_coords = tf.constant(pixel_coords, tf.float32)\n output = 
tf.concat([batches_coords, pixel_coords + coords], -1)\n return output\n\n # keras_inputs = tf.keras.layers.Input(shape=coords.shape[1:])\n # keras_output = internal_fn(keras_inputs)\n #\n # indices_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # indices_model.shape = keras_output.shape\n #\n # return indices_model(coords)\n return internal_fn(coords)\n\n return _block\n\n\ndef Warp(name=\"warp\"):\n \"\"\"\n Construct a new image by bilinear sampling from the input image.\n The right image is warpt into the lefts position.\n Points falling outside the source image boundary have value 0.\n Args:\n imgs: source right images to be sampled from [batch, height_s, width_s, channels]\n coords: coordinates of source pixels to sample from [batch, height_t, width_t, 2]. \n height_t/width_t correspond to the dimensions of the outputimage (don't need to be the same as height_s/width_s). \n The two channels correspond to x and y coordinates respectively.\n Returns:\n A new sampled image [batch, height_t, width_t, channels],\n which ideally is very similar to the left image\n \"\"\"\n\n def _block(inputs):\n def internal_fn(inputs):\n imgs, coords = inputs\n coord_b, coords_x, coords_y = tf.split(coords, [1, 1, 1], axis=3)\n\n coords_x = tf.cast(coords_x, 'float32')\n coords_y = tf.cast(coords_y, 'float32')\n\n x0 = tf.floor(coords_x)\n x1 = x0 + 1\n y0 = tf.floor(coords_y)\n\n y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')\n x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')\n zero = tf.zeros([1],dtype=tf.float32)\n\n x0_safe = tf.clip_by_value(x0, zero[0], x_max)\n y0_safe = tf.clip_by_value(y0, zero[0], y_max)\n x1_safe = tf.clip_by_value(x1, zero[0], x_max)\n\n # bilinear interp weights, with points outside the grid having weight 0\n wt_x0 = (x1 - coords_x) * tf.cast(tf.equal(x0, x0_safe), 'float32')\n wt_x1 = (coords_x - x0) * tf.cast(tf.equal(x1, x1_safe), 'float32')\n\n\n im00 = tf.cast(tf.gather_nd(imgs, tf.cast(\n tf.concat([coord_b, y0_safe, x0_safe], -1), 'int32')), 'float32')\n im01 = tf.cast(tf.gather_nd(imgs, tf.cast(\n tf.concat([coord_b, y0_safe, x1_safe], -1), 'int32')), 'float32')\n\n output = tf.add_n([\n wt_x0 * im00, wt_x1 * im01\n ])\n return output\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(keras_inputs)\n #\n # warp_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # warp_model.shape = keras_output.shape\n #\n # return warp_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef StereoContextNetwork(name=\"residual_refinement_network\", batch_size=1, output_height=320, output_width=1216):\n \"\"\"\n Final Layer in MADNet.\n Calculates the reprojection loss if training=True.\n Args:\n input: left_F2 tensor\n disp: D2 disparity from M2 module\n final_left: full resolution RGB left image\n final_right: full resolution RGB right image\n Returns:\n Full resolution disparity in float32 normalized 0-1\n \"\"\"\n act = tf.keras.layers.Activation(tf.nn.leaky_relu)\n context1 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=act, use_bias=True, name=\"context1\")\n context2 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=2, padding=\"same\", activation=act, use_bias=True, name=\"context2\")\n context3 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), dilation_rate=4, padding=\"same\", activation=act, use_bias=True, name=\"context3\")\n context4 = 
tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), dilation_rate=8, padding=\"same\", activation=act, use_bias=True, name=\"context4\")\n context5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), dilation_rate=16, padding=\"same\", activation=act, use_bias=True, name=\"context5\")\n context6 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=act, use_bias=True, name=\"context6\")\n context7 = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), dilation_rate=1, padding=\"same\", activation=\"linear\", use_bias=True, name=\"context7\")\n add = tf.keras.layers.Add(name=\"context_disp\")\n concat = tf.keras.layers.Concatenate(axis=-1)\n\n def _block(inputs):\n def internal_fn(inputs):\n input, disp = inputs\n #volume = concat([input, disp])\n volume = tf.keras.layers.concatenate([input, disp], axis=-1)\n\n x = context1(volume)\n x = context2(x)\n x = context3(x)\n x = context4(x)\n x = context5(x)\n x = context6(x)\n x = context7(x)\n\n context_disp = add([disp, x])\n final_disparity = tf.keras.layers.Resizing(name=\"final_disparity\", height=output_height, width=output_width, interpolation='bilinear')(context_disp)\n\n return final_disparity\n\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # keras_output = internal_fn(keras_inputs)\n #\n # refinement_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # refinement_model.shape = keras_output.shape\n # return refinement_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\ndef StereoEstimator(name=\"volume_filtering\"):\n \"\"\"\n This is the stereo estimation network at resolution n.\n It uses the costs (from the pixel difference between the warped right image \n and the left image) combined with the upsampled disparity from the previous\n layer (when the layer is not the last layer).\n\n The output is predicted disparity for the network at resolution n.\n \"\"\"\n act = tf.keras.layers.Activation(tf.nn.leaky_relu)\n disp1 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp1\")\n disp2 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp2\")\n disp3 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp3\")\n disp4 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp4\")\n disp5 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"disp5\")\n disp6 = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), strides=1, padding=\"same\", activation=\"linear\", use_bias=True, name=\"disp6\")\n concat = tf.keras.layers.Concatenate(axis=-1)\n\n def _block(inputs):\n def internal_fn(inputs):\n if type(inputs) is list:\n costs, upsampled_disp = inputs\n # volume = concat([costs, upsampled_disp])\n volume = tf.keras.layers.concatenate([costs, upsampled_disp], axis=-1)\n else:\n volume = inputs\n\n x = disp1(volume)\n x = disp2(x)\n x = disp3(x)\n x = disp4(x)\n x = disp5(x)\n x = disp6(x)\n return x\n\n # if type(inputs) is list:\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # else:\n # keras_inputs = tf.keras.layers.Input(shape=inputs.shape[1:])\n #\n # keras_output = internal_fn(keras_inputs)\n #\n # 
estimator_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # estimator_model.shape = keras_output.shape\n # return estimator_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\ndef ModuleM(name, layer, search_range=2, batch_size=1):\n \"\"\"\n Module MX is a sub-module of MADNet, which can be trained individually for \n online adaptation using the MAD (Modular ADaptaion) method.\n \"\"\"\n cost_volume = StereoCostVolume(name=f\"cost_{layer}\", search_range=search_range)\n stereo_estimator = StereoEstimator(name=f\"volume_filtering_{layer}\")\n build_indices = BuildIndices(name=f\"build_indices_{layer}\", batch_size=batch_size)\n warp = Warp(name=f\"warp_{layer}\")\n\n def _block(inputs):\n def internal_fn(inputs):\n # Check if layer is the bottom of the pyramid\n if len(inputs) == 3:\n left, right, prev_disp = inputs\n # Upsample disparity from previous layer\n upsampled_disp = tf.keras.layers.Resizing(name=f\"upsampled_disp_{layer}\", height=height, width=width, interpolation='bilinear')(prev_disp)\n coords = tf.keras.layers.concatenate([upsampled_disp, tf.zeros_like(upsampled_disp)], -1)\n indices = build_indices(coords)\n # Warp the right image into the left using upsampled disparity\n warped_left = warp([right, indices])\n else:\n left, right = inputs\n # No previous disparity exits, so use right image instead of warped left\n warped_left = right\n\n costs = cost_volume([left, warped_left])\n\n # Get the disparity using cost volume between left and warped left images\n if len(inputs) == 3:\n module_disparity = stereo_estimator([costs, prev_disp])\n else:\n module_disparity = stereo_estimator(costs)\n\n return module_disparity\n\n # if len(inputs) == 3:\n # keras_inputs = [tf.keras.layers.Input(shape=input.shape[1:]) for input in inputs]\n # else:\n # keras_inputs = [tf.keras.layers.Input(shape=inputs[0].shape[1:]), tf.keras.layers.Input(shape=inputs[1].shape[1:])]\n # keras_output = internal_fn(keras_inputs)\n #\n # module_model = tf.keras.Model(inputs=keras_inputs, outputs=keras_output, name=name)\n # module_model.shape = keras_output.shape\n # return module_model(inputs)\n return internal_fn(inputs)\n\n return _block\n\n\n\n\nheight = 320\nwidth = 1216\nsearch_range = 2\nbatch_size = 1\n\n# Initializing the layers\nact = tf.keras.layers.Activation(tf.nn.leaky_relu)\n# Left image feature pyramid (feature extractor)\n# F1\nleft_conv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv1\", \ninput_shape=(height, width, 3, ))\nleft_conv2 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv2\")\n# F2\nleft_conv3 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv3\")\nleft_conv4 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv4\")\n# F3\nleft_conv5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv5\")\nleft_conv6 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv6\")\n# F4\nleft_conv7 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv7\")\nleft_conv8 = 
tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv8\")\n# F5\nleft_conv9 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv9\")\nleft_conv10 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv10\")\n# F6\nleft_conv11 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"left_conv11\")\nleft_conv12 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"left_conv12\") \n# Right image feature pyramid (feature extractor)\n# F1\nright_conv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv1\", \ninput_shape=(height, width, 3, ))\nright_conv2 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv2\")\n# F2\nright_conv3 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv3\")\nright_conv4 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv4\")\n# F3\nright_conv5 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv5\")\nright_conv6 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv6\")\n# F4\nright_conv7 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv7\")\nright_conv8 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv8\")\n# F5\nright_conv9 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv9\")\nright_conv10 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv10\")\n# F6\nright_conv11 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=2, padding=\"same\", activation=act, use_bias=True, name=\"right_conv11\")\nright_conv12 = tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), strides=1, padding=\"same\", activation=act, use_bias=True, name=\"right_conv12\")\n\n#############################SCALE 6#################################\nM6 = ModuleM(name=\"M6\", layer=\"6\", search_range=search_range, batch_size=batch_size)\n############################SCALE 5###################################\nM5 = ModuleM(name=\"M5\", layer=\"5\", search_range=search_range, batch_size=batch_size)\n############################SCALE 4###################################\nM4 = ModuleM(name=\"M4\", layer=\"4\", search_range=search_range, batch_size=batch_size)\n############################SCALE 3###################################\nM3 = ModuleM(name=\"M3\", layer=\"3\", search_range=search_range, batch_size=batch_size)\n############################SCALE 2###################################\nM2 = ModuleM(name=\"M2\", layer=\"2\", search_range=search_range, 
batch_size=batch_size)\n############################REFINEMENT################################\nrefinement_module = StereoContextNetwork(batch_size=batch_size, output_height=height, output_width=width)\n\n\n# Build the model\n# Left and right image inputs\nleft_input = tf.keras.layers.Input(shape=[height, width, 3])\nright_input = tf.keras.layers.Input(shape=[height, width, 3])\n\n#######################PYRAMID FEATURES###############################\n# Left image feature pyramid (feature extractor)\n# F1\nleft_pyramid = left_conv1(left_input)\nleft_F1 = left_conv2(left_pyramid)\n# F2\nleft_pyramid = left_conv3(left_F1)\nleft_F2 = left_conv4(left_pyramid)\n# F3\nleft_pyramid = left_conv5(left_F2)\nleft_F3 = left_conv6(left_pyramid)\n# F4\nleft_pyramid = left_conv7(left_F3)\nleft_F4 = left_conv8(left_pyramid)\n# F5\nleft_pyramid = left_conv9(left_F4)\nleft_F5 = left_conv10(left_pyramid)\n# F6\nleft_pyramid = left_conv11(left_F5)\nleft_F6 = left_conv12(left_pyramid)\n\n# Right image feature pyramid (feature extractor)\n# F1\nright_pyramid = right_conv1(right_input)\nright_F1 = right_conv2(right_pyramid)\n# F2\nright_pyramid = right_conv3(right_F1)\nright_F2 = right_conv4(right_pyramid)\n# F3\nright_pyramid = right_conv5(right_F2)\nright_F3 = right_conv6(right_pyramid)\n# F4\nright_pyramid = right_conv7(right_F3)\nright_F4 = right_conv8(right_pyramid)\n# F5\nright_pyramid = right_conv9(right_F4)\nright_F5 = right_conv10(right_pyramid)\n# F6\nright_pyramid = right_conv11(right_F5)\nright_F6 = right_conv12(right_pyramid)\n\n\n#############################SCALE 6#################################\nD6 = M6([left_F6, right_F6])\n############################SCALE 5###################################\nD5 = M5([left_F5, right_F5, D6])\n############################SCALE 4###################################\nD4 = M4([left_F4, right_F4, D5])\n############################SCALE 3###################################\nD3 = M3([left_F3, right_F3, D4])\n############################SCALE 2###################################\nD2 = M2([left_F2, right_F2, D3])\n############################REFINEMENT################################\nfinal_disparity = refinement_module([left_F2, D2])\n\n\nmodel = tf.keras.Model(inputs={\"left_input\": left_input, \"right_input\": right_input}, outputs=final_disparity, name=\"MADNet\")\nmodel.summary()\n",
"import tensorflow as tf\n\n\n#---------------Metrics-------------------\nclass EndPointError(tf.keras.metrics.Metric):\n \"\"\"\n End point error metric.\n Calculates the average absolute difference \n between pixels in predicted disparity \n and groundtruth.\n \n \"\"\"\n def __init__(self, name=\"EPE\", **kwargs):\n super(EndPointError, self).__init__(name=name, **kwargs)\n self.end_point_error = self.add_weight(name='EPE', initializer='zeros')\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n # Remove normalisation\n y_true *= 256\n y_pred *= 256\n abs_errors = tf.abs(y_pred - y_true)\n # Valid map has all non-zero pixels set to 1 and 0 pixels remain 0\n valid_map = tf.where(tf.equal(y_true, 0), tf.zeros_like(y_true, dtype=tf.float32), tf.ones_like(y_true, dtype=tf.float32))\n # Remove the errors with 0 groundtruth disparity\n filtered_error = abs_errors * valid_map\n # Get the mean error (non-zero groundtruth pixels)\n self.end_point_error.assign_add(tf.reduce_sum(filtered_error) / tf.reduce_sum(valid_map))\n\n def result(self):\n return self.end_point_error\n\n def reset_state(self):\n # The state of the metric will be reset at the start of each epoch.\n self.end_point_error.assign(0.0)\n\n\nclass Bad3(tf.keras.metrics.Metric):\n \"\"\"\n Bad3 also called D1-all is the percentage\n of pixels with disparity difference >= 3\n between predicted disparity and groundtruth.\n \n \"\"\"\n def __init__(self, name=\"Bad3(%)\", **kwargs):\n super(Bad3, self).__init__(name=name, **kwargs)\n self.pixel_threshold = 3\n self.bad3 = self.add_weight(name='bad3_percent', initializer='zeros')\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n # Remove normalisation\n y_true *= 256\n y_pred *= 256\n abs_errors = tf.abs(y_pred - y_true)\n # Valid map has all non-zero pixels set to 1 and 0 pixels remain 0\n valid_map = tf.where(tf.equal(y_true, 0), tf.zeros_like(y_true, dtype=tf.float32), tf.ones_like(y_true, dtype=tf.float32))\n # Remove the errors with 0 groundtruth disparity\n filtered_error = abs_errors * valid_map\n # 1 assigned to all errors greater than threshold, 0 to the rest\n bad_pixel_abs = tf.where(tf.greater(filtered_error, self.pixel_threshold), tf.ones_like(filtered_error, dtype=tf.float32), tf.zeros_like(filtered_error, dtype=tf.float32))\n # (number of errors greater than threshold) / (number of errors) \n self.bad3.assign_add(tf.reduce_sum(bad_pixel_abs) / tf.reduce_sum(valid_map) * 100)\n\n def result(self):\n return self.bad3\n\n def reset_state(self):\n # The state of the metric will be reset at the start of each epoch.\n self.bad3.assign(0.0)\n\n\ndef calculate_metrics(y_true, y_pred, pixel_threshold):\n \"\"\"\n Calculates all metrics and returns them in a dictionary.\n Used inside train_step.\n \"\"\"\n # Remove normalisation\n y_true *= 256\n y_pred *= 256\n abs_errors = tf.abs(y_pred - y_true)\n # Valid map has all non-zero pixels set to 1 and 0 pixels remain 0\n valid_map = tf.where(tf.equal(y_true, 0), tf.zeros_like(y_true, dtype=tf.float32), tf.ones_like(y_true, dtype=tf.float32))\n # Remove the errors with 0 groundtruth disparity\n filtered_error = abs_errors * valid_map\n # Get the mean error (non-zero groundtruth pixels)\n end_point_error = tf.reduce_sum(filtered_error) / tf.reduce_sum(valid_map) \n # 1 assigned to all errors greater than threshold, 0 to the rest\n bad_pixel_abs = tf.where(tf.greater(filtered_error, pixel_threshold), tf.ones_like(filtered_error, dtype=tf.float32), tf.zeros_like(filtered_error, dtype=tf.float32))\n # (number 
of errors greater than threshold) / (number of errors) \n bad3 = tf.reduce_sum(bad_pixel_abs) / tf.reduce_sum(valid_map) * 100\n return {\"EPE\": end_point_error, \"Bad3(%)\": bad3}\n\n\n#---------------Losses-------------------\nclass SSIMLoss(tf.keras.losses.Loss):\n \"\"\"\n SSIM dissimilarity measure\n Used for self-supervised training\n Args:\n y_true: target image\n y_pred: predicted image\n \"\"\"\n def __init__(self, name=\"mean_SSIM_l1\"):\n super(SSIMLoss, self).__init__(name=name)\n self.pool = tf.keras.layers.AveragePooling2D(pool_size=(3,3) ,strides=(1,1), padding='valid')\n self.reduction = tf.keras.losses.Reduction.SUM\n\n def call(self, y_true, y_pred):\n C1 = 0.01**2\n C2 = 0.03**2\n mu_x = self.pool(y_true)\n mu_y = self.pool(y_pred)\n\n sigma_x = self.pool(y_true**2) - mu_x**2\n sigma_y = self.pool(y_pred**2) - mu_y**2\n sigma_xy = self.pool(y_true*y_pred) - mu_x * mu_y\n\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n\n SSIM = SSIM_n / SSIM_d\n SSIM = tf.clip_by_value((1-SSIM)/2, 0 ,1)\n\n mean_SSIM = tf.reduce_mean(SSIM)\n\n sum_l1 = tf.reduce_sum(tf.abs(y_true - y_pred))\n\n return 0.85 * mean_SSIM + 0.15 * sum_l1\n\n\nclass ReconstructionLoss(tf.keras.losses.Loss):\n \"\"\"\n Reconstruction loss function (mean l1)\n Per pixel absolute error between groundtruth \n disparity and predicted disparity\n Used for supervised training\n Args:\n y_true: target image\n y_pred: predicted image\n \"\"\"\n def __init__(self, name=\"mean_l1\"):\n super(ReconstructionLoss, self).__init__(name=name)\n self.reduction = tf.keras.losses.Reduction.SUM\n\n def call(self, y_true, y_pred):\n return tf.reduce_sum(tf.abs(y_true-y_pred))\n"
] | [
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.pad",
"tensorflow.add_n",
"tensorflow.keras.layers.Concatenate",
"numpy.arange",
"tensorflow.keras.layers.Conv2D",
"tensorflow.floor",
"tensorflow.gather",
"tensorflow.keras.layers.Add",
"tensorflow.shape",
"tensorflow.keras.Model",
"tensorflow.zeros_like",
"tensorflow.keras.layers.Resizing",
"tensorflow.split",
"tensorflow.round",
"tensorflow.clip_by_value",
"tensorflow.keras.layers.Activation",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.concatenate",
"numpy.ones",
"matplotlib.cm.get_cmap",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.clip_by_value",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.greater",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
antonevenepoel/open_spiel | [
"f2f0c786410018675fc40e9a5b82c40814555fa8",
"f2f0c786410018675fc40e9a5b82c40814555fa8"
] | [
".nox/tests/lib/python3.7/site-packages/nashpy/polytope/polytope.py",
"open_spiel/python/Project/part_2/project_plots_code/cfr+_adjustments_to_cfr_plot/cfr+_adjustments_to_cfr_plot_kuhn.py"
] | [
"\"\"\"A class for a normal form game\"\"\"\nfrom itertools import product\n\nimport numpy as np\nfrom scipy.optimize import linprog\nfrom scipy.spatial import HalfspaceIntersection\n\n\ndef build_halfspaces(M):\n \"\"\"\n Build a matrix representation for a halfspace corresponding to:\n\n Mx <= 1 and x >= 0\n\n This is of the form:\n\n [M: -1]\n [-1: 0]\n\n As specified in\n https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html\n\n Parameters\n ----------\n\n M: a numpy array\n\n Returns:\n --------\n\n Numpy array\n \"\"\"\n number_of_strategies, dimension = M.shape\n b = np.append(-np.ones(number_of_strategies), np.zeros(dimension))\n M = np.append(M, -np.eye(dimension), axis=0)\n halfspaces = np.column_stack((M, b.transpose()))\n return halfspaces\n\n\ndef find_feasible_point(halfspaces):\n \"\"\"\n Use linear programming to find a point inside the halfspaces (needed to\n define it).\n\n Code taken from scipy documentation:\n https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html\n\n Parameters\n ----------\n\n halfspaces: a matrix representation of halfspaces\n\n Returns:\n --------\n\n numpy array\n \"\"\"\n norm_vector = np.reshape(\n np.linalg.norm(halfspaces[:, :-1], axis=1), (halfspaces.shape[0], 1)\n )\n c = np.zeros((halfspaces.shape[1],))\n c[-1] = -1\n A = np.hstack((halfspaces[:, :-1], norm_vector))\n b = -halfspaces[:, -1:]\n res = linprog(c, A_ub=A, b_ub=b)\n return res.x[:-1]\n\n\ndef labels(vertex, halfspaces):\n \"\"\"\n Return the labels of the facets on which lie a given vertex. This is\n calculated by carrying out the matrix multiplictation.\n\n Parameters\n ----------\n\n vertex: a numpy array\n halfspaces: a numpy array\n\n Returns\n -------\n\n set\n \"\"\"\n b = halfspaces[:, -1]\n M = halfspaces[:, :-1]\n return set(np.where(np.isclose(np.dot(M, vertex), -b))[0])\n\n\ndef non_trivial_vertices(halfspaces):\n \"\"\"\n Returns all vertex, label pairs (ignoring the origin).\n\n Parameters:\n\n halfspaces: a numpy array\n\n Returns:\n\n generator\n \"\"\"\n feasible_point = find_feasible_point(halfspaces)\n hs = HalfspaceIntersection(halfspaces, feasible_point)\n hs.close()\n return ((v, labels(v, halfspaces)) for v in hs.intersections if max(v) > 0)\n",
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom open_spiel.python.project.part_2 import path_file\n\n########\n# PLOT #\n########\nplt.plot(\n np.loadtxt(\"kuhn_data/cfr_10k_iterations_ITER8.txt\"),\n [i*10**3 for i in np.loadtxt(\"kuhn_data/cfr_10k_iterations_EXPL8.txt\")],\n label=\"CFR\"\n)\nplt.plot(\n np.loadtxt(\"kuhn_data/cfr_10k_iterations_ITER2.txt\"),\n [i*10**3 for i in np.loadtxt(\"kuhn_data/cfr_10k_iterations_EXPL2.txt\")],\n label=\"CFR and alternating updates\"\n)\nplt.plot(\n np.loadtxt(\"kuhn_data/cfr_10k_iterations_ITER4.txt\"),\n [i*10**3 for i in np.loadtxt(\"kuhn_data/cfr_10k_iterations_EXPL4.txt\")],\n label=\"CFR and linear averaging\"\n)\nplt.plot(\n np.loadtxt(\"kuhn_data/cfr_10k_iterations_ITER3.txt\"),\n [i*10**3 for i in np.loadtxt(\"kuhn_data/cfr_10k_iterations_EXPL3.txt\")],\n label=\"CFR and regret matching+\"\n)\nplt.plot(\n np.loadtxt(\"kuhn_data/cfr_10k_iterations_ITER6.txt\"),\n [i*10**3 for i in np.loadtxt(\"kuhn_data/cfr_10k_iterations_EXPL6.txt\")],\n label=\"CFR+\"\n)\nplt.xlabel(\"Iterations\", fontweight=\"bold\")\nplt.ylabel(\"Exploitability (mbb/g)\", fontweight=\"bold\")\nplt.loglog()\nplt.legend()\nplt.title(\"Comparison of individual CFR+ adjustments to CFR in Kuhn poker\", fontweight=\"bold\")\nplt.savefig(path_file.path\n + \"project_plots_code/cfr+_adjustments_to_cfr_plot/\"\n + \"cfr+_adjustments_kuhn\"\n + path_file.plot_type,\n bbox_inches=\"tight\")\nplt.show()\n"
] | [
[
"numpy.hstack",
"numpy.dot",
"scipy.spatial.HalfspaceIntersection",
"numpy.eye",
"numpy.linalg.norm",
"scipy.optimize.linprog",
"numpy.ones",
"numpy.zeros"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
amspector100/knockpy | [
"c4980ebd506c110473babd85836dbd8ae1d548b7"
] | [
"knockpy/kpytorch/deeppink.py"
] | [
"import warnings\nimport numpy as np\nimport scipy as sp\nfrom scipy import stats\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .. import utilities\n\n\ndef create_batches(features, y, batchsize):\n\n # Create random indices to reorder datapoints\n n = features.shape[0]\n p = features.shape[1]\n inds = torch.randperm(n)\n\n # Iterate through and create batches\n i = 0\n batches = []\n while i < n:\n batches.append([features[inds][i : i + batchsize], y[inds][i : i + batchsize]])\n i += batchsize\n return batches\n\n\nclass DeepPinkModel(nn.Module):\n def __init__(self, p, hidden_sizes=[64], y_dist=\"gaussian\", normalize_Z=True):\n \"\"\"\n Adapted from https://arxiv.org/pdf/1809.01185.pdf.\n\n The module has two components:\n 1. A sparse linear layer with dimension 2*p to p.\n However, there are only 2*p weights (each feature\n and knockoff points only to their own unique node).\n This is (maybe?) followed by a ReLU activation.\n 2. A multilayer perceptron (MLP)\n\n Parameters\n ----------\n p : int\n The dimensionality of the data\n hidden_sizes: list\n A list of hidden sizes for the mlp layer(s). \n Defaults to [64].\n normalize_Z : bool\n If True, the first sparse linear layer is normalized\n so the weights for each feature/knockoff pair have an\n l1 norm of 1. This can modestly improve power in some\n settings.\n \"\"\"\n\n super().__init__()\n\n # Initialize weight for first layer\n self.p = p\n self.y_dist = y_dist\n self.Z_weight = nn.Parameter(torch.ones(2 * p))\n self.norm_Z_weight = normalize_Z\n\n # Save indices/reverse indices to prevent violations of FDR control\n self.inds, self.rev_inds = utilities.random_permutation_inds(2 * p)\n self.feature_inds = self.rev_inds[0:self.p]\n self.ko_inds = self.rev_inds[self.p:]\n\n # Create MLP layers\n mlp_layers = [nn.Linear(p, hidden_sizes[0])]\n for i in range(len(hidden_sizes) - 1):\n mlp_layers.append(nn.ReLU())\n mlp_layers.append(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))\n # Prepare for either MSE loss or cross entropy loss\n mlp_layers.append(nn.ReLU())\n if y_dist == \"gaussian\":\n mlp_layers.append(nn.Linear(hidden_sizes[-1], 1))\n else:\n mlp_layers.append(nn.Linear(hidden_sizes[-1], 2))\n\n # Then create MLP\n self.mlp = nn.Sequential(*mlp_layers)\n\n def _fetch_Z_weight(self):\n\n # Possibly don't normalize\n if not self.norm_Z_weight:\n return self.Z_weight\n\n # Else normalize, first construct denominator \n normalizer = torch.abs(self.Z_weight[self.feature_inds]) + torch.abs(\n self.Z_weight[self.ko_inds]\n )\n # Normalize\n Z = torch.abs(self.Z_weight[self.feature_inds]) / normalizer\n Ztilde = torch.abs(self.Z_weight[self.ko_inds]) / normalizer\n # Concatenate and reshuffle\n return torch.cat([Z, Ztilde], dim=0)[self.inds]\n\n def forward(self, features):\n \"\"\"\n Note: features are now shuffled\n \"\"\"\n\n # First layer: pairwise weights (and sum)\n if not isinstance(features, torch.Tensor):\n features = torch.tensor(features).float()\n features = features[:, self.inds] # shuffle features to prevent FDR violations\n features = self._fetch_Z_weight().unsqueeze(dim=0) * features\n features = features[:, self.feature_inds] - features[:, self.ko_inds]\n\n # Apply MLP\n return self.mlp(features)\n\n def predict(self, features):\n \"\"\"\n Wraps forward method, for compatibility\n with sklearn classes.\n \"\"\"\n with torch.no_grad():\n return self.forward(features).numpy()\n\n def l1norm(self):\n out = 0\n for parameter in self.mlp.parameters():\n out += torch.abs(parameter).sum()\n 
out += torch.abs(self.Z_weight).sum() # This is just for stability\n return out\n\n def l2norm(self):\n out = 0\n for parameter in self.mlp.parameters():\n out += (parameter ** 2).sum()\n out += (self.Z_weight ** 2).sum()\n return out\n\n def feature_importances(self, weight_scores=True):\n\n with torch.no_grad():\n # Calculate weights from MLP\n if weight_scores:\n layers = list(self.mlp.named_children())\n W = layers[0][1].weight.detach().numpy().T\n for layer in layers[1:]:\n if isinstance(layer[1], nn.ReLU):\n continue\n weight = layer[1].weight.detach().numpy().T\n W = np.dot(W, weight)\n W = W.squeeze(-1)\n else:\n W = np.ones(self.p)\n\n # Multiply by Z weights\n Z = self._fetch_Z_weight().numpy()\n feature_imp = Z[self.feature_inds] * W\n knockoff_imp = Z[self.ko_inds] * W\n return np.concatenate([feature_imp, knockoff_imp])\n\n\ndef train_deeppink(\n model,\n features,\n y,\n batchsize=100,\n num_epochs=50,\n lambda1=None,\n lambda2=None,\n verbose=True,\n **kwargs,\n):\n\n # Infer n, p, set default lambda1, lambda2\n n = features.shape[0]\n p = int(features.shape[1] / 2)\n if lambda1 is None:\n lambda1 = 10 * np.sqrt(np.log(p) / n)\n if lambda2 is None:\n lambda2 = 0\n\n # Batchsize can't be bigger than n\n batchsize = min(features.shape[0], batchsize)\n\n # Create criterion\n features, y = map(lambda x: torch.tensor(x).detach().float(), (features, y))\n if model.y_dist == \"gaussian\":\n criterion = nn.MSELoss(reduction=\"sum\")\n else:\n criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n y = y.long()\n\n # Create optimizer\n opt = torch.optim.Adam(model.parameters(), **kwargs)\n\n # Loop through epochs\n for j in range(num_epochs):\n\n # Create batches, loop through\n batches = create_batches(features, y, batchsize=batchsize)\n predictive_loss = 0\n for Xbatch, ybatch in batches:\n\n # Forward pass and loss\n output = model(Xbatch)\n loss = criterion(output, ybatch.unsqueeze(-1))\n predictive_loss += loss\n\n # Add l1 and l2 regularization\n loss += lambda1 * model.l1norm()\n loss += lambda2 * model.l2norm()\n\n # Step\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if verbose and j % 10 == 0:\n print(f\"At epoch {j}, mean loss is {predictive_loss / n}\")\n\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.abs",
"torch.ones",
"numpy.dot",
"numpy.log",
"torch.cat",
"torch.randperm",
"numpy.ones",
"numpy.concatenate",
"torch.nn.Linear",
"torch.tensor",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
netoaraujjo/hal | [
"0cd66d5548659c4dde70381ad21ba5b9d8213365"
] | [
"clustering/agglomerative_clustering.py"
] | [
"#-*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering\nfrom sklearn.externals.joblib import Memory\nfrom .clustering import Clustering\n\nclass AgglomerativeClustering(Clustering):\n \"\"\"docstring for AgglomerativeClustering.\"\"\"\n def __init__(self, data, n_clusters = 2, affinity = 'euclidean',\n memory = Memory(cachedir = None), connectivity = None,\n compute_full_tree = 'auto', linkage = 'ward',\n pooling_func = np.mean):\n super(AgglomerativeClustering, self).__init__()\n self.data = data\n self.n_clusters = n_clusters\n self.affinity = affinity\n self.memory = memory\n self.connectivity = connectivity\n self.compute_full_tree = compute_full_tree\n self.linkage = linkage\n self.pooling_func = pooling_func\n\n\n\n def execute(self):\n \"\"\"Constroi o modelo de clusterizacao.\"\"\"\n self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,\n affinity = self.affinity,\n memory = self.memory,\n connectivity = self.connectivity,\n compute_full_tree = self.compute_full_tree,\n linkage = self.linkage,\n pooling_func = self.pooling_func).fit(self.data)\n\n self.clusters = super().make_clusters(self.data, self.model.labels_)\n\n\n @property\n def labels_(self):\n \"\"\"Retorna os labels dos elementos do dataset.\"\"\"\n return self.model.labels_\n\n\n @property\n def clusters_(self):\n \"\"\"Retorna um dicionaro onde os indices dos grupos sao as chaves.\"\"\"\n return self.clusters\n\n\n @property\n def model_(self):\n \"\"\"Retorna o modelo de agrupamento.\"\"\"\n return self.model\n"
] | [
[
"sklearn.cluster.AgglomerativeClustering",
"sklearn.externals.joblib.Memory"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
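The stored wrapper above delegates to scikit-learn's `AgglomerativeClustering` and then groups samples by label. A minimal sketch of that usage follows; it deliberately omits the `memory`, `affinity`, and `pooling_func` arguments the stored file passes, since `sklearn.externals.joblib` and several of those keywords have been deprecated or removed in newer scikit-learn releases. The toy data and the dictionary-building step are assumptions about what the wrapper's `make_clusters` produces.

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering

# Toy 2-D data: two well-separated blobs.
rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.3, (20, 2)), rng.normal(3, 0.3, (20, 2))])

model = AgglomerativeClustering(n_clusters=2, linkage="ward").fit(data)
print(model.labels_)  # one cluster index per sample

# Group rows by label, roughly what the wrapper exposes via clusters_.
clusters = {lab: data[model.labels_ == lab] for lab in np.unique(model.labels_)}
```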
renier/qiskit-terra | [
"1f5e4c8f6768dfac5d68f39e9d38fdd783ba1346"
] | [
"qiskit/quantum_info/states/statevector.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nStatevector quantum state class.\n\"\"\"\n\nimport copy\nimport re\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.states.quantum_state import QuantumState\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\n\n\nclass Statevector(QuantumState):\n \"\"\"Statevector class\"\"\"\n\n def __init__(self, data, dims=None):\n \"\"\"Initialize a statevector object.\n\n Args:\n data (vector_like): a complex statevector.\n dims (int or tuple or list): Optional. The subsystem dimension of\n the state (See additional information).\n\n Raises:\n QiskitError: if input data is not valid.\n\n Additional Information:\n The ``dims`` kwarg can be None, an integer, or an iterable of\n integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` or ``None`` -- the length of the input vector\n specifies the total dimension of the density matrix. If it is a\n power of two the state will be initialized as an N-qubit state.\n If it is not a power of two the state will have a single\n d-dimensional subsystem.\n \"\"\"\n if isinstance(data, (list, np.ndarray)):\n # Finally we check if the input is a raw vector in either a\n # python list or numpy array format.\n self._data = np.asarray(data, dtype=complex)\n elif isinstance(data, Statevector):\n self._data = data._data\n if dims is None:\n dims = data._dims\n elif isinstance(data, Operator):\n # We allow conversion of column-vector operators to Statevectors\n input_dim, _ = data.dim\n if input_dim != 1:\n raise QiskitError(\"Input Operator is not a column-vector.\")\n self._data = np.ravel(data.data)\n else:\n raise QiskitError(\"Invalid input data format for Statevector\")\n # Check that the input is a numpy vector or column-vector numpy\n # matrix. 
If it is a column-vector matrix reshape to a vector.\n ndim = self._data.ndim\n shape = self._data.shape\n if ndim != 1:\n if ndim == 2 and shape[1] == 1:\n self._data = np.reshape(self._data, shape[0])\n elif ndim != 2 or shape[1] != 1:\n raise QiskitError(\"Invalid input: not a vector or column-vector.\")\n super().__init__(self._automatic_dims(dims, shape[0]))\n\n def __eq__(self, other):\n return super().__eq__(other) and np.allclose(\n self._data, other._data, rtol=self.rtol, atol=self.atol)\n\n def __repr__(self):\n prefix = 'Statevector('\n pad = len(prefix) * ' '\n return '{}{},\\n{}dims={})'.format(\n prefix, np.array2string(\n self.data, separator=', ', prefix=prefix),\n pad, self._dims)\n\n @property\n def data(self):\n \"\"\"Return data.\"\"\"\n return self._data\n\n def is_valid(self, atol=None, rtol=None):\n \"\"\"Return True if a Statevector has norm 1.\"\"\"\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n norm = np.linalg.norm(self.data)\n return np.allclose(norm, 1, rtol=rtol, atol=atol)\n\n def to_operator(self):\n \"\"\"Convert state to a rank-1 projector operator\"\"\"\n mat = np.outer(self.data, np.conj(self.data))\n return Operator(mat, input_dims=self.dims(), output_dims=self.dims())\n\n def conjugate(self):\n \"\"\"Return the conjugate of the operator.\"\"\"\n return Statevector(np.conj(self.data), dims=self.dims())\n\n def trace(self):\n \"\"\"Return the trace of the quantum state as a density matrix.\"\"\"\n return np.sum(np.abs(self.data) ** 2)\n\n def purity(self):\n \"\"\"Return the purity of the quantum state.\"\"\"\n # For a valid statevector the purity is always 1, however if we simply\n # have an arbitrary vector (not correctly normalized) then the\n # purity is equivalent to the trace squared:\n # P(|psi>) = Tr[|psi><psi|psi><psi|] = |<psi|psi>|^2\n return self.trace() ** 2\n\n def tensor(self, other):\n \"\"\"Return the tensor product state self ⊗ other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product operator self ⊗ other.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n dims = other.dims() + self.dims()\n data = np.kron(self._data, other._data)\n return Statevector(data, dims)\n\n def expand(self, other):\n \"\"\"Return the tensor product state other ⊗ self.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the tensor product state other ⊗ self.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n dims = self.dims() + other.dims()\n data = np.kron(other._data, self._data)\n return Statevector(data, dims)\n\n def _add(self, other):\n \"\"\"Return the linear combination self + other.\n\n Args:\n other (Statevector): a quantum state object.\n\n Returns:\n Statevector: the linear combination self + other.\n\n Raises:\n QiskitError: if other is not a quantum state, or has\n incompatible dimensions.\n \"\"\"\n if not isinstance(other, Statevector):\n other = Statevector(other)\n if self.dim != other.dim:\n raise QiskitError(\"other Statevector has different dimensions.\")\n return Statevector(self.data + other.data, self.dims())\n\n def _multiply(self, other):\n \"\"\"Return the scalar multiplied state self * other.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n Statevector: the scalar multiplied state other * self.\n\n Raises:\n QiskitError: if 
other is not a valid complex number.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n return Statevector(other * self.data, self.dims())\n\n def evolve(self, other, qargs=None):\n \"\"\"Evolve a quantum state by the operator.\n\n Args:\n other (Operator): The operator to evolve by.\n qargs (list): a list of Statevector subsystem positions to apply\n the operator on.\n\n Returns:\n Statevector: the output quantum state.\n\n Raises:\n QiskitError: if the operator dimension does not match the\n specified Statevector subsystem dimensions.\n \"\"\"\n if qargs is None:\n qargs = getattr(other, 'qargs', None)\n\n # Get return vector\n ret = copy.copy(self)\n\n # Evolution by a circuit or instruction\n if isinstance(other, QuantumCircuit):\n other = other.to_instruction()\n if isinstance(other, Instruction):\n if self.num_qubits is None:\n raise QiskitError(\"Cannot apply QuantumCircuit to non-qubit Statevector.\")\n return self._evolve_instruction(ret, other, qargs=qargs)\n\n # Evolution by an Operator\n if not isinstance(other, Operator):\n other = Operator(other)\n\n # check dimension\n if self.dims(qargs) != other.input_dims():\n raise QiskitError(\n \"Operator input dimensions are not equal to statevector subsystem dimensions.\"\n )\n return Statevector._evolve_operator(ret, other, qargs=qargs)\n\n def equiv(self, other, rtol=None, atol=None):\n \"\"\"Return True if statevectors are equivalent up to global phase.\n\n Args:\n other (Statevector): a statevector object.\n rtol (float): relative tolerance value for comparison.\n atol (float): absolute tolerance value for comparison.\n\n Returns:\n bool: True if statevectors are equivalent up to global phase.\n \"\"\"\n if not isinstance(other, Statevector):\n try:\n other = Statevector(other)\n except QiskitError:\n return False\n if self.dim != other.dim:\n return False\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n return matrix_equal(self.data, other.data, ignore_phase=True,\n rtol=rtol, atol=atol)\n\n def expectation_value(self, oper, qargs=None):\n \"\"\"Compute the expectation value of an operator.\n\n Args:\n oper (Operator): an operator to evaluate expval of.\n qargs (None or list): subsystems to apply operator on.\n\n Returns:\n complex: the expectation value.\n \"\"\"\n val = self.evolve(oper, qargs=qargs)\n conj = self.conjugate()\n return np.dot(conj.data, val.data)\n\n def probabilities(self, qargs=None, decimals=None):\n \"\"\"Return the subsystem measurement probability vector.\n\n Measurement probabilities are with respect to measurement in the\n computation (diagonal) basis.\n\n Args:\n qargs (None or list): subsystems to return probabilities for,\n if None return for all subsystems (Default: None).\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done (Default: None).\n\n Returns:\n np.array: The Numpy vector array of probabilities.\n\n Examples:\n\n Consider a 2-qubit product state\n :math:`|\\\\psi\\\\rangle=|+\\\\rangle\\\\otimes|0\\\\rangle`.\n\n .. 
jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities()\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring only qubit-0\n probs_qubit_0 = psi.probabilities([0])\n print('Qubit-0 probs: {}'.format(probs_qubit_0))\n\n # Probabilities for measuring only qubit-1\n probs_qubit_1 = psi.probabilities([1])\n print('Qubit-1 probs: {}'.format(probs_qubit_1))\n\n We can also permute the order of qubits in the ``qargs`` list\n to change the qubit position in the probabilities output\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = psi.probabilities([0, 1])\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring both qubits\n # but swapping qubits 0 and 1 in output\n probs_swapped = psi.probabilities([1, 0])\n print('Swapped probs: {}'.format(probs_swapped))\n \"\"\"\n probs = self._subsystem_probabilities(\n np.abs(self.data) ** 2, self._dims, qargs=qargs)\n if decimals is not None:\n probs = probs.round(decimals=decimals)\n return probs\n\n def reset(self, qargs=None):\n \"\"\"Reset state or subsystems to the 0-state.\n\n Args:\n qargs (list or None): subsystems to reset, if None all\n subsystems will be reset to their 0-state\n (Default: None).\n\n Returns:\n Statevector: the reset state.\n\n Additional Information:\n If all subsystems are reset this will return the ground state\n on all subsystems. If only a some subsystems are reset this\n function will perform a measurement on those subsystems and\n evolve the subsystems so that the collapsed post-measurement\n states are rotated to the 0-state. The RNG seed for this\n sampling can be set using the :meth:`seed` method.\n \"\"\"\n if qargs is None:\n # Resetting all qubits does not require sampling or RNG\n state = np.zeros(self._dim, dtype=complex)\n state[0] = 1\n return Statevector(state, dims=self._dims)\n\n # Sample a single measurement outcome\n dims = self.dims(qargs)\n probs = self.probabilities(qargs)\n sample = self._rng.choice(len(probs), p=probs, size=1)\n\n # Convert to projector for state update\n proj = np.zeros(len(probs), dtype=complex)\n proj[sample] = 1 / np.sqrt(probs[sample])\n\n # Rotate outcome to 0\n reset = np.eye(len(probs))\n reset[0, 0] = 0\n reset[sample, sample] = 0\n reset[0, sample] = 1\n\n # compose with reset projection\n reset = np.dot(reset, np.diag(proj))\n return self.evolve(\n Operator(reset, input_dims=dims, output_dims=dims),\n qargs=qargs)\n\n def to_counts(self):\n \"\"\"Returns the statevector as a counts dict\n of probabilities.\n\n DEPRECATED: use :meth:`probabilities_dict` instead.\n\n Returns:\n dict: Counts of probabilities.\n \"\"\"\n warnings.warn(\n 'The `Statevector.to_counts` method is deprecated as of 0.13.0,'\n ' and will be removed no earlier than 3 months after that '\n 'release date. You should use the `Statevector.probabilities_dict`'\n ' method instead.', DeprecationWarning, stacklevel=2)\n return self.probabilities_dict()\n\n @classmethod\n def from_label(cls, label):\n \"\"\"Return a tensor product of Pauli X,Y,Z eigenstates.\n\n .. 
list-table:: Single-qubit state labels\n :header-rows: 1\n\n * - Label\n - Statevector\n * - ``\"0\"``\n - :math:`[1, 0]`\n * - ``\"1\"``\n - :math:`[0, 1]`\n * - ``\"+\"``\n - :math:`[1 / \\\\sqrt{2}, 1 / \\\\sqrt{2}]`\n * - ``\"-\"``\n - :math:`[1 / \\\\sqrt{2}, -1 / \\\\sqrt{2}]`\n * - ``\"r\"``\n - :math:`[1 / \\\\sqrt{2}, i / \\\\sqrt{2}]`\n * - ``\"l\"``\n - :math:`[1 / \\\\sqrt{2}, -i / \\\\sqrt{2}]`\n\n Args:\n label (string): a eigenstate string ket label (see table for\n allowed values).\n\n Returns:\n Statevector: The N-qubit basis state density matrix.\n\n Raises:\n QiskitError: if the label contains invalid characters, or the\n length of the label is larger than an explicitly\n specified num_qubits.\n \"\"\"\n # Check label is valid\n if re.match(r'^[01rl\\-+]+$', label) is None:\n raise QiskitError('Label contains invalid characters.')\n # We can prepare Z-eigenstates by converting the computational\n # basis bit-string to an integer and preparing that unit vector\n # However, for X-basis states, we will prepare a Z-eigenstate first\n # then apply Hadamard gates to rotate 0 and 1s to + and -.\n z_label = label\n xy_states = False\n if re.match('^[01]+$', label) is None:\n # We have X or Y eigenstates so replace +,r with 0 and\n # -,l with 1 and prepare the corresponding Z state\n xy_states = True\n z_label = z_label.replace('+', '0')\n z_label = z_label.replace('r', '0')\n z_label = z_label.replace('-', '1')\n z_label = z_label.replace('l', '1')\n # Initialize Z eigenstate vector\n num_qubits = len(label)\n data = np.zeros(1 << num_qubits, dtype=complex)\n pos = int(z_label, 2)\n data[pos] = 1\n state = Statevector(data)\n if xy_states:\n # Apply hadamards to all qubits in X eigenstates\n x_mat = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)\n # Apply S.H to qubits in Y eigenstates\n y_mat = np.dot(np.diag([1, 1j]), x_mat)\n for qubit, char in enumerate(reversed(label)):\n if char in ['+', '-']:\n state = state.evolve(x_mat, qargs=[qubit])\n elif char in ['r', 'l']:\n state = state.evolve(y_mat, qargs=[qubit])\n return state\n\n @staticmethod\n def from_int(i, dims):\n \"\"\"Return a computational basis statevector.\n\n Args:\n i (int): the basis state element.\n dims (int or tuple or list): The subsystem dimensions of the statevector\n (See additional information).\n\n Returns:\n Statevector: The computational basis state :math:`|i\\\\rangle`.\n\n Additional Information:\n The ``dims`` kwarg can be an integer or an iterable of integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` -- the integer specifies the total dimension of the\n state. If it is a power of two the state will be initialized\n as an N-qubit state. 
If it is not a power of two the state\n will have a single d-dimensional subsystem.\n \"\"\"\n size = np.product(dims)\n state = np.zeros(size, dtype=complex)\n state[i] = 1.0\n return Statevector(state, dims=dims)\n\n @classmethod\n def from_instruction(cls, instruction):\n \"\"\"Return the output statevector of an instruction.\n\n The statevector is initialized in the state :math:`|{0,\\\\ldots,0}\\\\rangle` of the\n same number of qubits as the input instruction or circuit, evolved\n by the input instruction, and the output statevector returned.\n\n Args:\n instruction (qiskit.circuit.Instruction or QuantumCircuit): instruction or circuit\n\n Returns:\n Statevector: The final statevector.\n\n Raises:\n QiskitError: if the instruction contains invalid instructions for\n the statevector simulation.\n \"\"\"\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an the statevector in the all |0> state\n init = np.zeros(2 ** instruction.num_qubits, dtype=complex)\n init[0] = 1.0\n vec = Statevector(init, dims=instruction.num_qubits * (2,))\n return Statevector._evolve_instruction(vec, instruction)\n\n def to_dict(self, decimals=None):\n r\"\"\"Convert the statevector to dictionary form.\n\n This dictionary representation uses a Ket-like notation where the\n dictionary keys are qudit strings for the subsystem basis vectors.\n If any subsystem has a dimension greater than 10 comma delimiters are\n inserted between integers so that subsystems can be distinguished.\n\n Args:\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done\n (Default: None).\n\n Returns:\n dict: the dictionary form of the Statevector.\n\n Example:\n\n The ket-form of a 2-qubit statevector\n :math:`|\\psi\\rangle = |-\\rangle\\otimes |0\\rangle`\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import Statevector\n\n psi = Statevector.from_label('-0')\n print(psi.to_dict())\n\n For non-qubit subsystems the integer range can go from 0 to 9. For\n example in a qutrit system\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(9)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(3, 3))\n print(psi.to_dict())\n\n For large subsystem dimensions delimeters are required. The\n following example is for a 20-dimensional system consisting of\n a qubit and 10-dimensional qudit.\n\n .. 
jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import Statevector\n\n vec = np.zeros(2 * 10)\n vec[0] = 1 / np.sqrt(2)\n vec[-1] = 1 / np.sqrt(2)\n psi = Statevector(vec, dims=(2, 10))\n print(psi.to_dict())\n \"\"\"\n return self._vector_to_dict(self.data,\n self._dims,\n decimals=decimals,\n string_labels=True)\n\n @property\n def _shape(self):\n \"\"\"Return the tensor shape of the matrix operator\"\"\"\n return tuple(reversed(self.dims()))\n\n @staticmethod\n def _evolve_operator(statevec, oper, qargs=None):\n \"\"\"Evolve a qudit statevector\"\"\"\n is_qubit = bool(statevec.num_qubits and oper.num_qubits)\n\n if qargs is None:\n # Full system evolution\n statevec._data = np.dot(oper._data, statevec._data)\n if not is_qubit:\n statevec._set_dims(oper._output_dims)\n return statevec\n\n # Calculate contraction dimensions\n if is_qubit:\n # Qubit contraction\n new_dim = statevec._dim\n num_qargs = statevec.num_qubits\n else:\n # Qudit contraction\n new_dims = list(statevec._dims)\n for i, qubit in enumerate(qargs):\n new_dims[qubit] = oper._output_dims[i]\n new_dim = np.product(new_dims)\n num_qargs = len(new_dims)\n\n # Get transpose axes\n indices = [num_qargs - 1 - i for i in reversed(qargs)]\n axes = indices + [i for i in range(num_qargs) if i not in indices]\n axes_inv = np.argsort(axes).tolist()\n\n # Calculate contraction dimensions\n if is_qubit:\n pre_tensor_shape = num_qargs * (2,)\n post_tensor_shape = pre_tensor_shape\n contract_shape = (1 << oper.num_qubits, 1 << (num_qargs - oper.num_qubits))\n else:\n contract_dim = np.product(oper._input_dims)\n pre_tensor_shape = statevec._shape\n contract_shape = (contract_dim, statevec._dim // contract_dim)\n post_tensor_shape = list(reversed(oper._output_dims)) + [\n pre_tensor_shape[i] for i in range(num_qargs) if i not in indices]\n\n # reshape input for contraction\n statevec._data = np.reshape(np.transpose(\n np.reshape(statevec.data, pre_tensor_shape), axes), contract_shape)\n statevec._data = np.reshape(np.dot(oper.data, statevec._data), post_tensor_shape)\n statevec._data = np.reshape(np.transpose(statevec._data, axes_inv), new_dim)\n\n # Update dimension\n if not is_qubit:\n statevec._set_dims(new_dims)\n return statevec\n\n @staticmethod\n def _evolve_instruction(statevec, obj, qargs=None):\n \"\"\"Update the current Statevector by applying an instruction.\"\"\"\n from qiskit.circuit.reset import Reset\n from qiskit.circuit.barrier import Barrier\n\n mat = Operator._instruction_to_matrix(obj)\n if mat is not None:\n # Perform the composition and inplace update the current state\n # of the operator\n return Statevector._evolve_operator(statevec, Operator(mat), qargs=qargs)\n\n # Special instruction types\n if isinstance(obj, Reset):\n statevec._data = statevec.reset(qargs)._data\n return statevec\n if isinstance(obj, Barrier):\n return statevec\n\n # If the instruction doesn't have a matrix defined we use its\n # circuit decomposition definition if it exists, otherwise we\n # cannot compose this gate and raise an error.\n if obj.definition is None:\n raise QiskitError('Cannot apply Instruction: {}'.format(obj.name))\n if not isinstance(obj.definition, QuantumCircuit):\n raise QiskitError('{} instruction definition is {}; expected QuantumCircuit'.format(\n obj.name, type(obj.definition)))\n if obj.definition.global_phase:\n statevec._data *= np.exp(1j * float(obj.definition.global_phase))\n qubits = {qubit: i for i, qubit in enumerate(obj.definition.qubits)}\n for instr, qregs, cregs in obj.definition:\n if 
cregs:\n raise QiskitError(\n 'Cannot apply instruction with classical registers: {}'.format(\n instr.name))\n # Get the integer position of the flat register\n if qargs is None:\n new_qargs = [qubits[tup] for tup in qregs]\n else:\n new_qargs = [qargs[qubits[tup]] for tup in qregs]\n Statevector._evolve_instruction(statevec, instr, qargs=new_qargs)\n return statevec\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.product",
"numpy.allclose",
"numpy.conj",
"numpy.sqrt",
"numpy.asarray",
"numpy.reshape",
"numpy.abs",
"numpy.ravel",
"numpy.kron",
"numpy.linalg.norm",
"numpy.transpose",
"numpy.argsort",
"numpy.array2string",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
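The `Statevector` class stored in the row above exposes `from_label`, `probabilities`, `evolve`, and `expectation_value`. The sketch below exercises those methods on the same `'+0'` product state used in the embedded docstrings; the Pauli matrices and the choice of which qubit to evolve are illustrative assumptions.

```python
import numpy as np
from qiskit.quantum_info import Statevector, Operator

# |psi> = |+> (qubit 1) tensor |0> (qubit 0), as in the embedded docstring example.
psi = Statevector.from_label('+0')

print(psi.probabilities())      # joint measurement probabilities
print(psi.probabilities([0]))   # marginal probabilities for qubit 0

# Evolve qubit 1 by Pauli-X and read off <Z> on qubit 0 (expected to be +1).
x = Operator(np.array([[0, 1], [1, 0]], dtype=complex))
z = Operator(np.diag([1, -1]).astype(complex))
evolved = psi.evolve(x, qargs=[1])
print(evolved.expectation_value(z, qargs=[0]))
```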
amorehead/metrics | [
"2e4cb70c46bd775629ceb9d710bc581af8bf92c5"
] | [
"torchmetrics/classification/f_beta.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.f_beta import _fbeta_compute, _fbeta_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass FBeta(Metric):\n r\"\"\"\n Computes `F-score <https://en.wikipedia.org/wiki/F-score>`_, specifically:\n\n .. math::\n F_\\beta = (1 + \\beta^2) * \\frac{\\text{precision} * \\text{recall}}\n {(\\beta^2 * \\text{precision}) + \\text{recall}}\n\n Where :math:`\\beta` is some positive real factor. Works with binary, multiclass, and multilabel data.\n Accepts probabilities from a model output or integer class values in prediction.\n Works with multi-dimensional preds and target.\n\n Forward accepts\n\n - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes\n - ``target`` (long tensor): ``(N, ...)``\n\n If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument\n to convert into integer labels. This is the case for binary and multi-label probabilities.\n\n If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.\n\n Args:\n num_classes: Number of classes in the dataset.\n beta: Beta coefficient in the F measure.\n threshold:\n Threshold value for binary or multi-label probabilities. default: 0.5\n\n average:\n - ``'micro'`` computes metric globally\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``'none'`` or ``None`` computes and returns the metric per class\n\n multilabel: If predictions are from multilabel classification.\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. 
default: None (which selects the entire world)\n\n Example:\n\n >>> from torchmetrics import FBeta\n >>> target = torch.tensor([0, 1, 2, 0, 1, 2])\n >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])\n >>> f_beta = FBeta(num_classes=3, beta=0.5)\n >>> f_beta(preds, target)\n tensor(0.3333)\n\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n beta: float = 1.0,\n threshold: float = 0.5,\n average: str = \"micro\",\n multilabel: bool = False,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n self.num_classes = num_classes\n self.beta = beta\n self.threshold = threshold\n self.average = average\n self.multilabel = multilabel\n\n allowed_average = (\"micro\", \"macro\", \"weighted\", \"none\", None)\n if self.average not in allowed_average:\n raise ValueError(\n 'Argument `average` expected to be one of the following:'\n f' {allowed_average} but got {self.average}'\n )\n\n self.add_state(\"true_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n self.add_state(\"predicted_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n self.add_state(\"actual_positives\", default=torch.zeros(num_classes), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor):\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n true_positives, predicted_positives, actual_positives = _fbeta_update(\n preds, target, self.num_classes, self.threshold, self.multilabel\n )\n\n self.true_positives += true_positives\n self.predicted_positives += predicted_positives\n self.actual_positives += actual_positives\n\n def compute(self) -> Tensor:\n \"\"\"\n Computes fbeta over state.\n \"\"\"\n return _fbeta_compute(\n self.true_positives, self.predicted_positives, self.actual_positives, self.beta, self.average\n )\n\n\nclass F1(FBeta):\n \"\"\"\n Computes F1 metric. F1 metrics correspond to a harmonic mean of the\n precision and recall scores.\n\n Works with binary, multiclass, and multilabel data.\n Accepts logits from a model output or integer class values in prediction.\n Works with multi-dimensional preds and target.\n\n Forward accepts\n\n - ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes\n - ``target`` (long tensor): ``(N, ...)``\n\n If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument.\n This is the case for binary and multi-label logits.\n\n If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.\n\n Args:\n num_classes: Number of classes in the dataset.\n threshold:\n Threshold value for binary or multi-label logits. default: 0.5\n\n average:\n - ``'micro'`` computes metric globally\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``'none'`` or ``None`` computes and returns the metric per class\n\n multilabel: If predictions are from multilabel classification.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False. 
default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n\n Example:\n >>> from torchmetrics import F1\n >>> target = torch.tensor([0, 1, 2, 0, 1, 2])\n >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])\n >>> f1 = F1(num_classes=3)\n >>> f1(preds, target)\n tensor(0.3333)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n threshold: float = 0.5,\n average: str = \"micro\",\n multilabel: bool = False,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n if multilabel is not False:\n rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')\n\n super().__init__(\n num_classes=num_classes,\n beta=1.0,\n threshold=threshold,\n average=average,\n multilabel=multilabel,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n"
] | [
[
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
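The `FBeta`/`F1` metrics in the row above follow the torchmetrics `Metric` pattern: call the metric directly for a one-shot value, or accumulate state with `update()` and finalize with `compute()`. The sketch below mirrors the docstring example embedded in the row; note it uses the class names as captured there (later torchmetrics releases renamed these classes and changed the constructor arguments).

```python
import torch
from torchmetrics import FBeta, F1  # names as captured in this row (older torchmetrics API)

target = torch.tensor([0, 1, 2, 0, 1, 2])
preds = torch.tensor([0, 2, 1, 0, 0, 1])

f_beta = FBeta(num_classes=3, beta=0.5)
f1 = F1(num_classes=3)

# Direct call, as in the stored docstring.
print(f_beta(preds, target))   # tensor(0.3333)

# Batch-wise accumulation with update()/compute() gives the same result.
f1.update(preds[:3], target[:3])
f1.update(preds[3:], target[3:])
print(f1.compute())            # tensor(0.3333)
```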
stewue/masterthesis-evaluation | [
"0fb825e196f386c628f95524aa9c80af2126617e"
] | [
"RQ1_Python/execution_time_per_benchmark.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.ticker import PercentFormatter\n\ndata = pd.read_csv('C:\\\\Users\\\\stewue\\\\OneDrive - Wuersten\\\\Uni\\\\19_HS\\\\Masterarbeit\\\\Repo\\\\Evaluation\\\\RQ1_Results\\\\aggregated\\\\executiontime.csv')\ntotalTime = data['executionTime'] * data['parameterizationCombinations'] / 60\n\nall, base = np.histogram(totalTime, bins=1000, range=[0, 30], weights=np.ones(len(totalTime)) / len(totalTime))\ncumulative = np.cumsum(all)\n\nfig = plt.figure()\ntotal = totalTime.shape[0]\n\n# absolute\nax1 = fig.add_subplot()\nax1.plot(base[:-1], cumulative * total)\nax1.set_ylabel('# benchmarks')\n\n# relative\nax2 = ax1.twinx()\nplt.gca().yaxis.set_major_formatter(PercentFormatter(1, 0))\nax2.plot(base[:-1], cumulative)\nax2.set_ylabel('# benchmarks [cumulative %]')\n\nax1.set_xlabel('execution time [min]')\nplt.yticks(np.arange(0, 0.91, 0.1))\nplt.tight_layout()\n#plt.show()\n#plt.savefig('C:\\\\Users\\\\stewue\\\\OneDrive - Wuersten\\\\Uni\\\\19_HS\\\\Masterarbeit\\\\Repo\\\\Evaluation\\\\RQ1_Results\\\\images\\\\execution_time_per_benchmark.pdf')\n\nprint(\"max: \" + str(np.max(totalTime)))\nprint(\"median: \" + str(np.median(totalTime)))\nprint(\"total: \" + str(total))\n\ns10 = totalTime[totalTime < 10]\nprint(\"<10min: \" + str(len(s10) / total))\nprint(\"<10min: \" + str(len(s10)))\n\ns30 = totalTime[totalTime < 30]\nprint(\"<30min: \" + str(len(s30) / total))\nprint(\"<30min: \" + str(len(s30)))"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.median",
"numpy.cumsum",
"numpy.max",
"matplotlib.ticker.PercentFormatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
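The stored script above plots a cumulative execution-time histogram with an absolute count axis and a twinned percentage axis, reading a CSV from a hard-coded Windows path. The following self-contained sketch reproduces the same plotting pattern on synthetic data and writes to a relative path; the exponential toy distribution and the output filename are assumptions.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

# Synthetic execution times (minutes) standing in for the CSV the stored script reads.
rng = np.random.default_rng(42)
total_time = rng.exponential(scale=5.0, size=1_000)

counts, edges = np.histogram(total_time, bins=1000, range=[0, 30],
                             weights=np.ones(len(total_time)) / len(total_time))
cumulative = np.cumsum(counts)

fig, ax1 = plt.subplots()
ax1.plot(edges[:-1], cumulative * len(total_time))   # absolute cumulative counts
ax1.set_xlabel('execution time [min]')
ax1.set_ylabel('# benchmarks')

ax2 = ax1.twinx()                                    # second y-axis with relative scale
ax2.yaxis.set_major_formatter(PercentFormatter(1, 0))
ax2.plot(edges[:-1], cumulative)
ax2.set_ylabel('# benchmarks [cumulative %]')

plt.tight_layout()
plt.savefig('execution_time_per_benchmark.pdf')      # relative path instead of the hard-coded one
```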
VITA-Group/BERT-Tickets | [
"4d8e0356939e7045e2f5ee908412a5026051d162",
"4d8e0356939e7045e2f5ee908412a5026051d162"
] | [
"squad_trans.py",
"transformers-master/examples/prun_utils.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).\"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport timeit\nimport torch.nn.utils.prune as prune\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertForQuestionAnswering,\n AlbertTokenizer,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n CamembertConfig,\n CamembertForQuestionAnswering,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n squad_convert_examples_to_features,\n BertPreTrainedModel,\n PreTrainedModel,\n PreTrainedTokenizer,\n)\nfrom transformers.data.metrics.squad_metrics import (\n compute_predictions_log_probs,\n compute_predictions_logits,\n squad_evaluate,\n)\nfrom transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (BertConfig, CamembertConfig, RobertaConfig, XLNetConfig, XLMConfig)\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n \"camembert\": (CamembertConfig, CamembertForQuestionAnswering, CamembertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),\n}\n\ndef rewind(pre_weight):\n\n recover_dict = {}\n name_list = []\n for ii in range(12):\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.query.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.key.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.self.value.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight')\n name_list.append('bert.encoder.layer.'+str(ii)+'.output.dense.weight')\n 
name_list.append('bert.pooler.dense.weight')\n\n for key in pre_weight.keys():\n\n if 'bert' in key:\n if key in name_list:\n new_key = key+'_orig'\n else:\n new_key = key\n\n recover_dict[new_key] = pre_weight[key]\n\n return recover_dict\n\ndef see_weight_rate(model):\n\n sum_list = 0\n zero_sum = 0\n for ii in range(12):\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.query.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.query.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.key.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.key.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.value.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.value.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.output.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].intermediate.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].intermediate.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].output.dense.weight == 0))\n\n\n sum_list = sum_list+float(model.bert.pooler.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.pooler.dense.weight == 0))\n \n\n return 100*zero_sum/sum_list\n\ndef pruning_model_custom(model, mask_dict):\n\n parameters_to_prune =[]\n mask_list = []\n for ii in range(12):\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.query)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.key)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.value)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.output.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].intermediate.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_mask'])\n parameters_to_prune.append(model.bert.encoder.layer[ii].output.dense)\n mask_list.append(mask_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight_mask'])\n\n parameters_to_prune.append(model.bert.pooler.dense)\n mask_list.append(mask_dict['bert.pooler.dense.weight_mask'])\n\n for ii in range(len(parameters_to_prune)):\n prune.CustomFromMask.apply(parameters_to_prune[ii], 'weight', mask=mask_list[ii])\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ndef set_seed_new(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\ndef train(args, train_dataset, model, tokenizer):\n record_result = []\n\n zero_rate = see_weight_rate(model)\n 
record_result.append(zero_rate)\n\n\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n # if args.max_steps > 0:\n # t_total = args.max_steps\n # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n # else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 1\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n # Added here for reproductibility\n set_seed(args)\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n\n if args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\n del inputs[\"token_type_ids\"]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n if args.version_2_with_negative:\n inputs.update({\"is_impossible\": batch[7]})\n if hasattr(model, \"config\") and hasattr(model.config, \"lang2id\"):\n inputs.update(\n {\"langs\": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}\n )\n\n outputs = model(**inputs)\n # model outputs are always tuple in transformers (see doc)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n # Log metrics\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n 
# Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n results = evaluate(args, model, tokenizer)\n record_result.append(results)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n # Save model checkpoint\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n torch.save(model,os.path.join(output_dir, \"model.pt\"))\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n \n results = evaluate(args, model, tokenizer)\n record_result.append(results)\n\n torch.save(record_result, os.path.join(args.output_dir, 'result.pt'))\n\n return global_step, tr_loss / global_step\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n all_results = []\n start_time = timeit.default_timer()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n # XLNet and XLM use more arguments for their predictions\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n # for lang_id-sensitive xlm models\n if hasattr(model, \"config\") and hasattr(model.config, \"lang2id\"):\n inputs.update(\n {\"langs\": (torch.ones(batch[0].shape, dtype=torch.int64) * 
args.lang_id).to(args.device)}\n )\n\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n\n output = [to_list(output[i]) for output in outputs]\n\n # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other \"simpler\"\n # models only use two.\n if len(output) >= 5:\n start_logits = output[0]\n start_top_index = output[1]\n end_logits = output[2]\n end_top_index = output[3]\n cls_logits = output[4]\n\n result = SquadResult(\n unique_id,\n start_logits,\n end_logits,\n start_top_index=start_top_index,\n end_top_index=end_top_index,\n cls_logits=cls_logits,\n )\n\n else:\n start_logits, end_logits = output\n result = SquadResult(unique_id, start_logits, end_logits)\n\n all_results.append(result)\n\n evalTime = timeit.default_timer() - start_time\n logger.info(\" Evaluation done in total %f secs (%f sec per example)\", evalTime, evalTime / len(dataset))\n\n # Compute predictions\n output_prediction_file = os.path.join(args.output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(args.output_dir, \"nbest_predictions_{}.json\".format(prefix))\n\n if args.version_2_with_negative:\n output_null_log_odds_file = os.path.join(args.output_dir, \"null_odds_{}.json\".format(prefix))\n else:\n output_null_log_odds_file = None\n\n # XLNet and XLM use a more complex post-processing procedure\n if args.model_type in [\"xlnet\", \"xlm\"]:\n start_n_top = model.config.start_n_top if hasattr(model, \"config\") else model.module.config.start_n_top\n end_n_top = model.config.end_n_top if hasattr(model, \"config\") else model.module.config.end_n_top\n\n predictions = compute_predictions_log_probs(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n start_n_top,\n end_n_top,\n args.version_2_with_negative,\n tokenizer,\n args.verbose_logging,\n )\n else:\n predictions = compute_predictions_logits(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n args.do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n args.verbose_logging,\n args.version_2_with_negative,\n args.null_score_diff_threshold,\n tokenizer,\n )\n\n # Compute the F1 and exact scores.\n results = squad_evaluate(examples, predictions)\n return results\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n # Load data features from cache or dataset file\n input_dir = args.data_dir if args.data_dir else \".\"\n cached_features_file = os.path.join(\n input_dir,\n \"cached_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n ),\n )\n\n # Init features and dataset from cache if it exists\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features_and_dataset = torch.load(cached_features_file)\n features, dataset, examples = (\n features_and_dataset[\"features\"],\n features_and_dataset[\"dataset\"],\n features_and_dataset[\"examples\"],\n )\n else:\n logger.info(\"Creating 
features from dataset file at %s\", input_dir)\n\n if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):\n try:\n import tensorflow_datasets as tfds\n except ImportError:\n raise ImportError(\"If not data_dir is specified, tensorflow_datasets needs to be installed.\")\n\n if args.version_2_with_negative:\n logger.warn(\"tensorflow_datasets does not handle version 2 of SQuAD.\")\n\n tfds_examples = tfds.load(\"squad\")\n examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)\n else:\n processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n if evaluate:\n examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)\n else:\n examples = processor.get_train_examples(args.data_dir, filename=args.train_file)\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n return_dataset=\"pt\",\n threads=args.threads,\n )\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save({\"features\": features, \"dataset\": dataset, \"examples\": examples}, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints and predictions will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the .json files for the task.\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--train_file\",\n default=None,\n type=str,\n help=\"The input training file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--predict_file\",\n default=None,\n type=str,\n help=\"The input evaluation file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--dir\",\n default=None,\n type=str,\n required=False,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--mask_dir\",\n default=None,\n type=str,\n required=False,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n\n parser.add_argument(\n \"--version_2_with_negative\",\n action=\"store_true\",\n help=\"If true, the SQuAD examples contain some that do not have an answer.\",\n )\n parser.add_argument(\n \"--null_score_diff_threshold\",\n type=float,\n default=0.0,\n help=\"If null_score - best_non_null is greater than the threshold predict null.\",\n )\n\n parser.add_argument(\n \"--max_seq_length\",\n default=384,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences \"\n \"longer than this will be truncated, and sequences shorter than this will be padded.\",\n )\n parser.add_argument(\n \"--doc_stride\",\n default=128,\n type=int,\n help=\"When splitting up a long document into chunks, how much stride to take between chunks.\",\n )\n parser.add_argument(\n \"--max_query_length\",\n default=64,\n type=int,\n help=\"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=20, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--n_best_size\",\n default=20,\n type=int,\n help=\"The total number of n-best predictions to generate in the nbest_predictions.json output file.\",\n )\n parser.add_argument(\n \"--max_answer_length\",\n default=30,\n type=int,\n help=\"The maximum length of an answer that can be generated. 
This is needed because the start \"\n \"and end predictions are not conditioned on one another.\",\n )\n parser.add_argument(\n \"--verbose_logging\",\n action=\"store_true\",\n help=\"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\",\n )\n parser.add_argument(\n \"--lang_id\",\n default=0,\n type=int,\n help=\"language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)\",\n )\n\n parser.add_argument(\n \"--weight_pertub\",\n default=None,\n type=str,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Whether not to use CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--threads\", type=int, default=1, help=\"multiple threads for converting example to features\")\n args = parser.parse_args()\n\n if args.doc_stride >= args.max_seq_length - args.max_query_length:\n logger.warning(\n \"WARNING - You've set a doc stride which may be superior to the document length in some \"\n \"examples. This could result in errors when building features from the examples. Please reduce the doc \"\n \"stride or increase the maximum length to ensure the features are correctly built.\"\n )\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n \n if args.dir == 'pre':\n\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n elif args.dir == 'rand':\n\n model = model_class(config=config)\n \n model.to(args.device)\n\n\n if args.weight_pertub:\n load_weight = torch.load(args.weight_pertub, map_location=args.device)\n model_dict = model.state_dict()\n model_dict.update(load_weight)\n model.load_state_dict(model_dict)\n\n\n if args.mask_dir:\n mask = torch.load(args.mask_dir, map_location=args.device)\n pruning_model_custom(model, mask)\n zero_rate = see_weight_rate(model)\n print('model 0:',zero_rate)\n\n\n if args.local_rank == 0:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.\n # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations. 
Note that running `--fp16_opt_level=\"O2\"` will\n # remove the need for this code, but it is still valid.\n if args.fp16:\n try:\n import apex\n\n apex.amp.register_half_function(torch, \"einsum\")\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # # Save the trained model and the tokenizer\n # if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # # Create output directory if needed\n # if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n # os.makedirs(args.output_dir)\n\n # logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # # They can then be reloaded using `from_pretrained()`\n # # Take care of distributed/parallel training\n # model_to_save = model.module if hasattr(model, \"module\") else model\n # model_to_save.save_pretrained(args.output_dir)\n # tokenizer.save_pretrained(args.output_dir)\n # torch.save(model,os.path.join(args.output_dir, \"model.pt\"))\n\n # # Good practice: save your training arguments together with the trained model\n # torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # # Load a trained model and vocabulary that you have fine-tuned\n # model = model_class.from_pretrained(args.output_dir) # , force_download=True)\n # model = torch.load(os.path.join(args.output_dir, \"model.pt\"))\n # tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n # model.to(args.device)\n\n # # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory\n # results = {}\n # if args.do_eval and args.local_rank in [-1, 0]:\n # if args.do_train:\n # logger.info(\"Loading checkpoints saved during training for evaluation\")\n # checkpoints = [args.output_dir]\n # if args.eval_all_checkpoints:\n # checkpoints = list(\n # os.path.dirname(c)\n # for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n # )\n # logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce model loading logs\n # else:\n # logger.info(\"Loading checkpoint %s for evaluation\", args.model_name_or_path)\n # checkpoints = [args.model_name_or_path]\n\n # logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n # for checkpoint in checkpoints:\n # # Reload the model\n # global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n # if '/' in global_step:\n # global_step = 'last'\n # # model = model_class.from_pretrained(checkpoint) # , force_download=True)\n # model = torch.load(os.path.join(checkpoint, \"model.pt\"))\n # model.to(args.device)\n\n # # Evaluate\n # result = evaluate(args, model, tokenizer, prefix=global_step)\n\n # result = dict((k + (\"_{}\".format(global_step) if global_step else \"\"), v) for k, v in result.items())\n # results.update(result)\n # for key in results.keys():\n # print(key, results[key])\n\n # logger.info(\"Results: {}\".format(results))\n\n # return results\n\n\nif __name__ == \"__main__\":\n main()\n",
"import torch.nn.utils.prune as prune\nimport numpy as np \nimport torch \n\ndef see_weight_varience(pre_weight):\n recover_dict = []\n for ii in range(12):\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight'].view(-1))\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight'].view(-1))\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight'].view(-1))\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight'].view(-1))\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight'].view(-1))\n recover_dict.append(pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight'].view(-1))\n recover_dict.append(pre_weight['bert.pooler.dense.weight'].view(-1))\n\n weight = torch.cat(recover_dict, dim=0)\n print(weight.size())\n print(torch.sqrt(torch.var(weight)))\n\ndef adding_noise(pre_weight, noise):\n recover_dict = {}\n for ii in range(12):\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight']\n recover_dict['bert.pooler.dense.weight'] = pre_weight['bert.pooler.dense.weight']\n\n for key in recover_dict.keys():\n print(key)\n weight_key = recover_dict[key]\n weight_key = weight_key + torch.randn_like(weight_key)*noise\n recover_dict[key] = weight_key\n\n return recover_dict\n\ndef rewind(pre_weight):\n recover_dict = {}\n for ii in range(12):\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight']\n # recover_dict['bert.pooler.dense.weight_orig'] = pre_weight['bert.pooler.dense.weight']\n\n return recover_dict\n\ndef rewind_first(pre_weight):\n recover_dict = {}\n for ii in range(12):\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight']\n 
recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight']\n recover_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight'] = pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight']\n recover_dict['bert.pooler.dense.weight'] = pre_weight['bert.pooler.dense.weight']\n\n return recover_dict\n\ndef rewind_orig(pre_weight):\n recover_dict = {}\n for ii in range(12):\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_orig']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_orig']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_orig']\n recover_dict['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_orig']\n recover_dict['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_orig']\n recover_dict['bert.encoder.layer.'+str(ii)+'.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight_orig']\n recover_dict['bert.pooler.dense.weight_orig'] = pre_weight['bert.pooler.dense.weight_orig']\n\n return recover_dict\n\ndef rewind_distribution(pre_weight):\n recover_dict = {}\n for ii in range(12):\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.attention.self.query.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.query.weight']\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.attention.self.key.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.key.weight']\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.attention.self.value.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.self.value.weight']\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.attention.output.dense.weight']\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.intermediate.dense.weight']\n recover_dict['module.bert.encoder.layer.'+str(ii)+'.output.dense.weight_orig'] = pre_weight['bert.encoder.layer.'+str(ii)+'.output.dense.weight']\n recover_dict['module.bert.pooler.dense.weight_orig'] = pre_weight['bert.pooler.dense.weight']\n\n return recover_dict\n\ndef pruning_model(model,px):\n\n parameters_to_prune =[]\n for ii in range(12):\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.query, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.key, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.value, 'weight'))\n 
parameters_to_prune.append((model.bert.encoder.layer[ii].attention.output.dense, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].intermediate.dense, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].output.dense, 'weight'))\n\n # parameters_to_prune.append((model.bert.pooler.dense, 'weight'))\n parameters_to_prune = tuple(parameters_to_prune)\n\n prune.global_unstructured(\n parameters_to_prune,\n pruning_method=prune.L1Unstructured,\n amount=px,\n )\n\ndef pruning_model_random(model,px):\n\n parameters_to_prune =[]\n for ii in range(12):\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.query, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.key, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.value, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.output.dense, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].intermediate.dense, 'weight'))\n parameters_to_prune.append((model.bert.encoder.layer[ii].output.dense, 'weight'))\n\n # parameters_to_prune.append((model.bert.pooler.dense, 'weight'))\n parameters_to_prune = tuple(parameters_to_prune)\n\n prune.global_unstructured(\n parameters_to_prune,\n pruning_method=prune.RandomUnstructured,\n amount=px,\n )\n\ndef pruning_model_custom(model, mask):\n\n parameters_to_prune =[]\n for ii in range(12):\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.query, mask['bert.encoder.layer.'+str(ii)+'.attention.self.query']))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.key, mask['bert.encoder.layer.'+str(ii)+'.attention.self.key']))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.self.value, mask['bert.encoder.layer.'+str(ii)+'.attention.self.value']))\n parameters_to_prune.append((model.bert.encoder.layer[ii].attention.output.dense, mask['bert.encoder.layer.'+str(ii)+'.attention.output.dense']))\n parameters_to_prune.append((model.bert.encoder.layer[ii].intermediate.dense, mask['bert.encoder.layer.'+str(ii)+'.intermediate.dense']))\n parameters_to_prune.append((model.bert.encoder.layer[ii].output.dense, mask['bert.encoder.layer.'+str(ii)+'.output.dense']))\n parameters_to_prune.append((model.bert.pooler.dense, mask['bert.pooler.dense']))\n\n for idx in range(len(parameters_to_prune)):\n prune.CustomFromMask.apply(parameters_to_prune[idx][0], 'weight', mask=parameters_to_prune[idx][1])\n\ndef remove_prune_model_custom(model):\n\n parameters_to_prune =[]\n for ii in range(12):\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.query)\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.key)\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.self.value)\n parameters_to_prune.append(model.bert.encoder.layer[ii].attention.output.dense)\n parameters_to_prune.append(model.bert.encoder.layer[ii].intermediate.dense)\n parameters_to_prune.append(model.bert.encoder.layer[ii].output.dense)\n parameters_to_prune.append(model.bert.pooler.dense)\n\n for idx in range(len(parameters_to_prune)):\n prune.remove(parameters_to_prune[idx], 'weight')\n\ndef pruning_model_distribution(model,px):\n\n parameters_to_prune =[]\n for ii in range(12):\n parameters_to_prune.append((model.module.bert.encoder.layer[ii].attention.self.query, 'weight'))\n parameters_to_prune.append((model.module.bert.encoder.layer[ii].attention.self.key, 'weight'))\n 
parameters_to_prune.append((model.module.bert.encoder.layer[ii].attention.self.value, 'weight'))\n parameters_to_prune.append((model.module.bert.encoder.layer[ii].attention.output.dense, 'weight'))\n parameters_to_prune.append((model.module.bert.encoder.layer[ii].intermediate.dense, 'weight'))\n parameters_to_prune.append((model.module.bert.encoder.layer[ii].output.dense, 'weight'))\n\n parameters_to_prune.append((model.module.bert.pooler.dense, 'weight'))\n parameters_to_prune = tuple(parameters_to_prune)\n\n prune.global_unstructured(\n parameters_to_prune,\n pruning_method=prune.L1Unstructured,\n amount=px,\n )\n\ndef see_weight_rate(model):\n\n sum_list = 0\n zero_sum = 0\n for ii in range(12):\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.query.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.query.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.key.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.key.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.self.value.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.self.value.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].attention.output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].attention.output.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].intermediate.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].intermediate.dense.weight == 0))\n\n sum_list = sum_list+float(model.bert.encoder.layer[ii].output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.bert.encoder.layer[ii].output.dense.weight == 0))\n\n\n # sum_list = sum_list+float(model.bert.pooler.dense.weight.nelement())\n # zero_sum = zero_sum+float(torch.sum(model.bert.pooler.dense.weight == 0))\n \n\n return 100*zero_sum/sum_list\n\ndef see_weight_rate_distribution(model):\n\n sum_list = 0\n zero_sum = 0\n for ii in range(12):\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].attention.self.query.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].attention.self.query.weight == 0))\n\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].attention.self.key.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].attention.self.key.weight == 0))\n\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].attention.self.value.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].attention.self.value.weight == 0))\n\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].attention.output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].attention.output.dense.weight == 0))\n\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].intermediate.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].intermediate.dense.weight == 0))\n\n sum_list = sum_list+float(model.module.bert.encoder.layer[ii].output.dense.weight.nelement())\n zero_sum = zero_sum+float(torch.sum(model.module.bert.encoder.layer[ii].output.dense.weight == 0))\n\n\n sum_list = sum_list+float(model.module.bert.pooler.dense.weight.nelement())\n zero_sum = 
zero_sum+float(torch.sum(model.module.bert.pooler.dense.weight == 0))\n \n\n return 100*zero_sum/sum_list\n\n\n\n"
] | [
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.nn.utils.prune.CustomFromMask.apply",
"torch.save",
"torch.ones",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
],
[
"torch.randn_like",
"torch.cat",
"torch.nn.utils.prune.remove",
"torch.sum",
"torch.var",
"torch.nn.utils.prune.global_unstructured",
"torch.nn.utils.prune.CustomFromMask.apply"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ayushkumar63123/seqio | [
"23bcb59df59798074d7d5896a131980137c69ec8",
"23bcb59df59798074d7d5896a131980137c69ec8",
"23bcb59df59798074d7d5896a131980137c69ec8",
"23bcb59df59798074d7d5896a131980137c69ec8"
] | [
"seqio/loggers.py",
"seqio/feature_converters_test.py",
"seqio/vocabularies.py",
"seqio/test_utils.py"
] | [
"# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for logging evaluation metrics and inference results.\"\"\"\n\nimport abc\nimport base64\nimport itertools\nimport json\nimport os\nimport time\nfrom typing import Any, Mapping, Optional, Sequence, Type\n\nfrom absl import logging\nimport numpy as np\nfrom seqio import metrics as metrics_lib\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\nclass Logger(abc.ABC):\n \"\"\"Abstract base class for logging.\n\n Attributes:\n output_dir: a directory to save the logging results (e.g., TensorBoard\n summary) as well as the evaluation results (e.g., \"inputs_pretokenized\",\n \"target_pretokenize\" and \"prediction\").\n \"\"\"\n\n def __init__(self, output_dir):\n self.output_dir = output_dir\n\n @abc.abstractmethod\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Logs the metrics and inferences for each task.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset.\n inferences: Mapping from inference type (\"predictions\", \"scores\") to the\n model outputs, aligned with the dataset.\n targets: The postprocessed targets, aligned with the dataset.\n \"\"\"\n ...\n\n\nclass PyLoggingLogger(Logger):\n \"\"\"A logger that writes metrics using the standard Python log.\"\"\"\n\n def __init__(self, output_dir: str, level: int = logging.INFO):\n self._level = level\n super().__init__(output_dir)\n\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n del dataset\n del inferences\n del targets\n for metric_name, metric_value in metrics.items():\n if isinstance(metric_value, metrics_lib.Scalar):\n strvalue = f\"{metric_value.value:.3f}\"\n elif isinstance(metric_value, metrics_lib.Text):\n strvalue = metric_value.textdata\n else:\n strvalue = f\"unloggable type {type(metric_value)}\"\n logging.info(\"%s/%s at step %d: %s\", task_name, metric_name, step,\n strvalue)\n\n\nclass TensorBoardLogger(Logger):\n \"\"\"A logger that writes metrics to TensorBoard summaries.\"\"\"\n\n def __init__(self, output_dir: str):\n \"\"\"TensorBoardLogger initializer.\n\n Args:\n output_dir: The base directory where all logs will be written.\n \"\"\"\n super().__init__(output_dir)\n self._summary_writers = {}\n\n def _get_summary_writer(self, summary_dir: str) -> tf.summary.SummaryWriter:\n \"\"\"Get or create a summary writer for a specific task.\n\n Args:\n summary_dir: The task we are getting the writer for.\n\n Returns:\n The summary writer associated with the directory.\n \"\"\"\n if summary_dir not in 
self._summary_writers:\n self._summary_writers[summary_dir] = tf.summary.create_file_writer(\n summary_dir, flush_millis=120)\n return self._summary_writers[summary_dir]\n\n def _write_metric(self, tag: str, value: metrics_lib.MetricValue, step: int,\n writer: tf.summary.SummaryWriter):\n \"\"\"Log a metric value to tensorboard, dispatched on value type.\"\"\"\n if isinstance(value, metrics_lib.Scalar):\n value: metrics_lib.Scalar = value\n value = float(np.array(value.value))\n with writer.as_default():\n tf.summary.scalar(name=tag, data=value, step=step)\n elif isinstance(value, metrics_lib.Image):\n value: metrics_lib.Image = value\n image = tf.convert_to_tensor(value.image)\n with writer.as_default():\n tf.summary.image(\n name=tag, data=image, step=step, max_outputs=value.max_outputs)\n elif isinstance(value, metrics_lib.Audio):\n value: metrics_lib.Audio = value\n audio = tf.convert_to_tensor(value.audiodata, dtype=tf.float32)\n with writer.as_default():\n tf.summary.audio(\n name=tag,\n data=audio,\n sample_rate=value.sample_rate,\n step=step,\n max_outputs=value.max_outputs,\n encoding=\"wav\")\n elif isinstance(value, metrics_lib.Histogram):\n value: metrics_lib.Histogram = value\n values = np.array(value.values)\n with writer.as_default():\n tf.summary.histogram(\n name=tag, data=values, step=step, buckets=value.bins)\n elif isinstance(value, metrics_lib.Text):\n value: metrics_lib.Text = value\n if not isinstance(value.textdata, (str, bytes)):\n raise ValueError(\"`textdata` should be of the type `str` or `bytes`.\")\n with writer.as_default():\n tf.summary.text(name=tag, data=tf.constant(value.textdata), step=step)\n elif isinstance(value, metrics_lib.Generic):\n with writer.as_default():\n tf.summary.write(\n tag=tag, tensor=value.tensor, metadata=value.metadata, step=step)\n else:\n raise TypeError(\n f\"Value type not understood, got '{type(value).__name__}'.\")\n\n def __call__(self, task_name: str, step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset, inferences: Mapping[str,\n Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Log metrics to tensorboard.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset, which is unused by this logger.\n inferences: The model outputs, which are unused by this logger.\n targets: The postprocessed targets, which are unused by this logger.\n \"\"\"\n del dataset\n del inferences\n del targets\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n writer = self._get_summary_writer(os.path.join(self.output_dir, task_name))\n for metric_name, metric_value in metrics.items():\n # We prefix the tag with \"eval/\" for backward compatibility.\n # TODO(adarob): Find a way to remove this or make it an option.\n self._write_metric(\n tag=f\"eval/{metric_name}\",\n value=metric_value,\n step=step,\n writer=writer)\n writer.flush()\n\n\nclass TensorBoardLoggerV1(Logger):\n \"\"\"A logger that writes metrics to TensorBoard summaries in TF1.\"\"\"\n\n def __init__(self, output_dir: str):\n \"\"\"TensorBoardLoggerV1 initializer.\n\n Args:\n output_dir: The base directory where all logs will be written.\n \"\"\"\n super().__init__(output_dir)\n self._summary_writers = {}\n\n def _get_summary_writer(self, task_name: str) -> tf.summary.SummaryWriter:\n \"\"\"Create (if needed) and return a SummaryWriter for a given task.\"\"\"\n if task_name not in self._summary_writers:\n with tf.compat.v1.Graph().as_default():\n self._summary_writers[task_name] = tf.compat.v1.summary.FileWriter(\n os.path.join(self.output_dir, task_name))\n return self._summary_writers[task_name]\n\n def __call__(self,\n task_name: str,\n step: int,\n metrics: Mapping[str, metrics_lib.Scalar],\n dataset: tf.data.Dataset,\n inferences: Mapping[str, Sequence[Any]],\n targets: Sequence[Any]) -> None:\n \"\"\"Log the eval results and optionally write summaries for TensorBoard.\n\n Note:\n This is the default implementation using tensorflow v1 operations. This\n only supports logging metrics of the Scalar type.\n\n Args:\n task_name: The name of the task these datapoints are relevant to.\n step: The timestep to place this datapoint at.\n metrics: A mapping from series names to numeric datapoints to be added to\n that series.\n dataset: The Task dataset, which is unused by this logger.\n inferences: The model outputs, which are unused by this logger.\n targets: The postprocessed targets, which are unused by this logger.\n \"\"\"\n del dataset\n del inferences\n del targets\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n summary_writer = self._get_summary_writer(task_name)\n\n for metric_name, metric_value in metrics.items():\n if not isinstance(metric_value, metrics_lib.Scalar):\n raise ValueError(f\"Value for metric '{metric_name}' should be of \"\n f\"type 'Scalar, got '{type(metric_value).__name__}'.\")\n summary = tf.compat.v1.Summary()\n\n tag = f\"eval/{metric_name}\"\n logging.info(\"%s at step %d: %.3f\", tag, step, metric_value.value)\n\n summary.value.add(tag=tag, simple_value=metric_value.value)\n summary_writer.add_summary(summary, step)\n\n summary_writer.flush()\n\n\nclass TensorAndNumpyEncoder(json.JSONEncoder):\n \"\"\"JSON Encoder to use when encoding dicts with tensors and numpy arrays.\"\"\"\n\n def __init__(self, *args, max_ndarray_size=32, **kwargs):\n self.max_ndarray_size = max_ndarray_size\n super().__init__(*args, **kwargs)\n\n def default(self, obj):\n if isinstance(obj, tf.Tensor):\n if obj.dtype == tf.bfloat16:\n # bfloat16 not supported, convert to float32.\n obj = tf.cast(obj, tf.float32)\n obj = obj.numpy()\n\n if isinstance(obj, np.ndarray):\n obj_dtype = obj.dtype\n if str(obj.dtype) == \"bfloat16\":\n # bfloat16 not supported, convert to float32.\n obj = obj.astype(np.float32)\n if obj.size <= self.max_ndarray_size:\n return obj.tolist() # Convert arrays to lists of py-native types.\n else:\n # If the ndarray is larger than allowed, return a summary string\n # instead of the entire array.\n first_five_str = str(obj.reshape([-1])[:5].tolist())[1:-1]\n return (\n f\"{type(obj).__name__}(shape={obj.shape}, dtype={obj_dtype}); \"\n f\"first: {first_five_str} ...\")\n elif (np.issubdtype(type(obj), np.number) or\n np.issubdtype(type(obj), np.bool_)):\n return obj.item() # Convert most primitive np types to py-native types.\n elif hasattr(obj, \"dtype\") and obj.dtype == tf.bfloat16.as_numpy_dtype:\n return float(obj)\n elif isinstance(obj, bytes):\n # JSON doesn't support bytes. First, try to decode using utf-8 in case\n # it's text. Otherwise, just base64 encode the bytes.\n try:\n return obj.decode(\"utf-8\")\n except UnicodeDecodeError:\n return base64.b64encode(obj)\n\n return json.JSONEncoder.default(self, obj)\n\n\nclass JSONLogger(Logger):\n \"\"\"A logger that writes metrics and model outputs to JSONL files.\"\"\"\n\n def __init__(\n self,\n output_dir: str,\n write_n_results: Optional[int] = None,\n json_encoder_cls: Type[json.JSONEncoder] = TensorAndNumpyEncoder):\n \"\"\"JSONLogger constructor.\n\n Args:\n output_dir: The base directory where all logs will be written.\n write_n_results: number of scores/predictions to be written to the file at\n each step. If None, scores and predictions from all examples are\n written.\n json_encoder_cls: Class to use for serializing JSON to file.\n \"\"\"\n super().__init__(output_dir)\n self._write_n_results = write_n_results\n self._json_encoder_cls = json_encoder_cls\n\n def __call__(self,\n task_name: str,\n step: int,\n metrics: Mapping[str, metrics_lib.MetricValue],\n dataset: tf.data.Dataset,\n inferences: Mapping[str, Sequence[Any]],\n targets: Sequence[Any]) -> None:\n if step is None:\n logging.warning(\"Step number for the logging session is not provided. 
\"\n \"A dummy value of -1 will be used.\")\n step = -1\n\n metrics_fname = os.path.join(self.output_dir, f\"{task_name}-metrics.jsonl\")\n\n serializable_metrics = {}\n for metric_name, metric_value in metrics.items():\n if isinstance(metric_value, metrics_lib.Scalar):\n serializable_metrics[metric_name] = metric_value.value\n elif isinstance(metric_value, metrics_lib.Text):\n serializable_metrics[metric_name] = metric_value.textdata\n else:\n logging.warning(\n \"Skipping JSON logging of non-serializable metric '%s' of type %s.\",\n metric_name, type(metric_value))\n\n if metrics:\n logging.info(\"Appending metrics to %s\", metrics_fname)\n # We simulate an atomic append for filesystems that do not suppport\n # mode=\"a\".\n file_contents = \"\"\n if tf.io.gfile.exists(metrics_fname):\n with tf.io.gfile.GFile(metrics_fname, \"r\") as f:\n file_contents = f.read()\n with tf.io.gfile.GFile(metrics_fname + \".tmp\", \"w\") as f:\n f.write(file_contents)\n f.write(\n json.dumps({\n \"step\": step,\n **serializable_metrics\n }, cls=self._json_encoder_cls))\n f.write(\"\\n\")\n tf.io.gfile.rename(metrics_fname + \".tmp\", metrics_fname, overwrite=True)\n\n if self._write_n_results == 0:\n return\n\n write_tick = time.time()\n inferences_fname = os.path.join(self.output_dir,\n f\"{task_name}-{step:06}.jsonl\")\n logging.info(\"Writing inferences to %s\", inferences_fname)\n with tf.io.gfile.GFile(inferences_fname, \"w\") as f:\n examples_with_scores = itertools.zip_longest(\n tfds.as_numpy(dataset), inferences.get(\"predictions\", []),\n targets, inferences.get(\"scores\", []))\n if self._write_n_results:\n examples_with_scores = itertools.islice(\n examples_with_scores, 0, self._write_n_results)\n\n for inp, prediction, target, score in examples_with_scores:\n\n # tfds.as_numpy does not convert ragged tensors\n for k in inp:\n if isinstance(inp[k], tf.RaggedTensor):\n inp[k] = inp[k].numpy()\n\n json_dict = {\"input\": inp}\n\n # Only write `prediction` if it is JSON serializable.\n if prediction is not None:\n try:\n json.dumps(prediction, cls=self._json_encoder_cls)\n json_dict[\"prediction\"] = prediction\n except TypeError:\n logging.warning(\"`prediction` is not JSON serializable\",\n exc_info=True)\n\n # Only write `target` if it is JSON serializable.\n try:\n json.dumps(target, cls=self._json_encoder_cls)\n json_dict[\"target\"] = target\n except TypeError:\n logging.warning(\"`target` is not JSON serializable\", exc_info=True)\n\n if score is not None:\n json_dict[\"score\"] = score\n\n json_str = json.dumps(json_dict, cls=self._json_encoder_cls)\n f.write(json_str + \"\\n\")\n write_time = time.time() - write_tick\n logging.info(\"Writing completed in %02f seconds (%02f examples/sec).\",\n write_time,\n len(inferences) / write_time)\n",
"# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for seqio.feature_converters.\"\"\"\n\nimport re\nfrom unittest import mock\nfrom seqio import feature_converters\nfrom seqio import test_utils\nimport tensorflow.compat.v2 as tf\n\nFeatureSpec = feature_converters.FeatureConverter.FeatureSpec\n\ntf.compat.v1.enable_eager_execution()\n\nassert_dataset = test_utils.assert_dataset\ncreate_default_dataset = test_utils.create_default_dataset\n\n\nclass HelperFunctionsTest(tf.test.TestCase):\n\n def test_non_padding_position(self):\n x = tf.constant([3, 8, 5, 0, 0, 2, 0])\n non_padding_position = feature_converters.non_padding_position(x)\n expected = [1, 1, 1, 0, 0, 1, 0]\n actual = self.evaluate(non_padding_position)\n self.assertAllEqual(actual, expected)\n\n def test_check_lengths_strict_no_exception(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 5]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n sequence_axis_mapping = {\"inputs\": 0, \"targets\": 0}\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=True,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def test_check_lengths_strict_exception(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 5]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 7, \"targets\": 4}\n sequence_axis_mapping = {\"inputs\": 0, \"targets\": 0}\n expected_msg = (\n r\".*Feature \\\\'inputs\\\\' has length not equal to the expected length of\"\n r\" 7 during initial validation.*\")\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, expected_msg):\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=True,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def test_check_lengths_not_strict_no_exception(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 5]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 7, \"targets\": 4}\n sequence_axis_mapping = {\"inputs\": 0, \"targets\": 0}\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=False,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def test_check_lengths_not_strict_exception(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 5]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 4, \"targets\": 4}\n sequence_axis_mapping = {\"inputs\": 0, \"targets\": 0}\n expected_msg = (\n r\".*Feature \\\\'inputs\\\\' has length not less than or equal to the \"\n r\"expected length of 4 during initial validation.*\")\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, expected_msg):\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=False,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def 
test_check_lengths_extra_features(self):\n x = [{\"targets\": [3, 9, 4, 5], \"targets_pretokenized\": \"some text\"}]\n output_types = {\"targets\": tf.int64, \"targets_pretokenized\": tf.string}\n output_shapes = {\"targets\": [4], \"targets_pretokenized\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=output_types, output_shapes=output_shapes)\n task_feature_lengths = {\"targets\": 4}\n sequence_axis_mapping = {\"targets\": 0}\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=True,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def test_check_lengths_seq_axis_1(self):\n x = [{\n \"targets\": [[1, 2, 3], [4, 5, 6]],\n \"targets_pretokenized\": \"some text\"\n }]\n output_types = {\"targets\": tf.int64, \"targets_pretokenized\": tf.string}\n output_shapes = {\"targets\": [2, 3], \"targets_pretokenized\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=output_types, output_shapes=output_shapes)\n task_feature_lengths = {\"targets\": 3}\n sequence_axis_mapping = {\"targets\": 1}\n ds = feature_converters._check_lengths(\n ds,\n task_feature_lengths,\n sequence_axis_mapping,\n strict=True,\n error_label=\"initial\")\n list(ds.as_numpy_iterator())\n\n def test_check_exact_match_redundant_features(self):\n expected_msg = (\n \"The input_dataset contains extra features not specified in the \"\n \"task_features: ({'random', 'inputs'}|{'inputs', 'random'})\")\n expected_msg = re.compile(expected_msg)\n with self.assertRaisesRegex(ValueError, expected_msg):\n feature_converters._check_exact_match(\n expected_features=[\"targets\"],\n actual_features=[\"inputs\", \"targets\", \"random\"],\n expected_feature_source=\"task_features\",\n actual_feature_source=\"input_dataset\")\n\n def test_check_exact_match_missing_features(self):\n expected_msg = (\n \"The input_dataset is missing features specified in the \"\n \"task_features: ({'random', 'inputs'}|{'inputs', 'random'})\")\n expected_msg = re.compile(expected_msg)\n with self.assertRaisesRegex(ValueError, expected_msg):\n feature_converters._check_exact_match(\n expected_features=[\"inputs\", \"targets\", \"random\"],\n actual_features=[\"targets\"],\n expected_feature_source=\"task_features\",\n actual_feature_source=\"input_dataset\")\n\n\nclass FeatureConvertersTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n feature_converters.FeatureConverter.TASK_FEATURES = {}\n feature_converters.FeatureConverter.MODEL_FEATURES = {}\n feature_converters.FeatureConverter.PACKING_FEATURE_DTYPES = {}\n\n def tearDown(self):\n del feature_converters.FeatureConverter.TASK_FEATURES\n del feature_converters.FeatureConverter.MODEL_FEATURES\n del feature_converters.FeatureConverter.PACKING_FEATURE_DTYPES\n super().tearDown()\n\n def test_validate_dataset_missing_feature(self):\n x = [{\"targets\": [3, 9, 4, 5]}]\n ds = create_default_dataset(x, feature_names=[\"targets\"])\n task_feature_lengths = {\"inputs\": 4, \"targets\": 4}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n expected_msg = (\"Dataset is missing an expected feature during \"\n \"initial validation: 'inputs'\")\n with self.assertRaisesRegex(ValueError, expected_msg):\n converter._validate_dataset(\n ds,\n expected_features={\n \"inputs\": FeatureSpec(dtype=tf.int32),\n \"targets\": FeatureSpec(dtype=tf.int32)\n },\n 
expected_lengths=task_feature_lengths,\n strict=False,\n error_label=\"initial\")\n\n def test_validate_dataset_incorrect_dtype(self):\n x = [{\"inputs\": [9, 4, 3, 8, 6], \"targets\": [3, 9, 4, 5]}]\n task_feature_dtypes = {\"inputs\": tf.int32, \"targets\": tf.int64}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=task_feature_dtypes,\n output_shapes={\"inputs\": [None], \"targets\": [None]})\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n feature_converters.FeatureConverter.TASK_FEATURES = {\n k: FeatureSpec(v) for k, v in task_feature_dtypes.items()}\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n expected_msg = (\"Dataset has incorrect type for feature 'inputs' during \"\n \"initial validation: Got int32, expected int64\")\n with self.assertRaisesRegex(ValueError, expected_msg):\n converter._validate_dataset(\n ds,\n expected_features={\n \"inputs\": FeatureSpec(dtype=tf.int64),\n \"targets\": FeatureSpec(dtype=tf.int64)\n },\n expected_lengths=task_feature_lengths,\n strict=False,\n error_label=\"initial\")\n\n def test_validate_dataset_incorrect_rank(self):\n x = [{\"inputs\": [[9, 4, 3, 8, 6]], \"targets\": [3, 9, 4, 5]}]\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types={\"inputs\": tf.int64, \"targets\": tf.int64},\n output_shapes={\"inputs\": [None, 1], \"targets\": [None]})\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n expected_msg = (\"Dataset has incorrect rank for feature 'inputs' during \"\n \"initial validation: Got 2, expected 1\")\n with self.assertRaisesRegex(ValueError, expected_msg):\n converter._validate_dataset(\n ds,\n expected_features={\n \"inputs\": FeatureSpec(dtype=tf.int64),\n \"targets\": FeatureSpec(dtype=tf.int64)\n },\n expected_lengths=task_feature_lengths,\n strict=False,\n error_label=\"initial\")\n\n def test_validate_dataset_rank_2(self):\n x = [{\"inputs\": [[9, 4, 3, 8, 6]], \"targets\": [3, 9, 4, 5]}]\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types={\"inputs\": tf.int64, \"targets\": tf.int64},\n output_shapes={\"inputs\": [None, 1], \"targets\": [None]})\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n converter._validate_dataset(\n ds,\n expected_features={\n \"inputs\": FeatureSpec(dtype=tf.int64, rank=2),\n \"targets\": FeatureSpec(dtype=tf.int64)\n },\n expected_lengths=task_feature_lengths,\n strict=False,\n error_label=\"initial\")\n\n def test_validate_dataset_rank_2_with_pack(self):\n x = [{\"inputs\": [[9, 4, 3, 8, 6]], \"targets\": [3, 9, 4, 5]}]\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types={\"inputs\": tf.int64, \"targets\": tf.int64},\n output_shapes={\"inputs\": [None, 1], \"targets\": [None]})\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()),\\\n mock.patch.object(feature_converters.FeatureConverter,\n \"_convert_features\", return_value=ds):\n converter = feature_converters.FeatureConverter(pack=True) # pytype: 
disable=not-instantiable\n feature_converters.FeatureConverter.TASK_FEATURES = {\n \"inputs\": FeatureSpec(tf.int64, rank=2),\n \"targets\": FeatureSpec(tf.int64)\n }\n feature_converters.FeatureConverter.MODEL_FEATURES = {\n \"inputs\": FeatureSpec(tf.int64, rank=2),\n \"targets\": FeatureSpec(tf.int64)\n }\n expected_msg = (\"When packing is enabled, expected ranks must be 1 or \"\n \"use_custom_packing_ops must be set. Got expected rank 2 \"\n \"for feature inputs.\")\n with self.assertRaisesRegex(ValueError, expected_msg):\n converter(ds, task_feature_lengths)\n\n def test_call_missing_input_lengths(self):\n x = [{\"inputs\": [9, 4, 3, 8, 6], \"targets\": [3, 9, 4, 5]}]\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types={\"inputs\": tf.int64, \"targets\": tf.int64},\n output_shapes={\"inputs\": [5], \"targets\": [5]})\n task_feature_lengths = {\"inputs\": 5}\n\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n feature_converters.FeatureConverter.TASK_FEATURES = {\n \"inputs\": FeatureSpec(tf.int64),\n \"targets\": FeatureSpec(tf.int64)\n }\n expected_msg = (\"The task_feature_lengths is missing features specified \"\n \"in the TASK_FEATURES: {'targets'}\")\n with self.assertRaisesRegex(ValueError, expected_msg):\n converter(ds, task_feature_lengths)\n\n def test_validate_dataset_pretokenized_field(self):\n x = [{\"targets\": [3, 9, 4, 5], \"targets_pretokenized\": \"some text\"}]\n output_types = {\"targets\": tf.int64, \"targets_pretokenized\": tf.string}\n output_shapes = {\"targets\": [4], \"targets_pretokenized\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=output_types, output_shapes=output_shapes)\n\n task_feature_lengths = {\"targets\": 4}\n with mock.patch.object(feature_converters.FeatureConverter,\n \"__abstractmethods__\", set()):\n converter = feature_converters.FeatureConverter() # pytype: disable=not-instantiable\n # _validate_dataset works even if ds has targets and targets_pretokenized\n ds = converter._validate_dataset(\n ds,\n expected_features={\"targets\": FeatureSpec(dtype=tf.int64)},\n expected_lengths=task_feature_lengths,\n strict=True,\n error_label=\"initial\")\n\n\nclass EncDecFeatureConverterTest(tf.test.TestCase):\n\n def test_encoder_decoder_unpacked(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 1]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"encoder_input_tokens\": [9, 4, 3, 8, 1, 0, 0],\n \"decoder_target_tokens\": [3, 9, 4, 1, 0],\n # mtf.transformer.autoregressive_inputs does not zero out the last eos\n # when the data is not packed. 
This test mimic the behavior.\n \"decoder_input_tokens\": [0, 3, 9, 4, 1],\n \"decoder_loss_weights\": [1, 1, 1, 1, 0],\n }\n assert_dataset(converted_ds, expected)\n\n def test_encoder_decoder_targets_max_length(self):\n x = [{\"inputs\": [9, 4, 3, 8, 1], \"targets\": [3, 9, 4, 5, 1]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 5, \"targets\": 5}\n\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"encoder_input_tokens\": [9, 4, 3, 8, 1],\n \"decoder_target_tokens\": [3, 9, 4, 5, 1],\n \"decoder_input_tokens\": [0, 3, 9, 4, 5],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1],\n }\n assert_dataset(converted_ds, expected)\n\n def test_encoder_decoder_extra_long_inputs(self):\n x = [{\"inputs\": [9, 4, 3, 8, 4, 5, 1], \"targets\": [3, 9, 4, 7, 8, 1]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 5, \"targets\": 8}\n expected_msg = (\n r\".*Feature \\\\'inputs\\\\' has length not less than or equal to the \"\n r\"expected length of 5 during input_validation.*\")\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, expected_msg):\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n list(converted_ds.as_numpy_iterator())\n\n def test_encoder_decoder_packed(self):\n x = [{\"inputs\": [7, 8, 5, 1], \"targets\": [3, 9, 1]},\n {\"inputs\": [8, 4, 9, 3, 1], \"targets\": [4, 1]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 10, \"targets\": 7}\n\n converter = feature_converters.EncDecFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n expected = {\n \"encoder_input_tokens\": [7, 8, 5, 1, 8, 4, 9, 3, 1, 0],\n \"encoder_segment_ids\": [1, 1, 1, 1, 2, 2, 2, 2, 2, 0],\n \"encoder_positions\": [0, 1, 2, 3, 0, 1, 2, 3, 4, 0],\n \"decoder_target_tokens\": [3, 9, 1, 4, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 0, 4, 0, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 0, 0],\n \"decoder_segment_ids\": [1, 1, 1, 2, 2, 0, 0],\n \"decoder_positions\": [0, 1, 2, 0, 1, 0, 0],\n }\n assert_dataset(converted_ds, expected)\n\n def test_encoder_decoder_packed_long_sequences(self):\n x = [{\"inputs\": [7, 8, 5, 6, 9, 4, 1], \"targets\": [3, 9, 1]},\n {\"inputs\": [8, 4, 9, 3, 5, 1], \"targets\": [4, 1]}]\n ds = create_default_dataset(x)\n task_feature_lengths = {\"inputs\": 7, \"targets\": 3}\n\n converter = feature_converters.EncDecFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n\n # Corner case: packing is true but task_feature_lengths are too long for\n # packing to happen. 
We should still get the *_segment_id, *_position\n # fields.\n expected = [{\n \"encoder_input_tokens\": [7, 8, 5, 6, 9, 4, 1],\n \"encoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1],\n \"encoder_positions\": [0, 1, 2, 3, 4, 5, 6],\n \"decoder_target_tokens\": [3, 9, 1],\n \"decoder_input_tokens\": [0, 3, 9],\n \"decoder_loss_weights\": [1, 1, 1],\n \"decoder_segment_ids\": [1, 1, 1],\n \"decoder_positions\": [0, 1, 2],\n }, {\n \"encoder_input_tokens\": [8, 4, 9, 3, 5, 1, 0],\n \"encoder_segment_ids\": [1, 1, 1, 1, 1, 1, 0],\n \"encoder_positions\": [0, 1, 2, 3, 4, 5, 0],\n \"decoder_target_tokens\": [4, 1, 0],\n \"decoder_input_tokens\": [0, 4, 0],\n \"decoder_loss_weights\": [1, 1, 0],\n \"decoder_segment_ids\": [1, 1, 0],\n \"decoder_positions\": [0, 1, 0],\n }]\n assert_dataset(converted_ds, expected)\n\n def test_encoder_decoder_pretokenized_field(self):\n x = [{\n \"inputs\": [7, 8, 5, 1],\n \"targets\": [3, 9, 1],\n \"targets_pretokenized\": \"abc\"\n }, {\n \"inputs\": [8, 4, 9, 3, 1],\n \"targets\": [4, 1],\n \"targets_pretokenized\": \"def\"\n }]\n types = {\n \"inputs\": tf.int32,\n \"targets\": tf.int32,\n \"targets_pretokenized\": tf.string\n }\n shapes = {\"inputs\": [None], \"targets\": [None], \"targets_pretokenized\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=types, output_shapes=shapes)\n\n task_feature_lengths = {\"inputs\": 10, \"targets\": 7}\n converter = feature_converters.EncDecFeatureConverter(pack=True)\n # Check whether convert_features raise error because targets_pretokenized is\n # present in the ds but not in the task_feature_lengths\n converter(ds, task_feature_lengths)\n\n\nclass LMFeatureConverter(tf.test.TestCase):\n\n def test_lm_unpacked(self):\n x = [{\"targets\": [3, 9, 1]}]\n ds = create_default_dataset(x, feature_names=[\"targets\"])\n task_feature_lengths = {\"targets\": 5}\n\n converter = feature_converters.LMFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }\n assert_dataset(converted_ds, expected)\n\n def test_lm_only_packed(self):\n x = [{\"targets\": [3, 9, 1]}, {\"targets\": [4, 1]}]\n ds = create_default_dataset(x, feature_names=[\"targets\"])\n task_feature_lengths = {\"targets\": 6}\n\n converter = feature_converters.LMFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [3, 9, 1, 4, 1, 0],\n \"decoder_input_tokens\": [0, 3, 9, 0, 4, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 0, 1, 0],\n \"decoder_segment_ids\": [1, 1, 1, 2, 2, 0]\n }\n assert_dataset(converted_ds, expected)\n\n def test_lm_pack_long_sequences(self):\n x = [{\"targets\": [3, 9, 4, 5, 1]}, {\"targets\": [4, 3, 2, 1]}]\n ds = create_default_dataset(x, feature_names=[\"targets\"])\n task_feature_lengths = {\"targets\": 5}\n\n converter = feature_converters.LMFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = [{\n \"decoder_target_tokens\": [3, 9, 4, 5, 1],\n \"decoder_input_tokens\": [0, 3, 9, 4, 5],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1],\n \"decoder_positions\": [0, 1, 2, 3, 4],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1]\n }, {\n \"decoder_target_tokens\": [4, 3, 2, 1, 0],\n \"decoder_input_tokens\": [0, 4, 3, 2, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 3, 0],\n 
\"decoder_segment_ids\": [1, 1, 1, 1, 0]\n }]\n assert_dataset(converted_ds, expected)\n\n def test_lm_plaintext_field(self):\n x = [{\"targets\": [3, 9, 1], \"targets_plaintext\": \"abc\"},\n {\"targets\": [4, 1], \"targets_plaintext\": \"abc\"}]\n types = {\"targets\": tf.int32, \"targets_plaintext\": tf.string}\n shapes = {\"targets\": [None], \"targets_plaintext\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=types, output_shapes=shapes)\n task_feature_lengths = {\"targets\": 6}\n\n converter = feature_converters.LMFeatureConverter(pack=True)\n converter(ds, task_feature_lengths)\n\n\nclass PrefixLMFeatureConverter(tf.test.TestCase):\n\n def test_prefix_lm_unpacked(self):\n x = [{\"inputs\": [9, 4, 6, 1], \"targets\": [3, 9, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n converter = feature_converters.PrefixLMFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [9, 4, 6, 1, 3, 9, 1, 0, 0],\n # The last EOS token is kept if unpacked.\n \"decoder_input_tokens\": [0, 9, 4, 6, 1, 3, 9, 1, 0],\n \"decoder_loss_weights\": [0, 0, 0, 0, 1, 1, 1, 0, 0],\n \"decoder_causal_attention\": [1, 1, 1, 1, 1, 0, 0, 0, 0]\n }\n assert_dataset(converted_ds, expected)\n\n def test_prefix_lm_long_inputs_feature_length(self):\n x = [{\"inputs\": [9, 4, 6, 1], \"targets\": [3, 9, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 10, \"targets\": 4}\n converter = feature_converters.PrefixLMFeatureConverter(pack=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [9, 4, 6, 1, 3, 9, 1, 0, 0, 0, 0, 0, 0, 0],\n # The last EOS token is kept if unpacked.\n \"decoder_input_tokens\": [0, 9, 4, 6, 1, 3, 9, 1, 0, 0, 0, 0, 0, 0],\n \"decoder_loss_weights\": [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n \"decoder_causal_attention\": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n assert_dataset(converted_ds, expected)\n\n def test_prefix_lm_packed(self):\n x = [{\"inputs\": [7, 8, 5, 1], \"targets\": [3, 9, 1]},\n {\"inputs\": [8, 4, 9, 3, 1], \"targets\": [4, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 8, \"targets\": 7}\n converter = feature_converters.PrefixLMFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1, 8, 4, 9, 3, 1, 4, 1, 0],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9, 0, 8, 4, 9, 3, 1, 4, 0],\n \"decoder_loss_weights\": [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0],\n \"decoder_causal_attention\": [\n 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0\n ]\n }\n assert_dataset(converted_ds, expected)\n\n def test_prefix_lm_unpacked_loss_on_inputs_and_targets(self):\n x = [{\"inputs\": [9, 4, 6, 1], \"targets\": [3, 9, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 5, \"targets\": 4}\n converter = feature_converters.PrefixLMFeatureConverter(\n pack=False, loss_on_targets_only=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [9, 4, 6, 1, 3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 9, 4, 6, 1, 3, 9, 1, 0],\n # Loss weights on the inputs portion and padding should be zeroed out.\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 
1, 1, 0, 0],\n \"decoder_causal_attention\": [1, 1, 1, 1, 1, 0, 0, 0, 0]\n }\n assert_dataset(converted_ds, expected)\n\n def test_prefix_lm_packed_loss_on_inputs_and_targets(self):\n x = [{\"inputs\": [7, 8, 5, 1], \"targets\": [3, 9, 1]},\n {\"inputs\": [8, 4, 9, 3, 1], \"targets\": [4, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 8, \"targets\": 7}\n converter = feature_converters.PrefixLMFeatureConverter(\n pack=True, loss_on_targets_only=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1, 8, 4, 9, 3, 1, 4, 1, 0],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9, 0, 8, 4, 9, 3, 1, 4, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0],\n \"decoder_causal_attention\": [\n 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0\n ]\n }\n assert_dataset(converted_ds, expected)\n\n def test_prefix_lm_long_inputs(self):\n x = [{\"inputs\": [7, 8, 5, 6, 1], \"targets\": [3, 9, 7, 1]},\n {\"inputs\": [8, 4, 9, 3, 8, 1], \"targets\": [4, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 4, \"targets\": 3}\n expected_msg = (\n r\".*Feature \\\\'inputs\\\\' has length not less than or equal to the \"\n r\"expected length of 4 during input_validation.*\")\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, expected_msg):\n converter = feature_converters.PrefixLMFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n list(converted_ds.as_numpy_iterator())\n\n def test_prefix_lm_pack_long_sequences(self):\n x = [{\"inputs\": [7, 8, 5, 1], \"targets\": [3, 9, 1]},\n {\"inputs\": [8, 4, 1], \"targets\": [5, 1]}]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 4, \"targets\": 3}\n converter = feature_converters.PrefixLMFeatureConverter(pack=True)\n converted_ds = converter(ds, task_feature_lengths)\n\n # The examples should not be packed because examples are not short enough.\n expected = [{\n \"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9],\n \"decoder_loss_weights\": [0, 0, 0, 0, 1, 1, 1],\n \"decoder_positions\": [0, 1, 2, 3, 4, 5, 6],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1],\n \"decoder_causal_attention\": [1, 1, 1, 1, 1, 0, 0]\n }, {\n \"decoder_target_tokens\": [8, 4, 1, 5, 1, 0, 0],\n \"decoder_input_tokens\": [0, 8, 4, 1, 5, 0, 0],\n \"decoder_loss_weights\": [0, 0, 0, 1, 1, 0, 0],\n \"decoder_positions\": [0, 1, 2, 3, 4, 0, 0],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1, 0, 0],\n \"decoder_causal_attention\": [1, 1, 1, 1, 0, 0, 0]\n }]\n assert_dataset(converted_ds, expected)\n\n def test_convert_example(self):\n features = {\n \"targets\": tf.constant([7, 8, 5, 1, 3, 9, 1, 0]),\n \"inputs_width\": tf.constant([4, 4, 4, 4, 4, 4, 4, 0]),\n \"inputs_width_add_pos\": tf.constant([5, 5, 5, 5, 5, 5, 5, 0])\n }\n converter = feature_converters.PrefixLMFeatureConverter(pack=False)\n expected = {\"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1, 0],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9, 1],\n \"decoder_loss_weights\": [0, 0, 0, 0, 1, 1, 1, 0],\n \"decoder_causal_attention\": [1, 1, 1, 1, 1, 0, 0, 0]}\n actual = converter._convert_example(features)\n for feat, tensor in actual.items():\n self.assertAllEqual(expected[feat], self.evaluate(tensor))\n\n\nclass 
DecoderFeatureConverterTest(FeatureConvertersTest):\n\n def test_prefixlm(self):\n x = [{\n \"inputs\": [7, 8, 5, 1],\n \"targets\": [3, 9, 1]\n }, {\n \"inputs\": [8, 4, 9, 3, 1],\n \"targets\": [4, 1]\n }]\n ds = create_default_dataset(x)\n\n task_feature_lengths = {\"inputs\": 8, \"targets\": 7}\n converter = feature_converters.DecoderFeatureConverter(\n pack=True, loss_on_targets_only=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = {\n \"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1, 8, 4, 9, 3, 1, 4, 1, 0],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9, 0, 8, 4, 9, 3, 1, 4, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0],\n \"decoder_causal_attention\": [\n 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0\n ]\n }\n assert_dataset(converted_ds, expected)\n\n def test_lm(self):\n x = [{\"targets\": [3, 9, 4, 5, 1]}, {\"targets\": [4, 3, 2, 1]}]\n ds = create_default_dataset(x, feature_names=[\"targets\"])\n task_feature_lengths = {\"targets\": 5}\n\n converter = feature_converters.DecoderFeatureConverter(\n pack=True, loss_on_targets_only=False)\n converted_ds = converter(ds, task_feature_lengths)\n\n expected = [{\n \"decoder_target_tokens\": [3, 9, 4, 5, 1],\n \"decoder_input_tokens\": [0, 3, 9, 4, 5],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1],\n \"decoder_positions\": [0, 1, 2, 3, 4],\n \"decoder_segment_ids\": [1, 1, 1, 1, 1]\n }, {\n \"decoder_target_tokens\": [4, 3, 2, 1, 0],\n \"decoder_input_tokens\": [0, 4, 3, 2, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 0],\n \"decoder_positions\": [0, 1, 2, 3, 0],\n \"decoder_segment_ids\": [1, 1, 1, 1, 0]\n }]\n assert_dataset(converted_ds, expected)\n\n\nclass EncoderFeatureConverterTest(FeatureConvertersTest):\n\n def test_encoder_unpacked(self):\n x = [{\n # Assume 9 is the sentinel used to indicate prediction-tokens (e.g., for\n # MLM this would be [MASK] token).\n \"inputs\": [8, 9, 4, 9, 1],\n \"targets\": [8, 7, 4, 6, 1]\n }]\n\n ds = create_default_dataset(x)\n input_lengths = {\"inputs\": 6, \"targets\": 6}\n converter = feature_converters.EncoderFeatureConverter(\n mask_id=9, pack=False)\n converted_ds = converter(ds, input_lengths)\n\n # Determine the loss weight by tf.equal(inputs == mask_sentinel)\n # Let 8 be the index of the sentinel used for classification. 
For BERT this\n # corresponds to [CLS] token.\n expected = {\n \"encoder_input_tokens\": [8, 9, 4, 9, 1, 0],\n \"encoder_target_tokens\": [8, 7, 4, 6, 1, 0],\n \"encoder_loss_weights\": [0, 1, 0, 1, 0, 0],\n }\n assert_dataset(converted_ds, expected)\n\n def test_encoder_packed(self):\n x = [{\"inputs\": [8, 9, 9, 3, 4, 1], \"targets\": [8, 7, 4, 3, 4, 1]},\n {\"inputs\": [8, 3, 9, 1], \"targets\": [8, 3, 6, 1]}]\n\n ds = create_default_dataset(x)\n input_lengths = {\"inputs\": 11, \"targets\": 11}\n converter = feature_converters.EncoderFeatureConverter(mask_id=9)\n converted_ds = converter(ds, input_lengths)\n\n expected = {\n \"encoder_input_tokens\": [8, 9, 9, 3, 4, 1, 8, 3, 9, 1, 0],\n \"encoder_target_tokens\": [8, 7, 4, 3, 4, 1, 8, 3, 6, 1, 0],\n \"encoder_segment_ids\": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 0],\n \"encoder_positions\": [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0],\n \"encoder_loss_weights\": [0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n }\n assert_dataset(converted_ds, expected)\n\n def test_encoder_pack_long_sequences(self):\n x = [{\"inputs\": [8, 9, 9, 3, 1], \"targets\": [8, 7, 4, 3, 1]},\n {\"inputs\": [8, 3, 9, 1], \"targets\": [8, 3, 6, 1]}]\n\n ds = create_default_dataset(x)\n input_lengths = {\"inputs\": 5, \"targets\": 5}\n converter = feature_converters.EncoderFeatureConverter(mask_id=9)\n converted_ds = converter(ds, input_lengths)\n\n expected = [{\n \"encoder_input_tokens\": [8, 9, 9, 3, 1],\n \"encoder_target_tokens\": [8, 7, 4, 3, 1],\n \"encoder_segment_ids\": [1, 1, 1, 1, 1],\n \"encoder_positions\": [0, 1, 2, 3, 4],\n \"encoder_loss_weights\": [0, 1, 1, 0, 0],\n }, {\n \"encoder_input_tokens\": [8, 3, 9, 1, 0],\n \"encoder_target_tokens\": [8, 3, 6, 1, 0],\n \"encoder_segment_ids\": [1, 1, 1, 1, 0],\n \"encoder_positions\": [0, 1, 2, 3, 0],\n \"encoder_loss_weights\": [0, 0, 1, 0, 0],\n }]\n assert_dataset(converted_ds, expected)\n\n def test_encoder_plaintext_field(self):\n x = [{\n \"inputs\": [8, 9, 9, 3, 4, 1],\n \"targets\": [8, 7, 4, 3, 4, 1],\n \"targets_plaintext\": \"abc\"\n }, {\n \"inputs\": [8, 3, 9, 1],\n \"targets\": [8, 3, 6, 1],\n \"targets_plaintext\": \"def\"\n }]\n types = {\n \"inputs\": tf.int32,\n \"targets\": tf.int32,\n \"targets_plaintext\": tf.string\n }\n shapes = {\"inputs\": [None], \"targets\": [None], \"targets_plaintext\": []}\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=types, output_shapes=shapes)\n\n input_lengths = {\"inputs\": 7, \"targets\": 7}\n converter = feature_converters.EncoderFeatureConverter(mask_id=9)\n # Check whether convert_features raise error because targets_plaintext is\n # present in the ds but not in the output_features\n converter(ds, input_lengths)\n\n\nclass PassThroughFeatureConverterTest(tf.test.TestCase):\n\n def test_equivalence(self):\n x = [{\n \"decoder_target_tokens\": [7, 8, 5, 1, 3, 9, 1, 8, 4, 9, 3, 1, 4, 1, 0],\n \"decoder_input_tokens\": [0, 7, 8, 5, 1, 3, 9, 0, 8, 4, 9, 3, 1, 4, 0],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n }]\n ds = create_default_dataset(\n x,\n feature_names=[\n \"decoder_target_tokens\", \"decoder_input_tokens\",\n \"decoder_loss_weights\"\n ])\n converter = feature_converters.PassThroughFeatureConverter()\n converted_ds = converter(ds, task_feature_lengths={})\n test_utils.assert_datasets_eq(converted_ds, ds)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Vocabularies.\"\"\"\n\nimport abc\nimport hashlib\nfrom typing import Any, Dict, Iterable, Optional, Sequence, Union\nfrom absl import logging\nimport tensorflow.compat.v2 as tf\nimport tensorflow_text as tf_text\n\nfrom sentencepiece import sentencepiece_model_pb2\nimport sentencepiece as sentencepiece_processor\n\nPAD_ID = 0\n\n\nclass Vocabulary(metaclass=abc.ABCMeta):\n \"\"\"Abstract class for all vocabularies.\n\n Subclasses must implement methods for converting between strings and tokens\n both in pure python (`_encode`/`_decode`) and in TensorFlow\n (`_encode_tf`/`_decode_tf`).\n\n Subclasses are responsible for reserving PAD_ID=0 as well as optionally\n reserving EOS_ID and UNK_ID\n\n `_base_vocab_size` should account for PAD, EOS, and UNK but not `extra_ids`.\n \"\"\"\n\n def __init__(self, extra_ids: int = 0):\n \"\"\"Vocabulary constructor.\n\n Args:\n extra_ids: The number of extra IDs to reserve.\n \"\"\"\n self._extra_ids = extra_ids or 0\n\n @abc.abstractproperty\n def eos_id(self) -> Optional[int]:\n raise NotImplementedError(\"need to implement eos_id\")\n\n @property\n def pad_id(self) -> int:\n return PAD_ID\n\n @abc.abstractproperty\n def unk_id(self) -> Optional[int]:\n raise NotImplementedError(\"need to implement unk_id\")\n\n @property\n def extra_ids(self) -> int:\n return self._extra_ids\n\n @property\n def vocab_size(self) -> int:\n \"\"\"Vocabulary size, including extra ids.\"\"\"\n return self._base_vocab_size + self.extra_ids\n\n @abc.abstractproperty\n def _base_vocab_size(self) -> int:\n \"\"\"Vocabulary size, excluding extra ids but including PAD/EOS/UNK.\"\"\"\n # TODO(fjord): add a check that pad_id and unk_id (if present)\n # are less than _base_vocab_size.\n raise NotImplementedError\n\n @abc.abstractmethod\n def _encode(self, s: str) -> Sequence[int]:\n raise NotImplementedError\n\n def encode(self, s: Union[Sequence[int], str]) -> Sequence[int]:\n \"\"\"Tokenizes string to an int sequence, without adding EOS.\"\"\"\n return self._encode(s)\n\n @abc.abstractmethod\n def _decode(self, ids):\n raise NotImplementedError\n\n def decode(self, ids: Iterable[int]):\n \"\"\"Detokenizes int32 iterable to a string, up through first EOS.\"\"\"\n clean_ids = list(ids)\n\n if self.unk_id is not None:\n vocab_size = self._base_vocab_size\n clean_ids = [\n self.unk_id if i >= vocab_size else i\n for i in clean_ids\n ]\n\n if self.eos_id is not None and self.eos_id in clean_ids:\n clean_ids = clean_ids[:clean_ids.index(self.eos_id) + 1]\n\n return self._decode(clean_ids)\n\n @abc.abstractmethod\n def _encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n raise NotImplementedError\n\n def encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n \"\"\"Tokenizes string Scalar to an int32 Tensor, without adding EOS.\"\"\"\n return self._encode_tf(s)\n\n @abc.abstractmethod\n def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:\n raise NotImplementedError\n\n def decode_tf(self, 
ids: tf.Tensor) -> tf.Tensor:\n \"\"\"Detokenizes int32 batched Tensor through first EOS.\"\"\"\n clean_ids = ids\n\n if self.unk_id is not None:\n clean_ids = tf.where(\n tf.less(clean_ids, self._base_vocab_size), clean_ids, self.unk_id)\n\n if self.eos_id is not None:\n # Replace everything after the first eos_id with pad_id.\n after_eos = tf.cumsum(\n tf.cast(tf.equal(clean_ids, self.eos_id), tf.int32),\n exclusive=True, axis=-1)\n clean_ids = tf.where(tf.cast(after_eos, tf.bool), self.pad_id, clean_ids)\n\n return self._decode_tf(clean_ids)\n\n\nclass PassThroughVocabulary(Vocabulary):\n \"\"\"Vocabulary that passes through inputs unchanged.\"\"\"\n\n def __init__(\n self,\n size: int,\n eos_id: Optional[int] = None):\n \"\"\"PassThroughVocabulary constructor.\n\n Args:\n size: the full size of the vocabulary.\n eos_id: the end-of-sequence token.\n \"\"\"\n self._size = size\n self._eos_id = eos_id\n super().__init__()\n\n @property\n def _base_vocab_size(self):\n return self._size\n\n def _encode(self, s: Sequence[int]) -> Sequence[int]:\n return s\n\n def _decode(self, ids: Sequence[int]) -> Sequence[int]:\n return ids\n\n def _encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n return s\n\n def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:\n return ids\n\n @property\n def eos_id(self) -> Optional[int]:\n return self._eos_id\n\n @property\n def unk_id(self) -> Optional[int]:\n return None\n\n def __eq__(self, other):\n if not isinstance(other, PassThroughVocabulary):\n return False\n return (self._size == other._size and\n self.eos_id == other.eos_id)\n\n\nclass SentencePieceVocabulary(Vocabulary):\n \"\"\"Wrapper for nlp/sentencepiece encoder.\n\n Assumes the model was built using flags to reserve ID=0 for padding, ID=1 for\n EOS, and ID=2 for UNK.\n\n If using extra ids, you can represent them in string-form as `<extra_id_0>`,\n `<extra_id_1>`, etc. 
They will be indexed starting from the end of the\n vocabulary to match how the masking preprocessors are set up.\n\n IMPORTANT NOTE: these placeholders only work properly when they are used at\n word starts (e.g., \"I like peanut butter and <extra_id_0> sandwiches.\" or\n \"I like peanut butter and <extra_id_0>ly sandwiches\" are both okay, but\n \"I like peanut butter and jel<extra_id_0> sandwiches is not.\").\n \"\"\"\n\n def __init__(self, sentencepiece_model_file, extra_ids=None):\n \"\"\"Create a SentencePieceVocabulary.\n\n Optionally, specify a number of extra ids to add to the end of the\n vocabulary for use as sentinels.\n\n Args:\n sentencepiece_model_file: a string\n extra_ids: an optional integer\n \"\"\"\n self._sentencepiece_model_file = sentencepiece_model_file\n self._tokenizer = None\n self._sp_model = None\n super().__init__(extra_ids=extra_ids)\n\n def _load_model(self):\n \"\"\"Load SPM and Python tokenizer.\"\"\"\n # Handle cases where SP can't load the file, but gfile can.\n with tf.io.gfile.GFile(self._sentencepiece_model_file, \"rb\") as f:\n self._sp_model = f.read()\n # Add placeholder strings for extra IDs.\n model = sentencepiece_model_pb2.ModelProto.FromString(self._sp_model)\n if self._extra_ids:\n # We name them in reverse order to match their use in span corruption.\n for i in reversed(range(self._extra_ids)):\n model.pieces.add(\n piece=f\"▁<extra_id_{i}>\", score=0.0,\n type=\n sentencepiece_model_pb2.ModelProto.SentencePiece.USER_DEFINED)\n self._sp_model = model.SerializeToString()\n # Load Python tokenizer and ensure the EOS and PAD IDs are correct.\n self._tokenizer = sentencepiece_processor.SentencePieceProcessor()\n self._tokenizer.LoadFromSerializedProto(self._sp_model)\n if self._tokenizer.pad_id() != PAD_ID:\n logging.warning(\n \"T5 library uses PAD_ID=%s, which is different from the \"\n \"sentencepiece vocabulary, which defines pad_id=%s\",\n PAD_ID, self._tokenizer.pad_id())\n\n @property\n def eos_id(self) -> Optional[int]:\n return self.tokenizer.eos_id()\n\n @property\n def unk_id(self) -> Optional[int]:\n return self.tokenizer.unk_id()\n\n @property\n def sp_model(self):\n \"\"\"Retrieve the SPM.\"\"\"\n if self._sp_model is None:\n self._load_model()\n return self._sp_model\n\n @property\n def sentencepiece_model_file(self):\n return self._sentencepiece_model_file\n\n @property\n def tokenizer(self):\n \"\"\"Returns the Python tokenizer.\"\"\"\n if not self._tokenizer:\n self._load_model()\n return self._tokenizer\n\n @property\n def tf_tokenizer(self):\n \"\"\"Instantiate and return a TF tokenizer.\"\"\"\n return tf_text.SentencepieceTokenizer(model=self.sp_model)\n\n @property\n def vocab_size(self):\n return self._base_vocab_size\n\n @property\n def _base_vocab_size(self):\n \"\"\"Number of ids (including 0=PAD, 1=EOS, and 2=UNK).\n\n Returns:\n an integer, the vocabulary size\n \"\"\"\n return self.tokenizer.GetPieceSize()\n\n def _encode(self, s):\n \"\"\"Encode a python string as a list of integers.\n\n Args:\n s: a string\n Returns:\n a list of integers (not terminated by EOS)\n \"\"\"\n return self.tokenizer.EncodeAsIds(s)\n\n def _decode(self, ids):\n \"\"\"Decode a list of integers to a python string.\n\n Args:\n ids: a list of integers (not terminated by EOS)\n Returns:\n a string\n \"\"\"\n # convert all the extra ids (sentinels) to UNK=2\n ids = [\n self.tokenizer.unk_id() if i >= self.tokenizer.GetPieceSize()\n else i for i in ids]\n return self.tokenizer.DecodeIds(ids)\n\n def _encode_tf(self, s):\n \"\"\"Encode a 
tf.Scalar string to a tf.Tensor.\n\n This will be necessary for on-the-fly tokenization.\n\n Args:\n s: a tf.Scalar with dtype tf.string\n Returns:\n a 1d tf.Tensor with dtype tf.int32\n \"\"\"\n return self.tf_tokenizer.tokenize(s)\n\n def _decode_tf(self, ids):\n \"\"\"Decode in TensorFlow.\n\n Args:\n ids: a 1d tf.Tensor with dtype tf.int32\n Returns:\n a tf Scalar with dtype tf.string\n \"\"\"\n return self.tf_tokenizer.detokenize(ids)\n\n def __eq__(self, other):\n if not isinstance(other, SentencePieceVocabulary):\n return False\n try:\n their_md5 = hashlib.md5(other.sp_model).hexdigest()\n # If other has no sp_model attribute, we can't test for equality\n except AttributeError:\n return False\n our_md5 = hashlib.md5(self.sp_model).hexdigest()\n return our_md5 == their_md5\n\n def __str__(self) -> str:\n return (f\"SentencePieceVocabulary(file={self._sentencepiece_model_file}, \"\n f\"extra_ids={self.extra_ids}, \"\n f\"spm_md5={hashlib.md5(self.sp_model).hexdigest()})\")\n\n\nclass ByteVocabulary(Vocabulary):\n \"\"\"Byte-level vocabulary.\n\n Encode/decode text directly to 256 \"byte IDs\" using UTF-8 encoding. Three\n special IDs are reserved (0=padding, 1=EOS, 2=UNK), so our encoded byte IDs\n are +3 greater than UTF-8 byte values.\n\n This is the vocabulary used by the ByT5 models:\n https://arxiv.org/abs/2105.13626\n \"\"\"\n\n def __init__(self, extra_ids: int = 0):\n \"\"\"Create a ByteVocabulary.\n\n Optionally, specify a number of extra ids to add to the end of the\n vocabulary for use as sentinels.\n\n Args:\n extra_ids: an optional integer\n \"\"\"\n self._byte_size = 256\n # The special tokens: 0=PAD, 1=EOS,and 2=UNK\n self._num_special_tokens = 3\n super().__init__(extra_ids=extra_ids)\n\n @property\n def eos_id(self) -> Optional[int]:\n return 1\n\n @property\n def unk_id(self) -> Optional[int]:\n return 2\n\n def _convert_strings_to_ids(self, s):\n \"\"\"Convert a python string to integers based on UTF-8 encoding.\n\n Args:\n s: a string\n Returns:\n ids: a list of integers\n \"\"\"\n return list(s.encode(\"utf-8\"))\n\n def _convert_ids_to_strings(self, ids):\n \"\"\"Convert ids to a python string based on UTF-8 encoding.\n\n Args:\n ids: a list of integers\n Returns:\n s: a string\n \"\"\"\n return bytes(ids).decode(\"utf-8\", errors=\"ignore\")\n\n def _filter_non_string_ids(self, ids):\n \"\"\"Filter special token ids and extra ids if there are any.\n\n Args:\n ids: a list of integers\n Returns:\n ids: a list of integers\n \"\"\"\n lower_bound = self._num_special_tokens\n upper_bound = self._byte_size + self._num_special_tokens\n return [id for id in ids if lower_bound <= id < upper_bound]\n\n @property\n def _base_vocab_size(self):\n \"\"\"Number of ids.\n\n Returns:\n an integer, the vocabulary size\n \"\"\"\n return self._num_special_tokens + self._byte_size\n\n def _encode(self, s):\n \"\"\"Encode a python string as a list of integers.\n\n To keep the first few ids for special tokens, increase ids by the number\n of special tokens.\n\n Args:\n s: a string\n Returns:\n a list of integers (not terminated by EOS)\n \"\"\"\n ids = self._convert_strings_to_ids(s)\n return [i + self._num_special_tokens for i in ids]\n\n def _decode(self, ids):\n \"\"\"Decode a list of integers to a python string.\n\n The special tokens of PAD, EOS, and UNK will not be represented in the\n output string. This is different from the SentencePieceVocabulary, where\n UNK will show up as a '?' 
character.\n\n Args:\n ids: a list of integers (not terminated by EOS)\n Returns:\n a string\n \"\"\"\n\n ids = self._filter_non_string_ids(ids)\n ids = [i - self._num_special_tokens for i in ids]\n return self._convert_ids_to_strings(ids)\n\n def _encode_tf(self, s):\n \"\"\"Encode a tf.Scalar string to a tf.Tensor.\n\n Args:\n s: a tf.Scalar with dtype tf.string\n Returns:\n a 1d tf.Tensor with dtype tf.int32\n \"\"\"\n tf_ids = tf.io.decode_raw(s, tf.uint8) + self._num_special_tokens\n return tf.dtypes.cast(tf_ids, tf.int32)\n\n def _decode_tf(self, ids):\n \"\"\"Decode in TensorFlow.\n\n Args:\n ids: a 1d tf.Tensor with dtype tf.int32\n Returns:\n a tf Scalar with dtype tf.string\n \"\"\"\n return tf.py_function(func=self.decode, inp=[ids], Tout=tf.string)\n\n def __eq__(self, other):\n if not isinstance(other, ByteVocabulary):\n return False\n return (self.extra_ids == other.extra_ids and\n self.eos_id == other.eos_id and\n self.unk_id == other.unk_id)\n\n\nclass FullCodepointVocabulary(Vocabulary):\n \"\"\"Encodes and decodes text as codepoint sequences.\n\n This \"vocabulary\" is lexicon-free (i.e. it is static), and is an exhaustive\n representation of all codepoints. This is well-suited to encoders (especially\n with a hash-based embedding strategy) or a decoder that does not softmax over\n the whole vocabulary.\n\n A Unicode codepoint is effectively a single character. Unicode provides a\n well-defined mapping from the set of codepoint integers onto the set of all\n Unicode characters.\n \"\"\"\n # While this should generally match `sys.maxunicode`, we want to provide this\n # as a constant to avoid architecture/system-dependent array overruns. If\n # downstream preprocessors choose to use `vocab_size-1` as a sentinel ID,\n # then this will still map such characters onto the Unicode private range on\n # planes 15-16. See:\n # https://en.wikipedia.org/wiki/Unicode#Code_planes_and_blocks.\n LARGEST_CODEPOINT = 0x10ffff # Decimal: 1,114,111\n # Padding is always index zero. This means that the NULL character is\n # technically not embeddable. 
This seems fine according to all reasonable\n # interpretations of the NULL character as a past-end-of-string marker.\n PAD_CODEPOINT = 0\n # Special symbols are represented using codepoints values that are valid,\n # but designated as \"Private Use\", meaning that they will never by assigned\n # characters by the Unicode Consortium, and are thus safe for use here.\n EOS_CODEPOINT = 0xE005\n\n @property\n def eos_id(self) -> int:\n return self.EOS_CODEPOINT\n\n @property\n def pad_id(self) -> int:\n return self.PAD_CODEPOINT\n\n @property\n def unk_id(self) -> Optional[int]:\n # Because `FullCodepointVocabulary` exhaustively embeds all codepoints\n # possible in Unicode, unknown characters are not possible.\n return None\n\n @property\n def _base_vocab_size(self) -> int:\n return self.LARGEST_CODEPOINT\n\n def _encode(self, s: str) -> Sequence[int]:\n return [ord(i) for i in s]\n\n def _decode(self, ids: Sequence[int]) -> str:\n return \"\".join(chr(id_) for id_ in ids if id_ != self.EOS_CODEPOINT)\n\n def _encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n return tf.strings.unicode_decode(s, input_encoding=\"UTF-8\")\n\n def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:\n return tf.strings.unicode_encode(ids, output_encoding=\"UTF-8\")\n\n def __eq__(self, other):\n return isinstance(other, FullCodepointVocabulary)\n\n\nclass PartialCodepointVocabulary(Vocabulary):\n \"\"\"Encodes and decodes text as a fixed set of codepoints.\n\n A Unicode codepoint is effectively a single character. Unicode provides a\n well-defined mapping from the set of codepoint integers onto the set of all\n Unicode characters.\n\n Unlike `FullCodepointVocabulary`, this uses only a subset of codepoints which\n are read in from a provided file. The format of the file is as decimal\n integers, where each integer is the codepoint integer as defined by Unicode.\n These can be obtained in Python 3 by converting a single character `str` to\n an `int` using `codepoint = ord(char)`.\n\n This sort of vocabulary is especially useful for decoder vocabularies where\n one might want to control the size of the output softmax and for encoders\n that do not use a hash embedding strategy.\n \"\"\"\n\n # Padding is always index zero. This means that the NULL character is\n # technically not embeddable. 
This seems fine according to all reasonable\n # interpretations of the NULL character as a past-end-of-string marker.\n PAD_CODEPOINT = FullCodepointVocabulary.PAD_CODEPOINT\n # Special symbols are represented using codepoints values that are valid,\n # but designated as \"Private Use\", meaning that they will never by assigned\n # characters by the Unicode Consortium, and are thus safe for use here.\n EOS_CODEPOINT = FullCodepointVocabulary.EOS_CODEPOINT\n UNK_CODEPOINT = 0xE004\n\n PAD_ID = 0\n EOS_ID = 1\n UNK_ID = 2\n\n def __init__(self, codepoints: Sequence[int], extra_ids: int = 0):\n \"\"\"Format of vocab file assumes one codepoint per line.\"\"\"\n self._codepoint_to_id = {\n self.PAD_CODEPOINT: self.PAD_ID,\n self.EOS_CODEPOINT: self.EOS_ID,\n self.UNK_CODEPOINT: self.UNK_ID,\n }\n for codepoint in codepoints:\n if codepoint not in self._codepoint_to_id:\n self._codepoint_to_id[codepoint] = len(self._codepoint_to_id)\n self._id_to_codepoint = {v: k for k, v in self._codepoint_to_id.items()}\n\n self._codepoint_to_id_tf = PartialCodepointVocabulary.convert_dict_to_tf(\n self._codepoint_to_id, default_value=self.UNK_ID)\n self._id_to_codepoint_tf = PartialCodepointVocabulary.convert_dict_to_tf(\n self._id_to_codepoint, default_value=self.unk_id)\n super().__init__(extra_ids=extra_ids)\n\n @classmethod\n def create_from_file(cls, vocab_file: str, extra_ids: int = 0):\n codepoint_list = []\n with tf.io.gfile.GFile(vocab_file, \"r\") as f:\n for line in f:\n codepoint_list.append(int(line.strip()))\n return cls(codepoint_list, extra_ids)\n\n @property\n def eos_id(self) -> int:\n return self.EOS_ID\n\n @property\n def pad_id(self) -> int:\n return self.PAD_ID\n\n @property\n def unk_id(self) -> int:\n return self.UNK_ID\n\n @property\n def _base_vocab_size(self) -> int:\n return len(self._codepoint_to_id)\n\n @staticmethod\n def convert_dict_to_tf(\n d: Dict[Any, Any],\n default_value: Optional[Any] = None) -> tf.lookup.StaticHashTable:\n keys_tensor = tf.constant(list(d))\n vals_tensor = tf.constant(list(d.values()))\n return tf.lookup.StaticHashTable(\n tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor),\n default_value=default_value)\n\n def _encode(self, s: str) -> Sequence[int]:\n output_ids = []\n for c in s:\n codepoint = ord(c)\n output_ids.append(self._codepoint_to_id.get(codepoint, self.unk_id))\n return output_ids\n\n def _decode(self, ids: Sequence[int]) -> str:\n output_str = \"\"\n for id_ in ids:\n codepoint = self._id_to_codepoint.get(id_, self.UNK_CODEPOINT)\n if codepoint == self.EOS_CODEPOINT: continue\n output_str += chr(codepoint)\n return output_str\n\n def _encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n return self._codepoint_to_id_tf[tf.strings.unicode_decode(\n s, input_encoding=\"UTF-8\")]\n\n def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:\n return tf.strings.unicode_encode(\n self._id_to_codepoint_tf[ids], output_encoding=\"UTF-8\")\n\n def __eq__(self, other):\n if not isinstance(other, PartialCodepointVocabulary):\n return False\n return (self._codepoint_to_id == other._codepoint_to_id and\n self.extra_ids == other.extra_ids)\n\n\nclass BertWordPieceVocabulary(Vocabulary):\n \"\"\"Wrapper for Bert wordpiece encoder.\n\n This \"vocabulary\" wraps the tensorflow_text's BertTokenizer, which applies an\n end-to-end, text string to wordpiece tokenization.\n \"\"\"\n\n def __init__(self,\n vocab_lookup_table: str,\n suffix_indicator: str = \"##\",\n max_bytes_per_word: int = 100,\n max_chars_per_token: Optional[int] = None,\n token_out_type: 
tf.dtypes.DType = tf.dtypes.int64,\n unknown_token: str = \"[UNK]\",\n split_unknown_characters: bool = False,\n lower_case: bool = False,\n keep_whitespace: bool = False,\n normalization_form: Optional[str] = None,\n preserve_unused_token: bool = False,\n pad_id: int = 0,\n start_of_sequence_id: int = 101,\n end_of_sequence_id: int = 102):\n r\"\"\"Create a Bert WordPieceVocabulary.\n\n Args:\n vocab_lookup_table: A lookup table implementing the LookupInterface\n containing the vocabulary of subwords or a string which is the file path\n to the vocab.txt file.\n suffix_indicator: (optional) The characters prepended to a wordpiece to\n indicate that it is a suffix to another subword. Default is '##'.\n max_bytes_per_word: (optional) Max size of input token. Default is 100.\n max_chars_per_token: (optional) Max size of subwords, excluding suffix\n indicator. If known, providing this improves the efficiency of decoding\n long words.\n token_out_type: (optional) The type of the token to return. This can be\n `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.\n unknown_token: (optional) The value to use when an unknown token is found.\n Default is \"[UNK]\". If this is set to a string, and `token_out_type` is\n `tf.int64`, the `vocab_lookup_table` is used to convert the\n `unknown_token` to an integer. If this is set to `None`,\n out-of-vocabulary tokens are left as is.\n split_unknown_characters: (optional) Whether to split out single unknown\n characters as subtokens. If False (default), words containing unknown\n characters will be treated as single unknown tokens.\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead\n of stripping them away.\n normalization_form: If set to a valid value and lower_case=False, the\n input text will be normalized to `normalization_form`. 
See\n normalize_utf8() op for a list of valid values.\n preserve_unused_token: If true, text in the regex format\n `\\\\[unused\\\\d+\\\\]` will be treated as a token and thus remain preserved\n as is to be looked up in the vocabulary.\n pad_id: ID for the `[PAD]` token.\n start_of_sequence_id: ID for the `[CLS]` token.\n end_of_sequence_id: ID for the `[SEP]` token.\n \"\"\"\n self._vocab_lookup_table = vocab_lookup_table\n self._suffix_indicator = suffix_indicator\n self._max_bytes_per_word = max_bytes_per_word\n self._max_chars_per_token = max_chars_per_token\n self._token_out_type = token_out_type\n self._unknown_token = unknown_token\n self._split_unknown_characters = split_unknown_characters\n self._lower_case = lower_case\n self._keep_whitespace = keep_whitespace\n self._normalization_form = normalization_form\n self._preserve_unused_token = preserve_unused_token\n self._tokenizer = tf_text.BertTokenizer(\n vocab_lookup_table=vocab_lookup_table,\n suffix_indicator=suffix_indicator,\n max_bytes_per_word=max_bytes_per_word,\n max_chars_per_token=max_chars_per_token,\n token_out_type=token_out_type,\n unknown_token=unknown_token,\n split_unknown_characters=split_unknown_characters,\n lower_case=lower_case,\n keep_whitespace=keep_whitespace,\n normalization_form=normalization_form,\n preserve_unused_token=preserve_unused_token,\n )\n self._vocab = self._tokenizer._wordpiece_tokenizer._vocab_lookup_table\n self._pad_id = pad_id\n self._unk_id = self._vocab.lookup(tf.constant(unknown_token)).numpy()\n self._sos_id = start_of_sequence_id\n self._eos_id = end_of_sequence_id\n with tf.io.gfile.GFile(vocab_lookup_table, \"rb\") as f:\n self._wp_model = f.read()\n # We won't pass in extra_ids for Bert vocabulary.\n super().__init__()\n\n @property\n def sos_id(self) -> Optional[int]:\n return self._sos_id\n\n @property\n def eos_id(self) -> Optional[int]:\n return self._eos_id\n\n @property\n def unk_id(self) -> Optional[int]:\n return self._unk_id\n\n @property\n def pad_id(self) -> Optional[int]:\n return self._pad_id\n\n @property\n def _base_vocab_size(self):\n \"\"\"Returns the vocabulary size.\"\"\"\n return self._vocab.size().numpy()\n\n @property\n def tokenizer(self):\n \"\"\"Returns the Python tokenizer.\"\"\"\n return self._tokenizer\n\n @property\n def tf_tokenizer(self):\n \"\"\"Instantiate and return a TF tokenizer.\"\"\"\n return self._tokenizer\n\n @property\n def vocab_size(self):\n return self._base_vocab_size\n\n def _encode(self, s):\n \"\"\"Encode a python string as a list of integers.\n\n Args:\n s: a string\n Returns:\n a list of integers (not terminated by EOS)\n \"\"\"\n return self._encode_tf(s).numpy()\n\n def _decode(self, ids):\n \"\"\"Decode a list of integers to a python string.\n\n Args:\n ids: a list of integers (not terminated by EOS)\n Returns:\n a string\n \"\"\"\n ids = tf.constant(ids)\n str_text = self._decode_tf(ids)\n return str_text.numpy().decode(\"UTF-8\")\n\n def _encode_tf(self, s):\n \"\"\"Encode a tf.Scalar string to a tf.Tensor.\n\n This will be necessary for on-the-fly tokenization.\n\n Args:\n s: a tf.Scalar with dtype tf.string\n Returns:\n a 1d tf.Tensor with dtype tf.int32\n \"\"\"\n tokens = self.tokenizer.tokenize(s)\n # Convert tf.RaggedTensor to tf.Tensor\n return tf.squeeze(tokens.to_tensor())\n\n def _decode_tf(self, ids):\n \"\"\"Decode in TensorFlow.\n\n Args:\n ids: a 1d tf.Tensor with dtype tf.int32\n Returns:\n a tf Scalar with dtype tf.string\n \"\"\"\n # Convert tf.Tensor to tf.RaggedTensor\n ids = 
tf.RaggedTensor.from_tensor(tf.expand_dims(ids, axis=1))\n tokens = self.tf_tokenizer.detokenize(ids)\n # Flatten tf.RaggedTensor and convert tokens into a string\n return tf.strings.join(tokens.flat_values, \" \")\n\n def __eq__(self, other):\n try:\n their_md5 = hashlib.md5(other._wp_model).hexdigest()\n their_sos_id = other._sos_id\n except AttributeError:\n return False\n our_md5 = hashlib.md5(self._wp_model).hexdigest()\n return our_md5 == their_md5 and self._sos_id == their_sos_id\n",
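A short usage sketch for the vocabulary classes defined above (assuming the seqio package is importable as shown; ByteVocabulary is chosen because it needs no SentencePiece model file). It shows the +3 special-token offset applied by encode() and the base-class decode() behaviour of truncating at the first EOS id before detokenizing:

from seqio import vocabularies

vocab = vocabularies.ByteVocabulary()
print(vocab.vocab_size)        # 259: 256 byte ids plus PAD, EOS and UNK
print(vocab.encode("hi"))      # UTF-8 bytes [104, 105] shifted by 3 -> [107, 108]
# decode() keeps ids up to and including the first EOS (id 1); the byte decoder
# then strips the special ids, so anything after EOS never reaches the output.
print(vocab.decode([107, 108, 1, 110, 111]))  # 'hi'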
"# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"SeqIO test utilities.\"\"\"\n\nimport collections\nimport copy\nimport functools\nimport os\nimport shutil\nimport sys\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Union, Tuple\n\nfrom absl import flags\nfrom absl import logging\nfrom absl.testing import absltest\nimport numpy as np\nfrom seqio import dataset_providers\nfrom seqio import evaluation\nfrom seqio import feature_converters\nfrom seqio import preprocessors\nfrom seqio import utils as dataset_utils\nfrom seqio import vocabularies\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\nTaskRegistry = dataset_providers.TaskRegistry\nMixtureRegistry = dataset_providers.MixtureRegistry\n\nmock = absltest.mock\n\nTEST_DATA_DIR = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_data\")\n\n\n# _ProxyTest is required because py2 does not allow instantiating\n# absltest.TestCase directly.\nclass _ProxyTest(absltest.TestCase):\n \"\"\"Instance of TestCase to reuse methods for testing.\"\"\"\n maxDiff = None\n\n def runTest(self):\n pass\n\n\n_pyunit_proxy = _ProxyTest()\n\n_FAKE_DATASET = {\n \"train\": [{\n \"prefix\": \"this\",\n \"suffix\": \"is a test\",\n \"2d_feature\": ((1, 2, 3),),\n \"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n }, {\n \"prefix\": \"that\",\n \"suffix\": \"was a test\",\n \"2d_feature\": ((1, 2, 3),),\n \"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n }, {\n \"prefix\": \"those\",\n \"suffix\": \"were tests\",\n \"2d_feature\": ((1, 2, 3),),\n \"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n }],\n \"validation\": [\n {\n \"idx\": 0,\n \"idxs\": (100,),\n \"id\": \"a\",\n \"ids\": (\"a1\", \"a2\"),\n \"prefix\": \"this\",\n \"suffix\": \"is a validation\",\n \"2d_feature\": ((3, 2, 1),),\n \"3d_feature\": (((6, 5, 4), (3, 2, 1)),),\n },\n {\n \"idx\": 1,\n \"idxs\": (200, 201),\n \"id\": \"b\",\n \"ids\": (\"b1\",),\n \"prefix\": \"that\",\n \"suffix\": \"was another validation\",\n \"2d_feature\": ((3, 2, 1),),\n \"3d_feature\": (((6, 5, 4), (3, 2, 1)),),\n },\n ]\n}\n\n# Text preprocessed and tokenized.\n_FAKE_TOKENIZED_DATASET = {\n \"train\": [\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 10),\n \"targets_pretokenized\": \"is a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 18),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 10),\n \"targets_pretokenized\": \"was a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 7, 6, 4),\n \"inputs_pretokenized\": \"complete: those\",\n \"targets\": (17, 4, 23, 4, 10, 6),\n \"targets_pretokenized\": \"were tests\"\n },\n ],\n \"validation\": [\n {\n \"idx\": 0, \"idxs\": (100,), \"id\": \"a\", \"ids\": (\"a1\", \"a2\"),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 3, 25, 
5, 9, 8, 21, 18, 8, 7, 22),\n \"targets_pretokenized\": \"is a validation\",\n }, {\n \"idx\": 1, \"idxs\": (200, 201), \"id\": \"b\", \"ids\": (\"b1\",),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 18),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 22, 7, 24, 20, 4, 23, 3, 25, 5, 9, 8,\n 21, 18, 8, 7, 22),\n \"targets_pretokenized\": \"was another validation\",\n }\n ]\n}\n\n# Text preprocessed and tokenized.\n# Simulates legacy cached dataset that used '_plaintext' suffix instead of\n# '_pretokenized'.\n_FAKE_PLAINTEXT_TOKENIZED_DATASET = {\n \"train\": [\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 8, 6),\n \"inputs_plaintext\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 10),\n \"targets_plaintext\": \"is a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 18),\n \"inputs_plaintext\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 10),\n \"targets_plaintext\": \"was a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 16, 12, 11, 7, 6, 4),\n \"inputs_plaintext\": \"complete: those\",\n \"targets\": (17, 4, 23, 4, 10, 6),\n \"targets_plaintext\": \"were tests\"\n },\n ],\n}\n\n# Text preprocessed and tokenized.\n_FAKE_TOKEN_PREPROCESSED_DATASET = {\n \"train\": [\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 10),\n \"targets_pretokenized\": \"is a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 50),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 10),\n \"targets_pretokenized\": \"was a test\"\n }, {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 7, 6, 4),\n \"inputs_pretokenized\": \"complete: those\",\n \"targets\": (17, 4, 23, 4, 10, 6),\n \"targets_pretokenized\": \"were tests\"\n },\n ],\n \"validation\": [\n {\n \"idx\": 0, \"idxs\": (100,), \"id\": \"a\", \"ids\": (\"a1\", \"a2\"),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 3, 25, 5, 9, 8, 21, 18, 8, 7, 22),\n \"targets_pretokenized\": \"is a validation\",\n }, {\n \"idx\": 1, \"idxs\": (200, 201), \"id\": \"b\", \"ids\": (\"b1\",),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 50),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 22, 7, 24, 20, 4, 23, 3, 25, 5, 9, 8,\n 21, 18, 8, 7, 22),\n \"targets_pretokenized\": \"was another validation\",\n }\n ]\n}\n\n_FAKE_TOKEN_PREPROCESSED_NDFEATURES_DATASET = {\n \"train\": [\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 10),\n \"targets_pretokenized\": \"is a test\",\n \"2d_feature\": ((1, 2, 3),),\n \"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n \"2d_feature_pretokenized\": ((1, 2, 3),),\n \"3d_feature_pretokenized\": (((1, 2, 3), (4, 5, 6)),),\n },\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 50),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 10),\n \"targets_pretokenized\": \"was a test\",\n \"2d_feature\": ((1, 2, 3),),\n \"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n \"2d_feature_pretokenized\": ((1, 2, 3),),\n \"3d_feature_pretokenized\": (((1, 2, 3), (4, 5, 6)),),\n },\n {\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 7, 6, 4),\n \"inputs_pretokenized\": \"complete: those\",\n \"targets\": (17, 4, 23, 4, 10, 6),\n \"targets_pretokenized\": \"were tests\",\n \"2d_feature\": ((1, 2, 3),),\n 
\"3d_feature\": (((1, 2, 3), (4, 5, 6)),),\n \"2d_feature_pretokenized\": ((1, 2, 3),),\n \"3d_feature_pretokenized\": (((1, 2, 3), (4, 5, 6)),),\n },\n ],\n \"validation\": [{\n \"idx\": 0,\n \"idxs\": (100,),\n \"id\": \"a\",\n \"ids\": (\"a1\", \"a2\"),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 8, 6),\n \"inputs_pretokenized\": \"complete: this\",\n \"targets\": (3, 8, 6, 3, 5, 3, 25, 5, 9, 8, 21, 18, 8, 7, 22),\n \"targets_pretokenized\": \"is a validation\",\n \"2d_feature\": ((3, 2, 1),),\n \"3d_feature\": (((6, 5, 4), (3, 2, 1)),),\n \"2d_feature_pretokenized\": ((3, 2, 1),),\n \"3d_feature_pretokenized\": (((6, 5, 4), (3, 2, 1)),),\n }, {\n \"idx\": 1,\n \"idxs\": (200, 201),\n \"id\": \"b\",\n \"ids\": (\"b1\",),\n \"inputs\": (3, 13, 7, 14, 15, 9, 4, 50, 12, 11, 50),\n \"inputs_pretokenized\": \"complete: that\",\n \"targets\": (17, 5, 6, 3, 5, 22, 7, 24, 20, 4, 23, 3, 25, 5, 9, 8, 21,\n 18, 8, 7, 22),\n \"targets_pretokenized\": \"was another validation\",\n \"2d_feature\": ((3, 2, 1),),\n \"3d_feature\": (((6, 5, 4), (3, 2, 1)),),\n \"2d_feature_pretokenized\": ((3, 2, 1),),\n \"3d_feature_pretokenized\": (((6, 5, 4), (3, 2, 1)),),\n }]\n}\n\n_FAKE_DATASETS = {\n \"input\":\n _FAKE_DATASET,\n \"tokenized\":\n _FAKE_TOKENIZED_DATASET,\n \"token_preprocessed\":\n _FAKE_TOKEN_PREPROCESSED_DATASET,\n \"token_preprocessed_ndfeatures\":\n _FAKE_TOKEN_PREPROCESSED_NDFEATURES_DATASET,\n}\n\n_DEFAULT_SEQUENCE_LENGTH = {\"inputs\": 13, \"targets\": 13}\n\n\ndef get_fake_dataset(split,\n shuffle_files=False,\n seed=None,\n shard_info=None,\n ndfeatures=False):\n \"\"\"Returns a tf.data.Dataset with fake data.\"\"\"\n del shuffle_files # Unused, to be compatible with TFDS API.\n del seed\n\n output_types = {\"prefix\": tf.string, \"suffix\": tf.string}\n if split == \"validation\":\n output_types.update(\n {\"idx\": tf.int64, \"idxs\": tf.int32,\n \"id\": tf.string, \"ids\": tf.string})\n output_shapes = {k: [] for k in output_types}\n if split == \"validation\":\n output_shapes.update({\"idxs\": [None], \"ids\": [None]})\n\n if ndfeatures:\n # If we are using ndfeatures fake dataset add the info.\n output_types.update({\"2d_feature\": tf.int32, \"3d_feature\": tf.int32})\n output_shapes.update({\"2d_feature\": [None, 3], \"3d_feature\": [None, 2, 3]})\n\n # Keep only defined features.\n examples = list(\n map(lambda ex: {k: ex[k] for k in output_types}, _FAKE_DATASET[split]))\n\n ds = tf.data.Dataset.from_generator(lambda: examples, output_types,\n output_shapes)\n if shard_info:\n ds = ds.shard(num_shards=shard_info.num_shards, index=shard_info.index)\n return ds\n\n\ndef _get_comparable_examples_from_ds(ds):\n \"\"\"Puts dataset into format that allows examples to be compared in Py2/3.\"\"\"\n examples = []\n\n def _to_tuple(v):\n if isinstance(v, list):\n return tuple(_to_tuple(i) for i in v)\n else:\n return v\n def _clean_value(v):\n if isinstance(v, bytes):\n return tf.compat.as_text(v)\n if isinstance(v, np.ndarray):\n if isinstance(v[0], bytes):\n return tuple(tf.compat.as_text(s) for s in v)\n return _to_tuple(v.tolist())\n return v\n\n for ex in tfds.as_numpy(ds):\n examples.append(\n tuple((k, _clean_value(v)) for k, v in sorted(ex.items())))\n return examples\n\n\ndef _dump_examples_to_tfrecord(path, examples):\n \"\"\"Writes list of example dicts to a TFRecord file of tf.Example protos.\"\"\"\n logging.info(\"Writing examples to TFRecord: %s\", path)\n with tf.io.TFRecordWriter(path) as writer:\n for ex in examples:\n 
writer.write(dataset_utils.dict_to_tfexample(ex).SerializeToString())\n\n\ndef _dump_examples_to_tsv(path, examples, field_names=(\"prefix\", \"suffix\")):\n \"\"\"Writes list of example dicts to a TSV.\"\"\"\n logging.info(\"Writing examples to TSV: %s\", path)\n with tf.io.gfile.GFile(path, \"w\") as writer:\n writer.write(\"\\t\".join(field_names) + \"\\n\")\n for ex in examples:\n writer.write(\"\\t\".join([ex[field] for field in field_names]) + \"\\n\")\n\n\ndef _dump_fake_dataset(path, fake_examples, shard_sizes, dump_fn):\n \"\"\"Dumps the fake dataset split to sharded TFRecord file.\"\"\"\n offsets = np.cumsum([0] + shard_sizes)\n for i in range(len(offsets) - 1):\n start, end = offsets[i:i+2]\n shard_path = \"%s-%05d-of-%05d\" % (path, i, len(shard_sizes))\n dump_fn(shard_path, fake_examples[start:end])\n\n\ndef _maybe_as_bytes(v):\n if isinstance(v, list):\n return [_maybe_as_bytes(x) for x in v]\n if isinstance(v, str):\n return tf.compat.as_bytes(v)\n return v\n\n\ndef _maybe_as_text(v):\n if isinstance(v, list):\n return [_maybe_as_text(x) for x in v]\n if isinstance(v, bytes):\n return tf.compat.as_text(v)\n return v\n\n\ndef dataset_as_text(ds):\n for ex in tfds.as_numpy(ds):\n yield {k: _maybe_as_text(v) for k, v in ex.items()}\n\n\ndef assert_dataset(\n dataset: tf.data.Dataset,\n expected: Union[Mapping[str, Any], Sequence[Mapping[str, Any]]],\n expected_dtypes: Optional[Mapping[str, tf.DType]] = None):\n \"\"\"Tests whether the entire dataset == expected or [expected].\n\n Args:\n dataset: a tf.data dataset\n expected: either a single example, or a list of examples. Each example is a\n dictionary.\n expected_dtypes: an optional mapping from feature key to expected dtype.\n \"\"\"\n\n if not isinstance(expected, list):\n expected = [expected]\n actual = list(tfds.as_numpy(dataset))\n _pyunit_proxy.assertEqual(len(actual), len(expected))\n\n def _compare_dict(actual_dict, expected_dict):\n _pyunit_proxy.assertEqual(\n set(actual_dict.keys()), set(expected_dict.keys()))\n for key, actual_value in actual_dict.items():\n if isinstance(actual_value, dict):\n _compare_dict(actual_value, expected_dict[key])\n continue\n if (isinstance(actual_value, tf.RaggedTensor) or\n isinstance(actual_value, tf.compat.v1.ragged.RaggedTensorValue)):\n actual_value = actual_value.to_list()\n np.testing.assert_array_equal(\n actual_value, _maybe_as_bytes(expected_dict[key]), key)\n\n for actual_ex, expected_ex in zip(actual, expected):\n _compare_dict(actual_ex, expected_ex)\n\n if expected_dtypes:\n actual_dtypes = {k: dataset.element_spec[k].dtype for k in expected_dtypes}\n _pyunit_proxy.assertDictEqual(expected_dtypes, actual_dtypes)\n\n\ndef assert_datasets_eq(dataset1: tf.data.Dataset, dataset2: tf.data.Dataset):\n \"\"\"Assert that two tfds datasets are equal.\"\"\"\n\n dataset1 = list(tfds.as_numpy(dataset1))\n dataset2 = list(tfds.as_numpy(dataset2))\n _pyunit_proxy.assertEqual(len(dataset1), len(dataset2))\n\n def _compare_dict(dataset1, dataset2):\n _pyunit_proxy.assertEqual(\n set(dataset1.keys()), set(dataset2.keys()))\n for key, value1 in dataset1.items():\n if isinstance(value1, dict):\n _compare_dict(value1, dataset2[key])\n continue\n if isinstance(value1, tf.RaggedTensor):\n value1 = value1.to_list()\n np.testing.assert_array_equal(\n value1, _maybe_as_bytes(dataset2[key]), key)\n\n for ex1, ex2 in zip(dataset1, dataset2):\n _compare_dict(ex1, ex2)\n\n\ndef assert_datasets_neq(dataset1, dataset2):\n \"\"\"Assert that two tfds datasets are unequal.\"\"\"\n\n 
_pyunit_proxy.assertRaises(AssertionError,\n assert_datasets_eq, dataset1, dataset2)\n\n\ndef _assert_compare_to_fake_dataset(ds: tf.data.Dataset,\n split: str,\n features,\n sequence_length: Optional[Mapping[str,\n int]],\n token_preprocessed: bool = False,\n ndfeatures: bool = False):\n \"\"\"Calls assertion to compare fake examples to actual dataaset.\"\"\"\n dataset = \"token_preprocessed\" if token_preprocessed else \"tokenized\"\n dataset = dataset if not ndfeatures else \"token_preprocessed_ndfeatures\"\n fake_examples = copy.deepcopy(_FAKE_DATASETS[dataset][split])\n\n for key, feat in features.items():\n for n, ex in enumerate(fake_examples):\n if sequence_length and key in sequence_length:\n fake_examples[n][key] = ex[key][:sequence_length[key] -\n int(feat.add_eos)]\n if feat.add_eos:\n fake_examples[n][key] = fake_examples[n][key] + (\n feat.vocabulary.eos_id,)\n\n expected_output_shapes = {\n \"inputs\": [None], \"targets\": [None],\n \"inputs_pretokenized\": [], \"targets_pretokenized\": []}\n expected_output_dtypes = {\n \"inputs\": tf.int32, \"targets\": tf.int32,\n \"inputs_pretokenized\": tf.string, \"targets_pretokenized\": tf.string}\n if split == \"validation\":\n expected_output_shapes.update(\n {\"id\": [], \"ids\": [None], \"idx\": [], \"idxs\": [None]})\n expected_output_dtypes.update(\n {\"id\": tf.string, \"ids\": tf.string, \"idx\": tf.int64, \"idxs\": tf.int32})\n if ndfeatures:\n # If we are using ndfeatures fake dataset add the info.\n expected_output_dtypes.update({\n \"2d_feature\": tf.int32,\n \"3d_feature\": tf.int32,\n \"2d_feature_pretokenized\": tf.int32,\n \"3d_feature_pretokenized\": tf.int32,\n })\n expected_output_shapes.update({\n \"2d_feature\": [None, 3],\n \"3d_feature\": [None, 2, 3],\n \"2d_feature_pretokenized\": [None, 3],\n \"3d_feature_pretokenized\": [None, 2, 3],\n })\n # Override with Feature dtypes.\n for k, f in features.items():\n expected_output_dtypes[k] = f.dtype\n _pyunit_proxy.assertDictEqual(\n expected_output_shapes,\n {k: v.shape.as_list() for k, v in ds.element_spec.items()})\n _pyunit_proxy.assertDictEqual(\n expected_output_dtypes,\n {k: v.dtype for k, v in ds.element_spec.items()})\n\n actual_examples = _get_comparable_examples_from_ds(ds)\n expected_examples = [\n tuple(sorted(ex.items())) for ex in fake_examples]\n _pyunit_proxy.assertCountEqual(expected_examples, actual_examples)\n\n\ndef create_default_dataset(\n x: Sequence[Mapping[str, Sequence[int]]],\n feature_names: Sequence[str] = (\"inputs\", \"targets\"),\n output_types: Optional[Mapping[str, tf.dtypes.DType]] = None,\n output_shapes: Optional[Mapping[str,\n Tuple[None]]] = None) -> tf.data.Dataset:\n \"\"\"Creates a dataset from the given sequence.\"\"\"\n if output_types is None:\n output_types = {feature_name: tf.int32 for feature_name in feature_names}\n if output_shapes is None:\n output_shapes = {feature_name: [None] for feature_name in feature_names}\n\n ds = tf.data.Dataset.from_generator(\n lambda: x, output_types=output_types, output_shapes=output_shapes)\n return ds\n\n\ndef test_text_preprocessor(dataset):\n \"\"\"Performs preprocessing on the text dataset.\"\"\"\n\n def my_fn(ex):\n res = dict(ex)\n del res[\"prefix\"]\n del res[\"suffix\"]\n res.update({\n \"inputs\": tf.strings.join([\"complete: \", ex[\"prefix\"]]),\n \"targets\": ex[\"suffix\"]\n })\n return res\n\n return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef split_tsv_preprocessor(dataset, field_names=(\"prefix\", \"suffix\")):\n \"\"\"Splits TSV into 
dictionary.\"\"\"\n\n def parse_line(line):\n return dict(zip(\n field_names,\n tf.io.decode_csv(\n line, record_defaults=[\"\"] * len(field_names),\n field_delim=\"\\t\", use_quote_delim=False)\n ))\n\n return dataset.map(\n parse_line, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef test_token_preprocessor(dataset, output_features, sequence_length):\n \"\"\"Change all occurrences of non-zero even numbered tokens in inputs to 50.\"\"\"\n del output_features\n del sequence_length\n\n def my_fn(ex):\n inputs = ex[\"inputs\"]\n res = ex.copy()\n res[\"inputs\"] = tf.where(\n tf.greater(inputs, 15),\n tf.constant(50, inputs.dtype),\n inputs)\n return res\n\n return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\n@dataset_utils.map_over_dataset(num_seeds=1)\ndef random_token_preprocessor(ex, seed):\n \"\"\"Selects a random shift to roll the tokens by for each feature.\"\"\"\n for feat in [\"inputs\", \"targets\"]:\n tokens = ex[feat]\n res = ex.copy()\n n_tokens = tf.size(tokens)\n random_shift = tf.random.stateless_uniform(\n [], maxval=n_tokens, dtype=tf.int32, seed=seed)\n res[feat] = tf.roll(tokens, shift=random_shift, axis=0)\n return res\n\n\ndef token_preprocessor_no_sequence_length(dataset, output_features):\n return test_token_preprocessor(dataset, output_features, sequence_length=None)\n\n\nclass DataInjector():\n \"\"\"Inject `per_split_data` into `task` while within the scope of this object.\n\n This context takes `per_split_data`, wraps it in a FunctionDataSource,\n and replaces the data source in `task` with it. After calling this function,\n `task`'s `get_dataset(split)` function will return `per_split_data[split]`.\n\n Attributes:\n task_name: A SeqIO task name.\n per_split_data: A string-keyed dict of string-keyed dicts. 
The top-level\n dict should be keyed by dataset splits, and the second-level dict should\n hold the dataset data.\n \"\"\"\n\n def __init__(self, task_name, per_split_data):\n self._task = dataset_providers.get_mixture_or_task(task_name)\n\n self.per_split_data = per_split_data\n self._saved_source = self._task._source\n\n def __enter__(self):\n\n def ds_fn(split, shuffle_files, seed=None):\n del shuffle_files, seed\n data = self.per_split_data[split]\n ds = tf.data.Dataset.from_tensors(data)\n return ds\n\n mock_source = dataset_providers.FunctionDataSource(\n ds_fn, splits=self.per_split_data.keys())\n self._task._source = mock_source\n self._mock_source = mock_source\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n if self._task._source == self._mock_source:\n self._task._source = self._saved_source\n else:\n raise RuntimeError(\n \"The task source was changed and not restored within the DataInjector scope.\"\n )\n\n\ndef assert_dict_values_equal(a, b):\n \"\"\"Assert that a and b contain equivalent numpy arrays.\"\"\"\n tf.nest.map_structure(np.testing.assert_equal, a, b)\n\n\ndef assert_dict_contains(expected, actual):\n \"\"\"Assert that 'expected' is a subset of the data in 'actual'.\"\"\"\n for k, v in expected.items():\n np.testing.assert_equal(actual[k], v)\n\n\ndef encode_str(task_name, s, output_feature_name=\"targets\"):\n task = dataset_providers.get_mixture_or_task(task_name)\n return task.output_features[output_feature_name].vocabulary.encode(s)\n\n\ndef create_prediction(task_name, s, output_feature_name=\"targets\"):\n task = dataset_providers.get_mixture_or_task(task_name)\n return [(0, task.output_features[output_feature_name].vocabulary.encode(s))]\n\n\ndef test_task(\n task_name: str,\n raw_data: Mapping[str, Any],\n output_feature_name: str = \"targets\",\n feature_encoder: feature_converters.FeatureConverter = (\n feature_converters.EncDecFeatureConverter(pack=False)),\n seed: Optional[int] = None) -> Tuple[Mapping[str, Any], Mapping[str, Any]]:\n \"\"\"Test the preprocessing and metrics functionality for a given task.\n\n This function injects `raw_data` into the task, then creates an Evaluator\n based on that task. It runs the task preprocessing on that raw data and\n extracts the expected value based on `output_feature_name`. Then, it\n creates an `Evaluator` object based on the `task_name` and runs `evaluate`\n using the expected value, returning both the result of the preprocessing\n and the metrics from the `evaluate` call.\n\n The expected format for `raw_data` is a nested dict of the form\n {'split_name': {'data_key': data}}.\n\n Note that testing metrics that use score_outputs from this API is currently\n unsupported.\n\n Args:\n task_name: A SeqIO task name.\n raw_data: A string-keyed dict of string-keyed dicts. The top-level dict\n should be keyed by dataset splits, and the second-level dict should hold\n the dataset data.\n output_feature_name: A string key for the output feature. Used to extract\n the expected target from the preprocessing output.\n feature_encoder: An optional feature encoder object. 
Defaults to\n EncDecFeatureEncoder.\n seed: optional seeed used for deterministic Task preprocessing.\n Specifically, this seed is passed to the Task to be used in\n map_seed_manager() wrappers around preprocessor functions.\n\n Returns:\n A tuple (preprocessing_output, metrics), where `preprocessing_output`\n is the result of running the tasks' preprocessing code on `raw_data` and\n `metrics` is a mapping from task name to computed metrics.\n \"\"\"\n output = test_preprocessing_single(task_name, raw_data, seed=seed)\n\n eval_output = test_postprocessing(\n task_name,\n raw_data,\n predict_output=output[output_feature_name],\n feature_encoder=feature_encoder)\n return output, eval_output\n\n\ndef test_preprocessing(\n task_name: str,\n raw_data: Mapping[str, Any],\n seed: Optional[int] = None) -> Iterator[Mapping[str, Any]]:\n \"\"\"Test task preprocessing, returning iterator of the generated dataset.\n\n This function injects `raw_data` into `task` and runs the preprocessing\n routines from `task`, returning the output of\n `task.get_dataset().as_numpy_iterator()`.\n\n Args:\n task_name: A SeqIO task name.\n raw_data: A string-keyed dict of string-keyed dicts. The top-level dict\n should be keyed by dataset splits, and the second-level dict should hold\n the dataset data.\n seed: optional seeed used for deterministic Task preprocessing.\n Specifically, this seed is passed to the Task to be used in\n map_seed_manager() wrappers around preprocessor functions.\n\n Returns:\n Iterator with the result of running the tasks' preprocessing code on\n `raw_data`.\n \"\"\"\n if len(raw_data) > 1:\n raise ValueError(\"test_preprocessing supports a single split in raw_data.\")\n\n with DataInjector(task_name, raw_data):\n split = list(raw_data.keys())[0]\n task = dataset_providers.get_mixture_or_task(task_name)\n iterator = task.get_dataset(\n sequence_length=None, split=split, shuffle=False,\n seed=seed).as_numpy_iterator()\n return iterator\n\n\ndef test_preprocessing_single(task_name: str,\n raw_data: Mapping[str, Any],\n seed: Optional[int] = None) -> Mapping[str, Any]:\n \"\"\"Test task preprocessing, where a single item is expected to be generated.\n\n This is similar to test_preprocessing, but returns a single generated item.\n This also asserts that no more than a single item is generated during\n preprocessing.\n\n This function injects `raw_data` into `task` and runs the preprocessing\n routines from `task`, returning the output of\n `next(task.get_dataset().as_numpy_iterator())`.\n\n Args:\n task_name: A SeqIO task name.\n raw_data: A string-keyed dict of string-keyed dicts. 
The top-level dict\n should be keyed by dataset splits, and the second-level dict should hold\n the dataset data.\n seed: optional seeed used for deterministic Task preprocessing.\n Specifically, this seed is passed to the Task to be used in\n map_seed_manager() wrappers around preprocessor functions.\n\n Returns:\n The result of running the tasks' preprocessing code on `raw_data`.\n \"\"\"\n iterator = test_preprocessing(task_name, raw_data, seed=seed)\n item = next(iterator)\n # Verify that we've reached the end of the generator.\n _pyunit_proxy.assertIsNone(\n next(iterator, None),\n msg=\"Expected dataset with a single item, but more were generated.\")\n return item\n\n\ndef test_postprocessing(\n task_name: str,\n raw_data: Mapping[str, Any],\n target_feature_name: str = \"targets\",\n predict_output: Optional[Sequence[str]] = None,\n score_output: Optional[Sequence[float]] = None,\n feature_encoder: feature_converters.FeatureConverter = feature_converters\n .EncDecFeatureConverter(pack=False)) -> Mapping[str, Any]:\n \"\"\"Test the postprocessing and metrics for a given task.\n\n This function injects `raw_data` into `task`, then creates an Evaluator\n based on that task. It then calls `Evaluator.evaluate()` using predict_fn and\n score_fn args that return `predict_output` and `score_output`, returning the\n output of the `evaluate()` call. (Note that, due to the fact that `evaluate`\n uses the task data, this test will also actuate the task preprocessing code.)\n\n Usually, this function will be invoked `metrics, _, _ = test_postprocessing()`\n since the second and third returned data should be the same as the passed\n predict_output and score_output.\n\n Args:\n task_name: A SeqIO task name.\n raw_data: A string-keyed dict of string-keyed dicts. The top-level dict\n should be keyed by dataset splits, and the second-level dict should hold\n the dataset data.\n target_feature_name: Feature whose vocabulary will be used to encode\n predict_output. Defaults to 'targets'.\n predict_output: A list of strings representing model predictions for the\n raw_data. Optional, only used when the task specifies metric_fns.\n score_output: A list of floats representing the score of the raw_data.\n Optional, only used when the task specifies score_metric_fns.\n feature_encoder: An optional feature encoder object. 
Defaults to\n None.\n\n Returns:\n metrics: a mapping from metric name to values.\n \"\"\"\n\n class PredictCallable(evaluation.PredictFnCallable):\n\n def __call__(self,\n dataset: Optional[tf.data.Dataset] = None,\n model_feature_lengths: Optional[Mapping[str, int]] = None):\n if predict_output is None:\n return []\n task = dataset_providers.get_mixture_or_task(task_name)\n return list(\n enumerate(\n task.output_features[target_feature_name].vocabulary.encode(s)\n for s in predict_output))\n\n class ScoreCallable(evaluation.PredictFnCallable):\n\n def __call__(\n self,\n dataset: Optional[tf.data.Dataset] = None,\n model_feature_lengths: Optional[Mapping[str, int]] = None,\n ):\n if score_output is None:\n return []\n return list(enumerate(score_output))\n\n with DataInjector(task_name, raw_data):\n evaluator = evaluation.Evaluator(\n task_name, feature_converter=feature_encoder)\n\n return evaluator.evaluate(\n compute_metrics=True,\n predict_fn=PredictCallable(),\n score_fn=ScoreCallable())[0].result()[task_name]\n\n\nclass MockVocabulary(vocabularies.Vocabulary):\n \"\"\"Mocks a vocabulary object for testing.\"\"\"\n\n def __init__(self, encode_dict, vocab_size=None):\n self._encode_dict = encode_dict\n self._vocab_size = vocab_size\n\n def unk_id(self) -> Optional[int]:\n raise NotImplementedError\n\n def encode(self, s):\n return self._encode_dict[s]\n\n def encode_tf(self, s):\n res = tf.constant([-1], tf.int32)\n for k, v in self._encode_dict.items():\n if tf.equal(s, k):\n res = tf.constant(v, tf.int32)\n else:\n pass\n return res\n\n def _encode(self, s: str) -> Sequence[int]:\n raise NotImplementedError\n\n def _encode_tf(self, s: tf.Tensor) -> tf.Tensor:\n raise NotImplementedError\n\n def _decode(self, ids):\n raise NotImplementedError\n\n def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:\n raise NotImplementedError\n\n def _base_vocab_size(self) -> int:\n raise NotImplementedError\n\n @property\n def vocab_size(self):\n return self._vocab_size\n\n @property\n def eos_id(self):\n return 1\n\n\ndef sentencepiece_vocab(extra_ids=0):\n return vocabularies.SentencePieceVocabulary(\n os.path.join(TEST_DATA_DIR, \"sentencepiece\", \"sentencepiece.model\"),\n extra_ids=extra_ids)\n\n\ndef bertwordpiece_vocab(start_of_sequence_id=101):\n return vocabularies.BertWordPieceVocabulary(\n os.path.join(TEST_DATA_DIR, \"bertwordpiece\", \"vocab.txt\"),\n start_of_sequence_id=start_of_sequence_id)\n\n\ndef clear_tasks():\n TaskRegistry._REGISTRY = {} # pylint:disable=protected-access\n\n\ndef clear_mixtures():\n MixtureRegistry._REGISTRY = {} # pylint:disable=protected-access\n\n\n# pylint:disable=invalid-name\nFakeLazyTfds = collections.namedtuple(\n \"FakeLazyTfds\",\n [\"name\", \"load\", \"load_shard\", \"info\", \"files\", \"size\"])\nFakeTfdsInfo = collections.namedtuple(\"FakeTfdsInfo\", [\"splits\"])\n# pylint:enable=invalid-name\n\n\nclass FakeTaskTest(absltest.TestCase):\n \"\"\"TestCase that sets up fake cached and uncached tasks.\"\"\"\n\n DEFAULT_PREPROCESSORS = (\n test_text_preprocessor,\n preprocessors.tokenize,\n dataset_providers.CacheDatasetPlaceholder(),\n preprocessors.append_eos_after_trim\n )\n\n DEFAULT_OUTPUT_FEATURES = {\n \"inputs\": dataset_providers.Feature(sentencepiece_vocab()),\n \"targets\": dataset_providers.Feature(sentencepiece_vocab())\n }\n\n def add_task(\n self,\n name,\n source,\n preprocessors=DEFAULT_PREPROCESSORS, # pylint:disable=redefined-outer-name\n output_features=None,\n **kwargs):\n\n if not output_features:\n output_features = {\n 
\"inputs\": dataset_providers.Feature(sentencepiece_vocab()),\n \"targets\": dataset_providers.Feature(sentencepiece_vocab())\n }\n\n return TaskRegistry.add(\n name,\n source=source,\n preprocessors=preprocessors,\n output_features=output_features,\n **kwargs)\n\n def get_tempdir(self):\n try:\n flags.FLAGS.test_tmpdir\n except flags.UnparsedFlagAccessError:\n # Need to initialize flags when running `pytest`.\n flags.FLAGS(sys.argv)\n return self.create_tempdir().full_path\n\n def setUp(self):\n super().setUp()\n self.maxDiff = None # pylint:disable=invalid-name\n\n # Set up data directory.\n self.test_tmpdir = self.get_tempdir()\n self.test_data_dir = os.path.join(self.test_tmpdir, \"test_data\")\n shutil.copytree(TEST_DATA_DIR, self.test_data_dir)\n for root, dirs, _ in os.walk(self.test_data_dir):\n for d in dirs + [\"\"]:\n os.chmod(os.path.join(root, d), 0o777)\n\n self._prepare_sources_and_tasks()\n\n def _prepare_sources_and_tasks(self):\n clear_tasks()\n clear_mixtures()\n # Prepare TfdsSource\n # Note we don't use mock.Mock since they fail to pickle.\n fake_tfds_paths = {\n \"train\": [\n { # pylint:disable=g-complex-comprehension\n \"filename\": \"train.tfrecord-%05d-of-00002\" % i,\n \"skip\": 0,\n \"take\": -1\n }\n for i in range(2)],\n \"validation\": [\n {\n \"filename\": \"validation.tfrecord-00000-of-00001\",\n \"skip\": 0,\n \"take\": -1\n }],\n }\n def _load_shard(shard_instruction, shuffle_files, seed):\n del shuffle_files\n del seed\n fname = shard_instruction[\"filename\"]\n if \"train\" in fname:\n ds = get_fake_dataset(\"train\")\n if fname.endswith(\"00000-of-00002\"):\n return ds.take(2)\n else:\n return ds.skip(2)\n else:\n return get_fake_dataset(\"validation\")\n\n fake_tfds = FakeLazyTfds(\n name=\"fake:0.0.0\",\n load=get_fake_dataset,\n load_shard=_load_shard,\n info=FakeTfdsInfo(splits={\"train\": None, \"validation\": None}),\n files=fake_tfds_paths.get,\n size=lambda x: 30 if x == \"train\" else 10)\n self._tfds_patcher = mock.patch(\n \"seqio.utils.LazyTfdsLoader\",\n new=mock.Mock(return_value=fake_tfds))\n self._tfds_patcher.start()\n\n # Set up data directory.\n self.test_tmpdir = self.get_tempdir()\n self.test_data_dir = os.path.join(self.test_tmpdir, \"test_data\")\n shutil.copytree(TEST_DATA_DIR, self.test_data_dir)\n for root, dirs, _ in os.walk(self.test_data_dir):\n for d in dirs + [\"\"]:\n os.chmod(os.path.join(root, d), 0o777)\n\n # Prepare uncached TFDS task.\n self.tfds_source = dataset_providers.TfdsDataSource(\n tfds_name=\"fake:0.0.0\",\n splits=(\"train\", \"validation\")\n )\n self.add_task(\"tfds_task\", source=self.tfds_source)\n\n # Add task with prefix\n self.add_task(\"t5:tfds_task\", source=self.tfds_source)\n\n # Prepare TextLineSource.\n _dump_fake_dataset(\n os.path.join(self.test_data_dir, \"train.tsv\"),\n _FAKE_DATASET[\"train\"], [2, 1], _dump_examples_to_tsv)\n self.text_line_source = dataset_providers.TextLineDataSource(\n split_to_filepattern={\n \"train\": os.path.join(self.test_data_dir, \"train.tsv*\"),\n },\n skip_header_lines=1,\n )\n self.add_task(\n \"text_line_task\",\n source=self.text_line_source,\n preprocessors=(split_tsv_preprocessor,) + self.DEFAULT_PREPROCESSORS)\n\n # Prepare TFExampleSource.\n _dump_fake_dataset(\n os.path.join(self.test_data_dir, \"train.tfrecord\"),\n _FAKE_DATASET[\"train\"], [2, 1], _dump_examples_to_tfrecord)\n self.tf_example_source = dataset_providers.TFExampleDataSource(\n split_to_filepattern={\n \"train\": os.path.join(self.test_data_dir, \"train.tfrecord*\"),\n },\n 
feature_description={\n \"prefix\": tf.io.FixedLenFeature([], tf.string),\n \"suffix\": tf.io.FixedLenFeature([], tf.string),\n }\n )\n self.add_task(\"tf_example_task\", source=self.tf_example_source)\n\n # Prepare ProtoDataSource.\n def decode_tf_example_fn(example):\n feature_description = {\n \"prefix\": tf.io.FixedLenFeature([], tf.string),\n \"suffix\": tf.io.FixedLenFeature([], tf.string),\n }\n return tf.io.parse_single_example(example, feature_description)\n\n self.proto_source = dataset_providers.ProtoDataSource(\n split_to_filepattern={\n \"train\": os.path.join(self.test_data_dir, \"train.tfrecord*\"),\n },\n decode_proto_fn=decode_tf_example_fn,\n )\n self.add_task(\"proto_task\", source=self.proto_source)\n\n # Prepare FunctionDataSource\n self.function_source = dataset_providers.FunctionDataSource(\n dataset_fn=get_fake_dataset,\n splits=[\"train\", \"validation\"]\n )\n self.add_task(\"function_task\", source=self.function_source)\n\n # Prepare Task that is tokenized and preprocessed before caching.\n self.add_task(\n \"fully_processed_precache\",\n source=self.function_source,\n preprocessors=(\n test_text_preprocessor,\n preprocessors.tokenize,\n token_preprocessor_no_sequence_length,\n dataset_providers.CacheDatasetPlaceholder(),\n )\n )\n\n # Prepare Task that is tokenized after caching.\n self.add_task(\n \"tokenized_postcache\",\n source=self.function_source,\n preprocessors=(\n test_text_preprocessor,\n dataset_providers.CacheDatasetPlaceholder(),\n preprocessors.tokenize,\n token_preprocessor_no_sequence_length,\n )\n )\n\n # Prepare Task with randomization.\n self.random_task = self.add_task(\n \"random_task\",\n source=self.function_source,\n preprocessors=(\n test_text_preprocessor,\n dataset_providers.CacheDatasetPlaceholder(),\n preprocessors.tokenize,\n random_token_preprocessor,\n )\n )\n\n self.uncached_task = self.add_task(\"uncached_task\", source=self.tfds_source)\n\n # Prepare cached task.\n dataset_utils.set_global_cache_dirs([self.test_data_dir])\n self.cached_task_dir = os.path.join(self.test_data_dir, \"cached_task\")\n _dump_fake_dataset(\n os.path.join(self.cached_task_dir, \"train.tfrecord\"),\n _FAKE_TOKENIZED_DATASET[\"train\"], [2, 1],\n _dump_examples_to_tfrecord)\n _dump_fake_dataset(\n os.path.join(self.cached_task_dir, \"validation.tfrecord\"),\n _FAKE_TOKENIZED_DATASET[\"validation\"], [2],\n _dump_examples_to_tfrecord)\n self.cached_task = self.add_task(\"cached_task\", source=self.tfds_source)\n\n # Prepare cached plaintext task.\n _dump_fake_dataset(\n os.path.join(\n self.test_data_dir, \"cached_plaintext_task\", \"train.tfrecord\"),\n _FAKE_PLAINTEXT_TOKENIZED_DATASET[\"train\"], [2, 1],\n _dump_examples_to_tfrecord)\n self.cached_plaintext_task = self.add_task(\n \"cached_plaintext_task\",\n source=self.tfds_source,\n preprocessors=self.DEFAULT_PREPROCESSORS + (test_token_preprocessor,))\n\n def tearDown(self):\n super().tearDown()\n self._tfds_patcher.stop()\n tf.random.set_seed(None)\n\n def verify_task_matches_fake_datasets( # pylint:disable=dangerous-default-value\n self,\n task_name,\n use_cached,\n token_preprocessed=False,\n ndfeatures=False,\n splits=(\"train\", \"validation\"),\n sequence_length=_DEFAULT_SEQUENCE_LENGTH,\n num_shards=None):\n \"\"\"Assert all splits for both tokenized datasets are correct.\"\"\"\n task = TaskRegistry.get(task_name)\n for split in splits:\n get_dataset = functools.partial(\n task.get_dataset, sequence_length, split, use_cached=use_cached,\n shuffle=False)\n if num_shards:\n ds = 
get_dataset(shard_info=dataset_providers.ShardInfo(0, num_shards))\n for i in range(1, num_shards):\n ds = ds.concatenate(\n get_dataset(\n shard_info=dataset_providers.ShardInfo(i, num_shards)))\n else:\n ds = get_dataset()\n _assert_compare_to_fake_dataset(\n ds,\n split,\n task.output_features,\n sequence_length,\n token_preprocessed=token_preprocessed,\n ndfeatures=ndfeatures,\n )\n\n\nclass FakeMixtureTest(FakeTaskTest):\n \"\"\"TestCase that sets up fake cached and uncached tasks.\"\"\"\n\n def setUp(self):\n super().setUp()\n clear_mixtures()\n MixtureRegistry.add(\n \"uncached_mixture\",\n [(\"uncached_task\", 1.0)],\n )\n self.uncached_mixture = MixtureRegistry.get(\n \"uncached_mixture\")\n MixtureRegistry.add(\n \"cached_mixture\",\n [(\"cached_task\", 1.0)],\n )\n self.cached_mixture = MixtureRegistry.get(\"cached_mixture\")\n MixtureRegistry.add(\n \"uncached_random_mixture\",\n [(\"random_task\", 1.0)],\n )\n self.uncached_mixture = MixtureRegistry.get(\n \"uncached_random_mixture\")\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"tensorflow.summary.histogram",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.summary.image",
"tensorflow.cast",
"tensorflow.summary.audio",
"tensorflow.summary.write",
"tensorflow.compat.v1.Graph",
"tensorflow.io.gfile.rename",
"numpy.array",
"tensorflow.summary.scalar",
"tensorflow.compat.v1.Summary",
"tensorflow.summary.create_file_writer"
],
[
"tensorflow.compat.v2.data.Dataset.from_generator",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.compat.v1.enable_eager_execution"
],
[
"tensorflow.compat.v2.lookup.KeyValueTensorInitializer",
"tensorflow.compat.v2.py_function",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.less",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.dtypes.cast",
"tensorflow.compat.v2.strings.join",
"tensorflow.compat.v2.io.decode_raw",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.strings.unicode_decode",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.strings.unicode_encode"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.io.parse_single_example",
"tensorflow.compat.v2.compat.as_text",
"numpy.cumsum",
"tensorflow.compat.v2.io.TFRecordWriter",
"numpy.testing.assert_equal",
"tensorflow.compat.v2.io.FixedLenFeature",
"tensorflow.compat.v2.random.stateless_uniform",
"tensorflow.compat.v2.data.Dataset.from_tensors",
"tensorflow.compat.v2.compat.as_bytes",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.data.Dataset.from_generator",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.strings.join",
"tensorflow.compat.v2.roll",
"tensorflow.compat.v2.random.set_seed",
"tensorflow.compat.v2.greater"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TTrapper/tensorflow | [
"64f0ebd33a7c868da3c8f1ea15adf358c578f227"
] | [
"tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.contrib.data.python.ops import dataset_ops\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.platform import test\n\n\nclass DatasetConstructorTest(test.TestCase):\n\n def testTensorDataset(self):\n \"\"\"Test an dataset that represents a single tuple of tensors.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n\n iterator = (dataset_ops.Dataset.from_tensors(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape for c in components],\n [t.shape for t in get_next])\n\n with self.test_session() as sess:\n sess.run(init_op)\n results = sess.run(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testTensorSliceDataset(self):\n \"\"\"Test an dataset that represents the slices from a tuple of tensors.\"\"\"\n components = (\n np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(\n np.array([[12], [13], [14], [15]]), 22),\n np.array([37.0, 38.0, 39.0, 40.0])\n )\n\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([c.shape[1:] for c in components],\n [t.shape for t in get_next])\n\n with self.test_session() as sess:\n sess.run(init_op)\n for i in range(4):\n results = sess.run(get_next)\n for component, result_component in zip(components, results):\n self.assertAllEqual(component[i], result_component)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testTensorSliceDatasetWithDict(self):\n components = {\"foo\": [1, 2, 3], \"bar\": [[4.0], [5.0], [6.0]]}\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual(dtypes.int32, iterator.output_types[\"foo\"])\n self.assertEqual(dtypes.float32, iterator.output_types[\"bar\"])\n 
self.assertEqual((), iterator.output_shapes[\"foo\"])\n self.assertEqual((1,), iterator.output_shapes[\"bar\"])\n\n with self.test_session() as sess:\n sess.run(init_op)\n for i in range(3):\n results = sess.run(get_next)\n self.assertEqual(components[\"foo\"][i], results[\"foo\"])\n self.assertEqual(components[\"bar\"][i], results[\"bar\"])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSparseTensorSliceDataset(self):\n \"\"\"Test a dataset based on slices of a `tf.SparseTensor`.\"\"\"\n st = array_ops.sparse_placeholder(dtypes.float64)\n iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = sparse_tensor.SparseTensor(*iterator.get_next())\n\n with self.test_session() as sess:\n slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]\n\n # Test with sparse tensor in the appropriate order.\n indices = np.array(\n [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])\n values = np.array([val for s in slices for val in s])\n dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])\n sparse_feed = sparse_tensor.SparseTensorValue(indices, values,\n dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n for i, s in enumerate(slices):\n results = sess.run(get_next)\n self.assertAllEqual(s, results.values)\n expected_indices = np.array(\n [[j] for j in range(len(slices[i]))]).reshape([-1, 1])\n self.assertAllEqual(expected_indices, results.indices)\n self.assertAllEqual(dense_shape[1:], results.dense_shape)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Test with sparse tensor in the reverse order, which is not\n # currently supported.\n reverse_order_indices = indices[::-1, :]\n reverse_order_values = values[::-1]\n sparse_feed = sparse_tensor.SparseTensorValue(\n reverse_order_indices, reverse_order_values, dense_shape)\n with self.assertRaises(errors.UnimplementedError):\n sess.run(init_op, feed_dict={st: sparse_feed})\n\n # Test with an empty sparse tensor.\n empty_indices = np.empty((0, 4), dtype=np.int64)\n empty_values = np.empty((0,), dtype=np.float64)\n empty_dense_shape = [0, 4, 37, 9]\n sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,\n empty_dense_shape)\n sess.run(init_op, feed_dict={st: sparse_feed})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # pylint: disable=g-long-lambda,unnecessary-lambda\n def testNestedStructure(self):\n components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),\n np.array([8, 9, 10]))\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.shuffle(10, 10)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.repeat(-1)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.filter(lambda x, y, z: True)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.take(5)\n 
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)\n\n dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),\n (y[0], y[1])))\n )\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)\n\n dataset = dataset.batch(32)\n self.assertEquals(((dtypes.int64, dtypes.int64),\n (dtypes.float64, dtypes.float64)), dataset.output_types)\n self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),\n nest.pack_sequence_as(dataset.output_shapes, [\n s.as_list()\n for s in nest.flatten(dataset.output_shapes)\n ]))\n\n iterator = dataset.make_one_shot_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n iterator = dataset.make_initializable_iterator()\n (w, x), (y, z) = iterator.get_next()\n self.assertEquals(dtypes.int64, w.dtype)\n self.assertEquals(dtypes.int64, x.dtype)\n self.assertEquals(dtypes.float64, y.dtype)\n self.assertEquals(dtypes.float64, z.dtype)\n self.assertEquals([None, 3], w.shape.as_list())\n self.assertEquals([None, 3], x.shape.as_list())\n self.assertEquals([None, 2], y.shape.as_list())\n self.assertEquals([None, 2], z.shape.as_list())\n\n # Define a separate set of components with matching leading\n # dimension for the from-slices constructor.\n components_for_slices = (np.array([1, 2, 3]), (np.array(\n [4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)\n self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),\n dtypes.int64), dataset.output_types)\n self.assertEquals(([], ([], []), []), dataset.output_shapes)\n\n def testNestedDict(self):\n components = {\"a\": {\"aa\": 1, \"ab\": [2.0, 2.0]}, \"b\": [3, 3, 3]}\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int32, dataset.output_types[\"a\"][\"aa\"])\n self.assertEquals(dtypes.float32, dataset.output_types[\"a\"][\"ab\"])\n self.assertEquals(dtypes.int32, dataset.output_types[\"b\"])\n self.assertEquals([], dataset.output_shapes[\"a\"][\"aa\"])\n self.assertEquals([2], dataset.output_shapes[\"a\"][\"ab\"])\n self.assertEquals([3], dataset.output_shapes[\"b\"])\n\n def testNonSequenceNestedStructure(self):\n components = np.array([1, 2, 3])\n\n dataset = dataset_ops.Dataset.from_tensors(components)\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.filter(\n lambda x: math_ops.reduce_all(math_ops.equal(x, components)))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n dataset = dataset.map(lambda x: array_ops.stack([x, x]))\n self.assertEquals(dtypes.int64, 
dataset.output_types)\n self.assertEquals([2, 3], dataset.output_shapes)\n\n dataset = dataset.flat_map(\n lambda x: dataset_ops.Dataset.from_tensor_slices(x))\n self.assertEquals(dtypes.int64, dataset.output_types)\n self.assertEquals([3], dataset.output_shapes)\n\n iterator = dataset.make_one_shot_iterator()\n get_next = iterator.get_next()\n self.assertEquals(dtypes.int64, get_next.dtype)\n self.assertEquals([3], get_next.shape)\n\n def _testFromGenerator(self, generator, elem_sequence, num_repeats):\n iterator = (\n dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)\n .repeat(num_repeats)\n .prefetch(5)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n for _ in range(2): # Run twice to test reinitialization.\n sess.run(init_op)\n for _ in range(num_repeats):\n for elem in elem_sequence:\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats):\n iterator = (\n dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)\n .repeat(num_repeats)\n .prefetch(5)\n .make_one_shot_iterator())\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n for _ in range(num_repeats):\n for elem in elem_sequence:\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorUsingFunction(self):\n def generator():\n for i in range(1, 100):\n yield [i] * i\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n self._testFromGeneratorOneShot(generator, elem_sequence, 1)\n self._testFromGeneratorOneShot(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingList(self):\n generator = lambda: [[i] * i for i in range(1, 100)]\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingNdarray(self):\n generator = lambda: np.arange(100, dtype=np.int64)\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromGeneratorUsingGeneratorExpression(self):\n # NOTE(mrry): Generator *expressions* are not repeatable (or in\n # general reusable), because they eagerly evaluate the `for`\n # expression as `iter(range(1, 100))` and discard the means of\n # reconstructing `range(1, 100)`. Wrapping the generator\n # expression in a `lambda` makes it repeatable.\n generator = lambda: ([i] * i for i in range(1, 100))\n elem_sequence = list(generator())\n self._testFromGenerator(generator, elem_sequence, 1)\n self._testFromGenerator(generator, elem_sequence, 5)\n\n def testFromMultipleConcurrentGenerators(self):\n num_inner_repeats = 5\n num_outer_repeats = 100\n\n def generator():\n for i in range(1, 10):\n yield ([i] * i, [i, i ** 2, i ** 3])\n input_list = list(generator())\n\n # The interleave transformation is essentially a flat map that\n # draws from multiple input datasets concurrently (in a cyclic\n # fashion). 
By placing `Datsaet.from_generator()` inside an\n # interleave, we test its behavior when multiple iterators are\n # active at the same time; by additionally prefetching inside the\n # interleave, we create the possibility of parallel (modulo GIL)\n # invocations to several iterators created by the same dataset.\n def interleave_fn(_):\n return (dataset_ops.Dataset.from_generator(\n generator, output_types=(dtypes.int64, dtypes.int64),\n output_shapes=([None], [3]))\n .repeat(num_inner_repeats).prefetch(5))\n\n iterator = (\n dataset_ops.Dataset.range(num_outer_repeats)\n .interleave(interleave_fn, cycle_length=10,\n block_length=len(input_list))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(num_inner_repeats * num_outer_repeats):\n for elem in input_list:\n val0, val1 = sess.run(get_next)\n self.assertAllEqual(elem[0], val0)\n self.assertAllEqual(elem[1], val1)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorsRunningInParallel(self):\n num_parallel_iterators = 3\n\n # Define shared state that multiple iterator instances will access to\n # demonstrate their concurrent activity.\n lock = threading.Lock()\n condition = threading.Condition(lock)\n next_ticket = [0] # GUARDED_BY(lock)\n\n def generator():\n # NOTE(mrry): We yield one element before the barrier, because\n # the current implementation of `Dataset.interleave()` must\n # fetch one element from each incoming dataset to start the\n # prefetching.\n yield 0\n\n # Define a barrier that `num_parallel_iterators` iterators must enter\n # before any can proceed. Demonstrates that multiple iterators may be\n # active at the same time.\n condition.acquire()\n ticket = next_ticket[0]\n next_ticket[0] += 1\n if ticket == num_parallel_iterators - 1:\n # The last iterator to join the barrier notifies the others.\n condition.notify_all()\n else:\n # Wait until the last iterator enters the barrier.\n while next_ticket[0] < num_parallel_iterators:\n condition.wait()\n condition.release()\n\n yield 1\n\n # As in `testFromMultipleConcurrentGenerators()`, we use a combination of\n # `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple\n # iterators to be active concurrently.\n def interleave_fn(_):\n return dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)\n\n iterator = (\n dataset_ops.Dataset.range(num_parallel_iterators)\n .interleave(\n interleave_fn, cycle_length=num_parallel_iterators, block_length=1)\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n for elem in [0, 1]:\n for _ in range(num_parallel_iterators):\n self.assertAllEqual(elem, sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorTypeError(self):\n def generator():\n yield np.array([1, 2, 3], dtype=np.int64)\n yield np.array([4, 5, 6], dtype=np.int64)\n yield \"ERROR\"\n yield np.array([7, 8, 9], dtype=np.int64)\n\n iterator = (dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[3])\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n self.assertAllEqual([1, 2, 3], sess.run(get_next))\n self.assertAllEqual([4, 5, 6], 
sess.run(get_next))\n with self.assertRaisesOpError(r\"element of type .*int64.* was expected\"):\n sess.run(get_next)\n self.assertAllEqual([7, 8, 9], sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testFromGeneratorShapeError(self):\n def generator():\n yield np.array([1, 2, 3], dtype=np.int64)\n yield np.array([4, 5, 6], dtype=np.int64)\n yield np.array([7, 8, 9, 10], dtype=np.int64)\n yield np.array([11, 12, 13], dtype=np.int64)\n\n iterator = (dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int64, output_shapes=[3])\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n self.assertAllEqual([1, 2, 3], sess.run(get_next))\n self.assertAllEqual([4, 5, 6], sess.run(get_next))\n with self.assertRaisesOpError(r\"element of shape \\(3,\\) was expected\"):\n sess.run(get_next)\n self.assertAllEqual([11, 12, 13], sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSplitPipelineFailsWithPlacementError(self):\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n\n dataset = dataset_ops.Dataset.from_tensors(0)\n\n # Define a pipeline that attempts to use variables on two\n # different devices.\n #\n # Initialize the variables before creating to iterator, to avoid the\n # placement algorithm overriding the DT_RESOURCE colocation constraints.\n with ops.device(\"/cpu:0\"):\n var_0 = resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_0.read_value())\n sess.run(var_0.initializer)\n\n with ops.device(\"/cpu:1\"):\n var_1 = resource_variable_ops.ResourceVariable(initial_value=0)\n dataset = dataset.map(lambda x: x + var_1.read_value())\n sess.run(var_1.initializer)\n\n iterator = dataset.make_initializable_iterator()\n\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n \"Trying to access resource located in device\"):\n sess.run(iterator.initializer)\n\n def testRestructureDataset(self):\n components = (array_ops.placeholder(dtypes.int32),\n (array_ops.placeholder(dtypes.int32, shape=[None]),\n array_ops.placeholder(dtypes.int32, shape=[20, 30])))\n dataset = dataset_ops.Dataset.from_tensors(components)\n\n i32 = dtypes.int32\n\n test_cases = [((i32, i32, i32), None),\n (((i32, i32), i32), None),\n ((i32, i32, i32), (None, None, None)),\n ((i32, i32, i32), ([17], [17], [20, 30]))]\n\n for new_types, new_shape_lists in test_cases:\n # pylint: disable=protected-access\n new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)\n # pylint: enable=protected-access\n self.assertEqual(new_types, new.output_types)\n if new_shape_lists is not None:\n for expected_shape_list, shape in zip(\n nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):\n if expected_shape_list is None:\n self.assertIs(None, shape.ndims)\n else:\n self.assertEqual(expected_shape_list, shape.as_list())\n\n fail_cases = [((i32, dtypes.int64, i32), None),\n ((i32, i32, i32, i32), None),\n ((i32, i32, i32), ((None, None), None)),\n ((i32, i32, i32), (None, None, None, None)),\n ((i32, i32, i32), (None, [None], [21, 30]))]\n\n for new_types, new_shape_lists in fail_cases:\n with self.assertRaises(ValueError):\n # pylint: disable=protected-access\n new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)\n # pylint: enable=protected-access\n\n\nif __name__ == 
\"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.array_ops.sparse_placeholder",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.framework.ops.device",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"numpy.arange",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_generator",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.platform.test.main",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.contrib.data.python.ops.batching._RestructuredDataset",
"tensorflow.python.data.util.nest.flatten",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.contrib.data.python.ops.dataset_ops.Dataset.from_sparse_tensor_slices",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YWJae/CS234-Reinforcement-Learning-Winter-2019 | [
"aa95a42b847a0e752b8caaa7b0bfeffb514ab7d3"
] | [
"assignment 3/pg.py"
] | [
"# -*- coding: UTF-8 -*-\n\nimport os\nimport argparse\nimport sys\nimport logging\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport scipy.signal\nimport os\nimport time\nimport inspect\nfrom utils.general import get_logger, Progbar, export_plot\nfrom config import get_config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--env_name', required=True, type=str,\n choices=['cartpole', 'pendulum', 'cheetah'])\nparser.add_argument('--baseline', dest='use_baseline', action='store_true')\nparser.add_argument('--no-baseline', dest='use_baseline', action='store_false')\nparser.set_defaults(use_baseline=True)\n\ndef build_mlp(\n mlp_input,\n output_size,\n scope,\n n_layers,\n size,\n output_activation=None):\n \"\"\"\n Build a feed forward network (multi-layer perceptron, or mlp)\n with 'n_layers' hidden layers, each of size 'size' units.\n Use tf.nn.relu nonlinearity between layers.\n Args:\n mlp_input: the input to the multi-layer perceptron\n output_size: the output layer size\n scope: the scope of the neural network\n n_layers: the number of hidden layers of the network\n size: the size of each layer:\n output_activation: the activation of output layer\n Returns:\n The tensor output of the network\n\n TODO: Implement this function. This will be similar to the linear\n model you implemented for Assignment 2.\n \"tf.layers.dense\" and \"tf.variable_scope\" may be helpful.\n\n A network with n hidden layers has n 'linear transform + nonlinearity'\n operations followed by the final linear transform for the output layer\n (followed by the output activation, if it is not None).\n \"\"\"\n\n #######################################################\n ######### YOUR CODE HERE - 7-20 lines. ############\n with tf.variable_scope(scope) as _:\n x = mlp_input\n for _ in range(n_layers):\n x = tf.keras.layers.Dense(size, activation=tf.nn.relu)(x)\n output = tf.keras.layers.Dense(output_size, activation=output_activation)(x)\n return output # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n\nclass PG(object):\n \"\"\"\n Abstract Class for implementing a Policy Gradient Based Algorithm\n \"\"\"\n def __init__(self, env, config, logger=None):\n \"\"\"\n Initialize Policy Gradient Class\n\n Args:\n env: an OpenAI Gym environment\n config: class with hyperparameters\n logger: logger instance from the logging module\n\n You do not need to implement anything in this function. 
However,\n you will need to use self.discrete, self.observation_dim,\n self.action_dim, and self.lr in other methods.\n\n \"\"\"\n # directory for training outputs\n if not os.path.exists(config.output_path):\n os.makedirs(config.output_path)\n\n # store hyperparameters\n self.config = config\n self.logger = logger\n if logger is None:\n self.logger = get_logger(config.log_path)\n self.env = env\n\n # discrete vs continuous action space\n self.discrete = isinstance(env.action_space, gym.spaces.Discrete)\n self.observation_dim = self.env.observation_space.shape[0]\n self.action_dim = self.env.action_space.n if self.discrete else self.env.action_space.shape[0]\n\n self.lr = self.config.learning_rate\n\n # build model\n self.build()\n\n def add_placeholders_op(self):\n \"\"\"\n Add placeholders for observation, action, and advantage:\n self.observation_placeholder, type: tf.float32\n self.action_placeholder, type: depends on the self.discrete\n self.advantage_placeholder, type: tf.float32\n\n HINT: Check self.observation_dim and self.action_dim\n HINT: In the case of continuous action space, an action will be specified by\n 'self.action_dim' float32 numbers (i.e. a vector with size 'self.action_dim')\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 8-12 lines. ############\n self.observation_placeholder = tf.placeholder(tf.float32, shape=(None, self.observation_dim), name=\"observation\") # TODO\n if self.discrete:\n # \"I don't know why their can't be (None, self.action_dim)\n self.action_placeholder = tf.placeholder(tf.int32, shape=(None,), name=\"action\") # TODO\n else:\n self.action_placeholder = tf.placeholder(tf.float32, shape=(None, self.action_dim), name=\"action\") # TODO\n\n # Define a placeholder for advantages\n self.advantage_placeholder = tf.placeholder(tf.float32, shape=(None,), name=\"advantage\") # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def build_policy_network_op(self, scope=\"policy_network\"):\n \"\"\"\n Build the policy network, construct the tensorflow operation to sample\n actions from the policy network outputs, and compute the log probabilities\n of the actions taken (for computing the loss later). These operations are\n stored in self.sampled_action and self.logprob. Must handle both settings\n of self.discrete.\n\n Args:\n scope: the scope of the neural network\n\n TODO:\n Discrete case:\n action_logits: the logits for each action\n HINT: use build_mlp, check self.config for layer_size and\n n_layers\n self.sampled_action: sample from these logits\n HINT: use tf.multinomial + tf.squeeze\n self.logprob: compute the log probabilities of the taken actions\n HINT: 1. tf.nn.sparse_softmax_cross_entropy_with_logits computes\n the *negative* log probabilities of labels, given logits.\n 2. 
taken actions are different than sampled actions!\n\n Continuous case:\n To build a policy in a continuous action space domain, we will have the\n model output the means of each action dimension, and then sample from\n a multivariate normal distribution with these means and trainable standard\n deviation.\n\n That is, the action a_t ~ N( mu(o_t), sigma)\n where mu(o_t) is the network that outputs the means for each action\n dimension, and sigma is a trainable variable for the standard deviations.\n N here is a multivariate gaussian distribution with the given parameters.\n\n action_means: the predicted means for each action dimension.\n HINT: use build_mlp, check self.config for layer_size and\n n_layers\n log_std: a trainable variable for the log standard deviations.\n HINT: think about why we use log std as the trainable variable instead of std\n HINT: use tf.get_variables\n self.sampled_actions: sample from the gaussian distribution as described above\n HINT: use tf.random_normal\n HINT: use re-parametrization to obtain N(mu, sigma) from N(0, 1)\n self.lobprob: the log probabilities of the taken actions\n HINT: use tf.contrib.distributions.MultivariateNormalDiag\n\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. ############\n\n if self.discrete:\n action_logits = build_mlp(self.observation_placeholder,\n output_size=self.action_dim,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size,\n output_activation=self.config.activation) # TODO\n self.sampled_action = tf.squeeze(tf.multinomial(action_logits, 1)) # TODO\n self.logprob = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.action_placeholder,\n logits=action_logits) # TODO\n else:\n action_means = build_mlp(self.observation_placeholder,\n output_size=self.action_dim,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size,\n output_activation=self.config.activation) # TODO\n log_std = tf.get_variable(\"log_std\", shape=[1, self.action_dim]) # TODO\n self.sampled_action = tf.random_normal((1,), mean=action_means, stddev=log_std) # TODO\n self.logprob = tf.contrib.distributions.MultivariateNormalDiag(action_means, log_std) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def add_loss_op(self):\n \"\"\"\n Compute the loss, averaged for a given batch.\n\n Recall the update for REINFORCE with advantage:\n θ = θ + α ∇_θ log π_θ(a_t|s_t) A_t\n Think about how to express this update as minimizing a\n loss (so that tensorflow will do the gradient computations\n for you).\n\n You only have to reference fields of 'self' that have already\n been set in the previous methods.\n\n \"\"\"\n\n ######################################################\n ######### YOUR CODE HERE - 1-2 lines. ############\n self.loss = tf.reduce_mean(-self.logprob * self.advantage_placeholder)# TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def add_optimizer_op(self):\n \"\"\"\n Set 'self.train_op' using AdamOptimizer\n HINT: Use self.lr, and minimize self.loss\n \"\"\"\n ######################################################\n ######### YOUR CODE HERE - 1-2 lines. ############\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss) # TODO\n #######################################################\n ######### END YOUR CODE. 
############\n\n def add_baseline_op(self, scope=\"baseline\"):\n \"\"\"\n Build the baseline network within the scope.\n\n In this function we will build the baseline network.\n Use build_mlp with the same parameters as the policy network to\n get the baseline estimate. You also have to setup a target\n placeholder and an update operation so the baseline can be trained.\n\n Args:\n scope: the scope of the baseline network\n\n TODO: Set the following fields\n self.baseline\n HINT: use build_mlp, the network is the same as policy network\n check self.config for n_layers and layer_size\n HINT: tf.squeeze might be helpful\n self.baseline_target_placeholder\n self.update_baseline_op\n HINT: first construct a loss using tf.losses.mean_squared_error.\n HINT: use AdamOptimizer with self.lr\n\n \"\"\"\n ######################################################\n ######### YOUR CODE HERE - 4-8 lines. ############\n self.baseline = build_mlp(self.observation_placeholder,\n output_size=1,\n scope=scope,\n n_layers=self.config.n_layers,\n size=self.config.layer_size) # TODO\n self.baseline_target_placeholder = tf.placeholder(tf.float32, shape=(None, )) # TODO\n loss = tf.losses.mean_squared_error(self.baseline_target_placeholder, tf.squeeze(self.baseline))\n self.update_baseline_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n\n def build(self):\n \"\"\"\n Build the model by adding all necessary variables.\n\n You don't have to change anything here - we are just calling\n all the operations you already defined above to build the tensorflow graph.\n \"\"\"\n\n # add placeholders\n self.add_placeholders_op()\n # create policy net\n self.build_policy_network_op()\n # add square loss\n self.add_loss_op()\n # add optmizer for the main networks\n self.add_optimizer_op()\n\n # add baseline\n if self.config.use_baseline:\n self.add_baseline_op()\n\n def initialize(self):\n \"\"\"\n Assumes the graph has been constructed (have called self.build())\n Creates a tf Session and run initializer of variables\n\n You don't have to change or use anything here.\n \"\"\"\n # create tf session\n self.sess = tf.Session()\n # tensorboard stuff\n self.add_summary()\n # initiliaze all variables\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n def add_summary(self):\n \"\"\"\n Tensorboard stuff.\n\n You don't have to change or use anything here.\n \"\"\"\n # extra placeholders to log stuff from python\n self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"avg_reward\")\n self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"max_reward\")\n self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"std_reward\")\n\n self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"eval_reward\")\n\n # extra summaries from python -> placeholders\n tf.summary.scalar(\"Avg Reward\", self.avg_reward_placeholder)\n tf.summary.scalar(\"Max Reward\", self.max_reward_placeholder)\n tf.summary.scalar(\"Std Reward\", self.std_reward_placeholder)\n tf.summary.scalar(\"Eval Reward\", self.eval_reward_placeholder)\n\n # logging\n self.merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph)\n\n def init_averages(self):\n \"\"\"\n Defines extra attributes for tensorboard.\n\n You don't have to change or use anything here.\n \"\"\"\n self.avg_reward = 0.\n self.max_reward = 0.\n 
self.std_reward = 0.\n self.eval_reward = 0.\n\n def update_averages(self, rewards, scores_eval):\n \"\"\"\n Update the averages.\n\n You don't have to change or use anything here.\n\n Args:\n rewards: deque\n scores_eval: list\n \"\"\"\n self.avg_reward = np.mean(rewards)\n self.max_reward = np.max(rewards)\n self.std_reward = np.sqrt(np.var(rewards) / len(rewards))\n\n if len(scores_eval) > 0:\n self.eval_reward = scores_eval[-1]\n\n def record_summary(self, t):\n \"\"\"\n Add summary to tensorboard\n\n You don't have to change or use anything here.\n \"\"\"\n\n fd = {\n self.avg_reward_placeholder: self.avg_reward,\n self.max_reward_placeholder: self.max_reward,\n self.std_reward_placeholder: self.std_reward,\n self.eval_reward_placeholder: self.eval_reward,\n }\n summary = self.sess.run(self.merged, feed_dict=fd)\n # tensorboard stuff\n self.file_writer.add_summary(summary, t)\n\n def sample_path(self, env, num_episodes = None):\n \"\"\"\n Sample paths (trajectories) from the environment.\n\n Args:\n num_episodes: the number of episodes to be sampled\n if none, sample one batch (size indicated by config file)\n env: open AI Gym envinronment\n\n Returns:\n paths: a list of paths. Each path in paths is a dictionary with\n path[\"observation\"] a numpy array of ordered observations in the path\n path[\"actions\"] a numpy array of the corresponding actions in the path\n path[\"reward\"] a numpy array of the corresponding rewards in the path\n total_rewards: the sum of all rewards encountered during this \"path\"\n\n You do not have to implement anything in this function, but you will need to\n understand what it returns, and it is worthwhile to look over the code\n just so you understand how we are taking actions in the environment\n and generating batches to train on.\n \"\"\"\n episode = 0\n episode_rewards = []\n paths = []\n t = 0\n\n while (num_episodes or t < self.config.batch_size):\n state = env.reset()\n states, actions, rewards = [], [], []\n episode_reward = 0\n\n for step in range(self.config.max_ep_len):\n states.append(state)\n action = self.sess.run(self.sampled_action, feed_dict={self.observation_placeholder : state[None]})\n state, reward, done, info = env.step(action)\n actions.append(action)\n rewards.append(reward)\n episode_reward += reward\n t += 1\n if (done or step == self.config.max_ep_len-1):\n episode_rewards.append(episode_reward)\n break\n if (not num_episodes) and t == self.config.batch_size:\n break\n\n path = {\"observation\" : np.array(states),\n \"reward\" : np.array(rewards),\n \"action\" : np.array(actions)}\n paths.append(path)\n episode += 1\n if num_episodes and episode >= num_episodes:\n break\n\n return paths, episode_rewards\n\n def get_returns(self, paths):\n \"\"\"\n Calculate the returns G_t for each timestep\n\n Args:\n paths: recorded sample paths. See sample_path() for details.\n\n Return:\n returns: return G_t for each timestep\n\n After acting in the environment, we record the observations, actions, and\n rewards. To get the advantages that we need for the policy update, we have\n to convert the rewards into returns, G_t, which are themselves an estimate\n of Q^π (s_t, a_t):\n\n G_t = r_t + γ r_{t+1} + γ^2 r_{t+2} + ... + γ^{T-t} r_T\n\n where T is the last timestep of the episode.\n\n TODO: compute and return G_t for each timestep. Use self.config.gamma.\n \"\"\"\n\n all_returns = []\n for path in paths:\n rewards = path[\"reward\"]\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. 
############\n returns = np.zeros_like(rewards) # TODO\n returns[-1] = rewards[-1]\n for i in reversed(range(len(rewards) - 1)):\n returns[i] = rewards[i] + self.config.gamma * returns[i + 1]\n #######################################################\n ######### END YOUR CODE. ############\n all_returns.append(returns)\n returns = np.concatenate(all_returns)\n\n return returns\n\n def calculate_advantage(self, returns, observations):\n \"\"\"\n Calculate the advantage\n\n Args:\n returns: all discounted future returns for each step\n observations: observations\n Returns:\n adv: Advantage\n\n Calculate the advantages, using baseline adjustment if necessary,\n and normalizing the advantages if necessary.\n If neither of these options are True, just return returns.\n\n TODO:\n If config.use_baseline = False and config.normalize_advantage = False,\n then the \"advantage\" is just going to be the returns (and not actually\n an advantage).\n\n if config.use_baseline, then we need to evaluate the baseline and subtract\n it from the returns to get the advantage.\n HINT: evaluate the self.baseline with self.sess.run(...)\n\n if config.normalize_advantage:\n after doing the above, normalize the advantages so that they have a mean of 0\n and standard deviation of 1.\n \"\"\"\n adv = returns\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. ############\n # modified from https://github.com/arowdy98/Stanford-CS234/blob/master/assignment3/starter_code/pg.py\n if self.config.use_baseline:\n adv -= self.sess.run(self.baseline, feed_dict={self.observation_placeholder : observations}).squeeze()# TODO\n if self.config.normalize_advantage:\n adv = (adv - adv.mean()) / (adv.std() + 1e-12) # TODO\n #######################################################\n ######### END YOUR CODE. ############\n return adv\n\n def update_baseline(self, returns, observations):\n \"\"\"\n Update the baseline from given returns and observation.\n\n Args:\n returns: Returns from get_returns\n observations: observations\n TODO:\n apply the baseline update op with the observations and the returns.\n HINT: Run self.update_baseline_op with self.sess.run(...)\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 1-5 lines. ############\n self.sess.run(self.update_baseline_op, feed_dict={self.baseline_target_placeholder:returns,\n self.observation_placeholder:observations}) # TODO\n #######################################################\n ######### END YOUR CODE. 
############\n\n def train(self):\n \"\"\"\n Performs training\n\n You do not have to change or use anything here, but take a look\n to see how all the code you've written fits together!\n \"\"\"\n last_eval = 0\n last_record = 0\n scores_eval = []\n\n self.init_averages()\n scores_eval = [] # list of scores computed at iteration time\n\n for t in range(self.config.num_batches):\n\n # collect a minibatch of samples\n paths, total_rewards = self.sample_path(self.env)\n scores_eval = scores_eval + total_rewards\n observations = np.concatenate([path[\"observation\"] for path in paths])\n actions = np.concatenate([path[\"action\"] for path in paths])\n rewards = np.concatenate([path[\"reward\"] for path in paths])\n # compute Q-val estimates (discounted future returns) for each time step\n returns = self.get_returns(paths)\n advantages = self.calculate_advantage(returns, observations)\n\n # run training operations\n if self.config.use_baseline:\n self.update_baseline(returns, observations)\n self.sess.run(self.train_op, feed_dict={\n self.observation_placeholder : observations,\n self.action_placeholder : actions,\n self.advantage_placeholder : advantages})\n\n # tf stuff\n if (t % self.config.summary_freq == 0):\n self.update_averages(total_rewards, scores_eval)\n self.record_summary(t)\n\n # compute reward statistics for this batch and log\n avg_reward = np.mean(total_rewards)\n sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n self.logger.info(msg)\n\n if self.config.record and (t % self.config.record_freq == 0):\n self.logger.info(\"Recording...\")\n # last_record = 0\n self.record()\n\n self.logger.info(\"- Training done.\")\n export_plot(scores_eval, \"Score\", config.env_name, self.config.plot_output)\n\n def evaluate(self, env=None, num_episodes=1):\n \"\"\"\n Evaluates the return for num_episodes episodes.\n Not used right now, all evaluation statistics are computed during training\n episodes.\n \"\"\"\n if env==None: env = self.env\n paths, rewards = self.sample_path(env, num_episodes)\n avg_reward = np.mean(rewards)\n sigma_reward = np.sqrt(np.var(rewards) / len(rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n self.logger.info(msg)\n return avg_reward\n\n def record(self):\n \"\"\"\n Recreate an env and record a video for one episode\n \"\"\"\n env = gym.make(self.config.env_name)\n env = gym.wrappers.Monitor(env, self.config.record_path, video_callable=lambda x: True, resume=True)\n self.evaluate(env, 1)\n\n def run(self):\n \"\"\"\n Apply procedures of training for a PG.\n \"\"\"\n # initialize\n self.initialize()\n # record one game at the beginning\n if self.config.record:\n self.record()\n # model\n self.train()\n # record one game at the end\n if self.config.record:\n self.record()\n\nif __name__ == '__main__':\n # args = parser.parse_args()\n # config = get_config(args.env_name, args.use_baseline)\n config = get_config('cartpole', True)\n env = gym.make(config.env_name)\n # train model\n model = PG(env, config)\n model.run()\n"
] | [
[
"tensorflow.get_variable",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.zeros_like",
"tensorflow.train.AdamOptimizer",
"numpy.var",
"tensorflow.summary.scalar",
"tensorflow.contrib.distributions.MultivariateNormalDiag",
"tensorflow.squeeze",
"tensorflow.Session",
"tensorflow.keras.layers.Dense",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"tensorflow.summary.FileWriter",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.multinomial",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
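Editor's note on the policy-gradient file in the row above: in the continuous-action branch, `self.sampled_action` passes `log_std` straight to `tf.random_normal` as `stddev` with shape `(1,)`, and `self.logprob` is assigned the `MultivariateNormalDiag` distribution object itself rather than a log-probability, so the later `tf.reduce_mean(-self.logprob * advantage)` cannot be evaluated. A minimal sketch of how that branch is usually completed, assuming TF 1.x and the same tensor names (`action_means`, `log_std`, `action_placeholder`); this is an illustration, not the assignment's reference solution:

```python
# Sketch only: corrected continuous-action ops under the assumptions stated above.
import tensorflow as tf

def continuous_policy_ops(action_means, log_std, action_placeholder):
    std = tf.exp(log_std)                              # log-std is the trainable variable; exp keeps std > 0
    noise = tf.random_normal(tf.shape(action_means))   # draw from N(0, 1)
    sampled_action = action_means + std * noise        # re-parametrization: a ~ N(mu, sigma)
    dist = tf.contrib.distributions.MultivariateNormalDiag(
        loc=action_means,
        scale_diag=tf.ones_like(action_means) * std)   # broadcast the [1, action_dim] std over the batch
    logprob = dist.log_prob(action_placeholder)        # log pi(a_t | o_t), shape [batch]
    return sampled_action, logprob
```

Keeping the trainable variable in log space leaves the optimizer unconstrained, while `exp` maps it back to a strictly positive standard deviation.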
abhijeetdhupia/WCE-Classification | [
"043805fe54d14ef3d24735375df1f387c62e7896"
] | [
"utils.py"
] | [
"import torch \n\ndef calculate_topk_accuracy(y_pred, y, k = 4):\n with torch.no_grad():\n batch_size = y.shape[0]\n _, top_pred = y_pred.topk(k, 1)\n top_pred = top_pred.t()\n correct = top_pred.eq(y.view(1, -1).expand_as(top_pred))\n correct_1 = correct[:1].reshape(-1).float().sum(0, keepdim = True)\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim = True)\n acc_1 = correct_1 / batch_size\n acc_k = correct_k / batch_size\n return acc_1, acc_k\n\n\ndef evaluate(model, iterator, criterion, device):\n \n epoch_loss = 0\n epoch_acc_1 = 0\n epoch_acc_5 = 0\n \n model.eval()\n \n with torch.no_grad():\n \n for (x, y) in iterator:\n\n x = x.to(device)\n y = y.to(device)\n\n y_pred, _ = model(x)\n\n loss = criterion(y_pred, y)\n\n acc_1, acc_5 = calculate_topk_accuracy(y_pred, y)\n\n epoch_loss += loss.item()\n epoch_acc_1 += acc_1.item()\n epoch_acc_5 += acc_5.item()\n \n epoch_loss /= len(iterator)\n epoch_acc_1 /= len(iterator)\n epoch_acc_5 /= len(iterator)\n \n return epoch_loss, epoch_acc_1, epoch_acc_5\n\n#Normalization \ndef normalize_image(image):\n image_min = image.min()\n image_max = image.max()\n image.clamp_(min = image_min, max = image_max)\n image.add_(-image_min).div_(image_max - image_min + 1e-5)\n return image \n"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
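Note on the `utils.py` row above: the accumulators are named `epoch_acc_5`, but `calculate_topk_accuracy` defaults to `k = 4`, so the reported figures are top-4 accuracy unless `k` is overridden; `normalize_image` also clamps the tensor to its own min and max, which is a no-op before the rescaling step. A small usage sketch with made-up tensors, assuming the function as defined above:

```python
# Illustrative only: random logits and labels for a batch of 8 over 6 classes.
import torch

y_pred = torch.randn(8, 6)             # model outputs (logits)
y = torch.randint(0, 6, (8,))          # ground-truth class indices
acc_1, acc_k = calculate_topk_accuracy(y_pred, y, k=4)
print(f"top-1: {acc_1.item():.3f}, top-4: {acc_k.item():.3f}")
```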
Anonymous633671/A-Comparison-on-Communication-and-Code-Dependency-Effects-on-Software-Code-Quality | [
"5a88f62513f9879178af3c5f763631b93e4f3054",
"5a88f62513f9879178af3c5f763631b93e4f3054"
] | [
"src/main/git_log/buggy_commit.py",
"src/RQ1_RQ2_data_extraction.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 31 12:49:38 2018\n\n@author: suvod\n\"\"\"\n\nfrom main.git_log import git2repo\nfrom main.api import api_access\nimport pygit2\nimport re\nimport pandas as pd\nfrom datetime import datetime\nimport re, unicodedata\nfrom pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE\nimport os\nfrom main.utils.utils import utils\nimport platform\nimport threading\nfrom multiprocessing import Queue\nfrom threading import Thread\nimport numpy as np\nimport itertools\nimport pandas as pd\nimport itertools\nimport math\nfrom multiprocessing import Pool, cpu_count\nfrom os.path import dirname as up\n\n\nclass ThreadWithReturnValue(Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs={}, Verbose=None):\n Thread.__init__(self, group, target, name, args, kwargs)\n self._return = None\n def run(self):\n #print(type(self._target))\n if self._target is not None:\n self._return = self._target(*self._args,\n **self._kwargs)\n def join(self, *args):\n Thread.join(self, *args)\n return self._return\n\nclass buggy_commit_maker(object):\n \n \n def __init__(self,project_name,repo_url,repo_name):\n self.project_name = project_name\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n self.data_path = os.getcwd() + '/data/'\n else:\n self.data_path = os.getcwd() + '\\\\data\\\\'\n self.commit = self.read_files('commit')\n self.committed_files = self.read_files('committed_file')\n self.initilize_repo(repo_url,repo_name)\n self.cores = cpu_count()\n \n def initilize_repo(self,repo_url,repo_name):\n self.git_repo = git2repo.git2repo(repo_url,repo_name)\n self.repo = self.git_repo.clone_repo()\n \n \n def read_files(self,file_data):\n file_path = self.data_path + self.project_name + '_' + file_data + '.pkl'\n return pd.read_pickle(file_path)\n \n \n def isBuggyCommit(self, commit):\n res=re.search(r'\\b{bug|fix|issue|error|correct|proper|deprecat|broke|optimize|patch|solve|slow|obsolete|vulnerab|debug|perf|memory|minor|wart|better|complex|break|investigat|compile|defect|inconsist|crash|problem|resol|#}\\b',utils().stemming(commit),re.IGNORECASE)\n if res is not None:\n return True\n \n \n def buggy_commits(self,commits):\n for i in range(commits.shape[0]):\n result = self.isBuggyCommit(commits.loc[i,'message'])\n if result:\n commits.loc[i,'isBuggy'] = 1\n else:\n commits.loc[i,'isBuggy'] = 0\n return commits\n\n def get_buggy_commits(self):\n threads = []\n self.commit['isBuggy'] = pd.Series([0]*self.commit.shape[0])\n column_names = self.commit.columns.tolist()\n bug_fixed_commit = pd.DataFrame([], columns = column_names)\n commits_np = np.array_split(self.commit, self.cores)\n for i in range(self.cores):\n commits = pd.DataFrame(commits_np[i], columns = column_names)\n commits.reset_index(inplace = True, drop = True)\n t = ThreadWithReturnValue(target = self.buggy_commits, args = [commits])\n threads.append(t)\n for th in threads:\n th.start()\n for th in threads:\n response = th.join()\n bug_fixed_commit = pd.concat([bug_fixed_commit,response])\n bug_fixed_commit.reset_index(inplace = True, drop = True)\n self.commit = bug_fixed_commit\n \n# def get_buggy_commits(self):\n# self.commit['isBuggy'] = pd.Series([0]*self.commit.shape[0])\n# for i in range(self.commit.shape[0]):\n# result = self.isBuggyCommit(self.commit.loc[i,'message'])\n# if result:\n# self.commit.loc[i,'isBuggy'] = 1\n# else:\n# self.commit.loc[i,'isBuggy'] = 0\n \n\n def buggy_committer(self,buggy_diffs):\n bug_creator = []\n for value in buggy_diffs:\n 
_diff_files = buggy_diffs[value]['files']\n self.repo.head.set_target(buggy_diffs[value]['object'].parent_ids[0])\n for _value in _diff_files:\n try:\n file_path = _diff_files[_value]['file_path']\n blame = self.git_repo.get_blame(file_path)\n for _line in _diff_files[_value]['old_lines']:\n if _line != -1:\n ref = blame.for_line(_line)\n #print(_value,ref.final_committer.name)\n bug_creator.append([ref.final_committer.name, ref.orig_commit_id, 1])\n except:\n continue\n bug_creator_df = pd.DataFrame(bug_creator, columns = ['committer','commit','ob'])\n bug_creator_df = bug_creator_df.drop_duplicates()\n return bug_creator_df\n \n \n def get_buggy_committer(self):\n threads = []\n df = pd.DataFrame([])\n # To-Do this is to saperate the data into small chunks from get_diff that is the dict\n buggy_commit_df = self.commit[self.commit['isBuggy'] == 1]\n buggy_diffs = self.git_repo.get_diffs(buggy_commit_df['commit_number'].values.tolist())\n keys = list(buggy_diffs.keys())\n len_bd = len(buggy_diffs)\n sub_list_len = len_bd/self.cores\n for i in range(self.cores):\n sub_keys = keys[int(i*sub_list_len):int((i+1)*sub_list_len)]\n subdict = {x: buggy_diffs[x] for x in sub_keys if x in buggy_diffs}\n t = ThreadWithReturnValue(target = self.buggy_committer, args = [subdict])\n threads.append(t)\n for i in range(0,len(threads),self.cores):\n _threads = threads[i:i+self.cores]\n for th in _threads:\n th.start()\n for th in _threads:\n response = th.join()\n df = pd.concat([df,response])\n df.reset_index(inplace = True, drop = True)\n df.drop_duplicates(inplace = True)\n df = df.groupby( ['committer']).count()\n defect_count = []\n for key,value in df.iterrows():\n user = key\n count = value.values.tolist()[0]\n defect_count.append([user,count])\n return defect_count\n \n# def get_buggy_committer(self):\n# buggy_commit_df = self.commit[self.commit['isBuggy'] == 1]\n# buggy_diffs = self.git_repo.get_diffs(buggy_commit_df['commit_number'].values.tolist())\n# bug_creator = []\n# for value in buggy_diffs:\n# _diff_files = buggy_diffs[value]['files']\n# self.repo.head.set_target(buggy_diffs[value]['object'].parent_ids[0])\n# for _value in _diff_files:\n# try:\n# file_path = _diff_files[_value]['file_path']\n# blame = self.git_repo.get_blame(file_path)\n# for _line in _diff_files[_value]['old_lines']:\n# if _line != -1:\n# ref = blame.for_line(_line)\n# print(_value,ref.final_committer.name)\n# bug_creator.append([ref.final_committer.name, ref.orig_commit_id, 1])\n# except:\n# continue\n# bug_creator_df = pd.DataFrame(bug_creator, columns = ['committer','commit','ob'])\n# bug_creator_df = bug_creator_df.drop_duplicates()\n# df = bug_creator_df.groupby( ['committer']).count()\n# defect_count = []\n# for key,value in df.iterrows():\n# user = key\n# count = value.values.tolist()[0]\n# defect_count.append([user,count])\n# return defect_count\n \n def get_commit_count(self):\n committer_count = []\n for i in range(self.commit.shape[0]):\n commit_id = self.commit.loc[i,'commit_number']\n user = self.repo.get(commit_id).committer\n committer_count.append([user.name, commit_id, 1])\n committer_count_df = pd.DataFrame(committer_count, columns = ['committer', 'commit_id', 'ob'])\n committer_count_df = committer_count_df.drop_duplicates()\n df = committer_count_df.groupby( ['committer']).count()\n commit_count = []\n for key,value in df.iterrows():\n user = key\n count = value.values.tolist()[0]\n commit_count.append([user,count])\n return commit_count\n \n\n ",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 11:50:56 2018\n\n@author: suvodeepmajumder\n\"\"\"\n\nfrom interaction import get_commit_lines\nfrom interaction import code_interaction\nimport main.git_log.buggy_commit as buggy_commit\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom os.path import dirname as up\nimport os\nfrom pathlib import Path\nimport platform\nfrom main.git_log import git2data\n\n\ndef get_heros():\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n source_projects = os.getcwd() + '/project_list.csv'\n else:\n source_projects = os.getcwd() + '\\\\project_list.csv'\n project_list = pd.read_csv(source_projects)\n project_list['heros_80'] = [0]*project_list.shape[0]\n project_list['heros_85'] = [0]*project_list.shape[0]\n project_list['heros_90'] = [0]*project_list.shape[0]\n project_list['heros_95'] = [0]*project_list.shape[0]\n project_list['num_dev'] = [0]*project_list.shape[0]\n projects_hero = []\n for i in range(project_list.shape[0]):\n try:\n access_token = project_list.loc[i,'access_token']\n repo_owner = project_list.loc[i,'repo_owner']\n source_type = project_list.loc[i,'source_type']\n git_url = project_list.loc[i,'git_url']\n api_base_url = project_list.loc[i,'api_base_url']\n repo_name = project_list.loc[i,'repo_name'] \n git_data = git2data.git2data(access_token,repo_owner,source_type,git_url,api_base_url,repo_name)\n git_data.create_data()\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n data_path = os.getcwd() + '/data/' + repo_name + '/'\n else:\n data_path = os.getcwd() + '\\\\data\\\\' + repo_name + '\\\\'\n \n if not Path(data_path).is_dir():\n os.makedirs(Path(data_path))\n \n cg = get_commit_lines.create_code_interaction_graph(git_url,repo_name)\n project_details = cg.get_user_node_degree()\n project_details.sort_values(by='ob',inplace=True)\n project_details['cum_sum'] = project_details.ob.cumsum()\n total_loc = project_details.ob.sum()\n #print(project_details,total_loc)\n contr_list = [0.8,0.85,0.9,0.95]\n population_list = [0.2,0.15,0.1,0.05]\n for k in range(4):\n for j in range(project_details.shape[0]):\n if project_details.iloc[j,1] <= project_details.ob.sum()*contr_list[k]:\n continue\n else:\n break\n project_list.iloc[i,11] = project_details.shape[0]\n\n if project_details.shape[0] < 8:\n continue\n if 1 == j/project_details.shape[0]:\n project_list.iloc[i,7+k] = False\n continue\n if ((1 - j/project_details.shape[0])<population_list[k]):\n project_list.iloc[i,7+k] = True \n else:\n project_list.iloc[i,7+k] = False\n\n project_list.to_csv(os.getcwd() + '/hero_list.csv')\n except ValueError as e:\n print(\"Error\",e)\n continue\n return project_list\n\nget_heros()"
] | [
[
"pandas.concat",
"pandas.Series",
"pandas.DataFrame",
"numpy.array_split",
"pandas.read_pickle"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
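On `isBuggyCommit` in the row above: in the pattern `r'\b{bug|fix|...|#}\b'` the braces and the `\b` anchors bind only to the first and last alternatives, so `bug` only matches when preceded by a literal `{`, `#` only when followed by `}`, and the middle keywords match anywhere, including inside longer words (for example `prefix` matches through `fix`). Grouping the alternatives is the usual fix; a hedged sketch with a shortened keyword list, applied to the raw message rather than the stemmed text used above:

```python
# Sketch only: grouped keyword match; the real study stems the message first and
# uses a much longer keyword list.
import re

BUG_PATTERN = re.compile(r'\b(bug|fix|issue|error|patch|defect|crash|solve)\b',
                         re.IGNORECASE)

def is_buggy_message(message: str) -> bool:
    return BUG_PATTERN.search(message) is not None

print(is_buggy_message("Fix crash when repo URL is empty"))   # True
print(is_buggy_message("Prefix committer names with org"))    # False: "prefix" no longer matches
```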
pjain310/scRNAseq_Cell_Classification | [
"46d73ff257eef9974e1e425a52b30b61e96e3ca4"
] | [
"Scripts/run_parallel_VC.py"
] | [
"import os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time as tm\r\nfrom joblib import Parallel, delayed\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom imblearn.combine import SMOTEENN\r\nimport rpy2.robjects as robjects\r\n\r\ndef CV_Classifier(cv_fold):\r\n test_ind_i = np.array(test_ind[cv_fold], dtype='int') - 1\r\n train_ind_i = np.array(train_ind[cv_fold], dtype='int') - 1\r\n\r\n train = data.iloc[train_ind_i]\r\n test = data.iloc[test_ind_i]\r\n y_train = labels.iloc[train_ind_i]\r\n y_test = labels.iloc[test_ind_i]\r\n\r\n #Feature selection\r\n if (NumGenes > 0):\r\n feat_to_use = features.iloc[0:NumGenes, cv_fold]\r\n train = train.iloc[:, feat_to_use]\r\n test = test.iloc[:, feat_to_use]\r\n\r\n print(\"Runnning SMOTE\", cv_fold)\r\n\r\n #Imbalance removal using Smote-ENN \r\n #smt = SMOTEENN(ratio='auto', random_state=42, n_jobs = -1)\r\n #train, y_train = smt.fit_resample(train_unsampled, y_train_unsampled.values.ravel())\r\n\r\n print(\"Ran SMOTE\", cv_fold)\r\n\r\n start = tm.time()\r\n print(\"Fitting\")\r\n Classifier.fit(train, y_train.values.ravel())\r\n tr_time.append(tm.time()-start)\r\n\r\n start = tm.time()\r\n print(\"Testing\")\r\n predicted = Classifier.predict(test)\r\n ts_time.append(tm.time()-start)\r\n\r\n truelab.extend(y_test.values)\r\n pred.extend(predicted)\r\n\r\n\r\ndef run_SVM(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath = \"\", NumGenes = 0):\r\n '''\r\n run baseline classifier: SVM\r\n Wrapper script to run an SVM classifier with a linear kernel on a benchmark dataset with 5-fold cross validation,\r\n outputs lists of true and predicted cell labels as csv files, as well as computation time.\r\n\r\n Parameters\r\n ----------\r\n DataPath : Data file path (.csv), cells-genes matrix with cell unique barcodes\r\n as row names and gene names as column names.\r\n LabelsPath : Cell population annotations file path (.csv).\r\n CV_RDataPath : Cross validation RData file path (.RData), obtained from Cross_Validation.R function.\r\n OutputDir : Output directory defining the path of the exported file.\r\n GeneOrderPath : Gene order file path (.csv) obtained from feature selection,\r\n defining the genes order for each cross validation fold, default is NULL.\r\n NumGenes : Number of genes used in case of feature selection (integer), default is 0.\r\n '''\r\n\r\n # read the Rdata file\r\n robjects.r['load'](CV_RDataPath)\r\n\r\n nfolds = np.array(robjects.r['n_folds'], dtype = 'int')\r\n tokeep = np.array(robjects.r['Cells_to_Keep'], dtype = 'bool')\r\n col = np.array(robjects.r['col_Index'], dtype = 'int')\r\n col = col - 1\r\n test_ind = np.array(robjects.r['Test_Idx'])\r\n train_ind = np.array(robjects.r['Train_Idx'])\r\n\r\n # read the data\r\n print(\"readind data\")\r\n data = pd.read_csv(DataPath,index_col=0,sep=',')\r\n labels = pd.read_csv(LabelsPath, header=0,index_col=None, sep=',', usecols = col)\r\n print(\"done\")\r\n\r\n\r\n labels = labels.iloc[tokeep]\r\n data = data.iloc[tokeep]\r\n\r\n # read the feature file\r\n if (NumGenes > 0):\r\n features = pd.read_csv(GeneOrderPath,header=0,index_col=None, sep=',')\r\n\r\n # folder with results\r\n os.chdir(OutputDir)\r\n\r\n # normalize data\r\n data = np.log1p(data)\r\n\r\n svm = AdaBoostClassifier(base_estimator=LinearSVC(),n_estimators=50, 
algorithm='SAMME')\r\n RF = RandomForestClassifier(n_estimators=50)\r\n LDA = LinearDiscriminantAnalysis()\r\n\r\n Classifier = VotingClassifier(estimators = [('Support Vector',svm),('Random Forest',RF),('Linear Discriminant',LDA)],n_jobs = -1,weights=[0.45,0.25,0.3])\r\n\r\n\r\n tr_time=[]\r\n ts_time=[]\r\n truelab = []\r\n pred = []\r\n\r\n Parallel(n_jobs=4)(delayed(CV_Classifier(i) for i in range(np.squeeze(nfolds))))\r\n\r\n truelab = pd.DataFrame(truelab)\r\n pred = pd.DataFrame(pred)\r\n\r\n tr_time = pd.DataFrame(tr_time)\r\n ts_time = pd.DataFrame(ts_time)\r\n\r\n if (NumGenes == 0):\r\n truelab.to_csv(\"VC_parallel_True_Labels.csv\", index = False)\r\n pred.to_csv(\"VC_parallel_Pred_Labels.csv\", index = False)\r\n tr_time.to_csv(\"VC_parallel_Training_Time.csv\", index = False)\r\n ts_time.to_csv(\"VC_parallel_Testing_Time.csv\", index = False)\r\n else:\r\n truelab.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_True_Labels.csv\", index = False)\r\n pred.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Pred_Labels.csv\", index = False)\r\n tr_time.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Training_Time.csv\", index = False)\r\n ts_time.to_csv(\"VC_parallel_\" + str(NumGenes) + \"_Testing_Time.csv\", index = False)\r\n\r\n\r\nLabelsPath = \"~/Desktop/scRNA_Cell_Typing/scRNAseq_Benchmark_datasets/Zheng/Labels.csv\"\r\nDataPath = \"~/Desktop/scRNA_Cell_Typing/scRNAseq_Benchmark_datasets/Zheng/Filtered_68K_PBMC_data.csv\"\r\nCV_RDataPath = \"~/Desktop/scRNA_Cell_Typing/CV_folds.RData\"\r\nGeneOrderPath = \"~/Desktop/scRNA_Cell_Typing/results/og_SVM_results/rank_genes_dropouts.csv\"\r\nOutputDir = \"results/top5000_VotingRegressor_results\"\r\n\r\nrun_SVM(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath, 5000)\r\n"
] | [
[
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"numpy.squeeze",
"sklearn.ensemble.VotingClassifier",
"pandas.DataFrame",
"sklearn.svm.LinearSVC",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"numpy.log1p",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
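In `run_SVM` in the row above, the call `Parallel(n_jobs=4)(delayed(CV_Classifier(i) for i in range(np.squeeze(nfolds))))` hands a single generator to `delayed` instead of delaying the function once per fold, and `CV_Classifier` reports results by appending to shared lists, which does not propagate back from worker processes under joblib's default process-based backend. The usual idiom delays the function itself and collects the returned values; a hedged sketch in which `cv_classifier` and `n_folds` are illustrative stand-ins:

```python
# Sketch only: per-fold work returns its outputs instead of mutating shared state.
from joblib import Parallel, delayed

def cv_classifier(fold):
    # ... select the fold's train/test indices, fit the voting classifier, predict ...
    true_labels, predictions = [], []   # placeholders for the fold's outputs
    return fold, true_labels, predictions

n_folds = 5
results = Parallel(n_jobs=4)(delayed(cv_classifier)(i) for i in range(n_folds))
for fold, true_labels, predictions in results:
    pass  # aggregate per-fold outputs here
```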
jdfekete/progressivis | [
"3bc79ce229cd628ef0aa4663136a674743697b47"
] | [
"tests/test_03_csv_crash.py"
] | [
"from . import ProgressiveTest, skip, skipIf\nfrom progressivis.io import CSVLoader\nfrom progressivis.table.constant import Constant\nfrom progressivis.table.table import Table\nfrom progressivis.datasets import (get_dataset, get_dataset_bz2,\n get_dataset_gz,\n get_dataset_lzma, DATA_DIR)\nfrom progressivis.core.utils import RandomBytesIO\nfrom progressivis.stats.counter import Counter\nfrom progressivis.storage import IS_PERSISTENT\nfrom progressivis.storage import cleanup_temp_dir, init_temp_dir_if\nfrom progressivis.core import aio\n#import logging, sys\nfrom multiprocessing import Process\nimport time, os\nimport requests\nfrom requests.packages.urllib3.exceptions import ReadTimeoutError\nfrom requests.exceptions import ConnectionError\n\n\nfrom RangeHTTPServer import RangeRequestHandler\nimport shutil\nimport numpy as np\nimport pandas as pd\n\nimport http.server as http_srv\n\nBZ2 = 'csv.bz2'\nGZ = 'csv.gz'\nXZ = 'csv.xz'\n#TRAVIS = os.getenv(\"TRAVIS\")\nPORT = 8000\nHOST = 'localhost'\nSLEEP = 10\n\n#IS_PERSISTENT = False\n\ndef _close(module):\n try:\n module.parser._input._stream.close()\n except:\n pass\n\nasync def sleep_then_stop(s, t):\n await aio.sleep(t)\n await s.stop()\n #trace_after_stop(s)\n\ndef trace_after_stop(s):\n t = s.modules()['csv_loader_1']._table\n print(\"crashed when len(_table) ==\", len(t), \"last_id:\", t._last_id)\n i = t._last_id\n print(\"border row i:\", t.loc[i-1,:].to_dict())\n print(\"border row i+1:\", t.loc[i,:].to_dict())\n\ndef make_url(name, ext='csv'):\n return 'http://{host}:{port}/{name}.{ext}'.format(host=HOST,\n port=PORT,\n name=name, ext=ext)\n\ndef run_simple_server():\n _ = get_dataset('smallfile')\n _ = get_dataset('bigfile')\n _ = get_dataset_bz2('smallfile')\n _ = get_dataset_bz2('bigfile')\n _ = get_dataset_gz('smallfile')\n _ = get_dataset_gz('bigfile')\n #if six.PY3:\n # _ = get_dataset_lzma('smallfile')\n # _ = get_dataset_lzma('bigfile')\n os.chdir(DATA_DIR)\n import RangeHTTPServer.__main__\n\nBIGFILE_DF = pd.read_csv(get_dataset('bigfile'), header=None, usecols=[0])\n\nclass _HttpSrv(object):\n def __init__(self):\n _HttpSrv.start(self)\n\n def stop(self):\n if self._http_proc is not None:\n try:\n self._http_proc.terminate()\n time.sleep(SLEEP)\n except:\n pass\n\n def start(self):\n p = Process(target=run_simple_server, args=())\n p.start()\n self._http_proc = p\n time.sleep(SLEEP)\n\n def restart(self):\n self.stop()\n self.start()\n\n#IS_PERSISTENT = False\nclass ProgressiveLoadCSVCrashRoot(ProgressiveTest):\n _http_srv = None\n def setUp(self):\n super().setUp()\n #self._http_srv = None\n cleanup_temp_dir()\n init_temp_dir_if()\n #if self._http_srv is None:\n # self._http_srv = _HttpSrv()\n\n def tearDown(self):\n super().tearDown()\n #TestProgressiveLoadCSVCrash.cleanup()\n if self._http_srv is not None:\n try:\n self._http_srv.stop()\n except:\n pass\n cleanup_temp_dir()\n\n def get_tag(self):\n return id(self._http_srv)\n\n#IS_PERSISTENT = False\nclass TestProgressiveLoadCSVCrash1(ProgressiveLoadCSVCrashRoot):\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_01_read_http_csv_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile')\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 2)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module=CSVLoader(url, 
recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 1000000)\n arr1 = module.result.loc[:, 0].to_array().reshape(-1)\n arr2 = BIGFILE_DF.loc[:, 0].values\n #import pdb;pdb.set_trace()\n self.assertTrue(np.allclose(arr1, arr2))\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_01_read_http_csv_with_crash_and_counter(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile')\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 2)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n csv=CSVLoader(url, recovery=True, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n counter = Counter(scheduler=s)\n counter.input[0] = csv.output.result\n self.assertTrue(csv.result is None)\n aio.run(s.start())\n self.assertEqual(len(csv.result), 1000000)\n self.assertEqual(counter.result['counter'].loc[0], 1000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_02_read_http_csv_bz2_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s=self.scheduler()\n url = make_url('bigfile', ext=BZ2)\n module=CSVLoader(url, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 5)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module=CSVLoader(url, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 1000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_03_read_http_multi_csv_no_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n s=self.scheduler()\n module=CSVLoader([make_url('smallfile'),make_url('smallfile')], index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_04_read_http_multi_csv_bz2_no_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n s=self.scheduler()\n module=CSVLoader([make_url('smallfile', ext=BZ2)]*2, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\nclass TestProgressiveLoadCSVCrash2(ProgressiveLoadCSVCrashRoot):\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_05_read_http_multi_csv_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s = self.scheduler()\n url_list = [make_url('bigfile'),make_url('bigfile')]\n module = CSVLoader(url_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module = CSVLoader(url_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient 
storage, test skipped\")\n def test_06_read_http_multi_csv_bz2_with_crash(self):\n #if TRAVIS: return\n self._http_srv = _HttpSrv()\n tag = self.get_tag()\n s = self.scheduler()\n url_list = [make_url('bigfile', ext=BZ2)]*2\n module = CSVLoader(url_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n self._http_srv.restart()\n s=self.scheduler(clean=True)\n module = CSVLoader(url_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_07_read_multi_csv_file_no_crash(self):\n s = self.scheduler()\n module = CSVLoader([get_dataset('smallfile'), get_dataset('smallfile')], index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\nclass TestProgressiveLoadCSVCrash3(ProgressiveLoadCSVCrashRoot):\n def _tst_08_read_multi_csv_file_compress_no_crash(self, files):\n s=self.scheduler()\n module=CSVLoader(files, index_col=False, header=None, scheduler=s)#, save_context=False)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 60000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_08_read_multi_csv_file_bz2_no_crash(self):\n files = [get_dataset_bz2('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_08_read_multi_csv_file_gz_no_crash(self):\n files = [get_dataset_gz('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skip(\"Too slow ...\")\n def test_08_read_multi_csv_file_lzma_no_crash(self):\n files = [get_dataset_lzma('smallfile')]*2\n return self._tst_08_read_multi_csv_file_compress_no_crash(files)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_09_read_multi_csv_file_with_crash(self):\n s=self.scheduler()\n tag = 't9'\n file_list = [get_dataset('bigfile'), get_dataset('bigfile')]\n module=CSVLoader(file_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 3)\n aio.run_gather(s.start(), sts)\n _close(module)\n s=self.scheduler(clean=True)\n module=CSVLoader(file_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n def _tst_10_read_multi_csv_file_compress_with_crash(self, file_list, tag):\n s=self.scheduler()\n module=CSVLoader(file_list, index_col=False, recovery_tag=tag, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n sts = sleep_then_stop(s, 4)\n aio.run_gather(s.start(), sts)\n _close(module)\n s=self.scheduler(clean=True)\n module=CSVLoader(file_list, recovery=True, recovery_tag=tag, index_col=False, header=None, scheduler=s)\n self.assertTrue(module.result is None)\n aio.run(s.start())\n self.assertEqual(len(module.result), 2000000)\n\n @skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_10_read_multi_csv_file_bz2_with_crash(self):\n file_list = [get_dataset_bz2('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_1')\n\n 
@skipIf(not IS_PERSISTENT, \"transient storage, test skipped\")\n def test_10_read_multi_csv_file_gzip_with_crash(self):\n file_list = [get_dataset_gz('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_2')\n\n @skip(\"Too slow ...\")\n def test_10_read_multi_csv_file_lzma_with_crash(self):\n file_list = [get_dataset_lzma('bigfile')]*2\n self._tst_10_read_multi_csv_file_compress_with_crash(file_list, 't10_3')\n\nif __name__ == '__main__':\n ProgressiveTest.main()\n"
] | [
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
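The crash-recovery tests in the row above simulate an interrupted download by serving the datasets from a child process (`_HttpSrv`) that can be terminated and restarted between scheduler runs. A hedged sketch of that serve-in-a-subprocess pattern using the same `RangeRequestHandler`; `serve_dir` and the example path are illustrative, not part of the test suite:

```python
# Sketch only: serve a directory with HTTP range support from a child process so it
# can be killed and restarted to mimic a crashed transfer.
import os
from http.server import HTTPServer
from multiprocessing import Process

from RangeHTTPServer import RangeRequestHandler

def serve_dir(directory, host="localhost", port=8000):
    os.chdir(directory)                    # RangeRequestHandler serves the current directory
    HTTPServer((host, port), RangeRequestHandler).serve_forever()

proc = Process(target=serve_dir, args=("/tmp/data",))  # illustrative path
proc.start()
# ... run a partial load, proc.terminate(), then start a fresh Process to recover ...
```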
EstelleHuang666/OpenNMT-py | [
"f7a239086d0db156535f3f5db9ed7060291485e8"
] | [
"onmt/inputters/inputter.py"
] | [
"# -*- coding: utf-8 -*-\nimport glob\nimport os\nimport codecs\nimport math\n\nfrom collections import Counter, defaultdict\nfrom itertools import chain, cycle\n\nimport torch\nimport torchtext.data\nfrom torchtext.data import Field, RawField\nfrom torchtext.vocab import Vocab\nfrom torchtext.data.utils import RandomShuffler\n\nfrom onmt.inputters.text_dataset import text_fields, TextMultiField\nfrom onmt.inputters.image_dataset import image_fields\nfrom onmt.inputters.audio_dataset import audio_fields\nfrom onmt.utils.logging import logger\n# backwards compatibility\nfrom onmt.inputters.text_dataset import _feature_tokenize # noqa: F401\nfrom onmt.inputters.image_dataset import ( # noqa: F401\n batch_img as make_img)\n\nimport gc\n\n\n# monkey-patch to make torchtext Vocab's pickleable\ndef _getstate(self):\n return dict(self.__dict__, stoi=dict(self.stoi))\n\n\ndef _setstate(self, state):\n self.__dict__.update(state)\n self.stoi = defaultdict(lambda: 0, self.stoi)\n\n\nVocab.__getstate__ = _getstate\nVocab.__setstate__ = _setstate\n\n\ndef make_src(data, vocab):\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n\ndef make_tgt(data, vocab):\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n\ndef get_fields(\n src_data_type,\n n_src_feats,\n n_tgt_feats,\n pad='<blank>',\n bos='<s>',\n eos='</s>',\n dynamic_dict=False,\n src_truncate=None,\n tgt_truncate=None\n):\n \"\"\"\n Args:\n src_data_type: type of the source input. Options are [text|img|audio].\n n_src_feats (int): the number of source features (not counting tokens)\n to create a :class:`torchtext.data.Field` for. (If\n ``src_data_type==\"text\"``, these fields are stored together\n as a ``TextMultiField``).\n n_tgt_feats (int): See above.\n pad (str): Special pad symbol. Used on src and tgt side.\n bos (str): Special beginning of sequence symbol. Only relevant\n for tgt.\n eos (str): Special end of sequence symbol. Only relevant\n for tgt.\n dynamic_dict (bool): Whether or not to include source map and\n alignment fields.\n src_truncate: Cut off src sequences beyond this (passed to\n ``src_data_type``'s data reader - see there for more details).\n tgt_truncate: Cut off tgt sequences beyond this (passed to\n :class:`TextDataReader` - see there for more details).\n\n Returns:\n A dict mapping names to fields. 
These names need to match\n the dataset example attributes.\n \"\"\"\n\n assert src_data_type in ['text', 'img', 'audio'], \\\n \"Data type not implemented\"\n assert not dynamic_dict or src_data_type == 'text', \\\n 'it is not possible to use dynamic_dict with non-text input'\n fields = {}\n\n fields_getters = {\"text\": text_fields,\n \"img\": image_fields,\n \"audio\": audio_fields}\n\n src_field_kwargs = {\"n_feats\": n_src_feats,\n \"include_lengths\": True,\n \"pad\": pad, \"bos\": None, \"eos\": None,\n \"truncate\": src_truncate,\n \"base_name\": \"src\"}\n fields[\"src\"] = fields_getters[src_data_type](**src_field_kwargs)\n\n tgt_field_kwargs = {\"n_feats\": n_tgt_feats,\n \"include_lengths\": False,\n \"pad\": pad, \"bos\": bos, \"eos\": eos,\n \"truncate\": tgt_truncate,\n \"base_name\": \"tgt\"}\n fields[\"tgt\"] = fields_getters[\"text\"](**tgt_field_kwargs)\n\n indices = Field(use_vocab=False, dtype=torch.long, sequential=False)\n fields[\"indices\"] = indices\n\n if dynamic_dict:\n src_map = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n fields[\"src_map\"] = src_map\n\n src_ex_vocab = RawField()\n fields[\"src_ex_vocab\"] = src_ex_vocab\n\n align = Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n fields[\"alignment\"] = align\n\n return fields\n\n\ndef load_old_vocab(vocab, data_type=\"text\", dynamic_dict=False):\n \"\"\"Update a legacy vocab/field format.\n\n Args:\n vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the\n format formerly saved in *.vocab.pt files. Or, text data\n not using a :class:`TextMultiField`.\n data_type (str): text, img, or audio\n dynamic_dict (bool): Used for copy attention.\n\n Returns:\n a dictionary whose keys are the field names and whose values Fields.\n \"\"\"\n\n if _old_style_vocab(vocab):\n # List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]\n # -> dict[str, Field]\n vocab = dict(vocab)\n n_src_features = sum('src_feat_' in k for k in vocab)\n n_tgt_features = sum('tgt_feat_' in k for k in vocab)\n fields = get_fields(\n data_type, n_src_features, n_tgt_features,\n dynamic_dict=dynamic_dict)\n for n, f in fields.items():\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(n, f)]\n for sub_n, sub_f in f_iter:\n if sub_n in vocab:\n sub_f.vocab = vocab[sub_n]\n return fields\n\n if _old_style_field_list(vocab): # upgrade to multifield\n # Dict[str, List[Tuple[str, Field]]]\n # doesn't change structure - don't return early.\n fields = vocab\n for base_name, vals in fields.items():\n if ((base_name == 'src' and data_type == 'text') or\n base_name == 'tgt'):\n assert not isinstance(vals[0][1], TextMultiField)\n fields[base_name] = [(base_name, TextMultiField(\n vals[0][0], vals[0][1], vals[1:]))]\n\n if _old_style_nesting(vocab):\n # Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]\n # -> dict[str, Field]\n fields = dict(list(chain.from_iterable(vocab.values())))\n\n return fields\n\n\ndef _old_style_vocab(vocab):\n \"\"\"Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).\n\n Args:\n vocab: some object loaded from a *.vocab.pt file\n\n Returns:\n Whether ``vocab`` is a list of pairs where the second object\n is a :class:`torchtext.vocab.Vocab` object.\n\n This exists because previously only the vocab objects from the fields\n were saved directly, not the fields themselves, and the fields needed to\n be reconstructed at training and translation time.\n \"\"\"\n\n return isinstance(vocab, list) and 
\\\n any(isinstance(v[1], Vocab) for v in vocab)\n\n\ndef _old_style_nesting(vocab):\n \"\"\"Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``).\"\"\"\n return isinstance(vocab, dict) and \\\n any(isinstance(v, list) for v in vocab.values())\n\n\ndef _old_style_field_list(vocab):\n \"\"\"Detect old-style text fields.\n\n Not old style vocab, old nesting, and text-type fields not using\n ``TextMultiField``.\n\n Args:\n vocab: some object loaded from a *.vocab.pt file\n\n Returns:\n Whether ``vocab`` is not an :func:`_old_style_vocab` and not\n a :class:`TextMultiField` (using an old-style text representation).\n \"\"\"\n\n # if tgt isn't using TextMultiField, then no text field is.\n return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \\\n (not isinstance(vocab['tgt'][0][1], TextMultiField))\n\n\ndef old_style_vocab(vocab):\n \"\"\"The vocab/fields need updated.\"\"\"\n return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \\\n _old_style_nesting(vocab)\n\n\ndef filter_example(ex, use_src_len=True, use_tgt_len=True,\n min_src_len=1, max_src_len=float('inf'),\n min_tgt_len=1, max_tgt_len=float('inf')):\n \"\"\"Return whether an example is an acceptable length.\n\n If used with a dataset as ``filter_pred``, use :func:`partial()`\n for all keyword arguments.\n\n Args:\n ex (torchtext.data.Example): An object with a ``src`` and ``tgt``\n property.\n use_src_len (bool): Filter based on the length of ``ex.src``.\n use_tgt_len (bool): Similar to above.\n min_src_len (int): A non-negative minimally acceptable length\n (examples of exactly this length will be included).\n min_tgt_len (int): Similar to above.\n max_src_len (int or float): A non-negative (possibly infinite)\n maximally acceptable length (examples of exactly this length\n will be included).\n max_tgt_len (int or float): Similar to above.\n \"\"\"\n\n src_len = len(ex.src[0])\n tgt_len = len(ex.tgt[0])\n return (not use_src_len or min_src_len <= src_len <= max_src_len) and \\\n (not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)\n\n\ndef _pad_vocab_to_multiple(vocab, multiple):\n vocab_size = len(vocab)\n if vocab_size % multiple == 0:\n return\n target_size = int(math.ceil(vocab_size / multiple)) * multiple\n padding_tokens = [\n \"averyunlikelytoken%d\" % i for i in range(target_size - vocab_size)]\n vocab.extend(Vocab(Counter(), specials=padding_tokens))\n return vocab\n\n\ndef _build_field_vocab(field, counter, size_multiple=1, **kwargs):\n # this is basically copy-pasted from torchtext.\n all_specials = [\n field.unk_token, field.pad_token, field.init_token, field.eos_token\n ]\n specials = [tok for tok in all_specials if tok is not None]\n field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)\n if size_multiple > 1:\n _pad_vocab_to_multiple(field.vocab, size_multiple)\n\n\ndef _load_vocab(vocab_path, name, counters, min_freq):\n # counters changes in place\n vocab = _read_vocab_file(vocab_path, name)\n vocab_size = len(vocab)\n logger.info('Loaded %s vocab has %d tokens.' 
% (name, vocab_size))\n for i, token in enumerate(vocab):\n # keep the order of tokens specified in the vocab file by\n # adding them to the counter with decreasing counting values\n counters[name][token] = vocab_size - i + min_freq\n return vocab, vocab_size\n\n\ndef _build_fv_from_multifield(multifield, counters, build_fv_args,\n size_multiple=1):\n for name, field in multifield:\n _build_field_vocab(\n field,\n counters[name],\n size_multiple=size_multiple,\n **build_fv_args[name])\n logger.info(\" * %s vocab size: %d.\" % (name, len(field.vocab)))\n\n\ndef _build_fields_vocab(fields, counters, data_type, share_vocab,\n vocab_size_multiple,\n src_vocab_size, src_words_min_frequency,\n tgt_vocab_size, tgt_words_min_frequency):\n build_fv_args = defaultdict(dict)\n build_fv_args[\"src\"] = dict(\n max_size=src_vocab_size, min_freq=src_words_min_frequency)\n build_fv_args[\"tgt\"] = dict(\n max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)\n tgt_multifield = fields[\"tgt\"]\n _build_fv_from_multifield(\n tgt_multifield,\n counters,\n build_fv_args,\n size_multiple=vocab_size_multiple if not share_vocab else 1)\n if data_type == 'text':\n src_multifield = fields[\"src\"]\n _build_fv_from_multifield(\n src_multifield,\n counters,\n build_fv_args,\n size_multiple=vocab_size_multiple if not share_vocab else 1)\n if share_vocab:\n # `tgt_vocab_size` is ignored when sharing vocabularies\n logger.info(\" * merging src and tgt vocab...\")\n src_field = src_multifield.base_field\n tgt_field = tgt_multifield.base_field\n _merge_field_vocabs(\n src_field, tgt_field, vocab_size=src_vocab_size,\n min_freq=src_words_min_frequency,\n vocab_size_multiple=vocab_size_multiple)\n logger.info(\" * merged vocab size: %d.\" % len(src_field.vocab))\n\n return fields\n\n\ndef build_vocab(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,\n vocab_size_multiple=1):\n \"\"\"Build the fields for all data sides.\n\n Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict[str, Field]): fields to build vocab for.\n data_type (str): A supported data type string.\n share_vocab (bool): share source and target vocabulary?\n src_vocab_path (str): Path to src vocabulary file.\n src_vocab_size (int): size of the source vocabulary.\n src_words_min_frequency (int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path (str): Path to tgt vocabulary file.\n tgt_vocab_size (int): size of the target vocabulary.\n tgt_words_min_frequency (int): the minimum frequency needed to\n include a target word in the vocabulary.\n vocab_size_multiple (int): ensure that the vocabulary size is a\n multiple of this value.\n\n Returns:\n Dict of Fields\n \"\"\"\n\n counters = defaultdict(Counter)\n\n if src_vocab_path:\n try:\n logger.info(\"Using existing vocabulary...\")\n vocab = torch.load(src_vocab_path)\n # return vocab to dump with standard name\n return vocab\n except torch.serialization.pickle.UnpicklingError:\n logger.info(\"Building vocab from text file...\")\n # empty train_dataset_files so that vocab is only loaded from\n # given paths in src_vocab_path, tgt_vocab_path\n train_dataset_files = []\n\n # Load vocabulary\n if src_vocab_path:\n src_vocab, src_vocab_size = _load_vocab(\n src_vocab_path, \"src\", counters,\n src_words_min_frequency)\n else:\n src_vocab = None\n\n if tgt_vocab_path:\n tgt_vocab, tgt_vocab_size = _load_vocab(\n tgt_vocab_path, 
\"tgt\", counters,\n tgt_words_min_frequency)\n else:\n tgt_vocab = None\n\n for i, path in enumerate(train_dataset_files):\n dataset = torch.load(path)\n logger.info(\" * reloading %s.\" % path)\n for ex in dataset.examples:\n for name, field in fields.items():\n try:\n f_iter = iter(field)\n except TypeError:\n f_iter = [(name, field)]\n all_data = [getattr(ex, name, None)]\n else:\n all_data = getattr(ex, name)\n for (sub_n, sub_f), fd in zip(\n f_iter, all_data):\n has_vocab = (sub_n == 'src' and src_vocab) or \\\n (sub_n == 'tgt' and tgt_vocab)\n if sub_f.sequential and not has_vocab:\n val = fd\n counters[sub_n].update(val)\n\n # Drop the none-using from memory but keep the last\n if i < len(train_dataset_files) - 1:\n dataset.examples = None\n gc.collect()\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n fields = _build_fields_vocab(\n fields, counters, data_type,\n share_vocab, vocab_size_multiple,\n src_vocab_size, src_words_min_frequency,\n tgt_vocab_size, tgt_words_min_frequency)\n\n return fields # is the return necessary?\n\n\ndef _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,\n vocab_size_multiple):\n # in the long run, shouldn't it be possible to do this by calling\n # build_vocab with both the src and tgt data?\n specials = [tgt_field.unk_token, tgt_field.pad_token,\n tgt_field.init_token, tgt_field.eos_token]\n merged = sum(\n [src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()\n )\n merged_vocab = Vocab(\n merged, specials=specials,\n max_size=vocab_size, min_freq=min_freq\n )\n if vocab_size_multiple > 1:\n _pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)\n src_field.vocab = merged_vocab\n tgt_field.vocab = merged_vocab\n assert len(src_field.vocab) == len(tgt_field.vocab)\n\n\ndef _read_vocab_file(vocab_path, tag):\n \"\"\"Loads a vocabulary from the given path.\n\n Args:\n vocab_path (str): Path to utf-8 text file containing vocabulary.\n Each token should be on a line by itself. 
Tokens must not\n contain whitespace (else only before the whitespace\n is considered).\n tag (str): Used for logging which vocab is being read.\n \"\"\"\n\n logger.info(\"Loading {} vocabulary from {}\".format(tag, vocab_path))\n\n if not os.path.exists(vocab_path):\n raise RuntimeError(\n \"{} vocabulary not found at {}\".format(tag, vocab_path))\n else:\n with codecs.open(vocab_path, 'r', 'utf-8') as f:\n return [line.strip().split()[0] for line in f if line.strip()]\n\n\ndef batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):\n \"\"\"Yield elements from data in chunks of batch_size, where each chunk size\n is a multiple of batch_size_multiple.\n\n This is an extended version of torchtext.data.batch.\n \"\"\"\n if batch_size_fn is None:\n def batch_size_fn(new, count, sofar):\n return count\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far >= batch_size:\n overflowed = 0\n if size_so_far > batch_size:\n overflowed += 1\n if batch_size_multiple > 1:\n overflowed += (\n (len(minibatch) - overflowed) % batch_size_multiple)\n if overflowed == 0:\n yield minibatch\n minibatch, size_so_far = [], 0\n else:\n yield minibatch[:-overflowed]\n minibatch = minibatch[-overflowed:]\n size_so_far = 0\n for i, ex in enumerate(minibatch):\n size_so_far = batch_size_fn(ex, i + 1, size_so_far)\n if minibatch:\n yield minibatch\n\n\ndef _pool(data, batch_size, batch_size_fn, batch_size_multiple,\n sort_key, random_shuffler, pool_factor):\n for p in torchtext.data.batch(\n data, batch_size * pool_factor,\n batch_size_fn=batch_size_fn):\n p_batch = list(batch_iter(\n sorted(p, key=sort_key),\n batch_size,\n batch_size_fn=batch_size_fn,\n batch_size_multiple=batch_size_multiple))\n for b in random_shuffler(p_batch):\n yield b\n\n\nclass OrderedIterator(torchtext.data.Iterator):\n\n def __init__(self,\n dataset,\n batch_size,\n pool_factor=1,\n batch_size_multiple=1,\n yield_raw_example=False,\n **kwargs):\n super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)\n self.batch_size_multiple = batch_size_multiple\n self.yield_raw_example = yield_raw_example\n self.dataset = dataset\n self.pool_factor = pool_factor\n\n def create_batches(self):\n if self.train:\n if self.yield_raw_example:\n self.batches = batch_iter(\n self.data(),\n 1,\n batch_size_fn=None,\n batch_size_multiple=1)\n else:\n self.batches = _pool(\n self.data(),\n self.batch_size,\n self.batch_size_fn,\n self.batch_size_multiple,\n self.sort_key,\n self.random_shuffler,\n self.pool_factor)\n else:\n self.batches = []\n for b in batch_iter(\n self.data(),\n self.batch_size,\n batch_size_fn=self.batch_size_fn,\n batch_size_multiple=self.batch_size_multiple):\n self.batches.append(sorted(b, key=self.sort_key))\n\n def __iter__(self):\n \"\"\"\n Extended version of the definition in torchtext.data.Iterator.\n Added yield_raw_example behaviour to yield a torchtext.data.Example\n instead of a torchtext.data.Batch object.\n \"\"\"\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n if self.sort_within_batch:\n # NOTE: `rnn.pack_padded_sequence` requires that a\n # minibatch be sorted by decreasing order, which\n # requires reversing relative to typical sort keys\n if self.sort:\n minibatch.reverse()\n else:\n 
minibatch.sort(key=self.sort_key, reverse=True)\n if self.yield_raw_example:\n yield minibatch[0]\n else:\n yield torchtext.data.Batch(\n minibatch,\n self.dataset,\n self.device)\n if not self.repeat:\n return\n\n\nclass MultipleDatasetIterator(object):\n \"\"\"\n This takes a list of iterable objects (DatasetLazyIter) and their\n respective weights, and yields a batch in the wanted proportions.\n \"\"\"\n def __init__(self,\n train_shards,\n fields,\n device,\n opt):\n self.index = -1\n self.iterables = []\n for shard in train_shards:\n self.iterables.append(\n build_dataset_iter(shard, fields, opt, multi=True))\n self.init_iterators = True\n self.weights = opt.data_weights\n self.batch_size = opt.batch_size\n self.batch_size_fn = max_tok_len \\\n if opt.batch_type == \"tokens\" else None\n self.batch_size_multiple = 8 if opt.model_dtype == \"fp16\" else 1\n self.device = device\n # Temporarily load one shard to retrieve sort_key for data_type\n temp_dataset = torch.load(self.iterables[0]._paths[0])\n self.sort_key = temp_dataset.sort_key\n self.random_shuffler = RandomShuffler()\n self.pool_factor = opt.pool_factor\n del temp_dataset\n\n def _iter_datasets(self):\n if self.init_iterators:\n self.iterators = [iter(iterable) for iterable in self.iterables]\n self.init_iterators = False\n for weight in self.weights:\n self.index = (self.index + 1) % len(self.iterators)\n for i in range(weight):\n yield self.iterators[self.index]\n\n def _iter_examples(self):\n for iterator in cycle(self._iter_datasets()):\n yield next(iterator)\n\n def __iter__(self):\n while True:\n for minibatch in _pool(\n self._iter_examples(),\n self.batch_size,\n self.batch_size_fn,\n self.batch_size_multiple,\n self.sort_key,\n self.random_shuffler,\n self.pool_factor):\n minibatch = sorted(minibatch, key=self.sort_key, reverse=True)\n yield torchtext.data.Batch(minibatch,\n self.iterables[0].dataset,\n self.device)\n\n\nclass DatasetLazyIter(object):\n \"\"\"Yield data from sharded dataset files.\n\n Args:\n dataset_paths: a list containing the locations of dataset files.\n fields (dict[str, Field]): fields dict for the\n datasets.\n batch_size (int): batch size.\n batch_size_fn: custom batch process function.\n device: See :class:`OrderedIterator` ``device``.\n is_train (bool): train or valid?\n \"\"\"\n\n def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,\n batch_size_multiple, device, is_train, pool_factor,\n repeat=True, num_batches_multiple=1, yield_raw_example=False):\n self._paths = dataset_paths\n self.fields = fields\n self.batch_size = batch_size\n self.batch_size_fn = batch_size_fn\n self.batch_size_multiple = batch_size_multiple\n self.device = device\n self.is_train = is_train\n self.repeat = repeat\n self.num_batches_multiple = num_batches_multiple\n self.yield_raw_example = yield_raw_example\n self.pool_factor = pool_factor\n\n def _iter_dataset(self, path):\n logger.info('Loading dataset from %s' % path)\n cur_dataset = torch.load(path)\n logger.info('number of examples: %d' % len(cur_dataset))\n cur_dataset.fields = self.fields\n cur_iter = OrderedIterator(\n dataset=cur_dataset,\n batch_size=self.batch_size,\n pool_factor=self.pool_factor,\n batch_size_multiple=self.batch_size_multiple,\n batch_size_fn=self.batch_size_fn,\n device=self.device,\n train=self.is_train,\n sort=False,\n sort_within_batch=True,\n repeat=False,\n yield_raw_example=self.yield_raw_example\n )\n for batch in cur_iter:\n self.dataset = cur_iter.dataset\n yield batch\n\n # NOTE: This is causing some issues 
for consumer/producer,\n # as we may still have some of those examples in some queue\n # cur_dataset.examples = None\n # gc.collect()\n # del cur_dataset\n # gc.collect()\n\n def __iter__(self):\n num_batches = 0\n paths = self._paths\n if self.is_train and self.repeat:\n # Cycle through the shards indefinitely.\n paths = cycle(paths)\n for path in paths:\n for batch in self._iter_dataset(path):\n yield batch\n num_batches += 1\n if self.is_train and not self.repeat and \\\n num_batches % self.num_batches_multiple != 0:\n # When the dataset is not repeated, we might need to ensure that\n # the number of returned batches is the multiple of a given value.\n # This is important for multi GPU training to ensure that all\n # workers have the same number of batches to process.\n for path in paths:\n for batch in self._iter_dataset(path):\n yield batch\n num_batches += 1\n if num_batches % self.num_batches_multiple == 0:\n return\n\n\ndef max_tok_len(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch, max_tgt_in_batch # this is a hack\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n # Src: [<bos> w1 ... wN <eos>]\n max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)\n # Tgt: [w1 ... wM <eos>]\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\ndef build_dataset_iter(corpus_type, fields, opt, is_train=True, multi=False):\n \"\"\"\n This returns user-defined train/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.\n \"\"\"\n dataset_paths = list(sorted(\n glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt')))\n if not dataset_paths:\n if is_train:\n raise ValueError('Training data %s not found' % opt.data)\n else:\n return None\n if multi:\n batch_size = 1\n batch_fn = None\n batch_size_multiple = 1\n else:\n batch_size = opt.batch_size if is_train else opt.valid_batch_size\n batch_fn = max_tok_len \\\n if is_train and opt.batch_type == \"tokens\" else None\n batch_size_multiple = 8 if opt.model_dtype == \"fp16\" else 1\n\n device = \"cuda\" if opt.gpu_ranks else \"cpu\"\n\n return DatasetLazyIter(\n dataset_paths,\n fields,\n batch_size,\n batch_fn,\n batch_size_multiple,\n device,\n is_train,\n opt.pool_factor,\n repeat=not opt.single_pass,\n num_batches_multiple=max(opt.accum_count) * opt.world_size,\n yield_raw_example=multi)\n\n\ndef build_dataset_iter_multiple(train_shards, fields, opt):\n return MultipleDatasetIterator(\n train_shards, fields, \"cuda\" if opt.gpu_ranks else \"cpu\", opt)\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lbouma/Cyclopath | [
"d09d927a1e6f9e07924007fd39e8e807cd9c0f8c"
] | [
"pyserver/bin/rpy2/robjects/tests/testNumpyConversions.py"
] | [
"import unittest\nimport rpy2.robjects as robjects\nr = robjects.r\n\ntry:\n import numpy\n has_numpy = True\n import rpy2.robjects.numpy2ri as rpyn\nexcept:\n has_numpy = False\n\n\nclass MissingNumpyDummyTestCase(unittest.TestCase):\n def testMissingNumpy(self):\n self.assertTrue(False) # numpy is missing. No tests.\n\nclass NumpyConversionsTestCase(unittest.TestCase):\n\n def setUp(self):\n robjects.conversion.py2ri = rpyn.numpy2ri\n\n def tearDown(self):\n robjects.conversion.py2ri = robjects.default_py2ri\n\n def checkHomogeneous(self, obj, mode, storage_mode):\n converted = robjects.conversion.py2ri(obj)\n self.assertEquals(r[\"mode\"](converted)[0], mode)\n self.assertEquals(r[\"storage.mode\"](converted)[0], storage_mode)\n self.assertEquals(list(obj), list(converted))\n self.assertTrue(r[\"is.array\"](converted)[0])\n\n def testVectorBoolean(self):\n b = numpy.array([True, False, True], dtype=numpy.bool_)\n self.checkHomogeneous(b, \"logical\", \"logical\")\n\n def testVectorInteger(self):\n i = numpy.array([1, 2, 3], dtype=\"i\")\n self.checkHomogeneous(i, \"numeric\", \"integer\")\n\n def testVectorFloat(self):\n f = numpy.array([1, 2, 3], dtype=\"f\")\n self.checkHomogeneous(f, \"numeric\", \"double\")\n\n def testVectorComplex(self):\n c = numpy.array([1j, 2j, 3j], dtype=numpy.complex_)\n self.checkHomogeneous(c, \"complex\", \"complex\")\n\n def testVectorCharacter(self):\n s = numpy.array([\"a\", \"b\", \"c\"], dtype=\"S\")\n self.checkHomogeneous(s, \"character\", \"character\")\n\n def testVectorUnicodeCharacter(self):\n u = numpy.array([u\"a\", u\"b\", u\"c\"], dtype=\"U\")\n self.checkHomogeneous(u, \"character\", \"character\")\n\n def testArray(self):\n\n i2d = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=\"i\")\n i2d_r = robjects.conversion.py2ri(i2d)\n\n self.assertEquals(r[\"storage.mode\"](i2d_r)[0], \"integer\")\n self.assertEquals(tuple(r[\"dim\"](i2d_r)), (2, 3))\n\n # Make sure we got the row/column swap right:\n self.assertEquals(i2d_r.rx(1, 2)[0], i2d[0, 1])\n\n f3d = numpy.arange(24, dtype=\"f\").reshape((2, 3, 4))\n f3d_r = robjects.conversion.py2ri(f3d)\n\n self.assertEquals(r[\"storage.mode\"](f3d_r)[0], \"double\")\n self.assertEquals(tuple(r[\"dim\"](f3d_r)), (2, 3, 4))\n\n # Make sure we got the row/column swap right:\n self.assertEquals(f3d_r.rx(1, 2, 3)[0], f3d[0, 1, 2])\n\n def testObjectArray(self):\n o = numpy.array([1, \"a\", 3.2], dtype=numpy.object_)\n o_r = robjects.conversion.py2ri(o)\n self.assertEquals(r[\"mode\"](o_r)[0], \"list\")\n self.assertEquals(r[\"[[\"](o_r, 1)[0], 1)\n self.assertEquals(r[\"[[\"](o_r, 2)[0], \"a\")\n self.assertEquals(r[\"[[\"](o_r, 3)[0], 3.2)\n\n def testRecordArray(self):\n rec = numpy.array([(1, 2.3), (2, -0.7), (3, 12.1)],\n dtype=[(\"count\", \"i\"), (\"value\", numpy.double)])\n rec_r = robjects.conversion.py2ri(rec)\n self.assertTrue(r[\"is.data.frame\"](rec_r)[0])\n self.assertEquals(tuple(r[\"names\"](rec_r)), (\"count\", \"value\"))\n count_r = r[\"$\"](rec_r, \"count\")\n value_r = r[\"$\"](rec_r, \"value\")\n self.assertEquals(r[\"storage.mode\"](count_r)[0], \"integer\")\n self.assertEquals(r[\"storage.mode\"](value_r)[0], \"double\")\n self.assertEquals(count_r[1], 2)\n self.assertEquals(value_r[2], 12.1)\n\n def testBadArray(self):\n u = numpy.array([1, 2, 3], dtype=numpy.uint32)\n self.assertRaises(ValueError, robjects.conversion.py2ri, u)\n\n def testAssignNumpyObject(self):\n x = numpy.arange(-10., 10., 1)\n env = robjects.Environment()\n env[\"x\"] = x\n self.assertEquals(1, len(env))\n 
self.assertTrue(isinstance(env[\"x\"], robjects.Array))\n\ndef suite():\n if has_numpy:\n return unittest.TestLoader().loadTestsFromTestCase(NumpyConversionsTestCase)\n else:\n return unittest.TestLoader().loadTestsFromTestCase(MissingNumpyDummyTestCase)\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sivaprakasaman/Python_Coding_Toolbox | [
"8bbcfb43eed49f49de7321e330f4b3943586038a"
] | [
"signal_processing/timbral_inspection/resynthesize.py"
] | [
"#Andrew Sivaprakasam\n#Purdue University\n#Email: [email protected]\n\n#DESCRIPTION: Code written to isolate the magnitudes of harmonics of a\n#given f_0 for a given audiofile/stimulus.\n\n#Additional Dependencies: scipy, numpy, matplotlib\n# pip3 install scipy\n# pip3 install numpy\n# pip3 install matplotlib\n\n#May require ffmpeg on Ubuntu/Linux as well\n# sudo apt-get install ffmpeg\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\n\ndef extract_harmonics(fname, fs = 44100, f_0 = 440, n_harms = 3):\n fs, x = wavfile.read(fname)\n #x = np.array(aud[0])\n t_vect = np.arange(0,len(x))/fs\n f_vect = np.arange(1,n_harms+1)*f_0;\n #plt.plot(t_vect,x)\n #output = get_spect(x, fs, DR = 120, BW = 100, xlim = [0,0.5], ylim = [0,5000], colormap = 'magma')\n\n ## TODO: Try applying dpss to this. Might result in more accurate\n ## magnitudes?\n\n freq_time = np.multiply(np.asmatrix(f_vect).T,np.asmatrix(t_vect))\n x_sin = np.multiply(np.asmatrix(x),np.sin(2*np.pi*freq_time))\n x_cos = np.multiply(np.asmatrix(x),np.cos(2*np.pi*freq_time))\n sin_sum = np.sum(x_sin,1);\n cos_sum = np.sum(x_cos,1);\n\n mags = np.sqrt(np.multiply(sin_sum,sin_sum) + np.multiply(cos_sum,cos_sum))\n mags = np.squeeze(np.asarray(mags))/np.max(mags)\n\n phase = np.arctan(np.divide(sin_sum,cos_sum));\n phase = np.squeeze(np.asarray(phase));\n #phase = [0];\n #plt.stem(f_vect,mags)\n\n return [f_vect, mags, phase, x, fs]\n\nfrom signal_processing import pure_tone_complex, sound, magphase\nimport matplotlib.pyplot as plt\n#from playsound import playsound\n\ndef resynthesize(mags, fname = 'resynth.wav', fs_Hz = 44100, freq_Hz = [0], dur_sec = 1, phi = [0], scale = .75, tone_shift = 1, env_fxn = 1, fs = 44100, type = 'sin', play_write = True, plot = True):\n harmonics = len(mags)\n\n #This handling should be added to pure_tone_complex at some point\n if len(phi)<harmonics:\n phi = np.ones(harmonics)*phi;\n\n if len(freq_Hz) <harmonics:\n freq_Hz = np.arange(1,n_harms+1)*440;\n\n tone = pure_tone_complex(freq_Hz*tone_shift, fs, dur_sec, mags, phi, type)\n tone = tone[1]*env_fxn;\n tone = scale*tone/np.max(tone);\n\n t_vect = np.arange(0,len(tone))/fs_Hz;\n\n if plot:\n plt.figure()\n plt.plot(tone);\n plt.xlim([0,len(tone)])\n\n if play_write:\n sound(tone,fs_Hz,fname,1)\n\n return tone\n################################################################################\n\nimport numpy as np\n\ndef play_alma_mater(extract, freq_Hz, fname = 'alma_mater.wav', n_harms = 6, key = 1, tempo = 0.3, fxn = 'string', type = 'sin', short = True):\n shift_mat = [1.26/1.66, .85, .95, 1.00, 1.13, 1.26, 1.26, 1.32, 1.32, 1.32, 1, 1.13, 1.13, 1.26, 1.26/1.66, 1.26, 1.20, 1.26, 1.26, 1.13, 1.00, 1.13, 1.26, 1.26, 1.13, .85, .95, 1, .95, .85, 1.13, 1.26/1.66, 1.26/1.66, .85, .95, 1, 1.13, 1.26, 1.26, 1.26, 1.32, 1.32, 1, 1.13, 1.26, .85, .95, 1, .85, 1.26/1.66, 1, 1.26, 1.26/1.66, .85, 1.26, 1.13, 1, 1]\n dur_mat = [2, 1, 1, 1.5, .5, 1, 1, 1, .5, .5, 1, .5, .5, 1, 1, 1, 1, 2, 1, 1, 1.5, .5, 1, 1, 1, .5, .5, 1, .5, .5, 3, 1.5, .5, 1, 1, 1.5, .5, 1, .5, .5, 1, 1, 1, 1, 4, 1.5, .5, 1, 1, 1, 1, 1, 1, 1.5, .5, 1.5, .5, 3]\n scale_mat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ,1 , 1, 1, 1, 1]\n\n #Truncate by default, otherwise listen to music for a few extra seconds...\n if short:\n shift_mat = shift_mat[:6];\n dur_mat = dur_mat[:6];\n scale_mat = scale_mat[:6];\n\n fs = 44100;\n 
#Change tempo\n dur_mat = np.asarray(dur_mat)*tempo\n tone = [];\n\n for i in range(0,len(shift_mat)):\n\n t_vect = np.arange(0,dur_mat[i]*fs)/fs;\n\n if fxn == 'banjo':\n env_fxn = np.exp(-7*t_vect);\n elif fxn == 'string':\n env_fxn = (1+.25*np.sin(5*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);\n else:\n env_fxn = 1;\n\n tone_temp = resynthesize(extract[1], freq_Hz = key*freq_Hz, dur_sec = dur_mat[i], phi = extract[2], scale = scale_mat[i], tone_shift = shift_mat[i], env_fxn = env_fxn, type = type, play_write = False, plot = False)\n print(tone_temp)\n tone = np.concatenate((tone,tone_temp), axis = 0)\n\n sound(tone, fs, fname, 1)\n\n return [tone,fs];\n\n########################## IMPLEMENTATION #####################################\n\n# from signal_processing import pure_tone_complex, sound, magphase, get_spect\n# import matplotlib.pyplot as plt\n# from scipy.signal import spectrogram as sp\n# import numpy as np\n# ## TODO: Quantify Envelope, apply slepian sequences, verify magnitudes against DFT/PSD\n\n# #Can use the below line in Atom when running Hydrogen\n# #%matplotlib inline\n\n# harmonics = 7;\n# first = 0;\n# dur_sec = 1;\n# toPlay = np.array([0,1,2,3,4,5,6])\n# extract = extract_harmonics('instruments/violin_A4_normal.wav', fs = 44100, f_0 = 440, n_harms = harmonics);\n\n# fs_Hz = extract[4];\n# amp = extract[1][toPlay];\n# phase = extract[2][toPlay];\n# freq_Hz = extract[0][toPlay];\n\n# t_vect = np.arange(0,dur_sec*fs_Hz)/fs_Hz;\n# env_banj = np.exp(-9*t_vect);\n# env_string = (1+0.15*np.sin(6*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);\n\n# tone = resynthesize(amp, 'violin_all.wav', freq_Hz = freq_Hz, dur_sec = 1, phi = phase, scale = 1, tone_shift = 1, env_fxn = env_string, type = 'sin', play_write = True, plot = False)\n\n# sound(tone, fs_Hz)\n# get_spect(tone, fs_Hz, DR = 200, BW = 75, xlim = [0,1], ylim = [0,4000], colormap = 'cividis',title = 'Simulated Violin | All Harmonics');\n\n# #Play Alma Mater\n# alma_mater = play_alma_mater(extract, freq_Hz, key = 1, fxn = 'strings', type = 'sin')\n#\n# plt.figure()\n# plt.plot(np.arange(0,len(alma_mater[0]))/alma_mater[1],alma_mater[0]);\n# output = get_spect(alma_mater[0],alma_mater[1], DR = 300, BW = 200, xlim = [0.01,2], ylim = [0,5000])\n"
] | [
[
"scipy.io.wavfile.read",
"numpy.multiply",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.asmatrix",
"numpy.max",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.ones",
"numpy.exp",
"numpy.sum",
"numpy.divide",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
britt0508/ExplainedKinshipCorrect | [
"e0e255ff9531af1436bb9a9fe07256e72a0061f7"
] | [
"stylegan/pretrained_example.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\n\"\"\"Minimal script for generating an image using pre-trained StyleGAN generator.\"\"\"\n\nimport os\nimport pickle\nimport numpy as np\nimport PIL.Image\nimport dnnlib\nimport dnnlib.tflib as tflib\nimport config\n\n\ndef main():\n # Initialize TensorFlow.\n tflib.init_tf()\n\n # Load pre-trained network.\n url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl\n with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:\n _G, _D, Gs = pickle.load(f)\n # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.\n # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.\n # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.\n\n # Print network details.\n Gs.print_layers()\n\n # Pick latent vector.\n rnd = np.random.RandomState(5)\n latents = rnd.randn(1, Gs.input_shape[1])\n\n # Generate image.\n fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)\n\n # Save image.\n os.makedirs(config.result_dir, exist_ok=True)\n png_filename = os.path.join(config.result_dir, 'example.png')\n PIL.Image.fromarray(images[0], 'RGB').save(png_filename)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zommiommy/cache_decorator | [
"e7d71dd48890247838612533481d0b5a808c03ec"
] | [
"tests/test_npz.py"
] | [
"import numpy as np\nfrom time import sleep\nfrom shutil import rmtree\nfrom cache_decorator import Cache\nfrom .utils import standard_test_arrays\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_single(a):\n sleep(2)\n return np.array([1, 2, 3])\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_tuple(a):\n sleep(2)\n return np.array([1, 2, 3]), np.array([1, 2, 4])\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_list(a):\n sleep(2)\n return [np.array([1, 2, 3]), np.array([1, 2, 4])]\n\n@Cache(\n cache_path=\"{cache_dir}/{_hash}.npz\",\n cache_dir=\"./test_cache\",\n backup=False,\n)\ndef cached_function_dict(a):\n sleep(2)\n return {\"a\":np.array([1, 2, 3]), \"b\":np.array([1, 2, 4])}\n\ndef test_npz_single():\n standard_test_arrays(cached_function_single)\n rmtree(\"./test_cache\")\n\ndef test_npz_tuple():\n standard_test_arrays(cached_function_tuple)\n rmtree(\"./test_cache\")\n\ndef test_npz_list():\n standard_test_arrays(cached_function_list)\n rmtree(\"./test_cache\")\n\ndef test_npz_dict():\n standard_test_arrays(cached_function_dict)\n rmtree(\"./test_cache\")\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vios-s/RA_FA_Cardiac | [
"8af4b82b62b53e29e96084113a5d379774c11b12"
] | [
"dice_loss.py"
] | [
"import torch\r\nfrom torch.autograd import Function\r\n\r\n\r\nclass DiceCoeff(Function):\r\n \"\"\"Dice coeff for individual examples\"\"\"\r\n\r\n def forward(self, input, target):\r\n self.save_for_backward(input, target)\r\n eps = 0.0001\r\n self.inter = torch.dot(input.view(-1), target.view(-1))\r\n self.union = torch.sum(input) + torch.sum(target) + eps\r\n\r\n t = (2 * self.inter.float() + eps) / self.union.float()\r\n return t\r\n\r\n # This function has only a single output, so it gets only one gradient\r\n def backward(self, grad_output):\r\n\r\n input, target = self.saved_variables\r\n grad_input = grad_target = None\r\n\r\n if self.needs_input_grad[0]:\r\n grad_input = grad_output * 2 * (target * self.union - self.inter) \\\r\n / (self.union * self.union)\r\n if self.needs_input_grad[1]:\r\n grad_target = None\r\n\r\n return grad_input, grad_target\r\n\r\n\r\ndef dice_coeff(input, target, device):\r\n \"\"\"Dice coeff for batches\"\"\"\r\n if input.is_cuda:\r\n s = torch.FloatTensor(1).zero_()\r\n s = s.to(device)\r\n else:\r\n s = torch.FloatTensor(1).zero_()\r\n\r\n for i, c in enumerate(zip(input, target)):\r\n s = s + DiceCoeff().forward(c[0], c[1])\r\n\r\n return s / (i + 1)"
] | [
[
"torch.FloatTensor",
"torch.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
openforcefield/openff-recharge | [
"0ea3ef986e33c3ecf05924e64fb2e1872913b093"
] | [
"openff/recharge/esp/qcarchive.py"
] | [
"import json\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple\n\nimport numpy\nfrom openff.utilities import requires_package\nfrom pydantic import ValidationError\n\nfrom openff.recharge.esp import ESPSettings, PCMSettings\nfrom openff.recharge.esp.storage import MoleculeESPRecord\nfrom openff.recharge.grids import GridGenerator, GridSettings\nfrom openff.recharge.utilities.exceptions import RechargeException\nfrom openff.recharge.utilities.openeye import molecule_to_conformers\n\nif TYPE_CHECKING:\n import qcelemental.models\n import qcelemental.models.results\n import qcportal.models\n\nQCFractalResults = List[\n Tuple[\"qcelemental.models.Molecule\", \"qcportal.models.ResultRecord\"]\n]\nQCFractalKeywords = Dict[str, \"qcportal.models.KeywordSet\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MissingQCMoleculesError(RechargeException):\n \"\"\"An exception raised when an expected set of molecules are not present\n in a QC data set.\"\"\"\n\n def __init__(self, data_set_name: str, missing_smiles: Iterable[str]):\n\n smiles_string = \"\\n\".join(missing_smiles)\n\n super(MissingQCMoleculesError, self).__init__(\n f\"The {smiles_string} SMILES patterns were not found in the \"\n f\"{data_set_name} data set.\"\n )\n\n self.data_set_name = data_set_name\n self.missing_smiles = missing_smiles\n\n\nclass MissingQCResultsError(RechargeException):\n \"\"\"An exception raised when an expected set of results are not present\n in a QC data set.\"\"\"\n\n def __init__(self, data_set_name: str, missing_ids: Iterable[str]):\n\n id_string = \"\\n\".join(missing_ids)\n\n super(MissingQCResultsError, self).__init__(\n f\"The result records associated with the following molecule ids from the \"\n f\"{data_set_name} data set could not be retrieved from QCA: {id_string}\"\n )\n\n self.data_set_name = data_set_name\n self.missing_ids = missing_ids\n\n\nclass MissingQCWaveFunctionError(RechargeException):\n \"\"\"An exception raised when a result does not store the required information about\n a computed QM wavefunction.\"\"\"\n\n def __init__(self, result_id: str):\n\n super(MissingQCWaveFunctionError, self).__init__(\n f\"The result with id={result_id} does not store the required wavefunction.\"\n f\"Make sure to use at minimum the 'orbitals_and_eigenvalues' wavefunction \"\n f\"protocol when computing the data set.\"\n )\n self.result_id = result_id\n\n\nclass InvalidPCMKeywordError(RechargeException):\n \"\"\"An exception raised when the PCM settings found in the 'pcm__input' entry of\n an entries keywords cannot be safely parsed.\"\"\"\n\n def __init__(self, input_string: str):\n\n super(InvalidPCMKeywordError, self).__init__(\n f\"The PCM settings could not be safely parsed: {input_string}\"\n )\n\n\ndef _parse_pcm_input(input_string: str) -> PCMSettings:\n \"\"\"Attempts to parse a set of PCM settings from a PSI4 keyword string.\"\"\"\n\n # Convert the string to a JSON like string.\n value = input_string.replace(\" \", \"\").replace(\"=\", \":\").replace(\"{\", \":{\")\n value = re.sub(r\"(\\d*[a-z][a-z\\d]*)\", r'\"\\1\"', value)\n value = re.sub(r'([\"\\d}])\"', r'\\1,\"', value.replace(\"\\n\", \"\"))\n value = value.replace('\"true\"', \"true\")\n value = value.replace('\"false\"', \"false\")\n\n solvent_map = {\"H2O\": \"Water\"}\n radii_map = {\"BONDI\": \"Bondi\", \"UFF\": \"UFF\", \"ALLINGER\": \"Allinger\"}\n\n try:\n # Load the string into a dictionary.\n pcm_dict = json.loads(f\"{{{value}}}\")\n\n # Validate some of the settings which 
we do not store in the settings\n # object yet.\n assert pcm_dict[\"cavity\"][\"type\"].upper() == \"GEPOL\"\n assert pcm_dict[\"cavity\"][\"mode\"].upper() == \"IMPLICIT\"\n assert numpy.isclose(pcm_dict[\"cavity\"][\"minradius\"], 52.917721067)\n assert pcm_dict[\"units\"].upper() == \"ANGSTROM\"\n assert pcm_dict[\"codata\"] == 2010\n assert pcm_dict[\"medium\"][\"nonequilibrium\"] is False\n assert pcm_dict[\"medium\"][\"matrixsymm\"] is True\n assert numpy.isclose(pcm_dict[\"medium\"][\"diagonalscaling\"], 1.07)\n assert numpy.isclose(pcm_dict[\"medium\"][\"proberadius\"], 0.52917721067)\n assert numpy.isclose(pcm_dict[\"medium\"][\"correction\"], 0.0)\n\n # noinspection PyTypeChecker\n pcm_settings = PCMSettings(\n solver=pcm_dict[\"medium\"][\"solvertype\"].upper(),\n solvent=solvent_map[pcm_dict[\"medium\"][\"solvent\"].upper()],\n radii_model=radii_map[pcm_dict[\"cavity\"][\"radiiset\"].upper()],\n radii_scaling=pcm_dict[\"cavity\"][\"scaling\"],\n cavity_area=pcm_dict[\"cavity\"][\"area\"],\n )\n\n except (AssertionError, ValidationError):\n raise InvalidPCMKeywordError(input_string)\n except Exception as e:\n raise e\n\n return pcm_settings\n\n\ndef _compare_pcm_settings(settings_a: PCMSettings, settings_b: PCMSettings) -> bool:\n \"\"\"Compares if two PCM settings are identical.\"\"\"\n\n for field in PCMSettings.__fields__:\n\n value_a = getattr(settings_a, field)\n value_b = getattr(settings_b, field)\n\n if isinstance(value_a, float) and not numpy.isclose(value_a, value_b):\n return False\n elif not isinstance(value_a, float) and value_a != value_b:\n return False\n\n return True\n\n\n@requires_package(\"cmiles\")\n@requires_package(\"qcportal\")\ndef retrieve_qcfractal_results(\n data_set_name: str,\n subset: Optional[Iterable[str]],\n method: str,\n basis: str,\n pcm_settings: Optional[PCMSettings],\n qcfractal_address: Optional[str] = None,\n error_on_missing: bool = True,\n) -> Tuple[QCFractalResults, QCFractalKeywords]:\n \"\"\"Attempt to retrieve the results for the requested data set from a QCFractal\n server.\n\n Parameters\n ----------\n data_set_name\n The name of the data set to retrieve the results from.\n subset\n The SMILES representations of the subset of molecules to retrieve from the data\n set.\n method\n The method which the results should have been computed using.\n basis\n The basis which the results should have been computed using.\n pcm_settings\n The PCM settings which the results should have been computed using.\n Use ``None`` to specify that PCM should not have been enabled.\n qcfractal_address\n An optional address to the QCFractal server instance which stores the data set.\n error_on_missing\n Whether to raise an exception when either a molecule listed in the subset\n cannot be found in the data set, or when a result record could not be found\n for one of the requested molecule in the data set.\n\n Returns\n -------\n A list of the retrieved results (alongside their corresponding molecule records)\n and a dictionary of the keywords referenced by the results entries.\n \"\"\"\n\n import cmiles\n import qcportal\n from qcelemental.models import Molecule as QCMolecule\n\n # Map the input smiles to uniform isomeric and explicit hydrogen smiles.\n subset = (\n None\n if subset is None\n else [\n cmiles.get_molecule_ids(smiles, \"openeye\", strict=False)[\n \"canonical_isomeric_explicit_hydrogen_smiles\"\n ]\n for smiles in subset\n ]\n )\n\n # Connect to the default QCA server and retrieve the data set of interest.\n if qcfractal_address is None:\n 
client = qcportal.FractalClient()\n else:\n client = qcportal.FractalClient(address=qcfractal_address)\n\n # noinspection PyTypeChecker\n collection: qcportal.collections.Dataset = client.get_collection(\n \"Dataset\", data_set_name\n )\n\n # Retrieve the ids of the molecules of interest.\n molecules = {}\n found_smiles = set()\n\n for _, molecule_row in collection.get_molecules().iterrows():\n\n qc_molecule: QCMolecule = molecule_row[\"molecule\"]\n\n # Manually map the molecule to a dictionary as CMILES expects a flat geometry\n # array.\n qc_molecule_dict = {\n \"symbols\": qc_molecule.symbols,\n \"connectivity\": qc_molecule.connectivity,\n \"geometry\": qc_molecule.geometry.flatten(),\n \"molecular_charge\": qc_molecule.molecular_charge,\n \"molecular_multiplicity\": qc_molecule.molecular_multiplicity,\n }\n\n cmiles_ids = cmiles.get_molecule_ids(qc_molecule_dict, toolkit=\"openeye\")\n molecule_smiles = cmiles_ids[\"canonical_isomeric_explicit_hydrogen_smiles\"]\n\n if subset is not None and molecule_smiles not in subset:\n continue\n\n molecules[qc_molecule.id] = qc_molecule\n found_smiles.add(molecule_smiles)\n\n molecule_ids = sorted(molecules)\n\n # Make sure the data set contains the requested subset.\n missing_smiles = (set() if subset is None else {*subset}) - found_smiles\n\n if len(missing_smiles) > 0:\n\n if error_on_missing:\n raise MissingQCMoleculesError(data_set_name, missing_smiles)\n else:\n logger.warning(\n f\"The following smiles count not be found in the {data_set_name} \"\n f\"data set: {missing_smiles}\"\n )\n\n # Retrieve the data sets results records\n results = []\n\n paginating = True\n page_index = 0\n\n while paginating:\n\n page_results = client.query_results(\n molecule=molecule_ids,\n method=method,\n basis=basis,\n limit=client.server_info[\"query_limit\"],\n skip=page_index,\n )\n\n results.extend(page_results)\n\n paginating = len(page_results) > 0\n page_index += client.server_info[\"query_limit\"]\n\n # Filter based on the PCM settings.\n keyword_ids = list({result.keywords for result in results})\n keywords: Dict[\n str,\n ] = {keyword_id: client.query_keywords(keyword_id)[0] for keyword_id in keyword_ids}\n\n if pcm_settings is None:\n matching_keywords = [\n keyword_id\n for keyword_id, keyword in keywords.items()\n if \"pcm\" not in keyword.values or keyword.values[\"pcm\"] is False\n ]\n else:\n matching_keywords = [\n keyword_id\n for keyword_id, keyword in keywords.items()\n if \"pcm\" in keyword.values\n and keyword.values[\"pcm\"] is True\n and \"pcm__input\" in keyword.values\n and _compare_pcm_settings(\n pcm_settings, _parse_pcm_input(keyword.values[\"pcm__input\"])\n )\n ]\n\n results = list(\n filter(lambda x: x.keywords is None or x.keywords in matching_keywords, results)\n )\n\n # Make sure none of the records are missing.\n result_ids = {result.molecule for result in results}\n\n missing_result_ids = {*molecule_ids} - {*result_ids}\n\n if len(missing_result_ids) > 0:\n\n if error_on_missing:\n raise MissingQCResultsError(data_set_name, missing_result_ids)\n else:\n logger.warning(\n f\"Result records could not be found for the following molecules in the \"\n f\"{data_set_name}: {missing_result_ids}\"\n )\n\n return (\n [(molecules[result.molecule], result) for result in results],\n {keyword_id: keywords[keyword_id] for keyword_id in matching_keywords},\n )\n\n\ndef reconstruct_density(\n wavefunction: \"qcelemental.models.results.WavefunctionProperties\", n_alpha: int\n) -> numpy.ndarray:\n \"\"\"Reconstructs a density matrix from 
a QCFractal wavefunction, making sure to\n order the entries in the ordering that psi4 expects (e.g. spherical, cartesian).\n\n Parameters\n ----------\n wavefunction\n The wavefunction return by QCFractal.\n n_alpha\n The number of alpha electrons in the computation.\n\n Returns\n -------\n The reconstructed density.\n \"\"\"\n\n # Reconstruct the density in CCA order\n orbitals = getattr(wavefunction, wavefunction.orbitals_a)\n density = numpy.dot(orbitals[:, :n_alpha], orbitals[:, :n_alpha].T)\n\n # Re-order the density matrix to match the ordering expected by psi4.\n angular_momenta = {\n angular_momentum\n for atom in wavefunction.basis.atom_map\n for shell in wavefunction.basis.center_data[atom].electron_shells\n for angular_momentum in shell.angular_momentum\n }\n\n spherical_maps = {\n L: numpy.array(\n list(range(L * 2 - 1, 0, -2)) + [0] + list(range(2, L * 2 + 1, 2))\n )\n for L in angular_momenta\n }\n\n # Build a flat index that we can transform the AO quantities\n ao_map = []\n counter = 0\n\n for atom in wavefunction.basis.atom_map:\n\n center = wavefunction.basis.center_data[atom]\n for shell in center.electron_shells:\n\n if shell.harmonic_type == \"cartesian\":\n ao_map.append(numpy.arange(counter, counter + shell.nfunctions()))\n\n else:\n smap = spherical_maps[shell.angular_momentum[0]]\n ao_map.append(smap + counter)\n\n counter += shell.nfunctions()\n\n ao_map = numpy.hstack(ao_map)\n\n reverse_ao_map = {map_index: i for i, map_index in enumerate(ao_map)}\n reverse_ao_map = numpy.array([reverse_ao_map[i] for i in range(len(ao_map))])\n\n reordered_density = density[reverse_ao_map[:, None], reverse_ao_map]\n return reordered_density\n\n\n@requires_package(\"psi4\")\ndef compute_esp(\n qc_molecule, density, esp_settings, grid\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n \"\"\"Computes the ESP and electric field for a particular molecule on\n a specified grid and using the specified settings.\n\n Parameters\n ----------\n qc_molecule\n The molecule to compute the ESP / electric field of.\n density\n The electron density of the molecule.\n esp_settings\n The settings to use when computing the ESP / electric field.\n grid\n The grid to evaluate the ESP and electric field on.\n\n Returns\n -------\n A tuple of the evaluated ESP with shape=(n_grid_points, 1) and the electric\n field with shape=(n_grid_points, 3)\n \"\"\"\n import psi4\n\n psi4.core.be_quiet()\n\n psi4_molecule = psi4.geometry(qc_molecule.to_string(\"psi4\", \"angstrom\"))\n psi4_molecule.reset_point_group(\"c1\")\n\n psi4_wavefunction = psi4.core.RHF(\n psi4.core.Wavefunction.build(psi4_molecule, esp_settings.basis),\n psi4.core.SuperFunctional(),\n )\n psi4_wavefunction.Da().copy(psi4.core.Matrix.from_array(density))\n\n psi4_calculator = psi4.core.ESPPropCalc(psi4_wavefunction)\n psi4_grid = psi4.core.Matrix.from_array(grid)\n\n esp = numpy.array(\n psi4_calculator.compute_esp_over_grid_in_memory(psi4_grid)\n ).reshape(-1, 1)\n\n field = numpy.array(psi4_calculator.compute_field_over_grid_in_memory(psi4_grid))\n\n return esp, field\n\n\n@requires_package(\"cmiles\")\n@requires_package(\"qcportal\")\ndef from_qcfractal_result(\n qc_result: \"qcportal.models.ResultRecord\",\n qc_molecule: \"qcelemental.models.Molecule\",\n qc_keyword_set: \"qcportal.models.KeywordSet\",\n grid_settings: GridSettings,\n) -> MoleculeESPRecord:\n \"\"\"A function which will evaluate the the ESP and electric field from a set of\n wavefunctions which have been computed by a QCFractal instance using the Psi4\n package.\n\n 
Parameters\n ----------\n qc_result\n The QCFractal result record which encodes the wavefunction\n qc_molecule\n The QC molecule corresponding to the result record.\n qc_keyword_set\n The keyword set used when computing the result record.\n grid_settings\n The settings which define the grid to evaluate the electronic properties on.\n\n Returns\n -------\n The values of the ESP and electric field stored in storable records.\n \"\"\"\n\n import cmiles.utils\n from qcelemental.models.results import WavefunctionProperties\n\n # Compute and store the ESP and electric field for each result.\n if qc_result.wavefunction is None:\n raise MissingQCWaveFunctionError(qc_result.id)\n\n # Retrieve the wavefunction and use it to reconstruct the electron density.\n wavefunction = WavefunctionProperties(\n **qc_result.get_wavefunction(\n [\"scf_eigenvalues_a\", \"scf_orbitals_a\", \"basis\", \"restricted\"]\n ),\n **qc_result.wavefunction[\"return_map\"],\n )\n\n density = reconstruct_density(wavefunction, qc_result.properties.calcinfo_nalpha)\n\n # Convert the OE molecule to a QC molecule and extract the conformer of\n # interest.\n oe_molecule = cmiles.utils.load_molecule(\n {\n \"symbols\": qc_molecule.symbols,\n \"connectivity\": qc_molecule.connectivity,\n \"geometry\": qc_molecule.geometry.flatten(),\n \"molecular_charge\": qc_molecule.molecular_charge,\n \"molecular_multiplicity\": qc_molecule.molecular_multiplicity,\n },\n toolkit=\"openeye\",\n )\n\n conformers = molecule_to_conformers(oe_molecule)\n assert len(conformers) == 1\n\n conformer = conformers[0]\n\n # Construct the grid to evaluate the ESP / electric field on.\n grid = GridGenerator.generate(oe_molecule, conformer, grid_settings)\n\n # Retrieve the ESP settings from the record.\n enable_pcm = \"pcm\" in qc_keyword_set.values\n\n esp_settings = ESPSettings(\n basis=qc_result.basis,\n method=qc_result.method,\n grid_settings=grid_settings,\n pcm_settings=(\n None\n if not enable_pcm\n else _parse_pcm_input(qc_keyword_set.values[\"pcm__input\"])\n ),\n )\n\n # Reconstruct the ESP and field from the density.\n esp, electric_field = compute_esp(qc_molecule, density, esp_settings, grid)\n\n return MoleculeESPRecord.from_oe_molecule(\n oe_molecule,\n conformer=conformer,\n grid_coordinates=grid,\n esp=esp,\n electric_field=electric_field,\n esp_settings=esp_settings,\n )\n"
] | [
[
"numpy.dot",
"numpy.isclose",
"numpy.hstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Xuyuanjia2014/tvm | [
"892f8305e77ad506660b851f9ce4c81be0f95d9d",
"892f8305e77ad506660b851f9ce4c81be0f95d9d"
] | [
"tests/python/frontend/caffe/test_forward.py",
"python/tvm/relay/op/contrib/tensorrt.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"\nCaffe testcases\n====================\nThis article is a test script to test Caffe operator with Relay.\n\"\"\"\nimport os\n\nos.environ[\"GLOG_minloglevel\"] = \"2\"\nimport sys\nimport logging\n\nlogging.basicConfig(level=logging.ERROR)\n\nimport numpy as np\nfrom google.protobuf import text_format\nimport caffe\nfrom caffe import layers as L, params as P\nfrom caffe.proto import caffe_pb2 as pb\n\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import utils, graph_executor\nfrom tvm.contrib.download import download_testdata\n\nCURRENT_DIR = os.path.join(os.path.expanduser(\"~\"), \".tvm_test_data\", \"caffe_test\")\n\n#######################################################################\n# Generic functions for TVM & Caffe\n# ------------------------------------------\n\n\ndef _create_dir(d_path):\n \"\"\"If the directory is not existed, create it\"\"\"\n if not (os.path.exists(d_path) and os.path.isdir(d_path)):\n os.makedirs(d_path)\n\n\ndef _list_to_str(ll):\n \"\"\"Convert list or tuple to str, separated by underline.\"\"\"\n if isinstance(ll, (tuple, list)):\n tmp = [str(i) for i in ll]\n return \"_\".join(tmp)\n\n\ndef _gen_filename_str(op_name, data_shape, *args, **kwargs):\n \"\"\"Combining the filename according to the op_name, shape and other args.\"\"\"\n file_dir = os.path.join(CURRENT_DIR, op_name)\n _create_dir(file_dir)\n res = op_name + \"_\"\n shape_str = _list_to_str(list(data_shape))\n res += shape_str\n for arg in args:\n if isinstance(arg, (tuple, list)):\n res += \"_\" + _list_to_str(arg)\n elif isinstance(arg, (int, float, str)):\n res += \"_\" + str(arg)\n for _, v in kwargs.items():\n if isinstance(v, (tuple, list)):\n res += \"_\" + _list_to_str(v)\n elif isinstance(v, (int, float, str)):\n res += \"_\" + str(v)\n res = res.replace(\".\", \"_\")\n res = res.replace(\"-\", \"_\")\n proto_file = os.path.join(file_dir, res + \".prototxt\")\n blob_file = os.path.join(file_dir, res + \".caffemodel\")\n solver_file = os.path.join(file_dir, res + \"_solver.prototxt\")\n\n return (proto_file, blob_file, solver_file)\n\n\ndef _save_prototxt(n_netspec, f_path):\n \"\"\"Generate .prototxt file according to caffe.NetSpec\"\"\"\n s = n_netspec.to_proto()\n with open(f_path, \"w\") as f:\n f.write(str(s))\n\n\ndef _save_solver(solver_file, proto_file, blob_file):\n \"\"\"Define a solver proto, you can change the configs.\"\"\"\n blob_file_prefix = blob_file.split(\".caffemodel\")[0]\n s = pb.SolverParameter()\n s.train_net = proto_file\n s.base_lr = 0.01\n s.momentum = 0.9\n s.weight_decay = 0.0005\n s.lr_policy = \"inv\"\n s.gamma = 0.0001\n s.power = 0.75\n s.display = 1\n s.max_iter = 100000\n s.snapshot = 
100000\n s.snapshot_prefix = blob_file_prefix\n\n with open(solver_file, \"w\") as f:\n f.write(str(s))\n\n\ndef _save_caffemodel(solver_file, blob_file):\n \"\"\"Generate .caffemodel file.\"\"\"\n solver = caffe.SGDSolver(solver_file)\n solver.net.save(blob_file)\n\n\ndef _gen_model_files(n_netspec, proto_file, blob_file, solver_file):\n _save_prototxt(n_netspec, proto_file)\n _save_solver(solver_file, proto_file, blob_file)\n _save_caffemodel(solver_file, blob_file)\n\n\ndef _siso_op(data, func, *args, **kwargs):\n \"\"\"Create single input and single output Caffe op\"\"\"\n n = caffe.NetSpec()\n n.data = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n n.output = func(n.data, *args, **kwargs)\n return n\n\n\ndef _miso_op(data_list, func, *args, **kwargs):\n \"\"\"Create multi input and single output Caffe op\"\"\"\n n = caffe.NetSpec()\n if not isinstance(data_list, (tuple, list)):\n raise TypeError(\"Need tuple or list but get {}\".format(type(data_list)))\n input_list = list()\n for idx, data in enumerate(data_list):\n n[\"data\" + str(idx)] = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n input_list.append(n[\"data\" + str(idx)])\n n.output = func(*input_list, *args, **kwargs)\n return n\n\n\ndef _simo_op(data, func, *args, **kwargs):\n \"\"\"Create single input and multi output Caffe op\"\"\"\n n = caffe.NetSpec()\n n.data = L.Input(input_param={\"shape\": {\"dim\": list(data.shape)}})\n output_list = func(n.data, *args, **kwargs)\n for idx, out in enumerate(output_list):\n n[\"output\" + str(idx)] = out\n return n\n\n\ndef _run_caffe(data, proto_file, blob_file):\n \"\"\"Run caffe model by Caffe according to .caffemodel and .prototxt\"\"\"\n net = caffe.Net(proto_file, blob_file, caffe.TEST)\n if isinstance(data, (list, tuple)):\n for idx, d in enumerate(data):\n net.blobs[\"data\" + str(idx)].data[...] = d\n else:\n net.blobs[\"data\"].data[...] 
= data\n out = net.forward()\n\n caffe_output = list()\n for i in range(len(out.keys())):\n if \"output\" + str(i) not in out.keys():\n caffe_output.clear()\n return list(out.values())\n caffe_output.append(out[\"output\" + str(i)])\n return caffe_output\n\n\ndef _run_tvm(data, proto_file, blob_file):\n \"\"\"Run caffe model by TVM according to .caffemodel and .prototxt\"\"\"\n init_net = pb.NetParameter()\n predict_net = pb.NetParameter()\n\n # load model\n with open(proto_file, \"r\") as f:\n text_format.Merge(f.read(), predict_net)\n # load blob\n with open(blob_file, \"rb\") as f:\n init_net.ParseFromString(f.read())\n\n shape_dict = dict()\n dtype_dict = dict()\n if isinstance(data, (tuple, list)):\n for idx, d in enumerate(data):\n shape_dict[\"data\" + str(idx)] = d.shape\n dtype_dict[\"data\" + str(idx)] = \"float32\"\n else:\n shape_dict = {\"data\": data.shape}\n dtype_dict = {\"data\": \"float32\"}\n\n mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)\n\n target = \"llvm\"\n\n dev = tvm.cpu(0)\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, params=params)\n dtype = \"float32\"\n m = graph_executor.GraphModule(lib[\"default\"](dev))\n if isinstance(data, (tuple, list)):\n for idx, d in enumerate(data):\n m.set_input(\"data\" + str(idx), tvm.nd.array(d.astype(dtype)))\n else:\n m.set_input(\"data\", tvm.nd.array(data.astype(dtype)))\n # execute\n m.run()\n tvm_output = list()\n # get outputs\n for i in range(m.get_num_outputs()):\n tvm_output.append(m.get_output(i).numpy())\n return tvm_output\n\n\ndef _compare_caffe_tvm(caffe_out, tvm_out, is_network=False):\n for i in range(len(caffe_out)):\n if is_network:\n caffe_out[i] = caffe_out[i][:1]\n tvm.testing.assert_allclose(caffe_out[i], tvm_out[i], rtol=1e-5, atol=1e-5)\n\n\ndef _test_op(data, func_op, op_name, **kwargs):\n \"\"\"Single op testing pipline.\"\"\"\n shape_list = list()\n if isinstance(data, (list, tuple)):\n n = _miso_op(data, func_op, **kwargs)\n for d in data:\n shape_list.extend(list(d.shape))\n else:\n output_num = 1\n if \"ntop\" in kwargs.keys():\n output_num = kwargs[\"ntop\"]\n if output_num == 1:\n n = _siso_op(data, func_op, **kwargs)\n else:\n n = _simo_op(data, func_op, **kwargs)\n shape_list = list(data.shape)\n\n # obtain the .caffemodel file and .prototxt file\n (proto_file, blob_file, solver_file) = _gen_filename_str(op_name, shape_list, **kwargs)\n _gen_model_files(n, proto_file, blob_file, solver_file)\n # run model in Caffe\n caffe_out = _run_caffe(data, proto_file, blob_file)\n # run model in TVM\n tvm_out = _run_tvm(data, proto_file, blob_file)\n _compare_caffe_tvm(caffe_out, tvm_out)\n\n\ndef _test_network(data, proto_file, blob_file):\n # run model in Caffe\n caffe_out = _run_caffe(data, proto_file, blob_file)\n # run model in TVM\n tvm_out = _run_tvm(data, proto_file, blob_file)\n _compare_caffe_tvm(caffe_out, tvm_out, is_network=True)\n\n\n#######################################################################\n# BatchNorm\n# -----------\n\n\ndef _test_batchnorm(data, moving_average_fraction=0.999, eps=1e-5):\n \"\"\"One iteration of BatchNorm\"\"\"\n _test_op(\n data, L.BatchNorm, \"BatchNorm\", moving_average_fraction=moving_average_fraction, eps=eps\n )\n\n\ndef test_forward_BatchNorm():\n \"\"\"BatchNorm\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_batchnorm(data)\n _test_batchnorm(data, moving_average_fraction=0.88, 
eps=1e-4)\n\n\n#######################################################################\n# Concat\n# -----------\n\n\ndef _test_concat(data_list, axis=1):\n \"\"\"One iteration of Concat\"\"\"\n _test_op(data_list, L.Concat, \"Concat\", axis=axis)\n\n\ndef test_forward_Concat():\n \"\"\"Concat\"\"\"\n _test_concat([np.random.rand(1, 3, 10, 10), np.random.rand(1, 2, 10, 10)], axis=1)\n _test_concat([np.random.rand(3, 10, 10), np.random.rand(2, 10, 10)], axis=0)\n _test_concat([np.random.rand(3, 10), np.random.rand(2, 10)], axis=0)\n\n\n#######################################################################\n# Convolution\n# -----------\n\n\ndef _test_convolution(data, **kwargs):\n \"\"\"One iteration of Convolution\"\"\"\n _test_op(data, L.Convolution, \"Convolution\", **kwargs)\n\n\ndef test_forward_Convolution():\n \"\"\"Convolution\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad=0,\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=False,\n pad=[1, 2],\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad=[1, 2],\n kernel_size=[3, 5],\n stride=[2, 1],\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_convolution(\n np.random.rand(1, 2, 10, 10).astype(np.float32),\n num_output=20,\n bias_term=True,\n pad=[1, 2],\n kernel_size=[3, 5],\n stride=[2, 1],\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n group=2,\n )\n _test_convolution(\n data,\n num_output=20,\n bias_term=True,\n pad_h=1,\n pad_w=2,\n kernel_h=3,\n kernel_w=5,\n stride_h=2,\n stride_w=1,\n dilation=[1, 2],\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# Crop\n# -----------\n\n\ndef _test_crop(data, **kwargs):\n \"\"\"One iteration of Crop\"\"\"\n _test_op(data, L.Crop, \"Crop\", **kwargs)\n\n\ndef test_forward_Crop():\n \"\"\"Crop\"\"\"\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)\n _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)\n _test_crop(\n [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]\n )\n _test_crop(\n [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]\n )\n _test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])\n _test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])\n\n\n#######################################################################\n# Deconvolution\n# -----------\n\n\ndef _test_deconvolution(data, **kwargs):\n \"\"\"One iteration of Deconvolution\"\"\"\n _test_op(data, L.Deconvolution, \"Deconvolution\", **kwargs)\n\n\ndef test_forward_Deconvolution():\n \"\"\"Deconvolution\"\"\"\n data = np.random.rand(1, 16, 32, 32).astype(np.float32)\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=True,\n pad=0,\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n 
bias_filler=dict(type=\"xavier\"),\n ),\n )\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=False,\n pad=[1, 2],\n kernel_size=3,\n stride=2,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n ),\n )\n _test_deconvolution(\n data,\n convolution_param=dict(\n num_output=20,\n bias_term=True,\n pad_h=1,\n pad_w=2,\n kernel_h=3,\n kernel_w=5,\n stride_h=2,\n stride_w=1,\n dilation=1,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n ),\n )\n\n\n#######################################################################\n# Dropout\n# -----------\n\n\ndef _test_dropout(data, **kwargs):\n \"\"\"One iteration of Dropout\"\"\"\n _test_op(data, L.Dropout, \"Dropout\", **kwargs)\n\n\ndef test_forward_Dropout():\n \"\"\"Dropout\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_dropout(data)\n _test_dropout(data, dropout_ratio=0.7)\n\n\n#######################################################################\n# Eltwise\n# -----------\n\n\ndef _test_eltwise(data_list, **kwargs):\n \"\"\"One iteration of Eltwise\"\"\"\n _test_op(data_list, L.Eltwise, \"Eltwise\", **kwargs)\n\n\ndef test_forward_Eltwise():\n \"\"\"Eltwise\"\"\"\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=0,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=1,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=2,\n )\n _test_eltwise(\n [\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n np.random.rand(1, 3, 10, 11).astype(np.float32),\n ],\n operation=1,\n coeff=[0.5, 1],\n )\n\n\n#######################################################################\n# Flatten\n# -----------\n\n\ndef _test_flatten(data, axis=1):\n \"\"\"One iteration of Flatten\"\"\"\n _test_op(data, L.Flatten, \"Flatten\", axis=axis)\n\n\ndef test_forward_Flatten():\n \"\"\"Flatten\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_flatten(data)\n _test_flatten(data, axis=1)\n\n\n#######################################################################\n# Flatten\n# -----------\n\n\ndef _test_inner_product(data, **kwargs):\n \"\"\"One iteration of InnerProduct\"\"\"\n _test_op(data, L.InnerProduct, \"InnerProduct\", **kwargs)\n\n\ndef test_forward_InnerProduct():\n \"\"\"InnerProduct\"\"\"\n data = np.random.rand(1, 3, 10, 10)\n _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type=\"xavier\"))\n _test_inner_product(\n data,\n num_output=20,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_inner_product(\n np.random.rand(20, 10).astype(np.float32),\n num_output=30,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# LRN\n# -----------\n\n\ndef _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):\n \"\"\"One iteration of LRN\"\"\"\n _test_op(data, L.LRN, \"LRN\", local_size=local_size, alpha=alpha, beta=beta, k=k)\n\n\ndef test_forward_LRN():\n \"\"\"LRN\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_lrn(data)\n _test_lrn(data, local_size=3)\n _test_lrn(data, local_size=3, alpha=2.0)\n _test_lrn(\n data,\n 
local_size=3,\n alpha=2.0,\n beta=0.5,\n )\n _test_lrn(data, local_size=3, alpha=2.0, beta=0.5, k=2.0)\n\n\n#######################################################################\n# Pooling\n# -----------\n\n\ndef _test_pooling(data, **kwargs):\n \"\"\"One iteration of Pooling.\"\"\"\n _test_op(data, L.Pooling, \"Pooling\", **kwargs)\n\n\ndef test_forward_Pooling():\n \"\"\"Pooing\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n # MAX Pooling\n _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX)\n _test_pooling(\n data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.MAX\n )\n _test_pooling(data, pool=P.Pooling.MAX, global_pooling=True)\n\n # AVE Pooing\n _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.AVE)\n _test_pooling(\n data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.AVE\n )\n _test_pooling(data, pool=P.Pooling.AVE, global_pooling=True)\n\n\n#######################################################################\n# PReLU\n# -----------\n\n\ndef _test_prelu(data, **kwargs):\n \"\"\"One iteration of PReLU.\"\"\"\n _test_op(data, L.PReLU, \"PReLU\", **kwargs)\n\n\ndef test_forward_PReLU():\n \"\"\"PReLU\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_prelu(data, filler=dict(type=\"constant\", value=0.5))\n _test_prelu(data)\n _test_prelu(np.random.rand(10, 20).astype(np.float32))\n\n\n#######################################################################\n# ReLU\n# -----------\n\n\ndef _test_relu(data, **kwargs):\n \"\"\"One iteration of ReLU.\"\"\"\n _test_op(data, L.ReLU, \"ReLU\", **kwargs)\n\n\ndef test_forward_ReLU():\n \"\"\"ReLU\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_relu(data)\n _test_relu(np.random.rand(10, 20).astype(np.float32))\n\n\n#######################################################################\n# Reshape\n# -----------\n\n\ndef _test_reshape(data, **kwargs):\n \"\"\"One iteration of Reshape.\"\"\"\n _test_op(data, L.Reshape, \"Reshape\", **kwargs)\n\n\ndef test_forward_Reshape():\n \"\"\"Reshape\"\"\"\n data = np.random.rand(1, 8, 6).astype(np.float32)\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 0, 3]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 0, -1]}})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [0, -1]}})\n\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 3]}, \"axis\": 2})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": 1})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [4, 3, 4]}, \"axis\": -3})\n\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [2, 4]}, \"axis\": 1, \"num_axes\": 1})\n _test_reshape(data, reshape_param={\"shape\": {\"dim\": [3, 16]}, \"axis\": 1, \"num_axes\": 2})\n\n\n#######################################################################\n# Scale\n# -----------\n\n\ndef _test_scale(data, **kwargs):\n \"\"\"One iteration of Scale.\"\"\"\n _test_op(data, L.Scale, \"Scale\", **kwargs)\n\n\ndef test_forward_Scale():\n \"\"\"Scale\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_scale(data, filler=dict(type=\"xavier\"))\n _test_scale(data, filler=dict(type=\"xavier\"), bias_term=True, bias_filler=dict(type=\"xavier\"))\n\n\n#######################################################################\n# Sigmoid\n# -----------\n\n\ndef _test_sigmoid(data, 
**kwargs):\n \"\"\"One iteration of Sigmoid.\"\"\"\n _test_op(data, L.Sigmoid, \"Sigmoid\", **kwargs)\n\n\ndef test_forward_Sigmoid():\n \"\"\"Sigmoid\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_sigmoid(data)\n\n\n#######################################################################\n# Slice\n# -----------\n\n\ndef _test_slice(data, **kwargs):\n \"\"\"One iteration of Slice\"\"\"\n _test_op(data, L.Slice, \"Slice\", **kwargs)\n\n\ndef test_forward_Slice():\n \"\"\"Slice\"\"\"\n data = np.random.rand(1, 3, 10, 10).astype(np.float32)\n _test_slice(data, ntop=2, slice_param=dict(axis=1, slice_point=[1]))\n _test_slice(data, ntop=2, slice_param=dict(axis=-1, slice_point=[1]))\n _test_slice(data, ntop=3, slice_param=dict(axis=2, slice_point=[1, 6]))\n _test_slice(data, ntop=3)\n\n\n#######################################################################\n# Softmax\n# -----------\n\n\ndef _test_softmax(data, **kwargs):\n \"\"\"One iteration of Softmax\"\"\"\n _test_op(data, L.Softmax, \"Softmax\", **kwargs)\n\n\ndef test_forward_Softmax():\n \"\"\"Softmax\"\"\"\n _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32))\n _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32), axis=2)\n _test_softmax(np.random.rand(10, 10).astype(np.float32), axis=0)\n _test_softmax(np.random.rand(2, 10, 10).astype(np.float32), axis=1)\n\n\n#######################################################################\n# TanH\n# -----------\n\n\ndef _test_tanh(data, **kwargs):\n \"\"\"One iteration of TanH\"\"\"\n _test_op(data, L.TanH, \"TanH\", **kwargs)\n\n\ndef test_forward_TanH():\n \"\"\"TanH\"\"\"\n _test_tanh(np.random.rand(1, 3, 10, 10).astype(np.float32))\n _test_tanh(np.random.rand(3, 10, 10).astype(np.float32))\n _test_tanh(np.random.rand(10, 10).astype(np.float32))\n _test_tanh(np.random.rand(10).astype(np.float32))\n\n\n#######################################################################\n# Embed\n# -----------\n\n\ndef _test_embed(data, **kwargs):\n \"\"\"One iteration of Embed\"\"\"\n _test_op(data, L.Embed, \"Embed\", **kwargs)\n\n\ndef test_forward_Embed():\n k = 20\n data = [i for i in range(k)]\n np.random.shuffle(data)\n # dimension is 1\n data = np.asarray(data)\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 2\n data = np.reshape(data, [4, 5])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 3\n data = np.reshape(data, [2, 2, 5])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n # dimension is 4\n data = np.reshape(data, [2, 2, 5, 1])\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=True,\n weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n _test_embed(\n data,\n num_output=30,\n input_dim=k,\n bias_term=False,\n 
weight_filler=dict(type=\"xavier\"),\n bias_filler=dict(type=\"xavier\"),\n )\n\n\n#######################################################################\n# Mobilenetv2\n# -----------\n\n\ndef _test_mobilenetv2(data):\n \"\"\"One iteration of Mobilenetv2\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process / 58.8\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/shicai/MobileNet-Caffe/raw/\" \"master/mobilenet_v2_deploy.prototxt\"\n )\n blob_file_url = (\n \"https://github.com/shicai/MobileNet-Caffe/blob/\" \"master/mobilenet_v2.caffemodel?raw=true\"\n )\n proto_file = download_testdata(proto_file_url, \"mobilenetv2.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"mobilenetv2.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Mobilenetv2():\n \"\"\"Mobilenetv2\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_mobilenetv2(data)\n\n\n#######################################################################\n# Alexnet\n# -----------\n\n\ndef _test_alexnet(data):\n \"\"\"One iteration of Alexnet\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 227, 227))\n data_process = data - mean_val\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/BVLC/caffe/raw/master/models/\" \"bvlc_alexnet/deploy.prototxt\"\n )\n blob_file_url = \"http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel\"\n proto_file = download_testdata(proto_file_url, \"alexnet.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"alexnet.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Alexnet():\n \"\"\"Alexnet\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 227, 227)).astype(np.float32)\n _test_alexnet(data)\n\n\n#######################################################################\n# Resnet50\n# -----------\n\n\ndef _test_resnet50(data):\n \"\"\"One iteration of Resnet50\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/fernchen/CaffeModels/raw/\" \"master/resnet/ResNet-50-deploy.prototxt\"\n )\n blob_file_url = (\n \"https://github.com/fernchen/CaffeModels/raw/\" \"master/resnet/ResNet-50-model.caffemodel\"\n )\n\n proto_file = download_testdata(proto_file_url, \"resnet50.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"resnet50.caffemodel\", module=\"model\")\n\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Resnet50():\n \"\"\"Resnet50\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_resnet50(data)\n\n\n#######################################################################\n# Inceptionv4\n# -----------\n\n\ndef _test_inceptionv1(data):\n \"\"\"One iteration of Inceptionv4\"\"\"\n mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)\n mean_val = np.reshape(mean_val, (1, 3, 1, 1))\n 
mean_val = np.tile(mean_val, (1, 1, 224, 224))\n data_process = data - mean_val\n data_process = data_process / 58.8\n data_process = data_process.astype(np.float32)\n\n proto_file_url = (\n \"https://github.com/BVLC/caffe/raw/master/models\" \"/bvlc_googlenet/deploy.prototxt\"\n )\n blob_file_url = \"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel\"\n proto_file = download_testdata(proto_file_url, \"inceptionv1.prototxt\", module=\"model\")\n blob_file = download_testdata(blob_file_url, \"inceptionv1.caffemodel\", module=\"model\")\n _test_network(data_process, proto_file, blob_file)\n\n\ndef test_forward_Inceptionv1():\n \"\"\"Inceptionv4\"\"\"\n data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)\n _test_inceptionv1(data)\n\n\nif __name__ == \"__main__\":\n # NN\n test_forward_Convolution()\n test_forward_Deconvolution()\n test_forward_Dropout()\n test_forward_LRN()\n test_forward_Pooling()\n test_forward_Scale()\n test_forward_InnerProduct()\n test_forward_BatchNorm()\n\n # Elemwise\n test_forward_Eltwise()\n\n # Activation\n test_forward_PReLU()\n test_forward_ReLU()\n test_forward_Sigmoid()\n test_forward_Softmax()\n test_forward_TanH()\n\n # Reshape\n test_forward_Reshape()\n test_forward_Flatten()\n\n # Math\n test_forward_Concat()\n test_forward_Crop()\n test_forward_Slice()\n\n # End to End\n test_forward_Mobilenetv2()\n test_forward_Alexnet()\n test_forward_Resnet50()\n test_forward_Inceptionv1()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n\"\"\"TensorRT supported operators.\"\"\"\nimport logging\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.build_module import bind_params_by_name\nfrom tvm.relay.expr import Call, Constant, Tuple, GlobalVar, Var, TupleGetItem\nfrom tvm.ir import Op\nfrom tvm.relay.expr_functor import ExprMutator, ExprVisitor\n\nlogger = logging.getLogger(\"TensorRT\")\n\n\ndef is_tensorrt_runtime_enabled():\n \"\"\"Check if the TensorRT graph executor is present.\n Returns\n -------\n ret: bool\n True if present, False if not.\n \"\"\"\n check_enabled = tvm.get_global_func(\"relay.op.is_tensorrt_runtime_enabled\", True)\n if check_enabled:\n return check_enabled()\n return False\n\n\ndef get_tensorrt_version():\n \"\"\"Gets the version of TensorRT that TVM is built against or is targeting.\n\n Returns\n -------\n ret: Tuple[int, int, int]\n TensorRT version as a tuple of major, minor, and patch number. If TVM\n is not built with TensorRT, the value set by set_tensorrt_version() is returned instead.\n \"\"\"\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return tuple(pass_ctx.config[\"relay.ext.tensorrt.options\"].tensorrt_version)\n return tuple(tvm.get_global_func(\"relay.op.get_tensorrt_version\")())\n\n\ndef get_tensorrt_use_implicit_batch_mode():\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return pass_ctx.config[\"relay.ext.tensorrt.options\"].use_implicit_batch\n logger.warning(\n \"PassContext has no relay.ext.tensorrt.options config, using default value \"\n \"use_implicit_batch=True.\"\n )\n return True\n\n\ndef get_tensorrt_remove_no_mac_subgraphs():\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return pass_ctx.config[\"relay.ext.tensorrt.options\"].remove_no_mac_subgraphs\n logger.warning(\n \"PassContext has no relay.ext.tensorrt.options config, using default value \"\n \"remove_no_mac_subgraphs=False.\"\n )\n return False\n\n\ndef partition_for_tensorrt(\n mod,\n params=None,\n version=None,\n use_implicit_batch=True,\n remove_no_mac_subgraphs=False,\n max_workspace_size=1 << 30,\n):\n \"\"\"Partition the graph greedily offloading supported operators to TensorRT.\n\n Parameters\n ----------\n mod : Module\n The module to run passes on.\n params : Optional[Dict[str, NDArray]]\n Constant input parameters.\n version : Optional[Tuple[int, int, int]]\n TensorRT version to target as tuple of (major, minor, patch). 
If TVM is compiled with\n USE_TENSORRT_RUNTIME=ON, the linked TensorRT version will be used instead.\n use_implicit_batch : Optional[bool]\n Use TensorRT implicit batch mode (default true). Setting to false will enable explicit batch\n mode which will widen supported operators to include those which modify the batch dimension,\n but may reduce performance for some models.\n remove_no_mac_subgraphs : Optional[bool]\n Removes subgraphs which have been partitioned for TensorRT if they do not have any\n multiply-accumulate operations. The removed subgraphs will go through TVM's standard\n compilation instead. Can improve performance.\n max_workspace_size : Optional[int]\n How many bytes of workspace size to allow each subgraph to use for TensorRT engine creation.\n See TensorRT documentation for more info.\n Returns\n -------\n mod_and_config : Tuple[Module, Dict[str, Any]]\n A tuple of 1) annotated and partitioned module and 2) \"relay.ext.tensorrt.options\"\n configuration which should be given to PassContext when building.\n \"\"\"\n config = {\n \"use_implicit_batch\": use_implicit_batch,\n \"max_workspace_size\": max_workspace_size,\n \"remove_no_mac_subgraphs\": remove_no_mac_subgraphs,\n }\n if version:\n assert isinstance(version, tuple) and len(version) == 3\n config[\"tensorrt_version\"] = version\n else:\n linked_version = tuple(tvm.get_global_func(\"relay.op.get_tensorrt_version\")())\n if not linked_version:\n logger.warning(\n \"TVM was not built against TensorRT and no version was provided to \"\n \"partition_for_tensorrt. Defaulting to 6.0.1\"\n )\n linked_version = (6, 0, 1)\n config[\"tensorrt_version\"] = linked_version\n\n if params:\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n seq = tvm.transform.Sequential(\n [\n transform.InferType(),\n RemoveDropoutPass(),\n transform.RemoveUnusedFunctions(),\n transform.ConvertLayout(\n {\n \"nn.conv1d\": [\"NCW\", \"default\"],\n \"nn.conv2d\": [\"NCHW\", \"default\"],\n \"nn.conv3d\": [\"NCDHW\", \"default\"],\n \"nn.conv2d_transpose\": [\"NCHW\", \"default\"],\n }\n ),\n transform.FoldConstant(),\n transform.AnnotateTarget(\"tensorrt\"),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n transform.InferType(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3, config={\"relay.ext.tensorrt.options\": config}):\n mod = seq(mod)\n mod = prune_tensorrt_subgraphs(mod)\n return mod, config\n\n\ndef check_dynamism(args, op_name):\n \"\"\"\n Check for dynamism inside any of the args in the op.\n\n Parameters\n ----------\n args : tvm.ir.container.Array\n Arguments of the op. 
Each of the argument shape is checked for presence of dynamic\n components.\n op_name: str\n Name of the op for debugging purposes only.\n Returns\n ----------\n ret : bool\n True if dynamism is present, False otherwise\n \"\"\"\n for arg in args:\n if isinstance(arg, (Call, Var, Constant, TupleGetItem)):\n for dim_shape in arg.checked_type.shape[1:]:\n if isinstance(dim_shape, tvm.tir.expr.Any):\n return True\n elif isinstance(arg, Tuple):\n return check_dynamism(arg.fields, op_name)\n else:\n logger.info(\n \"Arg not supported in TensorRT for %s with type %s\",\n op_name,\n type(arg),\n )\n return True\n return False\n\n\ndef _register_external_op_helper_with_checker(op_name, checker):\n @tvm.ir.register_op_attr(op_name, \"target.tensorrt\")\n def _func_wrapper(expr):\n attrs, args = expr.attrs, expr.args\n # ops with dynamic shapes are offloaded to VM\n if check_dynamism(args, op_name):\n return False\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if op_name == \"multiply\":\n shapes = [\n [\n int(x) if not isinstance(x, tvm.tir.expr.Any) else -1\n for x in arg.checked_type.shape\n ]\n for arg in args\n ]\n # Batched multiply operations don't work in implicit batch mode. The following shapes\n # have been excluded because they occur in PT MaskRCNN model. The long term solution is\n # to switch to explicit batch mode after performance regressions are solved.\n if all(\n [list(map(int, shape)) in [[300, 64, 7, 7], [300, 1, 1, 1]] for shape in shapes]\n ):\n return False\n return checker(attrs, args, op_name)\n\n return _func_wrapper\n\n\ndef _register_external_op_helper(op_name, supported=True):\n return _register_external_op_helper_with_checker(\n op_name, lambda attrs, args, op_name: supported\n )\n\n\ndef _register_external_dynamic_check_func(op_name):\n \"\"\"Wrapper to check dynamic shapes inside any of the args in the op.\"\"\"\n\n def _decorator_helper(checker):\n @tvm.ir.register_op_attr(op_name, \"target.tensorrt\")\n def _func_wrapper(expr):\n args = expr.args\n # ops with dynamic shapes are offloaded to VM\n if check_dynamism(args, op_name):\n return False\n return checker(expr)\n\n return _func_wrapper\n\n return _decorator_helper\n\n\n# Ops which are always supported\n_register_external_op_helper(\"nn.relu\")\n_register_external_op_helper(\"sigmoid\")\n_register_external_op_helper(\"tanh\")\n_register_external_op_helper(\"subtract\")\n_register_external_op_helper(\"multiply\")\n_register_external_op_helper(\"divide\")\n_register_external_op_helper(\"power\")\n_register_external_op_helper(\"maximum\")\n_register_external_op_helper(\"minimum\")\n_register_external_op_helper(\"exp\")\n_register_external_op_helper(\"log\")\n_register_external_op_helper(\"sqrt\")\n_register_external_op_helper(\"abs\")\n_register_external_op_helper(\"negative\")\n_register_external_op_helper(\"nn.batch_flatten\")\n_register_external_op_helper(\"clip\")\n\n\ndef reduce_annotate_fn(attrs, args, op_name):\n \"\"\"Helper for reduce operations.\"\"\"\n if get_tensorrt_use_implicit_batch_mode() and (not attrs.axis or len(attrs.axis) == 0):\n logger.info(\"%s: cannot reduce to scalar.\", op_name)\n return False\n if attrs.exclude:\n logger.info(\"%s: exclude not supported.\", op_name)\n return False\n if get_tensorrt_use_implicit_batch_mode() and any([x == 0 for x in map(int, attrs.axis)]):\n logger.info(\"%s: can't modify batch dimension.\", op_name)\n return False\n return 
True\n\n\n_register_external_op_helper_with_checker(\"sum\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"prod\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"max\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"min\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"mean\", reduce_annotate_fn)\n\n\ndef trt_version_annotate_fn(version):\n \"\"\"Helper for ops which require a minimum TRT version\"\"\"\n\n def _func_wrapper(attrs, args, op_name):\n if get_tensorrt_version() < version:\n logger.info(\n \"%s: requires TensorRT version %s or higher.\", op_name, \".\".join(map(str, version))\n )\n return False\n return True\n\n return _func_wrapper\n\n\n_register_external_op_helper_with_checker(\"nn.leaky_relu\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"sin\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"cos\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"atan\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"ceil\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"erf\", trt_version_annotate_fn((7, 0, 0)))\n\n\n@_register_external_dynamic_check_func(\"add\")\ndef add_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if add is supported by TensorRT.\"\"\"\n\n args = expr.args\n\n shapes = [\n [int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]\n for arg in args\n ]\n\n # Scalars require explicit batch mode.\n if get_tensorrt_use_implicit_batch_mode() and any([len(shape) < 1 for shape in shapes]):\n return False\n\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if (\n not get_tensorrt_use_implicit_batch_mode()\n and (isinstance(args[0], Constant) or isinstance(args[1], Constant))\n and len(shapes[0]) > 0\n and len(shapes[1]) > 0\n and shapes[0][0] == shapes[1][0]\n and shapes[0][0] != 1\n and (len(shapes[0]) > 3 or len(shapes[1]) > 3)\n ):\n logger.info(\"add: bug in TRT with adding batched constants.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.batch_norm\")\ndef batch_norm_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.batch_norm is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(args[0].checked_type.shape) == 5 and get_tensorrt_version() < (6, 0, 1):\n logger.info(\"nn.batch_norm: TensorRT 6.0.1 or higher is required for rank 5 inputs.\")\n return False\n if len(args[0].checked_type.shape) > 5:\n logger.info(\"nn.batch_norm: Input rank must be 5 or less.\")\n return False\n if int(attrs.axis) not in (1, 3):\n logger.info(\"nn.batch_norm: axis is %d but must be 1 or 3.\", int(attrs.axis))\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.softmax\")\ndef softmax_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.softmax is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:\n 
logger.info(\"nn.softmax: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv1d\")\ndef conv1d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv1d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.data_layout != \"NCW\":\n logger.info(\"nn.conv1d: data_layout is %s but must be NCW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIW\":\n logger.info(\"nn.conv1d: kernel_layout is %s but must be OIW.\", attrs.kernel_layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv2d\")\ndef conv2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.data_layout != \"NCHW\":\n logger.info(\"nn.conv2d: data_layout is %s but must be NCHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIHW\":\n logger.info(\"nn.conv2d: kernel_layout is %s but must be OIHW.\", attrs.kernel_layout)\n return False\n if attrs.out_layout and attrs.out_layout != \"NCHW\":\n logger.info(\"nn.conv2d: out_layout is %s but must be NCHW.\", attrs.out_layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.dense\")\ndef dense_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if dense is supported by TensorRT.\"\"\"\n\n args = expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n input_rank = len(args[0].checked_type.shape)\n weight_rank = len(args[1].checked_type.shape)\n if input_rank not in (2, 3, 4):\n logger.info(\"nn.dense: input has rank %d but must be 2, 3 or 4.\", input_rank)\n return False\n if weight_rank != 2:\n logger.info(\"nn.dense: weight has rank %d but must be 2.\", weight_rank)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.batch_matmul\")\ndef batch_matmul_annotate_fn(expr):\n \"\"\"Check if dense is supported by TensorRT.\"\"\"\n\n if any([x.checked_type.dtype != \"float32\" for x in expr.args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and len(expr.args[0].checked_type.shape) != len(\n expr.args[1].checked_type.shape\n ):\n logger.info(\"nn.batch_matmul: requires use_implict_batch=False.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.layer_norm\")\ndef layer_norm_annotate_fn(expr):\n \"\"\"Check if dense is supported by TensorRT.\"\"\"\n\n if any([x.checked_type.dtype != \"float32\" for x in expr.args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(expr.attrs.axis) == 0:\n logger.info(\"nn.layer_norm: requires use_implict_batch=False.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.bias_add\")\ndef bias_add_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.bias_add is supported by TensorRT.\"\"\"\n\n args = expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n 
logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n input_rank = len(args[0].checked_type.shape)\n if input_rank not in (2, 3, 4):\n logger.info(\"nn.bias_add: input rank is %d but must be 2, 3 or 4.\", input_rank)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.max_pool2d\")\ndef max_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.max_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.max_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):\n logger.info(\"nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.avg_pool2d\")\ndef avg_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.avg_pool2d: layout is %d but must be NCHW.\", attrs.layout)\n return False\n if (\n attrs.count_include_pad\n and len(attrs.padding) == 4\n and (\n int(attrs.padding[0]) != int(attrs.padding[2])\n or int(attrs.padding[1]) != int(attrs.padding[3])\n )\n ):\n logger.info(\n \"nn.avg_pool2d: inclusive-counted blended or average \"\n \"pooling is not supported in combination with asymmetric padding\"\n )\n return False\n if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):\n logger.info(\"nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.global_max_pool2d\")\ndef global_max_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.global_max_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.global_max_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.global_avg_pool2d\")\ndef global_avg_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.global_avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.global_avg_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"expand_dims\")\ndef expand_dims_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if expand_dims is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:\n logger.info(\"expand_dims: 
can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"squeeze\")\ndef squeeze_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if squeeze is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not attrs.axis:\n logger.info(\"squeeze: must explicitly set axis.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and any([axis == 0 for axis in map(int, attrs.axis)]):\n logger.info(\"squeeze: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"concatenate\")\ndef concatenate_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if concatenate is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.dtype != \"float32\" for x in args[0].checked_type.fields]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not get_tensorrt_use_implicit_batch_mode():\n return True\n if int(attrs.axis) == 0:\n logger.info(\"concatenate: can't modify batch dimension.\")\n return False\n if isinstance(args[0], Tuple):\n for tuple_input in args[0].fields:\n if isinstance(tuple_input, Constant):\n logger.info(\"concatenate: can't concatenate tensors with constants.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"split\")\ndef split_annotate_fn(expr):\n \"\"\"Check if split is supported by TensorRT.\"\"\"\n\n if any([x.checked_type.dtype != \"float32\" for x in expr.args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(expr.attrs.axis) == 0:\n logger.info(\"split: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv2d_transpose\")\ndef conv2d_transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv2d_transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.data_layout != \"NCHW\":\n logger.info(\"nn.conv2d_transpose: data_layout is %s but must be NCHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIHW\":\n logger.info(\n \"nn.conv2d_transpose: kernel_layout is %s but must be OIHW.\", attrs.kernel_layout\n )\n return False\n if attrs.out_layout and attrs.out_layout != \"NCHW\":\n logger.info(\"nn.conv2d_transpose: out_layout is %s but must be NCHW.\", attrs.out_layout)\n return False\n if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):\n logger.info(\"nn.conv2d_transpose: dilation rate must be 1.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"transpose\")\ndef transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axes[0]) != 0:\n logger.info(\"transpose: can't modify batch dimension.\")\n return False\n return 
True\n\n\n@_register_external_dynamic_check_func(\"layout_transform\")\ndef layout_transform_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if layout_transform is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if (attrs.src_layout, attrs.dst_layout) not in [\n (\"NCHW\", \"NHWC\"),\n (\"NHWC\", \"NCHW\"),\n (\"NDHWC\", \"NCDHW\"),\n (\"NCDHW\", \"NDHWC\"),\n ]:\n logger.info(\n \"layout_transform: %s to %s is not supported.\", attrs.src_layout, attrs.dst_layout\n )\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"reshape\")\ndef reshape_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if reshape is supported by TensorRT.\"\"\"\n attrs, args = expr.attrs, expr.args\n if args[0].checked_type.dtype != \"float32\":\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if any([x < -1 for x in map(int, attrs.newshape)]):\n logger.info(\"reshape: new shape dims must be explicit.\")\n return False\n if get_tensorrt_use_implicit_batch_mode():\n shape = args[0].checked_type.shape\n new_shape = attrs.newshape\n if len(new_shape) == 0 or len(shape) == 0:\n logger.info(\"reshape: Can't reshape to or from scalar.\")\n return False\n dynamic_reshape = any([isinstance(x, tvm.tir.expr.Any) for x in shape])\n\n if dynamic_reshape:\n # Make sure that the batch dim is unmodified.\n if int(new_shape[0]) < 0:\n for shape_val, new_shape_val in zip(shape[1:], new_shape[1:]):\n if not (\n isinstance(shape_val, (int, tvm.tir.expr.IntImm))\n and isinstance(new_shape_val, (int, tvm.tir.expr.IntImm))\n and int(shape_val) == int(new_shape_val)\n ):\n return False\n elif int(new_shape[0]) > 0:\n # Currently we only allow dim[0] to be Any, so this branch will always be False\n if not (\n isinstance(shape[0], (int, tvm.tir.expr.IntImm))\n and isinstance(new_shape[0], (int, tvm.tir.expr.IntImm))\n and int(shape[0]) == int(new_shape[0])\n ):\n return False\n return True\n shape = list(map(int, shape))\n new_shape = list(map(int, new_shape))\n\n # TRT cannot modify batch dimension.\n original_volume = np.prod(shape)\n # First, resolve 0.\n for i, value in enumerate(new_shape):\n if value == 0:\n new_shape[i] = shape[i]\n # Resolve -1.\n for i, value in enumerate(new_shape):\n if value == -1:\n new_shape[i] = original_volume // np.prod([x for x in new_shape if x != -1])\n # Remove batch dimension and see if volumes match\n if shape[0] != new_shape[0]:\n logger.info(\"reshape: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.pad\")\ndef pad_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.pad is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.pad_mode != \"constant\":\n logger.info(\"nn.pad: pad mode is %s but must be constant.\", attrs.pad_mode)\n return False\n if float(attrs.pad_value) != 0.0:\n logger.info(\"nn.pad: pad value is %f but must be 0.0.\", float(attrs.pad_value))\n return False\n if len(attrs.pad_width) not in [4, 5]:\n logger.info(\"nn.pad: can only pad 4D or 5D inputs\")\n return False\n if any([x != 0 for x in attrs.pad_width[0]]) or any([x != 0 for x in attrs.pad_width[1]]):\n 
logger.info(\"nn.pad: can't pad batch or channel dimensions.\")\n return False\n if len(attrs.pad_width) == 5 and any([x != 0 for x in attrs.pad_width[2]]):\n logger.info(\"nn.pad: can only pad last two dimensions for 5D inputs.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"strided_slice\")\ndef strided_slice_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if strided_slice is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if args[0].checked_type.dtype != \"float32\":\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((5, 1, 5))(attrs, args, \"strided_slice\"):\n return False\n if get_tensorrt_use_implicit_batch_mode():\n batch_dim_begin_modified = attrs.begin[0] is not None and int(attrs.begin[0]) != 0\n batch_dim_end_modified = (\n attrs.end[0] is not None\n and int(attrs.end[0]) != -1\n and int(attrs.end[0]) != int(args[0].checked_type.shape[0])\n )\n if batch_dim_begin_modified or batch_dim_end_modified:\n logger.info(\"strided_slice: can't modify batch dimension.\")\n return False\n if any([x is not None and x <= 0 for x in attrs.strides]):\n logger.info(\"strided_slice: stride must be positive\")\n return False\n for i in range(0, len(args[0].checked_type.shape)):\n begin = int(attrs.begin[i])\n if attrs.slice_mode == \"end\":\n end = (\n int(attrs.end[i])\n if attrs.end[i] is not None and int(attrs.end[i]) != -1\n else args[0].checked_type.shape[i]\n )\n size = int(end) - int(begin)\n elif attrs.slice_mode == \"size\":\n size = (\n int(attrs.end[i])\n if attrs.end[i] is not None and int(attrs.end[i]) != -1\n else args[0].checked_type.shape[i] - begin\n )\n else:\n logger.warning(\"strided_slice: unknown slice mode encountered\")\n\n if int(size) < 1:\n logger.info(\"strided_slice: size of slice must be at least 1\")\n return False\n\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.adaptive_max_pool2d\")\ndef adaptive_max_pool2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.adaptive_max_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):\n logger.info(\"nn.adaptive_max_pool2d: output size must be (1, 1).\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.adaptive_avg_pool2d\")\ndef adaptive_avg_pool2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.adaptive_avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):\n logger.info(\"nn.adaptive_avg_pool2d: output size must be (1, 1).\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv3d\")\ndef conv3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, 
\"nn.conv3d\"):\n return False\n if attrs.data_layout != \"NCDHW\":\n logger.info(\"nn.conv3d: data_layout is %s but must be NCDHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIDHW\":\n logger.info(\"nn.conv3d: kernel_layout is %s but must be OIDHW.\", attrs.kernel_layout)\n return False\n if attrs.out_layout and attrs.out_layout != \"NCDHW\":\n logger.info(\"nn.conv3d: out_layout is %s but must be NCDHW.\", attrs.out_layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.max_pool3d\")\ndef max_pool_3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.max_pool3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.max_pool3d\"):\n return False\n if attrs.layout != \"NCDHW\":\n logger.info(\"nn.max_pool3d: layout is %s but must be NCDHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.avg_pool3d\")\ndef avg_pool_3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.avg_pool3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.avg_pool3d\"):\n return False\n if attrs.layout != \"NCDHW\":\n logger.info(\"nn.avg_pool3d: layout is %s but must be NCDHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv3d_transpose\")\ndef conv3d_transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv3d_transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.conv3d_transpose\"):\n return False\n if attrs.data_layout != \"NCDHW\":\n logger.info(\"nn.conv3d_transpose: data_layout is %s but must be NCDHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIDHW\":\n logger.info(\n \"nn.conv3d_transpose: kernel_layout is %s but must be OIDHW.\", attrs.kernel_layout\n )\n return False\n if attrs.out_layout and attrs.out_layout != \"NCDHW\":\n logger.info(\"nn.conv3d_transpose: out_layout is %s but must be NCDHW.\", attrs.out_layout)\n return False\n if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):\n logger.info(\"nn.conv3d_transpose: dilation rate must be 1.\")\n return False\n if attrs.output_padding and any([x != 0 for x in map(int, attrs.output_padding)]):\n logger.info(\"nn.conv3d_transpose: output padding is not supported.\")\n return False\n return True\n\n\nclass IsComputeIntensiveGraph(ExprVisitor):\n \"\"\"\n Visits the Graph recursively and checks if it contains compute heavy ops like convolutions and\n its transpose, dense and batch mat-mul.\n \"\"\"\n\n def __init__(self):\n ExprVisitor.__init__(self)\n self.is_compute_intensive = False\n\n def visit_call(self, call):\n compute_intensive_ops = set(\n [\n \"nn.conv1d\",\n \"nn.conv2d\",\n \"nn.conv2d_transpose\",\n \"nn.conv3d\",\n \"nn.conv3d_transpose\",\n \"nn.dense\",\n \"nn.batch_matmul\",\n \"sum\",\n 
\"prod\",\n \"max\",\n \"min\",\n \"mean\",\n ]\n )\n if isinstance(call.op, tvm.tir.op.Op):\n if str(call.op) in compute_intensive_ops:\n self.is_compute_intensive = True\n\n return super().visit_call(call)\n\n def is_graph_compute_intensive(self, subgraph) -> bool:\n \"\"\"\n This function recursively visits the graph and checks if it's compute intensive\"\n \"\"\"\n self.visit(subgraph)\n return self.is_compute_intensive\n\n\ndef is_valid_subgraph(params, body):\n \"\"\"Final check on whether the subgraph is valid and should be offloaded to TensorRT.\"\"\"\n # Remove invalid subgraphs for implicit batch mode.\n if get_tensorrt_use_implicit_batch_mode():\n input_batch_sizes = []\n for var in params:\n # In implicit batch mode, all inputs must have same batch size\n # TODO: (codeislife99) : Fix different dynamic batch size inputs\n\n if isinstance(var.checked_type, relay.TupleType):\n for tupe_type in var.checked_type.fields:\n # Scalar inputs not allowed\n if len(tupe_type.shape) == 0:\n logger.info(\"tensorrt: scalar inputs not supported\")\n return False\n\n if not isinstance(tupe_type.shape[0], tvm.tir.expr.Any):\n input_batch_sizes.append(int(tupe_type.shape[0]))\n else:\n # Scalar inputs not allowed\n if len(var.checked_type.shape) == 0:\n logger.info(\"tensorrt: scalar inputs not supported\")\n return False\n if not isinstance(var.checked_type.shape[0], tvm.tir.expr.Any):\n input_batch_sizes.append(int(var.checked_type.shape[0]))\n if len(input_batch_sizes) > 1 and len(set(input_batch_sizes)) != 1:\n logger.info(\"tensorrt: inputs have different batch sizes\")\n return False\n if (\n get_tensorrt_remove_no_mac_subgraphs()\n and not IsComputeIntensiveGraph().is_graph_compute_intensive(body)\n ):\n return False\n return True\n\n\ndef prune_tensorrt_subgraphs(mod):\n \"\"\"\n Removes invalid subgraphs and those with no multiply-accumulates (if remove_no_max_subgraphs\n is set).\n \"\"\"\n\n class SubgraphRemover(ExprMutator):\n \"\"\"\n Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.\n \"\"\"\n\n def __init__(self, subgraphs_to_remove, mod, new_mod):\n ExprMutator.__init__(self)\n self.subgraphs_to_remove = subgraphs_to_remove\n self.mod = mod\n self.new_mod = new_mod\n\n def visit_call(self, call):\n if isinstance(call.op, GlobalVar):\n name = call.op.name_hint\n if name in self.subgraphs_to_remove:\n # \"Inline\" the subgraph back into new main function.\n func = self.mod[name]\n var_map = {}\n for arg, param in zip(call.args, func.params):\n var_map[param] = super().visit(arg)\n new_body = relay.bind(func.body, var_map)\n return new_body\n if name != \"main\":\n args = []\n for arg in call.args:\n args.append(super().visit(arg))\n return call.op(*args)\n return super().visit_call(call)\n\n subgraphs_to_remove = []\n # Remove invalid subgraphs\n for subgraph in mod.get_global_vars():\n name = subgraph.name_hint\n if not mod[name].attrs or mod[name].attrs[\"Compiler\"] != \"tensorrt\":\n continue\n if not is_valid_subgraph(mod[name].params, mod[name].body):\n subgraphs_to_remove.append(name)\n # Create new pruned module\n new_mod = tvm.IRModule(mod.functions, mod.type_definitions)\n new_mod[\"main\"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod[\"main\"])\n new_mod = transform.RemoveUnusedFunctions()(new_mod)\n return new_mod\n\n\nclass RemoveDropout(ExprMutator):\n \"\"\"\n Removes all nn.dropout from an expr.\n \"\"\"\n\n def visit_tuple_getitem(self, op):\n visit = super().visit_tuple_getitem(op)\n if visit.index != 0:\n 
return visit\n if (\n isinstance(visit.tuple_value, Call)\n and isinstance(visit.tuple_value.op, Op)\n and visit.tuple_value.op.name == \"nn.dropout\"\n and visit.index == 0\n ):\n return visit.tuple_value.args[0]\n return visit\n\n\[email protected]_pass(opt_level=0)\nclass RemoveDropoutPass:\n def transform_function(self, func, mod, _):\n return RemoveDropout().visit(func)\n"
] | [
[
"numpy.asarray",
"numpy.reshape",
"numpy.tile",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MasterMedo/typetest | [
"7d573c6bbf0d07ffd3b2fb4a8ee9ce783df2ac26"
] | [
"typetest/analyse/typing_speed_per_char.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom io import StringIO\nfrom collections import deque\n\nfrom typetest.utils import validate_input_file_path\n\n\n@validate_input_file_path\ndef plot(input_file, size=10000, filter_func=lambda c: True):\n \"\"\"Reads last `size` lines of `input_file` and groups them by characters.\n Removes lowest and highest 10% and boxplots the data.\n\n filter_func: function taking a `char` returning `True` if char should be\n plotted, `False` otherwise. By default plots all characters.\n \"\"\"\n with open(input_file) as f:\n q = deque(f, maxlen=size)\n\n data_frame = pd.read_csv(\n StringIO(\"\".join(q)),\n header=None,\n names=[\"char\", \"duration\", \"wpm\", \"timestamp\"],\n )\n\n grouped_data_frames = filter(\n lambda t: filter_func(t[1][\"char\"].iloc[0]),\n data_frame.groupby(\"char\"),\n )\n\n typing_speeds_in_wpm = []\n chars = []\n means = []\n for char, df in grouped_data_frames:\n if filter_func(char):\n q1 = df[\"wpm\"].quantile(0.1) # noqa\n q3 = df[\"wpm\"].quantile(0.9) # noqa\n typing_speed_in_wpm = df.query(\"@q1 <= wpm <= @q3\")[\"wpm\"]\n chars.append(char)\n typing_speeds_in_wpm.append(typing_speed_in_wpm)\n mean = typing_speed_in_wpm.mean()\n means.append(mean if mean > 0 else 0)\n\n fig, ax = plt.subplots()\n\n ax.boxplot(typing_speeds_in_wpm, labels=chars)\n mean = round(sum(means) / len(means))\n ax.axhline(y=mean, color=\"r\", linestyle=\"-\", label=f\"mean {mean} wpm\")\n\n ax.set_title(f\"typing speed per character of last {size} characters\")\n ax.set_xlabel(\"characters\")\n ax.set_ylabel(\"typing speed [wpm]\")\n ax.legend()\n\n ticks = plt.yticks()[0]\n plt.yticks(np.arange(0, ticks[-1], 10))\n\n plt.show()\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
benjeffery/tsdate | [
"93c3dabdeb857a351bf994fc56bf5b8d18bb830d"
] | [
"tests/utility_functions.py"
] | [
"# MIT License\n#\n# Copyright (C) 2020 University of Oxford\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nA collection of utilities to edit and construct tree sequences for testing purposes\n\"\"\"\n\nimport msprime\nimport numpy as np\nimport tskit\nimport io\n\n\ndef add_grand_mrca(ts):\n \"\"\"\n Function to add a grand mrca node to a tree sequence\n \"\"\"\n grand_mrca = ts.max_root_time + 1\n tables = ts.dump_tables()\n new_node_number = tables.nodes.add_row(time=grand_mrca)\n for tree in ts.trees():\n tables.edges.add_row(\n tree.interval[0], tree.interval[1], new_node_number, tree.root)\n tables.sort()\n return tables.tree_sequence()\n\n\ndef single_tree_ts_n2():\n r\"\"\"\n Simple case where we have n = 2 and one tree. 
[] marks a sample\n 2\n / \\\n [0] [1]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 0 1\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 2 0,1\n \"\"\")\n return(tskit.load_text(nodes=nodes, edges=edges, strict=False))\n\n\ndef single_tree_ts_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n4():\n r\"\"\"\n Simple case where we have n = 4 and one tree.\n 6\n / \\\n 5 \\\n / \\ \\\n 4 \\ \\\n / \\ \\ \\\n [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 2\n 6 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 1 5 2,4\n 0 1 6 3,5\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_mutation_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef site_no_mutations():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n The single site has no derived alleles.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites, strict=False)\n\n\ndef single_tree_all_samples_one_mutation_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n 4\n / \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 1 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef gils_example_tree():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Mutations marked on each branch by *.\n 4\n / \\\n / \\\n / *\n 3 *\n / \\ *\n * * *\n * \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.1 0\n 0.2 0\n 0.3 0\n 0.4 0\n 0.5 0\n 0.6 0\n 0.7 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 0 1\n 1 0 1\n 2 1 1\n 3 2 1\n 4 2 1\n 5 2 1\n 6 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, 
edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef polytomy_tree_ts():\n r\"\"\"\n Simple case where we have n = 3 and a polytomy.\n 3\n /|\\\n / | \\\n [0][1][2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1,2\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_internal_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Node 3 is an internal sample.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts():\n r\"\"\"\n Simple case where we have n = 3 and 2 trees.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 \\ . | | \\\n / \\ \\ . | | \\\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1 4 2\n 0 0.2 4 3\n 0.2 1 4 1\n 0.2 1 5 0,4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_extra_length():\n r\"\"\"\n Simple case where we have n = 3 and 2 trees, but with extra length\n for testing keep_intervals() and delete_intervals().\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 \\ . | | \\\n / \\ \\ . | | \\\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1.5 4 2\n 0 0.2 4 3\n 0.2 1.5 4 1\n 0.2 1.5 5 0,4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_n3_non_contemporaneous():\n r\"\"\"\n Simple case where we have n = 3 and two trees with node 2 ancient.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n 3 [2] . | |[2]\n / \\ . | |\n [0] [1] . [0] [1]\n \"\"\"\n ts = two_tree_ts()\n tables = ts.dump_tables()\n time = tables.nodes.time\n time[2] = time[3]\n tables.nodes.time = time\n return tables.tree_sequence()\n\n\ndef single_tree_ts_with_unary():\n r\"\"\"\n Simple case where we have n = 3 and some unary nodes.\n 7\n / \\\n 5 \\\n | \\\n 4 6\n | |\n 3 |\n / \\ |\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n 6 0 2\n 7 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 6 2\n 0 1 4 3\n 0 1 5 4\n 0 1 7 5,6\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_with_unary_n3():\n r\"\"\"\n Simple case where we have n = 3 and node 5 is an internal, unary node in the first\n tree. In the second tree, node t is the root, but still unary.\n 6 . 5\n / \\ . |\n 4 5 . 4\n | | . / \\\n 3 | . 3 \\\n / \\ | . / \\ \\\n [0] [1] [2] . 
[0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n 6 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 2 3 0,1\n 0 1 5 2\n 0 2 4 3\n 0 1 6 4,5\n 1 2 4 2\n 1 2 5 4\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_mutation_ts():\n r\"\"\"\n Simple case where we have n = 3, 2 trees, three mutations.\n . 5\n . / \\\n 4 . | 4\n / \\ . | |\\\n x \\ . | | \\\n x \\ . x | \\\n / | . | | |\n 3 | . | | |\n / \\ | . | | |\n [0] [1] [2] . [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n 5 0 3\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 0.2 3 0,1\n 0 1 4 2\n 0 0.2 4 3\n 0.2 1 4 1\n 0.2 1 5 0,4\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.1 0\n 0.15 0\n 0.8 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 3 1\n 1 3 1\n 2 0 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef two_tree_two_mrcas():\n r\"\"\"\n Simple case where we have n = 4, 2 trees, one mutation.\n 6 |\n / \\ | 7\n / \\ | / \\\n / \\ | / x\n / \\ | / \\\n / \\ | / \\\n 4 5 | 4 5\n / \\ / \\ | / \\ / \\\n / \\ / \\ | / \\ / \\\n [0] [1] [2] [3] | [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 1\n 6 0 3\n 7 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 1 5 2,3\n 0 0.3 6 4\n 0 0.3 6 5\n 0.3 1 7 4\n 0.3 1 7 5\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 5 1\n \"\"\")\n\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef loopy_tree():\n r\"\"\"\n Simple case where we have n = 3, 2 trees, three mutations.\n . 7\n . / \\\n . / |\n . / |\n 6 . / 6\n / \\ . / / \\\n / 5 . / / |\n / / \\ . / / |\n / | \\ . | | |\n / | \\ . | | |\n | 4 | . | 4 |\n | / \\ | . | / \\ |\n [0] [1] [2] [3] . [0] [1] [2] [3]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 0\n 4 0 1\n 5 0 2\n 6 0 3\n 7 0 4\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 4 0,1\n 0 0.2 5 2,4\n 0 0.2 6 5\n 0 1 6 3\n 0.2 1 6 4\n 0.2 1 7 2\n 0.2 1 7 6\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n3_sample_as_parent():\n r\"\"\"\n Simple case where we have n = 3 and one tree. Node 3 is a sample.\n 4\n / \\\n 3 \\\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 1 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_n2_dangling():\n r\"\"\"\n Simple case where we have n = 2 and one tree. Node 0 is dangling.\n 4\n / \\\n 3 \\\n / \\ \\\n 0 [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 0 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef two_tree_ts_n2_part_dangling():\n r\"\"\"\n Simple case where we have n = 2 and two trees. 
Node 0 is dangling in the first tree.\n 4 4\n / \\ / \\\n 3 \\ 3 \\\n / \\ \\ \\ \\\n 0 \\ \\ 0 \\\n \\ \\ \\ \\\n [1] [2] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 0 0.5\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0\n 0 0.5 3 1\n 0.5 1 0 1\n 0 1 4 2,3\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, strict=False)\n\n\ndef single_tree_ts_2mutations_multiallelic_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site is multiallelic.\n 4\n x \\\n 3 x\n / \\ \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 2 1\n 0 3 2\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef single_tree_ts_2mutations_singletons_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site has two singleton mutations.\n 4\n / \\\n 3 x\n / x \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 1 1\n 0 2 1\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef single_tree_ts_2mutations_n3():\n r\"\"\"\n Simple case where we have n = 3 and one tree.\n Site has two mutations with different times.\n 4\n x \\\n 3 \\\n / x \\\n [0] [1] [2]\n \"\"\"\n nodes = io.StringIO(\"\"\"\\\n id is_sample time\n 0 1 0\n 1 1 0\n 2 1 0\n 3 0 1\n 4 0 2\n \"\"\")\n edges = io.StringIO(\"\"\"\\\n left right parent child\n 0 1 3 0,1\n 0 1 4 2,3\n \"\"\")\n sites = io.StringIO(\"\"\"\\\n position ancestral_state\n 0.5 0\n \"\"\")\n mutations = io.StringIO(\"\"\"\\\n site node derived_state\n 0 3 1\n 0 1 0\n \"\"\")\n return tskit.load_text(nodes=nodes, edges=edges, sites=sites,\n mutations=mutations, strict=False)\n\n\ndef ts_w_data_desert(gap_start, gap_end, length):\n \"\"\"\n Inside/Outside algorithm has been observed to give overflow/underflow when\n attempting to date tree sequences with large regions without data. Test\n that preprocess_ts removes regions of a specified size that have no data.\n \"\"\"\n ts = msprime.simulate(\n 100, mutation_rate=10, recombination_rate=1, length=length)\n tables = ts.dump_tables()\n sites = tables.sites.position[:]\n tables.delete_sites(np.where(np.logical_and(sites > gap_start, sites < gap_end))[0])\n deleted_ts = tables.tree_sequence()\n return deleted_ts\n\n\ndef truncate_ts_samples(ts, average_span, random_seed, min_span=5):\n \"\"\"\n Create a tree sequence that has sample nodes which have been truncated\n so that they span only a small region of the genome. 
The length of the\n truncated spans is given by a poisson distribution whose mean is average_span\n but which cannot go below a fixed min_span, or above the sequence_length\n\n Samples are truncated by removing the edges that connect them to the rest\n of the tree.\n \"\"\"\n\n np.random.seed(random_seed)\n # Make a list of (left,right) tuples giving the new limits of each sample\n # Keyed by sample ID.\n # for simplicity, we pick lengths from a poisson distribution of av 300 bp\n span = np.random.poisson(average_span, ts.num_samples)\n span = np.maximum(span, min_span)\n span = np.minimum(span, ts.sequence_length)\n start = np.random.uniform(0, ts.sequence_length-span)\n to_slice = {id: (a, b) for id, a, b in zip(ts.samples(), start, start + span)}\n\n tables = ts.dump_tables()\n tables.edges.clear()\n for e in ts.tables.edges:\n if e.child not in to_slice:\n left, right = e.left, e.right\n else:\n if e.right <= to_slice[e.child][0] or e.left >= to_slice[e.child][1]:\n continue # this edge is outside the focal region\n else:\n left = max(e.left, to_slice[e.child][0])\n right = min(e.right, to_slice[e.child][1])\n tables.edges.add_row(left, right, e.parent, e.child)\n # Remove mutations above isolated nodes\n mutations = tables.mutations\n keep_mutations = np.ones((mutations.num_rows, ), dtype=bool)\n positions = tables.sites.position[:]\n for i, m in enumerate(mutations):\n if m.node in to_slice:\n if not(to_slice[m.node][0] <= positions[m.site] < to_slice[m.node][1]):\n keep_mutations[i] = False\n new_ds, new_ds_offset = tskit.tables.keep_with_offset(\n keep_mutations, mutations.derived_state, mutations.derived_state_offset)\n new_md, new_md_offset = tskit.tables.keep_with_offset(\n keep_mutations, mutations.metadata, mutations.metadata_offset)\n mutations_map = np.append(np.cumsum(keep_mutations) - 1, [-1])\n mutations_map = mutations_map.astype(mutations.parent.dtype)\n # parent -1 always maps to parent -1\n tables.mutations.set_columns(\n site=mutations.site[keep_mutations],\n node=mutations.node[keep_mutations],\n derived_state=new_ds,\n derived_state_offset=new_ds_offset,\n parent=mutations_map[mutations.parent[keep_mutations]],\n metadata=new_md,\n metadata_offset=new_md_offset)\n return tables.tree_sequence().simplify(\n filter_populations=False,\n filter_individuals=False,\n filter_sites=False,\n keep_unary=True)\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.random.seed",
"numpy.cumsum",
"numpy.ones",
"numpy.random.poisson",
"numpy.random.uniform",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
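The fixtures in the row above all share one construction pattern: node, edge, site and mutation tables are written as whitespace-delimited text and parsed with `tskit.load_text(..., strict=False)`. A minimal sketch of that pattern, assuming only that `tskit` is installed (the two-leaf layout mirrors the first fixture in the row, and the asserted values follow from it):

```python
import io
import tskit

# Two samples (0, 1) at time 0 that coalesce in node 2 at time 1.
nodes = io.StringIO("""\
id  is_sample   time
0   1           0
1   1           0
2   0           1
""")
edges = io.StringIO("""\
left    right   parent  child
0       1       2       0,1
""")

# strict=False lets the columns be separated by arbitrary whitespace,
# which is what all of the fixtures above rely on.
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
assert ts.num_samples == 2 and ts.num_trees == 1

tree = ts.first()
assert tree.parent(0) == tree.parent(1) == 2  # both samples attach to the root node
```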
xjarvik/onnxmltools | [
"e4fbdc09814ceedc7655d85b6c4203ca21d8433a"
] | [
"tests/sparkml/test_decision_tree_classifier.py"
] | [
"# SPDX-License-Identifier: Apache-2.0\n\nimport sys\nimport inspect\nimport unittest\nfrom distutils.version import StrictVersion\n\nimport onnx\nimport pandas\nimport numpy\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.classification import DecisionTreeClassifier\nfrom pyspark.ml.linalg import VectorUDT, SparseVector, Vectors\n\nfrom onnxmltools import convert_sparkml\nfrom onnxmltools.convert.common.data_types import StringTensorType, FloatTensorType\nfrom tests.sparkml.sparkml_test_utils import save_data_models, compare_results, run_onnx_model\nfrom tests.sparkml import SparkMlTestCase\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\n\n\nclass TestSparkmDecisionTreeClassifier(SparkMlTestCase):\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n @unittest.skipIf(StrictVersion(onnx.__version__) <= StrictVersion('1.3'), 'Need Greater Opset 9')\n def test_tree_pipeline(self):\n import os\n this_script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n input_path = os.path.join(this_script_dir, \"data\", \"sample_libsvm_data.txt\")\n original_data = self.spark.read.format(\"libsvm\").load(input_path)\n #\n # truncate the features\n #\n feature_count = 5\n self.spark.udf.register(\"truncateFeatures\",\n lambda x: SparseVector(feature_count, range(0,feature_count), x.toArray()[125:130]),\n VectorUDT())\n data = original_data.selectExpr(\"cast(label as string) as label\", \"truncateFeatures(features) as features\")\n label_indexer = StringIndexer(inputCol=\"label\", outputCol=\"indexedLabel\", handleInvalid='error')\n feature_indexer = VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\",\n maxCategories=10, handleInvalid='error')\n\n dt = DecisionTreeClassifier(labelCol=\"indexedLabel\", featuresCol=\"indexedFeatures\")\n pipeline = Pipeline(stages=[label_indexer, feature_indexer, dt])\n model = pipeline.fit(data)\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Pipeline', [\n ('label', StringTensorType([1, 1])),\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n self.assertTrue(model_onnx is not None)\n # run the model\n predicted = model.transform(data.limit(1))\n data_np = {\n 'label': data.limit(1).toPandas().label.values,\n 'features': data.limit(1).toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n }\n expected = [\n predicted.toPandas().indexedLabel.values.astype(numpy.int64),\n predicted.toPandas().prediction.values.astype(numpy.int64),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreePipeline\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['indexedLabel', 'prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_one_class_classification(self):\n features = [[0., 1.], [1., 1.], [2., 0.]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [1, 1, 1]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 1\n 
model_onnx = convert_sparkml(model, 'Sparkml Decision Tree One Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeBinaryClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_binary_classification(self):\n features = [[0, 1], [1, 1], [2, 0]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [0, 1, 0]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 2\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Binary Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeBinaryClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\n def test_tree_multiple_classification(self):\n features = [[0, 1], [1, 1], [2, 0], [0.5, 0.5], [1.1, 1.1], [2.1, 0.1]]\n features = numpy.array(features, dtype=numpy.float32)\n labels = [0, 1, 2, 1, 1, 2]\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\n dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")\n model = dt.fit(data)\n feature_count = 2\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Multi Class', [\n ('features', FloatTensorType([1, feature_count]))\n ], spark_session=self.spark)\n data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n predicted = model.transform(data)\n expected = [\n predicted.toPandas().prediction.values.astype(numpy.float32),\n predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\n ]\n paths = save_data_models(data_np, expected, model, model_onnx,\n basename=\"SparkmlDecisionTreeMultiClass\")\n onnx_model_path = paths[3]\n output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)\n compare_results(expected, output, decimal=5)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
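The tests in the row above all exercise the same path: fit a Spark ML `DecisionTreeClassifier`, convert it with `convert_sparkml` and an explicit `FloatTensorType` input declaration, then score the resulting ONNX graph and compare against Spark's own predictions. A condensed sketch of that flow, assuming an active `SparkSession` bound to the name `spark` and using `onnxruntime` directly in place of the repository's `run_onnx_model`/`save_data_models` helpers:

```python
import numpy
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.linalg import Vectors
from onnxmltools import convert_sparkml
from onnxmltools.convert.common.data_types import FloatTensorType
import onnxruntime as rt  # assumption: onnxruntime is available for scoring

# Tiny binary-classification frame, mirroring test_tree_binary_classification.
features = numpy.array([[0, 1], [1, 1], [2, 0]], dtype=numpy.float32)
labels = [0, 1, 0]
rows = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]
data = spark.createDataFrame(rows, schema=["label", "features"])  # `spark`: assumed SparkSession

model = DecisionTreeClassifier(labelCol="label", featuresCol="features").fit(data)

# Convert the fitted model; the declared input name and width must match the feature vector.
model_onnx = convert_sparkml(
    model, "Sparkml Decision Tree Binary Class",
    [("features", FloatTensorType([1, features.shape[1]]))],
    spark_session=spark)

# Score one row with onnxruntime and compare against Spark's own prediction.
sess = rt.InferenceSession(model_onnx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
onnx_prediction = sess.run(None, {"features": features[:1]})[0]
spark_prediction = model.transform(data).toPandas().prediction.values[:1]
print(onnx_prediction.ravel(), spark_prediction)
```

Scoring a single row keeps the sketch consistent with the `[1, feature_count]` input shape declared above; the repository's own tests feed whole batches through their helper instead.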